Dataset fields (per example):
  query            string, length 9 to 3.4k characters
  document         string, length 9 to 87.4k characters
  metadata         dict
  negatives        sequence, 4 to 101 items
  negative_scores  sequence, 4 to 101 items
  document_score   string, length 3 to 10 characters
  document_rank    string, 102 distinct values
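Each row can be read like any other JSON-style record. Below is a minimal loading sketch; the file name train.jsonl and the use of JSON Lines are assumptions for illustration, not part of the field listing above.

import json

# Read one example; each record follows the field schema listed above.
with open("train.jsonl") as f:                 # hypothetical file name
    example = json.loads(f.readline())

print(example["query"][:80])                   # natural-language description of the code
print(len(example["negatives"]))               # between 4 and 101 negative documents
print(example["document_score"], example["document_rank"])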
Registers a backward hook on the embedding layer, used to save the gradients of the embeddings for use in get_gradients(). When there are multiple inputs (e.g., a passage and a question), the hook will be called multiple times, and we append all the embedding gradients to a list.
def _register_embedding_gradient_hooks(self, embedding_gradients):
    def hook_layers(module, grad_in, grad_out):
        embedding_gradients.append(grad_out[0])

    backward_hooks = []
    embedding_layer = self.get_embeddings_layer()
    backward_hooks.append(embedding_layer.register_backward_hook(hook_layers))
    return backward_hooks
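For context, here is a self-contained sketch of this kind of hook in action; the toy embedding layer and token tensors are illustrative and not taken from the dataset. Running one backward pass over a passage and a question appends one gradient tensor per input, as described above.

import torch
import torch.nn as nn

# Illustrative only: capture embedding gradients with a backward hook.
embedding = nn.Embedding(num_embeddings=100, embedding_dim=16)
captured = []

def hook_layers(module, grad_in, grad_out):
    captured.append(grad_out[0])               # gradient w.r.t. the embedding output

handle = embedding.register_backward_hook(hook_layers)

passage = torch.tensor([[1, 2, 3, 4]])         # toy token ids
question = torch.tensor([[5, 6]])
loss = embedding(passage).sum() + embedding(question).sum()
loss.backward()                                # the hook fires once per hooked forward pass
handle.remove()                                # detach the hook when done

print(len(captured))                           # 2 gradient tensors, one per input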
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first call, empirically, autograd\n # fires it at the end for this param, which makes sense.\n p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.\n assert p_tmp.grad_fn is not None\n grad_acc = p_tmp.grad_fn.next_functions[0][\n 0] # Gets its GradAccumulation object.\n handle = grad_acc.register_hook(\n functools.partial(self._post_backward_hook, p))\n p._shard_bwd_hook = (grad_acc, handle)", "def _fp32_register_post_backward_hooks(self):\n\n # Helper function to avoid issues with late binding closures\n def make_post_backward_hook(param):\n def post_backward_hook(*unused):\n self._fp32_optim_grad_sync_needed = True\n if hasattr(param, 'main_grad'):\n with torch.no_grad():\n if param.grad is not None:\n param.main_grad += param.grad\n param.grad = None\n\n return post_backward_hook\n\n # Construct hooks and register with params\n self._fp32_grad_accs = []\n for param in self._fp32_optim_main_params.keys():\n param_tmp = param.expand_as(param)\n grad_acc = param_tmp.grad_fn.next_functions[0][0]\n hook = make_post_backward_hook(param)\n grad_acc.register_hook(hook)\n self._fp32_grad_accs.append(grad_acc)", "def _register_pre_backward_hooks(self, outputs: Any) -> Any:\n if not torch.is_grad_enabled():\n return outputs # don't register hooks if grad isn't enabled\n\n if self._is_root:\n # This actually means that only root instance has\n # _post_backward_callback_queued defined. Accidentally accessing this field\n # will assert on all other instances, giving us a nice bug checker.\n self._post_backward_callback_queued = False\n\n def _pre_backward_hook(t_grad: torch.Tensor) -> None:\n # try to queue final backward callback only once for root, so\n # that final backward callback is attached to the outer most\n # backward graph task and called after all the backward\n # calls are completed.\n if self._is_root:\n self._queue_wait_for_post_backward()\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n # All-gather full parameters or switching to the full params.\n # Note, ``self._rebuild_full_params`` is idempotent. So in case it is called\n # unnecessarily, it doesn't incur much overhead.\n if self.reshard_after_forward:\n dependency_tensors = []\n if self.optimization_barrier_in_backward:\n # Ensure that backward pass ops of feature gradients, parameter\n # gradient and sharding, and full-param freeing (which are usually\n # performed in previous modules and are registered to\n # self._backward_opt_barrier_tensors in _grad_opt_barrier_hook,\n # _pre_backward_hook, and _post_backward_hook) are finished before\n # rebuilding the full params of this FSDP module.\n dependency_tensors = self._backward_opt_barrier_tensors\n self._rebuild_full_params(\n dependency_tensors=dependency_tensors,\n apply_opt_barrier=self.optimization_barrier_in_backward)\n self._clear_backward_opt_barrier_lists()\n\n # Only run the following once per iteration (i.e. 
in case\n # it is multiple outputs or multiple forward passes).\n if not self._pre_backward_hook_has_run:\n self._pre_backward_hook_has_run = True\n # Start of a backward pass for the first time in an iteration.\n self.assert_state([TrainingState.IDLE, TrainingState.BACKWARD_PRE])\n # Check p.grad to make sure that it is in the right shape, device, etc.\n for p, p_shard in zip(self.full_params, self.sharded_params):\n if p.grad is not None:\n assert p.grad.device == p_shard.device\n assert p.grad.size() == p_shard._orig_size\n\n # Transition to BACKWARD_PRE state if currently IDLE. We can transition from BACKWARD_POST\n # to IDLE when FSDP is within activation checkpointing and called multiple times, due to the\n # extra forward pass for re-computation.\n if self.training_state == TrainingState.IDLE:\n self.training_state = TrainingState.BACKWARD_PRE\n self.assert_state(\n [TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST])\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n self.optimization_barrier_op([t_grad])\n t_grad = t_grad.view(t_grad.size()) # a view with barrier applied\n return t_grad\n\n _registered = 0\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n # We don't register the pre_backward hook on the same tensor that has been\n # returned from an inner FSDP, unless it is the first one.\n nonlocal _registered\n assert self._output_pre_backward_hook_registered is not None\n if t.requires_grad and (_registered == 0 or id(t)\n not in self._output_pre_backward_hook_registered):\n t.register_hook(_pre_backward_hook)\n self._output_pre_backward_hook_registered.add(id(t))\n _registered += 1\n return t\n\n # Attach hooks to Tensor outputs.\n outputs = apply_to_tensors(_register_hook, outputs)\n\n return outputs", "def _get_gradients(self, batch):\n embedding_gradients = []\n original_param_name_to_requires_grad_dict = {}\n \n for param_name, param in self.model.named_parameters():\n original_param_name_to_requires_grad_dict[param_name] = param.requires_grad\n param.requires_grad = True\n \n hooks = self._register_embedding_gradient_hooks(embedding_gradients)\n loss = self.forward_step(batch)\n\n self.model.zero_grad()\n loss.backward()\n\n for hook in hooks:\n hook.remove()\n\n # restore the original requires_grad values of the parameters\n for param_name, param in self.model.named_parameters():\n param.requires_grad = original_param_name_to_requires_grad_dict[param_name]\n\n return embedding_gradients[0]", "def _register_post_backward_hooks(\n state: _State,\n handles: List[FlatParamHandle],\n) -> None:\n # If there is no gradient computation, then there is no need for\n # post-backward logic\n if not torch.is_grad_enabled():\n return\n for handle in handles:\n flat_param = handle.flat_param\n already_registered = hasattr(flat_param, \"_post_backward_hook_state\")\n if already_registered or not flat_param.requires_grad:\n continue\n # Get the `AccumulateGrad` object\n temp_flat_param = flat_param.expand_as(flat_param)\n p_assert(\n temp_flat_param.grad_fn is not None,\n \"The `grad_fn` is needed to access the `AccumulateGrad` and \"\n \"register the post-backward hook\",\n )\n acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n hook_handle = acc_grad.register_hook(\n functools.partial(_post_backward_hook, state, handle)\n )\n flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]", "def _register_post_backward_hooks(\n self,\n handles: List[FlatParamHandle],\n ) -> None:\n # 
If there is no gradient computation, then there is no need for\n # post-backward logic\n if not torch.is_grad_enabled():\n return\n for handle in handles:\n flat_param = handle.flat_param\n already_registered = hasattr(flat_param, \"_post_backward_hook_state\")\n if already_registered or not flat_param.requires_grad:\n continue\n # Get the `AccumulateGrad` object\n temp_flat_param = flat_param.expand_as(flat_param)\n p_assert(\n temp_flat_param.grad_fn is not None,\n \"The `grad_fn` is needed to access the `AccumulateGrad` and \"\n \"register the post-backward hook\"\n )\n acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n hook_handle = acc_grad.register_hook(\n functools.partial(self._post_backward_hook, handle)\n )\n flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]", "def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError", "def _register_pre_backward_hooks(\n state: _State,\n outputs: Any,\n handles: List[FlatParamHandle],\n) -> None:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n if state._is_root:\n state._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward, we\n # conservatively assume that they will be used in the backward\n state._needs_pre_backward_unshard[handles_key] = False\n state._ran_pre_backward_hook[handles_key] = False\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, state, handles))\n state._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)", "def on_backward_end(self, batch):\n if self.updater == \"backward\":\n grads = OrderedDict((name, param.grad.data.cpu(\n )) for name, param in self.model.model.named_parameters() if param.grad is not None)\n try:\n self.update(grads)\n except KeyboardInterrupt:\n raise\n except:\n pass", "def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. 
/ N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def backward(ctx, G):\n backend = ctx.backend\n aliases = ctx.aliases\n formula = ctx.formula\n signature = ctx.signature\n sum_index = ctx.sum_index\n args = ctx.saved_tensors # Unwrap the saved variables\n\n # number of arguments (including parameters)\n nvars = 0;\n for sig in signature[1:]:\n nvars += 1\n\n # If formula takes 5 variables (numbered from 0 to 4), then the gradient\n # wrt. the output, G, should be given as a 6-th variable (numbered 5),\n # with the same dim-cat as the formula's output.\n eta = \"Var(\" + str(nvars) + \",\" + str(signature[0][0]) + \",\" + str(signature[0][1]) + \")\"\n grads = [] # list of gradients wrt. args;\n arg_ind = 5 # current arg index (4 since backend, ... are in front of the tensors); \n var_ind = 0 # current Variable index;\n\n for sig in signature[1:]: # Run through the actual parameters, given in *args in the forward.\n if not ctx.needs_input_grad[arg_ind]: # If the current gradient is to be discarded immediatly...\n grads.append(None) # Don't waste time computing it.\n else: # Otherwise, the current gradient is really needed by the user:\n # adding new aliases is waaaaay too dangerous if we want to compute\n # second derivatives, etc. So we make explicit references to Var<ind,dim,cat> instead.\n var = \"Var(\" + str(var_ind) + \",\" + str(sig[0]) + \",\" + str(sig[1]) + \")\" # V\n formula_g = \"Grad(\" + formula + \",\" + var + \",\" + eta + \")\" # Grad<F,V,G>\n args_g = args + (G,) # Don't forget the gradient to backprop !\n \n # N.B.: if I understand PyTorch's doc, we should redefine this function every time we use it?\n genconv = GenericSum().apply\n\n if sig[1] == 2: # we're referring to a parameter, so we'll have to sum both wrt 'i' and 'j'\n sumindex_g = 1 # The first sum will be done wrt 'i'\n signature_g = [ [sig[0],1] ] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n # Then, sum 'grad' wrt 'j' :\n # I think that \".sum\"'s backward introduces non-contiguous arrays,\n # and is thus non-compatible with GenericSum:\n # grad = grad.sum(0) \n # We replace it with a \"handmade hack\" :\n grad = Variable(torch.ones(1, grad.shape[0]).type_as(grad.data)) @ grad\n grad = grad.view(-1)\n else :\n # sumindex is \"the index that stays in the end\", not \"the one in the sum\"\n # (It's ambiguous, I know... But it's the convention chosen by Joan, which makes\n # sense if we were to expand our model to 3D tensors or whatever.)\n sumindex_g = sig[1] # The sum will be \"eventually indexed just like V\".\n signature_g = [sig] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n grads.append(grad)\n\n # increment the Variable counts\n arg_ind += 1 ; var_ind += 1 \n\n # Grads wrt. backend, aliases, formula, signature, sum_index, *args\n return (None, None, None, None, None, *grads)", "def backward(self, gradient):\n raise NotImplementedError()", "def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. 
Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output", "def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w", "def _register_pre_backward_hooks(\n self,\n outputs: Any,\n handles: List[FlatParamHandle],\n ) -> Any:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n\n if self._is_root:\n self._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward,\n # we conservatively assume that they will be used in the backward\n self._needs_pre_backward_unshard[handles_key] = False\n self._ran_pre_backward_hook[handles_key] = False\n\n def _pre_backward_hook(_handles: List[FlatParamHandle], *unused: Any) -> None:\n \"\"\"Prepares ``_handles`` 's ``FlatParameter`` s for gradient\n computation.\"\"\"\n _handles_key = tuple(_handles) # avoid shadowing `handles_key`\n # Only run the pre-backward hook once per group of handles involved\n # in the same module forward computation\n if _handles_key and self._ran_pre_backward_hook.get(_handles_key, False):\n return\n\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._pre_backward_hook\"\n ):\n # Queue the post-backward callback once for the root FSDP\n # instance to attach it to the outermost backward graph task so\n # that it is called after all backward calls complete\n if self._is_root and not self._post_backward_callback_queued:\n self._queue_wait_for_post_backward()\n elif _handles_key:\n self._assert_state([TrainingState_.IDLE])\n self.training_state = TrainingState_.BACKWARD_PRE\n # Queueing the post-backward callback is the only logic that is\n # not per-handle in the pre-backward hook, so we can return\n # early here if there are no handles.\n if not _handles_key:\n return\n for handle in _handles:\n handle._training_state = HandleTrainingState.BACKWARD_PRE\n\n # If the handles have been prefetched, this `_unshard()` simply\n # switches to using the unsharded parameter\n self._unshard(_handles)\n torch.cuda.current_stream().wait_stream(self._streams[\"all_gather\"])\n\n # Set this to `False` to ensure that a mistargeted prefetch\n # does not actually unshard these handles\n self._needs_pre_backward_unshard[_handles_key] = False\n self._prefetch_handles(_handles_key)\n for handle in _handles:\n handle.prepare_gradient()\n self._ran_pre_backward_hook[_handles_key] = True\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, handles))\n self._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def backward(self, inputs, gradients, **kwargs):\n grad_relu = inputs > 0\n return gradients * grad_relu", "def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T", "def _post_backward_hook(\n state: _State,\n handle: FlatParamHandle,\n *unused: Any,\n):\n param = handle.flat_param\n 
param._post_backward_called = True\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._post_backward_hook\"\n ):\n _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])\n state.training_state = TrainingState.FORWARD_BACKWARD\n p_assert(\n handle._training_state == HandleTrainingState.BACKWARD_PRE,\n f\"Expects `BACKWARD_PRE` state but got {handle._training_state}\",\n )\n handle._training_state = HandleTrainingState.BACKWARD_POST\n\n if param.grad is None:\n return\n if param.grad.requires_grad:\n raise RuntimeError(\"FSDP does not support gradients of gradients\")\n\n free_unsharded_flat_param = _should_free_in_backward(state, handle)\n _reshard(state, [handle], [free_unsharded_flat_param])\n\n # TODO: Post-backward prefetching does not support the multiple handles\n # per module case since the post-backward hook runs per handle, not per\n # group of handles.\n handles_key = (handle,)\n _prefetch_handles(state, handles_key)\n\n if not state._sync_gradients:\n return\n\n # Wait for all ops in the current stream (e.g. gradient\n # computation) to finish before reduce-scattering the gradient\n state._streams[\"post_backward\"].wait_stream(torch.cuda.current_stream())\n\n with torch.cuda.stream(state._streams[\"post_backward\"]):\n unsharded_grad_data = param.grad.data\n if state._exec_order_data.is_first_iter: # only check once\n _check_comm_hook(\n state._communication_hook, state._communication_hook_state\n )\n if handle._uses_reduce_mixed_precision and not _low_precision_hook_enabled(\n state\n ):\n # TODO: Use the low precision communication hook directly\n param.grad.data = param.grad.to(state.mixed_precision.reduce_dtype)\n\n if handle.uses_sharded_strategy:\n # We clear `.grad` to permit multiple backwards. 
This avoids a\n # race where the second backward pass computation precedes\n # ahead of the first backward pass reduction, which is possible\n # since the reduction is issued in a separate stream and is\n # async and would result in reducing the wrong gradient.\n unsharded_grad = param.grad.data\n param.grad = None\n p_assert(\n len(unsharded_grad.size()) == 1,\n f\"Expects gradient to be flattened but got {unsharded_grad.size()}\",\n )\n chunks = list(unsharded_grad.chunk(state.world_size))\n numel_to_pad = (\n state.world_size * chunks[0].numel() - unsharded_grad.numel()\n )\n padded_unsharded_grad = F.pad(unsharded_grad, [0, numel_to_pad])\n new_sharded_grad = torch.zeros_like(chunks[0]) # padded\n state._communication_hook(\n state._communication_hook_state,\n padded_unsharded_grad,\n new_sharded_grad,\n )\n _cast_grad_to_param_dtype(state, handle, new_sharded_grad, param)\n\n # Save the sharded gradient in `_saved_grad_shard` to support\n # gradient accumulation -- for multiple backwards, the gradient\n # reductions may happen in arbitrary order\n accumulate_grad = hasattr(param, \"_saved_grad_shard\")\n if accumulate_grad:\n _check_grad_to_accumulate(new_sharded_grad, param._saved_grad_shard)\n param._saved_grad_shard += new_sharded_grad\n else:\n param._saved_grad_shard = new_sharded_grad\n sharded_grad = param._saved_grad_shard\n else:\n state._communication_hook(state._communication_hook_state, param.grad)\n # For `NO_SHARD`, we can keep the low precision gradients by\n # simply omitting the cast altogether\n if not handle._keep_low_precision_grads:\n _cast_grad_to_param_dtype(state, handle, param.grad, param)\n sharded_grad = param.grad.data\n\n if handle._config.offload_params:\n # Offload the gradient to CPU to ensure parameters and\n # gradients are on the same device as required by the optimizer\n param._cpu_grad.copy_( # type: ignore[attr-defined]\n sharded_grad.detach(), non_blocking=True\n ) # synchronized in the post-backward callback\n # Since the sharded gradient is produced in the post-backward\n # stream and consumed later in the computation stream, inform\n # the caching allocator\n sharded_grad.data.record_stream(torch.cuda.current_stream())\n\n # Since the unsharded gradient is produced in the computation\n # stream and consumed in the post-backward stream, inform the\n # caching allocator (before it goes out of scope)\n unsharded_grad_data.record_stream(state._streams[\"post_backward\"])\n\n if handle._use_orig_params:\n # Since the handle's `FlatParameter` completed its gradient\n # computation, we should reset the gradient noneness mask\n handle._reset_is_grad_none()\n # Delay using sharded gradient views until after the\n # reduce-scatter instead of immediately after resharding\n handle._use_sharded_grad_views()", "def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input", "def _wait_for_post_backward(self) -> None:\n assert self._is_root\n # Check if the root module has params and if any of them has\n # the `requires_grad` field set. 
If `requires_grad=False` for\n # all the params, the post_backward hook will not fire and the\n # state will remain in `TrainingState.BACKWARD_PRE`.\n if any([p.requires_grad for p in self.full_params]):\n self.assert_state(TrainingState.BACKWARD_POST)\n else:\n self.assert_state(TrainingState.BACKWARD_PRE)\n\n # A backward pass is done, clean up below.\n def _finalize_parameters(fsdp_module: XlaFullyShardedDataParallel) -> None:\n \"\"\"Helper used below on all fsdp modules.\"\"\"\n for p in fsdp_module.full_params:\n if not p.requires_grad:\n continue\n if hasattr(p, \"_shard_bwd_hook\"):\n assert len(p._shard_bwd_hook) == 2, len(p._shard_bwd_hook)\n p._shard_bwd_hook[1].remove()\n delattr(p, \"_shard_bwd_hook\")\n\n # Update root and nested FSDP's hooks and flags.\n for m in self.modules(): # includes self\n if isinstance(m, XlaFullyShardedDataParallel):\n _finalize_parameters(m)\n if not m._pre_backward_hook_has_run:\n m.assert_state(TrainingState.IDLE)\n # The module won't trigger post_backward_hook, so we free the\n # full params here.\n m._free_full_params(\n m.full_params,\n apply_opt_barrier=self.optimization_barrier_in_backward)\n elif any(p.requires_grad for p in m.parameters()):\n # Check if the module has params and if any of them has\n # the `requires_grad` field set. If `requires_grad=False` for\n # all the params, the post_backward hook will not fire and the\n # state will remain in `TrainingState.BACKWARD_PRE`.\n if any([p.requires_grad for p in m.full_params]):\n m.assert_state(TrainingState.BACKWARD_POST)\n else:\n m.assert_state(TrainingState.BACKWARD_PRE)\n else:\n # When `m` and its children has no params or has params but\n # none with `requires_grad==True`, there are two cases:\n # 1. output tensors are `requires_grad==True`. In this case,\n # pre-backward hook is still registered, so it is in BACKWARD_PRE state.\n # 2. output tensors are `requires_grad==False`. In this case,\n # pre-backward hook is not registered, so it is in IDLE state.\n m.assert_state([TrainingState.BACKWARD_PRE, TrainingState.IDLE])\n\n m.training_state = TrainingState.IDLE\n m._pre_backward_hook_has_run = False\n if m._is_root:\n # reset this flag for cases like \"one forward pass + multiple backward passes\"\n self._post_backward_callback_queued = False\n # clear this list for next iteration\n assert self._output_pre_backward_hook_registered is not None\n self._output_pre_backward_hook_registered.clear()\n if self.optimization_barrier_in_backward:\n # Ensure that backward pass ops of feature gradients, parameter\n # gradient and sharding, and full-param freeing (which are usually\n # performed in previous modules and are registered to\n # self._backward_opt_barrier_tensors in _grad_opt_barrier_hook,\n # _pre_backward_hook, and _post_backward_hook) are finished before\n # accessing the sharded gradients of this FSDP module.\n params_with_grad = [\n p for p in self._all_sharded_params if p.grad is not None\n ]\n grad_data = [p.grad for p in params_with_grad]\n dependency_tensors = params_with_grad + grad_data\n dependency_tensors.extend(self._backward_opt_barrier_tensors)\n self.optimization_barrier_op(dependency_tensors)\n self._clear_backward_opt_barrier_lists()\n\n if self.mark_step_on_finalization:\n # Forcing an execution at the end of backward pass to avoid any XLA compiler\n # fusion between backward and optimizer (e.g. 
AdamW and SGD) step.\n # Here `xm.mark_step` is only called once for the entire backward pass and\n # should therefore only moderately increase the execution time.\n # It may help prevent undesired fusion in backward pass and save more memory.\n if self._debug_print:\n xm.master_print(\n f\"mark_step called in FSDP _wait_for_post_backward (_debug_msg: {self._debug_msg})\",\n flush=True,\n )\n xm.mark_step()", "def word_embedding_backward(dout, cache):\n dW = None\n ##############################################################################\n # TODO: Implement the backward pass for word embeddings. #\n # #\n # HINT: Look up the function np.add.at #\n ##############################################################################\n x, W = cache\n # create a copy since add.at changes the matrix\n W_new = W.copy()\n # it is just adding the derivates specified in dout at proper index\n # x gives the indices . dout gives the derivates that needs to be added.\n np.add.at(W_new, x, dout)\n dW = W_new - W\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dW", "def backward(ctx, grad_L):\n A, T = ctx.saved_tensors\n\n grad_A = None\n grad_T = None\n\n B = A.shape[0]\n\n # We only need to compute gradients for tensors that are flagged to\n # require gradients!\n if ctx.needs_input_grad[0]:\n grad_A = (A - T) / B\n\n if ctx.needs_input_grad[1]:\n grad_T = (T - A) / B\n\n return grad_A, grad_T", "def backward(self, inputs, grad_loss_input):\n raise NotImplementedError", "def forward_backward(self, data_batch):\n self.forward(data_batch, is_train=True)\n self.backward()\n if self.use_l2norm_grad_clip:\n # 2-Norm Grad Clip\n self.l2norm_grad_clip()", "def apply_gradients(self,\n grads_and_vars,\n global_step=None,\n name=None,\n decay_var_list=None):\n self._decay_var_list = set(decay_var_list) if decay_var_list else False\n return super(DecoupledWeightDecayExtension, self).apply_gradients(\n grads_and_vars, global_step=global_step, name=name)", "def backward(self, inGradient, lr=0.001): # batchSize = 1\n wGradient = np.dot(inGradient.T, self.data)\n bGradient = np.sum(inGradient, axis=0)\n outGradient = np.dot(inGradient, self.weights)\n\n self.weights = self.weights - lr * wGradient\n self.bias = self.bias - lr * bGradient\n self.wGradient = wGradient\n self.bGradient = bGradient\n\n #print \"weight gradient \", wGradient\n #print \"bias gradient \", bGradient\n\n return outGradient", "def backward(ctx, grad_output):\n \n grad_input = None # set output to None\n\n input, = ctx.saved_tensors\n if ctx.needs_input_grad[0]:\n grad_input = input.clone()\n\n return grad_input", "def _register_relu_hooks(self):\n\n # Save forward propagation output of the ReLU layer\n def _record_output(module, input_, output):\n self.relu_outputs.append(output)\n\n def _clip_gradients(module, grad_in, grad_out):\n # keep positive forward propagation output\n relu_output = self.relu_outputs.pop()\n relu_output[relu_output > 0] = 1\n\n # keep positive backward propagation gradient\n positive_grad_out = torch.clamp(grad_out[0], min=0.0)\n\n # generate modified guided gradient\n modified_grad_out = positive_grad_out * relu_output\n\n return (modified_grad_out, )\n\n for _, module in self.model.named_modules():\n if isinstance(module, nn.ReLU):\n forward_handle = module.register_forward_hook(_record_output)\n backward_handle = module.register_backward_hook(_clip_gradients)\n 
self.handle.append(forward_handle)\n self.handle.append(backward_handle)", "def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad).data\n lbda = lbda.data\n\n prox_x = check_tensor(\n np.array([prox_tv.tv1_1d(xx, eps * lbda) for xx in x]),\n device=device,\n )\n grad_u = (u - prox_x) / eps\n grad_lbda = reg.clone()\n return (torch.ones(0), grad_u, grad_lbda)", "def backward(ctx, grad_output):\n batch_size, n_dim = grad_output.shape\n sign_z, = ctx.saved_tensors\n device = grad_output.device\n S = sign_z != 0\n S[:, 0] = True\n sign_z[:, 0] = 0\n # XXX do clever computations\n L = torch.triu(torch.ones((n_dim, n_dim), dtype=torch.float64,\n device=device))\n\n grad_x, grad_lbda = [], []\n for i in range(batch_size):\n L_S = L[:, S[i]] # n_dim x |S|\n grad_u = grad_output[i].matmul(L_S) # 1 x |S|\n H_S = torch.inverse(L_S.t().matmul(L_S))\n grad_x.append(grad_u.matmul(H_S.matmul(L_S.t())))\n grad_lbda.append(grad_u.matmul(H_S.matmul(-sign_z[i][S[i]])))\n grad_x = torch.stack(grad_x)\n grad_lbda = torch.stack(grad_lbda)\n return (grad_x, grad_lbda)", "def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):\n pass", "def backward(self, out_grad, input):\n raise NotImplementedError", "def _register_grad_opt_barrier_hooks(\n self, dependency_tensors: List[torch.Tensor]) -> None:\n if not torch.is_grad_enabled():\n return # don't register hooks if grad isn't enabled\n\n def _grad_opt_barrier_hook(t_grad: torch.Tensor):\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n self.optimization_barrier_op([t_grad])\n return t_grad.view(t_grad.size()) # a view with barrier applied\n\n for t in dependency_tensors:\n if t.requires_grad:\n t.register_hook(_grad_opt_barrier_hook)", "def backward(ctx, grad_A):\n Z, = ctx.saved_tensors\n\n grad_Z = None\n\n # We only need to compute gradients for tensors that are flagged to\n # require gradients!\n if ctx.needs_input_grad[0]:\n sigmoid = 1. / (1. + torch.exp(-Z))\n grad_Z = grad_A * sigmoid * (1. - sigmoid)\n\n return grad_Z", "def forward_backward(self, data_batch):\n total_feature, total_label = self.forward(data_batch, is_train=True)\n self.backward_all(total_feature, total_label)", "def _post_backward_hook(\n self,\n handle: FlatParamHandle,\n *unused: Any,\n ) -> None:\n param = handle.flat_param\n param._post_backward_called = True\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._post_backward_hook\"\n ):\n # First hook callback will see PRE state. 
If we have multiple params,\n # then subsequent hook callbacks will see POST state.\n self._assert_state([TrainingState_.BACKWARD_PRE, TrainingState_.BACKWARD_POST])\n self.training_state = TrainingState_.BACKWARD_POST\n handle._training_state = HandleTrainingState.BACKWARD_POST\n\n if self._use_param_exec_order_policy() and self._param_exec_order_prep_stage:\n # In self._fsdp_params_exec_order, the parameters are ordered based on\n # the execution order in the backward pass in the first iteration.\n self._fsdp_params_exec_order.append(param)\n\n if param.grad is None:\n return\n if param.grad.requires_grad:\n raise RuntimeError(\n \"FSDP only works with gradients that don't require gradients\"\n )\n\n free_unsharded_flat_param = self._should_free_unsharded_flat_param(handle)\n self._reshard([handle], [free_unsharded_flat_param])\n\n # TODO (awgu): Post-backward prefetching does not support the\n # multiple handles per module case (which was why we keyed by\n # *tuple*). The post-backward hook runs per handle, not per group\n # of handles. To generalize this, we may need a 2-level mapping,\n # where we map each individual handle to its groups of handles and\n # then from the groups of handles to their indices in the order.\n handles_key = (handle,)\n self._prefetch_handles(handles_key)\n\n if not self._sync_gradients:\n return\n\n # Wait for all ops in the current stream (e.g. gradient\n # computation) to finish before reduce-scattering the gradient\n self._streams[\"post_backward\"].wait_stream(torch.cuda.current_stream())\n\n with torch.cuda.stream(self._streams[\"post_backward\"]):\n orig_grad_data = param.grad.data\n if (\n self._mixed_precision_enabled_for_reduce()\n and not self._low_precision_hook_enabled()\n ):\n # Cast gradient to precision in which it should be communicated.\n # If a low precision hook is registered and reduce_dtype is specified\n # in `MixedPrecision`, communication hook will take care of\n # casting to lower precision and back.\n # TODO: Make this a communication hook when communication hooks\n # are implemented for FSDP. Note that this is a noop if the\n # reduce_dtype matches the param dtype.\n param.grad.data = param.grad.data.to(self.mixed_precision.reduce_dtype)\n\n if self._exec_order_data.is_first_iter:\n # For all sharding strategies communication is performed through `_communication_hook`:\n # default comm hooks are: `reduce_scatter` for sharded strategies and\n # `all_reduce` for non-sharded strategies. This checks asserts that `_communication_hook`\n # and `_communication_hook_state`, required for communication not `None`.`\n p_assert(\n self._communication_hook is not None,\n \"Communication hook should not be None\"\n )\n p_assert(\n self._communication_hook_state is not None,\n \"Communication hook state should not be None\"\n )\n grad = param.grad.data\n if handle.uses_sharded_strategy:\n # We clear `param.grad` to permit repeated gradient\n # computations when this FSDP module is called multiple times.\n # This is to avoid a race among multiple re-entrant backward\n # passes. For example, the second backward pass computation\n # precedes ahead of the first backward pass reduction, which is\n # possible since the reduction is in a different stream and is\n # async. 
Then, the first backward pass may be incorrectly\n # reducing the second backward pass's `param.grad`.\n # The reduced gradients are accumulated in\n # `param._saved_grad_shard`, and the gradient reductions can\n # happen in arbitrary order, though we tolerate this due to the\n # (approximate) commutativity of floating-point addition.\n param.grad = None\n grad_flatten = torch.flatten(grad)\n chunks = list(grad_flatten.chunk(self.world_size))\n num_pad = self.world_size * chunks[0].numel() - grad.numel()\n input_flattened = F.pad(grad_flatten, [0, num_pad])\n output = torch.zeros_like(chunks[0])\n self._communication_hook(self._communication_hook_state, input_flattened, output)\n\n self._cast_grad_to_param_dtype(output, param)\n\n # To support gradient accumulation outside `no_sync()`, we save\n # the gradient data to `param._saved_grad_shard` before the\n # backward pass, accumulate gradients into it here, and set\n # `param.grad` with the accumulated value at the end of the\n # backward pass in preparation for the optimizer step.\n accumulate_grad = hasattr(param, \"_saved_grad_shard\")\n if accumulate_grad:\n p_assert(\n param._saved_grad_shard.shape == output.shape, # type: ignore[attr-defined]\n \"Shape mismatch when accumulating gradients: \" # type: ignore[attr-defined]\n f\"existing grad shape={param._saved_grad_shard.shape} \"\n f\"new grad shape={output.shape}\" # type: ignore[attr-defined]\n )\n p_assert(\n param._saved_grad_shard.device == output.device, # type: ignore[attr-defined]\n \"Device mismatch when accumulating gradients: \" # type: ignore[attr-defined]\n f\"existing grad device={param._saved_grad_shard.device} \"\n f\"new grad device={output.device}\" # type: ignore[attr-defined]\n )\n param._saved_grad_shard += output # type: ignore[attr-defined]\n else:\n param._saved_grad_shard = output # type: ignore[attr-defined]\n grad = param._saved_grad_shard # type: ignore[attr-defined]\n else:\n if self.sharding_strategy == ShardingStrategy.NO_SHARD:\n self._communication_hook(self._communication_hook_state, param.grad)\n\n # For NO_SHARD keeping grads in the reduced precision, we\n # can simply omit the cast as needed, we can't do this for\n # other sharding strategies because grad field is assigned\n # in _finalize_params. TODO (rvarm1) this divergence in\n # logic is not ideal.\n if not self._mixed_precision_keep_low_precision_grads():\n self._cast_grad_to_param_dtype(param.grad, param)\n\n # Regardless of sharding or not, offload the grad to CPU if we are\n # offloading params. This is so param and grad reside on same device\n # which is needed for the optimizer step.\n if handle._config.offload_params:\n # We specify non_blocking=True\n # and ensure the appropriate synchronization is done by waiting\n # streams in _wait_for_post_backward.\n param._cpu_grad.copy_( # type: ignore[attr-defined]\n grad.detach(), non_blocking=True\n )\n # Don't let this memory get reused until after the transfer.\n grad.data.record_stream(torch.cuda.current_stream())\n\n # After _post_backward_hook returns, orig_grad_data will eventually\n # go out of scope, at which point it could otherwise be freed for\n # further reuse by the main stream while the div/reduce_scatter/copy\n # are underway in the post_backward stream. 
See:\n # github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py\n orig_grad_data.record_stream(self._streams[\"post_backward\"])", "def backward(ctx, grad_output):\n if PROFILE:\n batch_tic = time.time()\n tic = time.time()\n timings = defaultdict(float)\n\n feats1, feats2, xxyy, batch_grid_u, params, pow = ctx.saved_tensors\n\n \"\"\"We needed to store the integers as part of a tensor, so the\n unpacking code here is a little convoluted.\"\"\"\n B, C, H, W, stride, norm = [x.item() for x in params]\n h, w = H, W\n pow = pow.item()\n\n \"\"\"This is a pattern that is very convenient - at the top of backward\n unpack saved_tensors and initialize all gradients w.r.t. inputs to\n None. Thanks to the fact that additional trailing Nones are\n ignored, the return statement is simple even when the function has\n optional inputs.\"\"\"\n grad_feats1 = grad_feats2 = grad_xxyy = grad_batch_u = None\n grad_stride = grad_norm = grad_pow = None\n\n \"\"\"Returning gradients for inputs that don't require it is\n not an error.\"\"\"\n assert ctx.needs_input_grad[0], \"expected feats1 to need grad\"\n assert ctx.needs_input_grad[1], \"expected feats2 to need grad\"\n assert not ctx.needs_input_grad[2], \"expected xxyy does not need grad\"\n assert not ctx.needs_input_grad[3], \"expected batch_grid_u does not need grad\"\n assert not ctx.needs_input_grad[4], \"expected stride does not need grad\"\n\n if PROFILE:\n timings[\"back-init\"] = time.time() - tic\n tic = time.time()\n\n with torch.no_grad():\n\n if feats1.is_cuda:\n # TODO: clean up types here\n if feats1.dtype == torch.float32:\n grad_feats1 = torch.cuda.FloatTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.FloatTensor(B, C, h, w).fill_(0)\n elif feats1.dtype == torch.float16:\n grad_feats1 = torch.cuda.HalfTensor(B, C, H, W).fill_(0)\n grad_feats2 = torch.cuda.HalfTensor(B, C, h, w).fill_(0)\n else:\n grad_feats1 = torch.zeros((B, C, H, W), dtype=feats1.dtype)\n grad_feats2 = torch.zeros((B, C, h, w), dtype=feats2.dtype)\n\n grad_loss = grad_output / (H * W * B)\n\n if PROFILE:\n timings[\"data transfer\"] = time.time() - batch_tic\n\n for b in range(B):\n\n if PROFILE:\n tic = time.time()\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n\n if PROFILE:\n timings[\"diff-grid\"] += time.time() - tic\n tic = time.time()\n\n # loss gradient for the current minibatch element (expand to tensor)\n grad_loss_b = grad_loss\n grad_smcorr2 = grad_loss_b * diff\n\n if LOCAL_CHECKS:\n ones = torch.ones(diff.shape, dtype=diff.dtype)\n grad_loss_b_ = ones * grad_loss\n smcorr_ = torch.randn(\n diff.shape,\n dtype=torch.double,\n requires_grad=True)\n with torch.autograd.enable_grad():\n L_ = diff * smcorr_\n d_smcorr = torch.autograd.grad(\n outputs=L_,\n inputs=smcorr_,\n grad_outputs=grad_loss_b_,\n )\n rel_diff(grad_smcorr2, d_smcorr[0], \"smax\")\n if torch.any(torch.isnan(grad_smcorr2[0])):\n import ipdb; ipdb.set_trace()\n\n\n if PROFILE:\n timings[\"scale-feats\"] += time.time() - tic\n tic = time.time()\n\n # Re-compute intermediate values\n grad_smcorr2 = grad_smcorr2.view(H, W, -1)\n f1_ = feats1[b].view(C, H * W)\n f2_ = feats2[b].view(C, h * w)\n fa_ = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR\n f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR\n fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm = 
f1_.clone()\n f2_norm = f2_.clone()\n fa_norm = fa_.clone()\n\n if PROFILE:\n timings[\"fwd-norm\"] += time.time() - tic\n tic = time.time()\n\n # Match the source features against the auxiliaries\n corr = torch.matmul(f1_norm.t(), fa_norm)\n corr = corr.reshape(H, W, h, w)\n\n if PROFILE:\n timings[\"f1-aux-correlation\"] += time.time() - tic\n tic = time.time()\n\n smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n smcorr = smcorr.view(corr.shape)\n if LOCAL_CHECKS:\n # cache a copy of the mega tensor for numerical checks\n smcorr_fa = smcorr[None, ...] * fa_norm.view(-1, 1, 1, h, w)\n f1_via_fa = smcorr_fa.sum((3, 4))\n else:\n \"\"\"This is one of the largest tensors.....\"\"\"\n f1_via_fa = (smcorr[None, ...] *\n fa_norm.view(-1, 1, 1, h, w)).sum((3, 4))\n\n f1_via_fa = f1_via_fa.view(C, H * W)\n\n # Main correlation computation\n corr2 = torch.matmul(f1_via_fa.t(), f2_norm).view(corr.shape)\n\n # Direct backward pass for second softmax\n smcorr2 = F.softmax(corr2.view(H, W, -1), dim=2)\n sum_term = torch.sum(grad_smcorr2 * smcorr2, dim=2, keepdim=True)\n grad_corr2 = smcorr2 * (grad_smcorr2 - sum_term)\n\n if not LOCAL_CHECKS:\n del smcorr2\n\n if PROFILE:\n timings[\"softmax\"] += time.time() - tic\n tic = time.time()\n\n # safety checks\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr2_num = corr2.clone().requires_grad_()\n corr2_num = corr2_num.reshape(H, W, -1)\n smcorr2_num = F.softmax(corr2_num, dim=2)\n grad_corr2_num = torch.autograd.grad(\n outputs=smcorr2_num,\n inputs=(corr2_num,),\n grad_outputs=grad_smcorr2,\n )\n rel_diff(grad_corr2, grad_corr2_num[0], \"smax-corr2\")\n\n \"\"\"Derivatives through the main correlation correlation\"\"\"\n grad_corr2 = grad_corr2.view(H * W, H * W)\n grad_f1_via_fa = torch.matmul(grad_corr2, f2_norm.t()).t()\n grad_f2_norm = torch.matmul(f1_via_fa, grad_corr2)\n\n if not LOCAL_CHECKS:\n del grad_corr2\n\n if PROFILE:\n timings[\"corr-back\"] += time.time() - tic\n tic = time.time()\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_via_fa_num = f1_via_fa.clone().requires_grad_()\n f2_norm_num = f2_norm.clone().requires_grad_()\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n grad_f1_via_fa_num, grad_f2_norm_num = torch.autograd.grad(\n outputs=corr2_num,\n inputs=(f1_via_fa_num, f2_norm_num),\n grad_outputs=grad_corr2,\n )\n rel_diff(grad_f1_via_fa, grad_f1_via_fa_num,\n \"corr-f1-via-fa\")\n rel_diff(grad_f2_norm, grad_f2_norm_num,\n \"corr->f2-norm\")\n\n if OLD_METHOD:\n # (may be able to collapse all this later)\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # This tensor is crashing the GPU\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # smcorr_fa = smcorr[None, ...] 
* fa_.view(-1, 1, 1, h, w)\n grad_smcorr = (grad_smcorr_fa * fa_norm.view(-1, 1, 1, h, w)).sum(0)\n grad_fa_ = (grad_smcorr_fa * smcorr[None, ...]).sum(1).sum(1)\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] \\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n else:\n # -------------------------------------------------------\n # Collapsed summation method\n # -------------------------------------------------------\n # Fwd ops ->\n # smcorr_fa = smcorr[None, ...] * fa.reshape(-1, 1, 1, h, w)\n # f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n\n # Given gradient ->\n # (grad_f1_via_fa)\n\n # Desired gradients ->\n # (grad_fa_, grad_smcorr)\n\n grad_f1_via_fa = grad_f1_via_fa.view(-1, H, W, 1, 1)\n\n # safety checks over the summation\n if LOCAL_CHECKS:\n # This tensor is crashing the GPU, so should only be\n # used for numerical checks\n grad_smcorr_fa = grad_f1_via_fa.repeat(1, 1, 1, h, w)\n with torch.enable_grad():\n\n smcorr_fa_num = smcorr_fa.clone().requires_grad_()\n f1_via_fa_num = smcorr_fa_num.sum((3, 4))\n # f1_via_fa_num = f1_via_fa_num.view(C, H * W)\n\n grad_smcorr_fa_num = torch.autograd.grad(\n outputs=f1_via_fa_num,\n inputs=(smcorr_fa_num,),\n grad_outputs=grad_f1_via_fa.view(-1, H, w),\n )\n rel_diff(grad_smcorr_fa, grad_smcorr_fa_num[0],\n \"summation of grad_smcorr-fa\")\n\n # Use for-loop over EVC dimension to avoid memory issues\n if feats1.is_cuda:\n if grad_f1_via_fa.dtype == torch.float64:\n grad_smcorr = torch.cuda.DoubleTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.DoubleTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.cuda.FloatTensor(H, W, h, w).fill_(0)\n grad_fa_ = torch.cuda.FloatTensor(C, h, w).fill_(0)\n else:\n grad_smcorr = torch.zeros((H, W, h, w), dtype=feats1.dtype)\n grad_fa_ = torch.zeros((C, h, w), dtype=feats1.dtype)\n\n for cc in range(C):\n grad_smcorr += (grad_f1_via_fa[cc] * fa_norm[cc].view(1, 1, h, w))\n grad_fa_[cc] = (grad_f1_via_fa[cc] * smcorr).sum((0, 1))\n grad_fa_ = grad_fa_.reshape(C, h * w)\n\n # safety checks over the weighted sum\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n smcorr_num = smcorr.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n smcorr_fa_num = smcorr_num[None, ...] 
\\\n * fa_norm_num.view(-1, 1, 1, h, w)\n\n (grad_smcorr_num, grad_fa_num) = torch.autograd.grad(\n outputs=smcorr_fa_num,\n inputs=(smcorr_num, fa_norm_num),\n grad_outputs=grad_smcorr_fa,\n )\n rel_diff(grad_fa_, grad_fa_num,\n \"product of grad_fa_\")\n rel_diff(grad_smcorr, grad_smcorr_num,\n \"product of grad_smcor\")\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n # Direct backward pass for first softmax\n # smcorr = F.softmax(corr.view(H, W, -1), dim=2)\n grad_smcorr = grad_smcorr.view(H, W, -1)\n smcorr = smcorr.view(H, W, -1)\n sum_term = torch.sum(grad_smcorr * smcorr, dim=2, keepdim=True)\n grad_corr = smcorr * (grad_smcorr - sum_term)\n\n if not LOCAL_CHECKS:\n del grad_smcorr\n del grad_smcorr2\n del smcorr\n del corr\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n corr_num = corr.clone().requires_grad_()\n smcorr_num = F.softmax(corr_num.view(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n grad_corr_num = torch.autograd.grad(\n outputs=smcorr_num,\n inputs=(corr_num,),\n grad_outputs=grad_smcorr.view(H, W, h, w),\n )\n rel_diff(grad_corr, grad_corr_num[0].view(H, W, -1),\n \"smax-corr\")\n\n # Back through the first correlation\n # [Fwd op] -> `corr = torch.matmul(f1_norm.t(), fa_norm)`\n grad_corr = grad_corr.view(H * W, h * w)\n grad_f1_norm = torch.matmul(grad_corr, fa_norm.t()).t()\n grad_fa_norm = torch.matmul(f1_norm, grad_corr)\n\n if not LOCAL_CHECKS:\n del grad_corr\n\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_norm_num = f1_norm.clone().requires_grad_()\n fa_norm_num = fa_norm.clone().requires_grad_()\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n grad_f1_norm_num, grad_fa_norm_num = torch.autograd.grad(\n outputs=corr_num,\n inputs=(f1_norm_num, fa_norm_num),\n grad_outputs=grad_corr,\n )\n rel_diff(grad_f1_norm, grad_f1_norm_num, \"corr->f1n-orm\")\n rel_diff(grad_fa_norm, grad_fa_norm_num, \"corr->fa-norm\")\n\n # Combine gradients for two ops using aux features\n grad_fa_norm = grad_fa_norm + grad_fa_\n\n # Back through the norms\n # [Fwd op] -> `f1_norm = F.normalize(f1_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `f2_norm = F.normalize(f2_, p=2, dim=0) * JDT_FACTOR`\n # [Fwd op] -> `fa_norm = F.normalize(fa_, p=2, dim=0) * JDT_FACTOR`\n # xNorm = sqrt(sum(x.*x, 3) + opts.epsilon) ;\n\n if norm:\n f1_norm_val = torch.norm(f1_, p=2, dim=0).clamp(min=EPS)\n f2_norm_val = torch.norm(f2_, p=2, dim=0).clamp(min=EPS)\n fa_norm_val = torch.norm(fa_, p=2, dim=0).clamp(min=EPS)\n\n max_val_f1 = torch.max(f1_norm_val)\n max_val_f2 = torch.max(f2_norm_val)\n max_val_fa = torch.max(fa_norm_val)\n if max_val_f1 + max_val_f2 + max_val_fa > 1E8:\n import ipdb; ipdb.set_trace()\n\n grad_f1_norm_ = grad_f1_norm / f1_norm_val\n grad_f1 = JDT_FACTOR * (grad_f1_norm_ -\n (grad_f1_norm_ * f1_).sum(0) * (f1_ / (f1_norm_val ** 2)))\n\n grad_f2_norm_ = grad_f2_norm / f2_norm_val\n grad_f2 = JDT_FACTOR * (grad_f2_norm_ -\n (grad_f2_norm_ * f2_).sum(0) * (f2_ / (f2_norm_val ** 2)))\n\n grad_fa_norm_ = grad_fa_norm / fa_norm_val\n grad_fa = JDT_FACTOR * (grad_fa_norm_ -\n (grad_fa_norm_ * fa_).sum(0) * (fa_ / (fa_norm_val ** 2)))\n\n if LOCAL_CHECKS:\n with torch.enable_grad():\n f1_num = f1_.clone().requires_grad_()\n f2_num = f2_.clone().requires_grad_()\n fa_num = 
fa_.clone().requires_grad_()\n\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n\n grad_f1_num = torch.autograd.grad(\n outputs=f1_norm_num,\n inputs=(f1_num,),\n grad_outputs=grad_f1_norm,\n )\n grad_f2_num = torch.autograd.grad(\n outputs=f2_norm_num,\n inputs=(f2_num,),\n grad_outputs=grad_f2_norm,\n )\n grad_fa_num = torch.autograd.grad(\n outputs=fa_norm_num,\n inputs=(fa_num,),\n grad_outputs=grad_fa_norm,\n )\n rel_diff(grad_f1, grad_f1_num[0], \"norm-f1\")\n rel_diff(grad_f2, grad_f2_num[0], \"norm-f2\")\n rel_diff(grad_fa, grad_fa_num[0], \"norm-fa\")\n else:\n grad_f1 = grad_f1_norm\n grad_f2 = grad_f2_norm\n grad_fa = grad_fa_norm\n\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n print(\"=======================\")\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n import ipdb; ipdb.set_trace()\n\n\n # safety checks over the whole inner loop\n if LOCAL_CHECKS:\n with torch.enable_grad():\n\n f1_num = feats1[b].clone().detach().requires_grad_().reshape(C, H * W)\n f2_num = feats2[b].clone().detach().requires_grad_().reshape(C, h * w)\n fa_num = feats1[(b + 1) % B].clone().detach().requires_grad_().reshape(C, h * w)\n\n if norm:\n f1_norm_num = F.normalize(f1_num, p=2, dim=0) * JDT_FACTOR\n f2_norm_num = F.normalize(f2_num, p=2, dim=0) * JDT_FACTOR\n fa_norm_num = F.normalize(fa_num, p=2, dim=0) * JDT_FACTOR\n else:\n f1_norm_num = f1_num\n f2_norm_num = f2_num\n fa_norm_num = fa_num\n\n # BLock 1 ------------------------------------------\n corr_num = torch.matmul(f1_norm_num.t(), fa_norm_num)\n corr_num = corr_num.reshape(H, W, H, W)\n smcorr_num = F.softmax(corr_num.reshape(H, W, -1), dim=2)\n smcorr_num = smcorr_num.reshape(corr_num.shape)\n # BLock 1 ------------------------------------------\n\n\n # BLock 2 ------------------------------------------\n smcorr_fa_num = smcorr_num[None, ...] 
* \\\n fa_norm_num.reshape(-1, 1, 1, h, w)\n # BLock 2 ------------------------------------------\n\n\n # BLock 3 ------------------------------------------\n f1_via_fa_num = smcorr_fa_num.sum((3, 4)).reshape(C, H * W)\n # BLock 3 ------------------------------------------\n\n # BLock 4 ------------------------------------------\n corr2_num = torch.matmul(f1_via_fa_num.t(), f2_norm_num)\n corr2_num = corr2_num.reshape(corr_num.shape)\n smcorr2_num = F.softmax(corr2_num.reshape(H, W, -1), dim=2)\n smcorr2_num = smcorr2_num.reshape(corr_num.shape)\n # BLock 4 ------------------------------------------\n\n grad_f1_num, grad_fa_num, grad_f2_num = torch.autograd.grad(\n outputs=(smcorr2_num,),\n inputs=(f1_num, fa_num, f2_num),\n grad_outputs=(grad_smcorr2.view(corr_num.shape)),\n )\n\n rel_diff(grad_f1, grad_f1_num, \"df1_\")\n rel_diff(grad_f2, grad_f2_num, \"df2_\")\n rel_diff(grad_fa, grad_fa_num, \"dfa_\")\n\n \"\"\"Distribute the gradients back among the input tensor\n features that require them.\"\"\"\n grad_feats1[b] += grad_f1.reshape((C, H, W))\n grad_feats1[(b + 1) % B] += grad_fa.reshape((C, h, w))\n grad_feats2[b] += grad_f2.reshape((C, h, w))\n\n if PROFILE:\n timings[\"feat-assign\"] += time.time() - tic\n\n\n if LOCAL_CHECKS_INNER_LOOP:\n with torch.enable_grad():\n loss = 0.\n grad_loss_ = grad_loss * (H * W * B) # unscale\n for b in range(B):\n f1 = feats1[b].reshape(C, H * W) # source\n f2 = feats2[b].reshape(C, h * w) # target\n fa = feats1[(b + 1) % B].reshape(C, h * w) # auxiliary\n\n if norm:\n f1 = F.normalize(f1, p=2, dim=0) * JDT_FACTOR\n f2 = F.normalize(f2, p=2, dim=0) * JDT_FACTOR\n fa = F.normalize(fa, p=2, dim=0) * JDT_FACTOR\n\n corr = torch.matmul(f1.t(), fa)\n corr = corr.reshape(H, W, h, w)\n smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)\n smcorr_fa = smcorr[None, ...] 
* fa.reshape(-1, 1, 1, h, w)\n # del smcorr\n\n f1_via_fa = smcorr_fa.sum((3, 4)).reshape(C, H * w)\n # del smcorr_fa\n\n corr2 = torch.matmul(f1_via_fa.t(), f2).reshape(corr.shape)\n smcorr2 = F.softmax(corr2.reshape(H, W, -1), dim=2).reshape(corr.shape)\n # del corr2\n\n with torch.no_grad():\n diff = batch_grid_u[b, :, :, None, None, :] - \\\n xxyy[None, None, ::stride, ::stride, :]\n diff = (diff * diff).sum(4).sqrt()\n diff = diff.pow(pow)\n L = diff * smcorr2\n loss += L.float().sum()\n\n loss = loss / (H * W * B)\n grad_f1_num, grad_f2_num = torch.autograd.grad(\n outputs=loss,\n inputs=(feats1, feats2),\n grad_outputs=grad_loss_,\n )\n\n rel_diff(grad_feats1, grad_f1_num, \"full-loop f2\")\n rel_diff(grad_feats2, grad_f2_num, \"full-loop f2\")\n\n if PROFILE:\n tic = time.time()\n\n if PRINT_MEM:\n key = None\n val = None\n shape_mems = {}\n for key, val in locals().items():\n if hasattr(val, \"shape\"):\n shape_mems[key] = estimate_mem(val)\n\n sorted_mems = sorted(shape_mems.items(), key=lambda kv: -kv[1])\n for key, val in sorted_mems:\n print(\"{}: {:.4f} GiB\".format(key, val))\n\n if PROFILE:\n timings[\"cleanup\"] += time.time() - tic\n\n if PROFILE:\n timings[\"minibatch\"] = time.time() - batch_tic\n print(\"==============\")\n total_ratios = 0\n for key in timings:\n ratio = 100 * timings[key] / timings[\"minibatch\"]\n msg = \"{:.3f} ({:.2f}%) >>> {}\"\n print(msg.format(timings[key], ratio, key))\n total_ratios += ratio\n msg = \"{:.3f}s >>> ratio total {}\"\n print(msg.format(timings[\"minibatch\"], total_ratios - 100))\n print(\"==============\")\n\n return (grad_feats1, grad_feats2, grad_xxyy, grad_batch_u,\n grad_stride, grad_norm, grad_pow)", "def _backward_hook(\n self,\n module: Module,\n grad_input: Tensor,\n grad_output: Tensor,\n ) -> Tensor:\n # before accessing the attributes from the module we want\n # to ensure that the properties exist, if not, then it is\n # likely that the module is being reused.\n attr_criteria = self.satisfies_attribute_criteria(module)\n if not attr_criteria:\n raise RuntimeError(\n \"A Module {} was detected that does not contain some of \"\n \"the input/output attributes that are required for DeepLift \"\n \"computations. 
This can occur, for example, if \"\n \"your module is being used more than once in the network.\"\n \"Please, ensure that module is being used only once in the \"\n \"network.\".format(module)\n )\n\n multipliers = SUPPORTED_NON_LINEAR[type(module)](\n module,\n module.input,\n module.output,\n grad_input,\n grad_output,\n eps=self.eps,\n )\n # remove all the properies that we set for the inputs and output\n del module.input\n del module.output\n\n return multipliers", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for i, layer in enumerate(reversed(layers)):\n cur_layer_idx = len(layers) - i - 1\n if cur_layer_idx <= NUM_LAYERS_SKIP:\n # implement short circuit here\n if layer.is_fc_layer:\n grads = [0.0 for _ in range(layer.W.shape[0]*layer.W.shape[1]+layer.W.shape[1])]\n else:\n # normal gradient computation \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def L_model_backward(AL, Y, caches, use_batchnorm, batchnorm_cache, dropout_cache):\n\n grads = {}\n num_layers = len(caches)\n use_dropout = len(dropout_cache) != 0\n\n last_layer_idx = num_layers\n dA, dW, db = linear_backward(AL - Y, caches[-1]['linear_cache'])\n grads['dA' + str(last_layer_idx)] = dA\n grads['dW' + str(last_layer_idx)] = dW\n grads['db' + str(last_layer_idx)] = db\n\n for layer_idx in reversed(range(1, num_layers)):\n if use_dropout:\n dA = dropout_backward(dA, dropout_cache[layer_idx])\n\n dA, dW, db = linear_activation_backward(dA , caches[layer_idx - 1], \"relu\", use_batchnorm, batchnorm_cache[layer_idx])\n grads['dA' + str(layer_idx)] = dA\n grads['dW' + str(layer_idx)] = dW\n grads['db' + str(layer_idx)] = db\n\n return grads", "def forward_backward(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n with tf.name_scope('forward_belief_propagation'):\n self.forward_log_probs = self._forward(observation_log_probs)\n\n with tf.name_scope('backward_belief_propagation'):\n self.backward_log_probs = self._backward(observation_log_probs)", "def backward(self, grad_output):\n grad_input = grad_output\n for module in reversed(self.modules):\n grad_input = module.backward(grad_input)\n return grad_input", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[torch.abs(input) > 1.001] = 0\n return grad_input", "def 
backwards(delta,params,name='',activation_deriv=sigmoid_deriv):\n # everything you may need for this layer\n W = params['W' + name]\n b = params['b' + name]\n X, pre_act, post_act = params['cache_' + name]\n # your code here\n # do the derivative through activation first\n # then compute the derivative W,b, and X\n \n delta_pre = activation_deriv(post_act) * delta\n # (in_dim, out_dim) = (in_dim, examples) @ (examples, out_dim)\n grad_W = X.transpose() @ delta_pre\n grad_b = np.sum(delta_pre, axis=0, keepdims=True) # (1, out_dim)\n # (examples, in_dim) = (examples, out_dim) @ (out_dim, in_dim)\n grad_X = delta_pre @ W.transpose()\n\n # store the gradients\n params['grad_W' + name] = grad_W\n params['grad_b' + name] = grad_b\n return grad_X", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n \n dW = np.dot(self.X.T, d_out);\n dB = np.dot(np.ones((1, d_out.shape[0])), d_out);\n \n d_input = np.dot(d_out, self.W.value.T);\n #print(\"self.X = \", self.X);\n #print(\"self.W.grad.T = \", self.W.grad.T);\n #print(\"dW.T = \", dW.T);\n \n self.W.grad += dW;\n self.B.grad += dB;\n \n return d_input;", "def late_gradient_fusion():\n pass", "def on_after_backward(self):\n for callback in self.callbacks:\n callback.on_after_backward(self, self.get_model())", "def _apply_gradient_decay():\n parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']\n num_layers = len(xlnet_base._net.transformer_cells)\n for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):\n layer_params = layer_parameters.collect_params()\n for key, value in layer_params.items():\n skip = False\n for pn in parameter_not_included:\n if pn in key:\n skip = True\n if skip:\n continue\n if value.grad_req != 'null':\n for arr in value.list_grad():\n arr *= args.layerwise_decay**(num_layers - i - 1)", "def _register_conv_hook(self):\n\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for layer in reversed(layers): \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # 
Return the parameter gradients", "def save_feedback_gradients(self, reconstruction_loss):\n self.reconstruction_loss = reconstruction_loss.item()\n if self.feedbackbias is not None:\n grads = torch.autograd.grad(reconstruction_loss, [\n self.feedbackweights, self.feedbackbias], retain_graph=False)\n self._feedbackbias.grad = grads[1].detach()\n else:\n grads = torch.autograd.grad(reconstruction_loss,\n self.feedbackweights,\n retain_graph=False\n )\n self._feedbackweights.grad = grads[0].detach()", "def backward(ctx, grad_output):\n inds, wgts = ctx.saved_tensors\n grad_inputs = trilinear_devoxelize_backward(grad_output.contiguous(),\n inds, wgts, ctx.r)\n return grad_inputs.view(grad_output.size(0), grad_output.size(1), ctx.r,\n ctx.r, ctx.r), None, None, None", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n\n d_input = np.dot(d_out, self.W.value.T)\n self.W.grad = np.dot(self.X.T, d_out)\n self.B.grad = np.sum(d_out, axis=0, keepdims=True)\n\n return d_input", "def network_backward(self, dloss, cache_list):\n \n #############################################################################\n # TODO: Implement the backward pass. #\n #############################################################################\n grads = {}\n\n # Set Activation Function\n ActivationFunction_Backward = None\n if self.hidden_activation_fn == \"sigmoid\":\n ActivationFunction_Backward = lambda dx, cache: self.sigmoid_backward(dx, cache)\n elif self.hidden_activation_fn == \"tanh\":\n ActivationFunction_Backward = lambda dx, cache: self.tanh_backward(dx, cache)\n elif self.hidden_activation_fn == \"relu\":\n ActivationFunction_Backward = lambda dx, cache: self.relu_backward(dx, cache)\n\n # X1 => Result from X1\n # X1A => Result from X1 Activation\n X1, X1A, X2 = cache_list\n\n dX, grads[\"W2\"], grads[\"b2\"] = self.fully_connected_backward(dloss, X2)\n dX1A = ActivationFunction_Backward(dX, X1A)\n _, grads[\"W1\"], grads[\"b1\"] = self.fully_connected_backward(dX1A, X1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return grads", "def backward(ctx: Any, grad_output: Any) -> Any:\n return grad_output, None", "def backward(self, grad_output):\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < -1] = 0\n grad_input[input > 1] = 0\n return grad_input, None", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def backward(self, i):\n \n #Compute gradient for w1, w2, w3\n w1_grad = np.zeros((2, 3))\n w2_grad = np.zeros((3, 3))\n w3_grad = np.zeros((3, 1))\n \n \n w3_backward_pass = np.zeros((1, 1))\n w2_backward_pass = np.zeros((1, 3))\n \n #print(\"self.error shape\",self.error.shape)\n #Compute w3 gradient\n for i, w in enumerate(w3_grad): # 3 x 1 \n w3_forward_pass = self.a2[0][i]\n w3_backward_pass = self.error * der_sigmoid(self.y)\n w3_grad[i] = w3_forward_pass * w3_backward_pass\n \n #Compute w2 gradient\n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n w2_forward_pass = self.a1[0][i]\n w2_backward_pass[0][i] = der_sigmoid(self.a2[0][i]) * self.w3[i][0] * 
w3_backward_pass\n w2_grad[i][j] = w2_forward_pass * w2_backward_pass[0][i]\n \n \n #Compute w1 gradient \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n w1_forward_pass = self.input[0][i]\n w1_backward_pass = der_sigmoid(self.a1[0][i]) * self.w2[i][j] * w2_backward_pass[0][i]\n w1_grad[i][j] = w1_forward_pass * w1_backward_pass\n \n \n #Update \n for i, w in enumerate(w3_grad): \n self.w3[i] -= self.learning_rate * w3_grad[i]\n \n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n self.w2[i][j] -= self.learning_rate * w2_grad[i][j]\n \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n self.w1[i][j] -= self.learning_rate * w1_grad[i][j]\n \n #print(\"w3 grad : \", w3_grad)\n #print(\"w3.shape :\", self.w3.shape)", "def backward(self, *output_grads):\n raise NotImplementedError", "def pass_gradients(self):\n return self.last_grads", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n\n sp = F.softplus(input)\n grad_sp = -torch.expm1(sp)\n\n tsp = F.tanh(sp)\n grad_tsp = (1 - tsp * tsp) * grad_sp\n grad = input * grad_tsp + tsp\n return grad", "def get_forward_gradients(self):\n\n if self.bias is not None:\n return (self.weights.grad, self.bias.grad)\n else:\n return (self.weights.grad, )", "def __backward(self, dA, cache, derivative_activate_fn):\n A_prev, W, b, Z, D = cache\n\n m = A_prev.shape[1]\n\n # Mask\n dA = np.multiply(dA, D) / self.keep_prob\n\n dZ = dA * derivative_activate_fn(Z)\n dW = (1.0 / m) * np.dot(dZ, A_prev.T)\n db = (1.0 / m) * np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T, dZ)\n\n\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n assert (dA_prev.shape == A_prev.shape)\n\n return dA_prev, dW, db", "def backward(self, grad_output):\n raise NotImplementedError", "def backward(self, index, gradOutput):\n self.gradInput = np.dot(gradOutput, self.weight.T)\n if index != -1:\n self.gradWeight[index] += gradOutput #the gradients wrt. 
the params are accumulated\n self.gradBias += gradOutput\n return self.gradInput", "def backward(ctx, grad_output_var):\n xmin = 0\n xmax = 1\n grad_output = grad_output_var.data\n gamma_mu,kappa,uTx,x = ctx.saved_tensors\n n = kappa.size()[0]\n nx = grad_output.size()[2]\n u = 1/nx**2*torch.linspace(1,nx,nx)\n norm_u = torch.norm(u)**2\n torch_u = u.view(1,1,-1)+torch.zeros(n,1,nx)#broadcast\n denom = (xmin-kappa)*(xmax-kappa)\\\n -(kappa-uTx)*(xmin+xmax-2*kappa)\\\n -2*gamma_mu*norm_u \n #\n idx = (denom.abs()>1e-7)\n ind = (denom.abs()>1e-7)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n denom[~idx] = denom[~idx]+1\n grad_input_gamma_mu = (2*kappa-(xmin+xmax))/denom*torch_u\n coeff = (xmax-kappa)*(xmin-kappa)/denom - 1\n grad_input_u = torch.eye(nx) \\\n +coeff*torch.matmul(torch_u.view(1,1,-1,1),u.view(1,-1))/norm_u\n # if denom is very small, it means that gamma_mu is very small and u is very close to one of the bounds,\n # there is a discontinuity when gamma_mu tends to zero, if 0<u<1 the derivative wrt x is approximately equal to \n # 1 and the derivative wrt gamma_mu is approximated by 10^3 times the error 2kappa-xmin-xmax\n grad_input_gamma_mu[~ind] = 0*grad_input_gamma_mu[~ind]+1e3*(2*kappa[~idx]-(xmin+xmax))\n grad_input_u[~ind] = 0*grad_input_u[~ind]+1\n \n grad_input_gamma_mu = grad_input_gamma_mu*grad_output#.sum(1).sum(1).unsqueeze(1).unsqueeze(2)\n grad_input_u = grad_input_u*grad_output\n \n # safety check for numerical instabilities\n if (grad_input_gamma_mu!=grad_input_gamma_mu).any():\n print('there is a nan in grad_input_gamma_mu')\n if (x!=x).any():\n print('there is a nan in x')\n sys.exit()\n if (grad_input_u!=grad_input_u).any():\n print('there is a nan in grad_input_u')\n sys.exit()\n \n grad_input_gamma_mu = Variable(grad_input_gamma_mu,requires_grad=True)\n grad_input_u = Variable(grad_input_u,requires_grad=True)\n \n return grad_input_gamma_mu, grad_input_u, None", "def backward(self, d_out):\n # TODO: Implement backward pass\n # Compute both gradient with respect to input\n # and gradients with respect to W and B\n # Add gradients of W and B to their `grad` attribute\n\n # It should be pretty similar to linear classifier from\n # the previous assignment\n self.W.grad += np.dot(self.X.T, d_out)\n self.B.grad += np.sum(d_out, axis=0)[np.newaxis, :]\n return np.dot(d_out, self.W.value.T)", "def _backward(self, w=None):\n grad = self.w # Should be I * self.w . We keep a vector for simplicity\n\n # Left multiply input `w` with normalizer gradient\n return w * grad if w is not None else grad", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def batchnorm_backward_alt(dout, cache):\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line. 
#\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return dx, dgamma, dbeta", "def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):\n self.clear_lp_grads()\n loss.backward(**bwd_kwargs)\n\n if update_hp_grads:\n self.update_hp_grads(clear_lp_grads=clear_lp_grads)", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward(last_layer: str) -> Callable:\n\n def closure() -> Tuple[Optional[torch.Tensor], torch.Tensor]:\n optimizer.zero_grad()\n output = model(data)\n if last_layer == \"output\":\n output.backward(torch.ones_like(target))\n return None, output\n elif last_layer == 'loss':\n loss = compute_loss(output - target)\n loss.backward()\n return loss, output\n else:\n assert False, 'last layer must be \"output\" or \"loss\"'\n\n return closure", "def backward_pass(self, grad):\n pass", "def backward(self, grad_output):\n input, = self.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def backward(self, previous_grad, learning_rate):\n conv_grad = np.zeros(self.conv_filter.shape)\n for patch, i, j in self.image_patch(self.current_image):\n for k in range(self.num_filters):\n conv_grad[k] += patch*previous_grad[i,j,k]\n \n self.conv_filter -= learning_rate*conv_grad\n return conv_grad", "def L_model_backward(AL, Y, caches, X):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation \n dZ = AL - Y # Derivative of Cross Entropy Loss with Softmax\n \n # Lth layer (SOFTMAX -> LINEAR) gradients. Inputs: \"AL, Y, caches\". 
Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n current_cache = caches[L-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dZ, AL, current_cache, activation = \"softmax\")\n \n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], AL, current_cache, activation = \"relu\")\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n\n return grads", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(cls, grad_out, activated_out):\n raise Exception(\"Unimplemented\")", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of 
flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 
= torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # 
Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward 
output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def relu_backward_hook_function(module, grad_in, grad_out):\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)", "def _poputil_recompute_backward(op, grads):\n return grads", "def backward(self, bottom, top, propagate_down):\n # get diff\n top_diff = top[0].diff()\n features = bottom[0].data()\n # compute the gradient\n weight_diff = self._weight.init_diff(setzero=False)\n blasdot.dot_firstdims(features, top_diff, out=weight_diff)\n if self._has_bias:\n bias_diff = self._bias.init_diff(setzero=False)\n bias_diff[:] = top_diff.reshape(\n np.prod(top_diff.shape[:-1]), top_diff.shape[-1]).sum(0)\n # If necessary, compute the bottom Blob gradient.\n if propagate_down:\n bottom_diff = bottom[0].init_diff(setzero=False)\n blasdot.dot_lastdim(top_diff, self._weight.data().T,\n out=bottom_diff)\n if self._reg is not None:\n return self._reg.reg(self._weight)\n else:\n return 0.", "def _linear_activation_backward(self, dA, cache, activation_backward):\n linear_cache, activation_cache = cache\n dZ = activation_backward[1](dA, activation_cache)\n dA_prev, dW, db = self._linear_backward(dZ, linear_cache)\n\n return dA_prev, dW, db", "def _pre_backward_hook(_handles: List[FlatParamHandle], *unused: Any) -> None:\n _handles_key = tuple(_handles) # avoid shadowing `handles_key`\n # Only run the pre-backward hook once per group of handles involved\n # in the same module forward computation\n if _handles_key and self._ran_pre_backward_hook.get(_handles_key, False):\n return\n\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._pre_backward_hook\"\n ):\n # Queue the post-backward callback once for the root FSDP\n # instance to attach it to the outermost backward graph task so\n # that it is called after all backward calls complete\n if self._is_root and not self._post_backward_callback_queued:\n self._queue_wait_for_post_backward()\n elif _handles_key:\n self._assert_state([TrainingState_.IDLE])\n self.training_state = TrainingState_.BACKWARD_PRE\n # Queueing the post-backward callback is the only logic that is\n # not per-handle in the pre-backward hook, so we can return\n # early here 
if there are no handles.\n if not _handles_key:\n return\n for handle in _handles:\n handle._training_state = HandleTrainingState.BACKWARD_PRE\n\n # If the handles have been prefetched, this `_unshard()` simply\n # switches to using the unsharded parameter\n self._unshard(_handles)\n torch.cuda.current_stream().wait_stream(self._streams[\"all_gather\"])\n\n # Set this to `False` to ensure that a mistargeted prefetch\n # does not actually unshard these handles\n self._needs_pre_backward_unshard[_handles_key] = False\n self._prefetch_handles(_handles_key)\n for handle in _handles:\n handle.prepare_gradient()\n self._ran_pre_backward_hook[_handles_key] = True", "def grad_wrapper(*wrapper_args, variables=None):\n\n @custom_gradient\n def inner_recompute_grad(*dresult):\n \"\"\"Nested custom gradient function for computing grads in reverse and forward mode autodiff.\"\"\"\n # Gradient calculation for reverse mode autodiff.\n with backprop.GradientTape() as t:\n id_args = nest.map_structure(gen_array_ops.identity, args)\n # Tuple `dresult` should contain at least one tensor.\n assert len(dresult) >= 1\n\n if not context.executing_eagerly():\n # XLA doesn't respect `tf.control_dependencies`. The code block\n # below manually adds a data dependency to `dresult` to ensure\n # recomputation of `f(*args, **kwargs)` happens after `dresult`.\n\n # This works even if `dresult[0]` is a size 0 tensor as reduce_max\n # of a size 0 tensor returns -inf. Use reshape here to avoid reading\n # the entire `dresult[0]`.\n elem = math_ops.reduce_max(array_ops.reshape(dresult[0], [-1])[:1])\n # Cast elem to bool in case elem is NaN.\n elem_bool = math_ops.cast(elem, dtypes.bool)\n dresult_dep = array_ops.where_v2(\n elem_bool == elem_bool, 0., float(\"nan\")) # pylint: disable=comparison-with-itself\n id_args = nest.map_structure(\n lambda x: x + math_ops.cast(dresult_dep, x.dtype), id_args)\n\n t.watch(id_args)\n if variables is not None:\n t.watch(variables)\n with variable_scope.variable_scope(current_var_scope):\n recomputed_result = f(*id_args, **kwargs)\n kw_vars = []\n if variables is not None:\n kw_vars = list(variables)\n grads = t.gradient(\n recomputed_result,\n list(id_args) + kw_vars,\n output_gradients=dresult,\n unconnected_gradients=UnconnectedGradients.ZERO)\n\n def transpose(*t_args, **t_kwargs):\n \"\"\"Gradient function calculation for forward mode autodiff.\"\"\"\n # Just throw an error since gradients / activations are not stored on\n # tape for recompute.\n raise NotImplementedError(\n \"recompute_grad tried to transpose grad of {}. 
\"\n \"Consider not using recompute_grad in forward mode\"\n \"autodiff\".format(f.__name__))\n\n return (grads[:len(id_args)], grads[len(id_args):]), transpose\n\n return inner_recompute_grad(*wrapper_args)", "def _backward(outputs, grad_outputs, retain_graph=False):\n # Collect forward tapes.\n inputs = list(outputs)\n op_tape = tape.OrderedTape()\n graph_leaves = set()\n memo = set()\n while len(inputs) > 0:\n input = inputs.pop(0)\n if id(input) in memo:\n continue\n memo.add(id(input))\n if input._tape:\n op_tape.merge_from(input._tape)\n inputs.extend(input._tape.get_sources())\n input._tape = None\n if input._retains_grad:\n graph_leaves.add(input.id)\n elif input._requires_grad:\n graph_leaves.add(input.id)\n\n # Run backward computations reversely.\n op_defs = op_tape.get_op_defs()\n execute_ws = workspace.get_workspace()\n execute_ws.run_backward(\n op_defs=op_defs,\n targets=[y.id for y in outputs],\n grad_targets=[dy.id for dy in grad_outputs],\n sources=list(graph_leaves),\n )\n\n # Free the forward handles if allowed.\n if not retain_graph:\n handle_pool = execute_ws._handle_pool\n for op_def in op_defs:\n handle_pool.release(op_def.name)", "def post_backward_generator(self):\n pass", "def _tpu_embedding_lookup(self, features: Any, weights: Any) -> Any:\n # Each call to this function increments the _tpu_call_id by 1, this allows\n # us to tag each of the main embedding ops with this call id so that we know\n # during graph rewriting passes which ops correspond to the same layer call.\n self._tpu_call_id += 1\n name = \"{}\".format(self._tpu_call_id)\n\n # Set training to true, even during eval. When name is set, this will\n # trigger a pass that updates the training based on if there is a send\n # gradients with the same name.\n self._tpu_embedding.enqueue(features, weights, training=True, name=name)\n\n # The gradient trap is a trick used to ensure we can compute the gradients\n # at the correct point of the model. By default GradientTape only tracks\n # the calculations which descend from variables. e.g. if you call\n # tape.gradient on something that does not come from a variable involved in\n # the computation, it will fail.\n # We need to call tpu_embedding.apply_gradients on the gradients computed\n # at tpu_embedding.dequeue. Since tpu_embedding.dequeue has no inputs, we\n # can't compute the gradient at its output. To get around that we wrap\n # the dequeue in a function with a custom gradient. This function takes one\n # input, throws it away and returns the result of the dequeue. If we pass a\n # dummy variable to this function and compute the gradient at the dummy\n # variable, then the custom gradient function will be called with the\n # graidents that we need to pass to tpu_embedding.apply_gradients.\n @tf.custom_gradient\n def gradient_trap(dummy):\n \"\"\"Register a gradient function for activation.\n\n Its purpose is to send gradients back to TPU.\n\n Args:\n dummy: a variable to prevent this backward pass from being pruned.\n\n Returns:\n a tuple of list of activations and their gradient function.\n \"\"\"\n activations = self._tpu_embedding.dequeue(name=name)\n\n def grad(*grad_wrt_activations):\n \"\"\"Gradient function.\"\"\"\n # Since the output of the function is flattened, the gradients\n # are also flattened. 
Hence we have to pack them back in to the correct\n # nested structure.\n gradients = tf.nest.pack_sequence_as(self._feature_config,\n grad_wrt_activations)\n self._tpu_embedding.apply_gradients(gradients, name=name)\n\n # This is the gradient for the input variable.\n return tf.zeros_like(dummy)\n\n # Custom gradient functions don't like nested structures of tensors, so we\n # flatten them here.\n return tf.nest.flatten(activations), grad\n\n activations_with_trap = gradient_trap(getattr(self, _DUMMY_NAME))\n return tf.nest.pack_sequence_as(self._feature_config, activations_with_trap)" ]
[ "0.744036", "0.7207515", "0.68466437", "0.6619593", "0.6545222", "0.65270805", "0.6505459", "0.64318466", "0.6385007", "0.63628876", "0.63604456", "0.63604456", "0.62412906", "0.6231469", "0.62253386", "0.61755383", "0.609307", "0.60908824", "0.60908824", "0.60785884", "0.6035776", "0.59760696", "0.59733164", "0.5969636", "0.5931047", "0.5928084", "0.59047455", "0.5880499", "0.5870969", "0.5865129", "0.58627874", "0.585131", "0.5844866", "0.582785", "0.5802947", "0.58006454", "0.57776093", "0.57433456", "0.57353103", "0.56951374", "0.56830597", "0.56744576", "0.5669027", "0.5667289", "0.56624043", "0.5662042", "0.56522226", "0.56522226", "0.5638999", "0.56369287", "0.5634871", "0.5631715", "0.5607632", "0.5602362", "0.55927855", "0.5590035", "0.5589209", "0.5586272", "0.55848366", "0.55843365", "0.5565991", "0.5548005", "0.55433154", "0.55384547", "0.5518571", "0.55101365", "0.55005497", "0.54933476", "0.54824066", "0.5471384", "0.5464496", "0.5460655", "0.5448385", "0.54442555", "0.5442669", "0.5442054", "0.54370236", "0.54370236", "0.54296774", "0.54186136", "0.5416317", "0.5411819", "0.5408469", "0.5404049", "0.5404049", "0.54001707", "0.539798", "0.5396803", "0.5396803", "0.5396803", "0.5396803", "0.5396803", "0.5382179", "0.5379283", "0.53745496", "0.5372405", "0.53723997", "0.53717", "0.53683954", "0.5366596" ]
0.8470942
0
Some tokenizers don't have 'eos_token' and 'bos_token' attributes, so we need a small trick to recover them.
def special_tokens(self):
    if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:
        special_tokens = self.tokenizer.build_inputs_with_special_tokens([])
        special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)
        self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids

    special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token
    return special_tokens
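A minimal, self-contained sketch of the same trick outside the class, assuming a Hugging Face tokenizer; the bert-base-uncased checkpoint is only an illustrative choice, and the tuple unpacking assumes the empty sequence is wrapped by exactly two special tokens:

from transformers import AutoTokenizer

# BERT-style tokenizers expose cls/sep but usually have no bos/eos set.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

if tokenizer.bos_token is None or tokenizer.eos_token is None:
    # build_inputs_with_special_tokens([]) returns only the special-token ids
    # that wrap an empty sequence, e.g. [CLS] and [SEP] for BERT.
    special_ids = tokenizer.build_inputs_with_special_tokens([])
    bos, eos = tokenizer.convert_ids_to_tokens(special_ids)
    tokenizer.bos_token, tokenizer.eos_token = bos, eos

print(tokenizer.bos_token, tokenizer.eos_token)  # e.g. [CLS] [SEP]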
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def parse(self, tokenizer):\n pass", "def tokens():\n pass", "def get_tokens(self, document):\n raise NotImplementedError()", "def get_tokens(self):\r\n return self.token_set", "def get_tokens(data_clean):\n #sentence tokenization\n data_sent = sent_tokenize(data_clean)\n #tokenizer\n data_tokenized_punc = [word for sent in data_sent for word in nltk.word_tokenize(sent)]\n data_word = [word.lower() for word in data_tokenized_punc if word.isalpha()]\n\n return data_word, data_sent", "def getTokens(self):\n return self.__token", "def bos_token(self):\r\n if self._bos_token is None:\r\n logger.error(\"Using bos_token, but it is not set yet.\")\r\n return self._bos_token", "def __get_token_data__(self):\n raise Exception(\"Implement me!\")", "def __init__(self):\n self.tokens = []", "def eos_token(self):\r\n if self._eos_token is None:\r\n logger.error(\"Using eos_token, but it is not set yet.\")\r\n return self._eos_token", "def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList", "def _gettoken(c,chars,knownsigils):\n verbose = False\n token = None\n if (c!= \"end\"):\n toktext = []\n matches = knownsigils[c][0]\n toktype = knownsigils[c][1]\n if verbose: print(\"BEF toktype:\",toktype,\" matches:\",matches)\n while (True):\n c = next(chars, \"end\")\n if verbose: print(\"c->\",c)\n if c in matches:\n toktext.append(c)\n else:\n break\n if verbose: print(\"AFT toktype:\",toktype,\" toktext:\",toktext)\n token = (''.join(toktext), toktype)\n return (c,token)", "def init_tokens(self):\n raise NotImplementedError('Abstract method.')", "def token(self) -> str:", "def tokens(self):\n return self.__tokens", "def tokenize(G, w):\n if not w:\n return [G.EOF]\n\n w = normalize(w)\n w = w[:-1].split(' ')\n \n f = G.symbDict\n\n tokens = []\n for token in w:\n if f.get(token) and f[token].IsTerminal:\n tokens.append(f[token])\n else:\n return \"token no definido: \" + token\n tokens.append(G.EOF)\n return tokens", "def token_key(token):\n morphotagged = analysis(token).get('raw')\n lemma_pos = (analysis(token).get('lemma'), analysis(token).get('partOfSpeech'))\n return morphotagged or lemma_pos", "def tokens(self):\n tokens = [k for k in self.tok2ind.keys()\n if k not in {'<NULL>', '<UNK>'}]\n return tokens", "def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\r\n return self.iter_tokens(self._blob)", "def get_token(self):\n key = self.kwargs.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = self.request.GET.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = self.request.POST.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = None\n return key", "def test_tokenization():\n X = Tokenizer().transform([[\"A test\"]])\n assert X[\"corpus\"][0] == [\"A\", \"test\"]", "def get_tokenizer_and_model(model_name: str):\r\n tokenizer = AutoTokenizer.from_pretrained(model_name)\r\n model = AutoModel.from_pretrained(model_name)\r\n model.output_hidden_states = True\r\n return 
tokenizer, model", "def token(self):\n print(\"getter of token called\")\n return self._token", "def _next_tokens(self, head):\n state = head.state\n input_str = self.input_str\n position = head.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special STOP token if they are applicable\n if STOP in actions:\n if not self.consume_input \\\n or (self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(head)\n\n custom_tokens = self.custom_token_recognition(\n head, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(head))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(tokens)\n\n return tokens", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def token(uncapped_token):\n return uncapped_token", "def tag(self, tokens):\n (yyhat, _) = self.tag_with_features(tokens)\n return yyhat", "def tokens(self):\n return self._sentrep.tokens()", "def get_tokenizer_result(blob):\n return list(blob.words)", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def tokenizer(s):\n\n tokens = tokenize(s.lower()) # apply the nltk tokenizer\n tokens = [t for t in tokens if doc_frequency[t]>5 and t not in stop_words]# and doc_frequency[t]<3000]\n \n return tokens", "def get_tokens(self) -> List[str]:\n return self.tokens", "def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)", "def get_word_and_lemma(token):\n form_element = token.find('form')\n lemma_element = token.find('lemma')\n\n return normalize(form_element.text), normalize(lemma_element.text)", "def _get_all_possible_tokens_ahead(self, context):\n tokens = []\n if context.position < len(context.input_str):\n for terminal in self.grammar.terminals.values():\n try:\n tok = terminal.recognizer(context.input_str,\n context.position)\n except TypeError:\n tok = terminal.recognizer(context, context.input_str,\n context.position)\n additional_data = ()\n if type(tok) is tuple:\n tok, *additional_data = tok\n if tok:\n tokens.append(Token(terminal, tok, additional_data))\n return tokens", "def build_tokenizer(self):\n def tokenizer(doc):\n token_pattern = re.compile(self.token_pattern)\n return token_pattern.findall(doc)\n \n return tokenizer", "def _next_tokens(self, context):\n state = context.state\n input_str = context.input_str\n position = context.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special tokens (EMPTY and STOP) if they are applicable\n if EMPTY in actions:\n tokens.append(EMPTY_token)\n if STOP in actions:\n if not self.consume_input \\\n or (self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying 
recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(context)\n\n custom_tokens = self.custom_token_recognition(\n context, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(context))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(context, tokens)\n\n return tokens", "def custom_tokenizer(nlp, infix_reg):\n return Tokenizer(nlp.vocab, infix_finditer=infix_reg.finditer)", "def _get_all_possible_tokens_ahead(self, context):\n tokens = []\n if context.position < len(context.input_str):\n for terminal in self.grammar.terminals.values():\n try:\n tok = terminal.recognizer(context.input_str,\n context.position)\n except TypeError:\n tok = terminal.recognizer(context, context.input_str,\n context.position)\n additional_data = ()\n if type(tok) is tuple:\n tok, *additional_data = tok\n if tok:\n tokens.append(Token(terminal, tok, context.position,\n additional_data))\n return tokens", "def gettok(self):\n try:\n self.next = next(self.tokens)\n except StopIteration:\n self.next = None", "def token(self):\r\n return self._token", "def next_token(self, context, token):", "def getToken(self):\n \n raise NotImplementedError", "def token(self):\n token = self.lex.token()\n if token is not None:\n print(token)\n return token", "def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()", "def get_tokens(sent):\n return word_tokenize(sent)", "def _get_tokens(morph, stopwords, document):\n\n PortSt = PorterStemmer()\n\n invalid_chars = string.punctuation + u'»' + u'«' + u'—' + u'“' + u'„'\n translation_table = {ord(c): None for c in invalid_chars if c != u'-'}\n\n # parse rss body\n soup = BeautifulSoup(document, 'html.parser')\n body = ' '.join(\n [tag.string.replace('\\\\n', ' ').replace('\\\\r', ' ')\n for tag in soup.descendants if tag.string]\n )\n\n if body == \"\":\n return []\n\n body_clean = body.translate(translation_table).lower().strip()\n words = word_tokenize(body_clean)\n tokens = []\n\n # stemming and text normalization\n for word in words:\n if re.match('^[a-z0-9-]+$', word) is not None:\n tokens.append(PortSt.stem(word))\n elif word.count('-') > 1:\n tokens.append(word)\n else:\n normal_forms = morph.normal_forms(word)\n tokens.append(normal_forms[0] if normal_forms else word)\n\n tokens = filter(lambda token: token not in stopwords, set(tokens))\n\n # remove all words with less than 4 chars\n tokens = filter(lambda token: len(token) >= 4, tokens)\n\n return tokens", "def multiword_tokens(self):\n return self._mwts", "def process_bereich_from(self):\r\n return self._tokens[0]", "def semcor2token(args):\r\n input_files = list_files(*args.input_files)\r\n output_dir = Path(args.output_dir)\r\n if not output_dir.is_dir():\r\n try:\r\n output_dir.mkdir()\r\n except:\r\n print('Invalid output directory name. 
Files will be stored in default directory.', file = stderr)\r\n output_dir = output_default / 'typetoken'\r\n if not output_dir.is_dir():\r\n output_dir.mkdir()\r\n multiword = args.multiword\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n filename = corpus_file.shortname + '.txt'\r\n dirname = output_dir / corpus_file.concordance\r\n if not dirname.exists():\r\n dirname.mkdir()\r\n output_file_name = dirname / filename\r\n with output_file_name.open('w') as output_file:\r\n for word in corpus_file.text.find_all(['wf', 'punc']):\r\n if word.name == 'punc':\r\n output_file.write('\\t'.join([word.string, word.string, 'punc\\n']))\r\n elif not multiword:\r\n for token in Token.from_tag(word).get_components():\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')\r\n else:\r\n token = Token.from_tag(word)\r\n if args.verbose and type(token.status)==tuple:\r\n token_id = '/'.join([corpus_file.shortname, token.wordform])\r\n report_token_status(token, token_id)\r\n output_file.write('\\t'.join([token.wordform, token.lemma, token.pos]) + '\\n')", "def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1", "def _get_tok_emb(tok, w2v):\n if tok not in w2v.vocab:\n return 
torch.zeros(w2v.vectors.shape[1])\n return torch.tensor(w2v.vectors[w2v.vocab[tok].index])", "def get_top_tokens(sequence_switched, indexer, tokenizer, model, suggestion_num):\n token_input = tokenizer.encode(sequence_switched, return_tensors=\"pt\")\n mask_token_index = torch.where(token_input == tokenizer.mask_token_id)[1]\n token_logits = model(token_input).logits\n mask_token_logits = token_logits[0, mask_token_index, :]\n top_tokens = torch.topk(mask_token_logits, suggestion_num, dim=1).indices[indexer].tolist()\n return top_tokens", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def token(self):\n return self[\"token\"]", "def __next__(self):\n if self.gen is None:\n self.gen = self.token_generator()\n\n tok = next(self.gen)\n while tok.type in self.IGNORED_TOKENS:\n tok = next(self.gen)\n return tok", "def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")", "def tokens(self) -> \"Stream<str>\":\n raise NotImplementedError", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def tokens(self):\n tokens = []\n for index in range(len(self.sentrep)):\n tokens.append(self.sentrep.getWord(index).lexeme())\n return tokens", "def test_get_tokens():\n pass", "def _GetSupportedTokens(self):\n supported_tokens, supported_sub_tokens = self._BuildTokens()\n # make sure we don't have subtokens that are not listed. This should not\n # occur unless a platform's tokens/subtokens are changed.\n undefined_st = set(supported_sub_tokens) - supported_tokens\n if undefined_st:\n raise UnsupportedFilterError(\n 'Found undefined sub tokens missing from the supported token list! '\n 'These must match. 
(%s)' % ' '.join(undefined_st))\n # all good.\n return supported_tokens, supported_sub_tokens", "def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]", "def token(self) -> Optional[str]:\n return self._builder._token", "def token(self) -> Token:\n return getattr(self, \"tok\", None)", "def get_tokens_with_heads(self, snlp_doc):\n tokens = []\n heads = []\n offset = 0\n for sentence in snlp_doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n # Here, we're calculating the absolute token index in the doc,\n # then the *relative* index of the head, -1 for zero-indexed\n # and if the governor is 0 (root), we leave it at 0\n if word.head:\n head = word.head + offset - len(tokens) - 1\n else:\n head = 0\n heads.append(head)\n tokens.append(word)\n offset += sum(len(token.words) for token in sentence.tokens)\n return tokens, heads", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def override_special_tokens(self, opt: Opt):\n # now override\n self.start_token = self.hf_tokenizer.cls_token\n self.end_token = self.hf_tokenizer.sep_token\n self.null_token = self.hf_tokenizer.pad_token\n self.unk_token = self.hf_tokenizer.unk_token\n\n self._unk_token_idx = self.hf_tokenizer.unk_token_id\n\n self.start_idx = self[self.start_token]\n self.end_idx = self[self.end_token]\n self.null_idx = self[self.null_token]", "def get_class_tokens(docs, drop=0.0):\n xp = get_array_module(docs[0]._.get(ATTRS.last_hidden_state))\n outputs = []\n doc_class_tokens = []\n for doc in docs:\n class_tokens = []\n for i, wp in enumerate(doc._.get(ATTRS.word_pieces_)):\n if is_class_token(wp):\n class_tokens.append(i)\n doc_class_tokens.append(xp.array(class_tokens, dtype=\"i\"))\n wp_tensor = doc._.get(ATTRS.last_hidden_state)\n outputs.append(wp_tensor[doc_class_tokens[-1]])\n\n def backprop_class_tokens(d_outputs, sgd=None):\n for doc, class_tokens, dY in zip(docs, doc_class_tokens, d_outputs):\n if doc._.get(ATTRS.d_last_hidden_state).size == 0:\n xp = get_array_module(doc._.get(ATTRS.last_hidden_state))\n grads = xp.zeros(doc._.get(ATTRS.last_hidden_state).shape, dtype=\"f\")\n doc._.set(ATTRS.d_last_hidden_state, grads)\n doc._.get(ATTRS.d_last_hidden_state)[class_tokens] += dY\n return None\n\n return outputs, backprop_class_tokens", "def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token", "def add_special_tokens_(model, tokenizer, update_model=True):\n orig_num_tokens = len(tokenizer)\n num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there\n #print(\"coab::\",len(tokenizer.vocab))\n if (num_added_tokens > 0 and update_model):\n model.encoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)\n model.decoder.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)\n #print(model.encoder.embeddings.word_embeddings.weight.shape)\n #print(model.decoder.bert.embeddings.word_embeddings.weight.shape)", 
"def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token", "def read_token(self):\n self._skip_white_space()\n return self._get_token()", "def token_values(self):\n return self._token_values", "def _tokens(self):\n # get my renderer\n renderer = self.renderer\n # sign on\n yield \"\"\n yield renderer.commentLine(\"tokens\")\n # simple tokens\n yield from renderer.set(name=\"empty\")\n yield from renderer.set(name=\"comma\", value=\",\")\n yield from renderer.set(name=\"space\", value=\"$(empty) $(empty)\")\n\n # characters that don't render easily and make the makefile less readable\n yield from renderer.set(name=\"esc\", value='\"\\x1b\"')\n\n # all done\n return", "def get_Tokens(self):\n return self._output.get('Tokens', None)", "def get_tokenizer_class(model_name):\n return OpenAIGPTTokenizer if model_name == 'openai-gpt' else GPT2Tokenizer", "def features(self, tokens, index, history):\r\n # for more details see: http://nlpforhackers.io/named-entity-extraction/\r\n\r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n\r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n\r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n\r\n word, pos = tokens[index]\r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n previob = history[index - 1]\r\n contains_dash = '-' in word\r\n contains_dot = '.' in word\r\n allascii = all([True for c in word if c in string.ascii_lowercase])\r\n\r\n allcaps = word == word.capitalize()\r\n capitalized = word[0] in string.ascii_uppercase\r\n\r\n prevallcaps = prevword == prevword.capitalize()\r\n prevcapitalized = prevword[0] in string.ascii_uppercase\r\n\r\n nextallcaps = nextword == nextword.capitalize()\r\n nextcapitalized = nextword[0] in string.ascii_uppercase\r\n\r\n return {\r\n 'word': word,\r\n 'lemma': stemmer.stem(word),\r\n 'pos': pos,\r\n 'all-ascii': allascii,\r\n\r\n 'next-word': nextword,\r\n 'next-lemma': stemmer.stem(nextword),\r\n 'next-pos': nextpos,\r\n\r\n 'next-next-word': nextnextword,\r\n 'next-next-pos': nextnextpos,\r\n\r\n 'prev-word': prevword,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n 'prev-pos': prevpos,\r\n\r\n 'prev-prev-word': prevprevword,\r\n 'prev-prev-pos': prevprevpos,\r\n\r\n 'prev-iob': previob,\r\n\r\n 'contains-dash': contains_dash,\r\n 'contains-dot': contains_dot,\r\n\r\n 'all-caps': allcaps,\r\n 'capitalized': capitalized,\r\n\r\n 'prev-all-caps': prevallcaps,\r\n 'prev-capitalized': prevcapitalized,\r\n\r\n 'next-all-caps': nextallcaps,\r\n 'next-capitalized': nextcapitalized,\r\n }", "def token_vector(self, token):\n unit_id = self.svecs.vocab.unit2id(token.text)\n return self.svecs.emb[unit_id]", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def test_TreebankTokenReader():", "def token(self):\n\n return self.__token", "def get_tokens():\n return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY, DFS_POLICY_KEY,\n EXECUTABLE_SEARCH_PATHS, NN_HTTPS_ADDRESS_KEY, SMOKEUSER_KEY,\n KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY,\n METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY,\n METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, AMS_HTTP_POLICY)", "def parse(token):\n\n pass", 
"def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def processes_and_tokenize(raw_document):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_document.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\tstop_words = set(nltk.corpus.stopwords.words('english'))\n\t#stop_words = set(stopwords.words('english'))\n\tfiltered_tokens = [w for w in tokens if not w in stop_words]\n\treturn filtered_tokens", "def _candidates(self, token):", "def filter_pos(self):\n all_tokens = []\n for zettel in self.lemma_tokens:\n tokens = []\n for word in zettel:\n if word[1] in ['NN', 'NNS', 'NNP', 'NNPS', 'NG']: # NG = n_gram\n tokens.append(word)\n all_tokens.append(tokens)\n self.lemma_tokens = all_tokens", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def extractTokensToPredict(data_path):\n \n sentences = []\n ids = []\n \n for event, element in 
etree.iterparse(data_path, tag=\"sentence\"):\n current_sentence = []\n current_ids = []\n if event == 'end':\n #For every child of the sentence tag\n for child in element:\n #Get the lemma of the token\n lemma = child.attrib['lemma']\n if '&apos;' in lemma:\n #If it is present, substitute it\n lemma = re.sub(r'(&apos;)', '\\'', lemma)\n #Check also for &apos;&apos = \"\"\n if '\\'\\'' in word:\n lemma = re.sub(r'(\\'\\')', '\\'', lemma)\n if child.tag == 'instance':\n current_ids.append(child.attrib['id'])\n else:\n current_ids.append('0')\n current_sentence.append(lemma)\n if current_sentence and current_ids:\n sentences.append(current_sentence)\n ids.append(current_ids)\n #Clear to save memory\n element.clear()\n \n print(\"File completely parsed. Total number of sentences %i \\n\" %len(sentences))\n print()\n return sentences, ids" ]
[ "0.6021182", "0.5951054", "0.58588576", "0.5789484", "0.5745784", "0.56824887", "0.5654901", "0.5648043", "0.5638188", "0.5631488", "0.5630968", "0.56133425", "0.5593994", "0.5592292", "0.5575906", "0.55579054", "0.5544896", "0.549606", "0.54886556", "0.54866934", "0.5471206", "0.5471206", "0.5471206", "0.546802", "0.54580086", "0.54476875", "0.5431728", "0.54306537", "0.5413535", "0.5402866", "0.5389173", "0.53838086", "0.5376138", "0.536514", "0.536221", "0.536221", "0.5326106", "0.5319723", "0.53186214", "0.5314929", "0.5295591", "0.5290737", "0.5286225", "0.5279546", "0.5276048", "0.5275135", "0.5273017", "0.5267176", "0.5265483", "0.5264967", "0.5264429", "0.5254477", "0.52492046", "0.5246821", "0.5230367", "0.522517", "0.5213375", "0.52127683", "0.5207999", "0.5205646", "0.51933503", "0.51888853", "0.51840603", "0.5182786", "0.51826423", "0.51826423", "0.51826423", "0.5153466", "0.5150604", "0.5133623", "0.5129595", "0.5129476", "0.5124563", "0.51233333", "0.5122788", "0.512165", "0.51119035", "0.51103514", "0.5105322", "0.5103113", "0.5100753", "0.5100517", "0.5100124", "0.5094238", "0.50828785", "0.5081751", "0.5078834", "0.5078642", "0.5065399", "0.5062084", "0.50608504", "0.5059243", "0.5058422", "0.5058422", "0.5058422", "0.5054277", "0.5053879", "0.50517225", "0.5051011", "0.5050827" ]
0.62158376
0
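A note on the fragment that opens the negatives list above: it shows a hook-with-fallback scanning pattern, where a user-supplied custom_token_recognition callback is handed the scanning context plus a closure over the built-in recognizer, so it can either produce tokens itself or delegate to the default, with optional lexical disambiguation applied afterwards. A minimal runnable sketch of that pattern follows; the Scanner class, the dict-based context, and the whitespace recognizer are illustrative assumptions, not code from the original library.

class Scanner:
    # Minimal sketch of the hook-with-fallback tokenization pattern shown above.
    def __init__(self, custom_token_recognition=None, lexical_disambiguation=None):
        self.custom_token_recognition = custom_token_recognition  # optional user hook
        self.lexical_disambiguation = lexical_disambiguation      # optional post-filter

    def _token_recognition(self, context):
        # Built-in recognizer: split the remaining input on whitespace.
        return context["input_str"][context["position"]:].split()

    def next_tokens(self, context):
        tokens = []
        if self.custom_token_recognition:
            # Give the hook a closure over the default recognizer so it can delegate.
            def get_tokens():
                return self._token_recognition(context)

            custom_tokens = self.custom_token_recognition(context, get_tokens)
            if custom_tokens is not None:
                tokens.extend(custom_tokens)
        else:
            tokens.extend(self._token_recognition(context))
        if self.lexical_disambiguation:
            tokens = self.lexical_disambiguation(context, tokens)
        return tokens

# Usage: uppercase keywords via the hook, otherwise defer to the default recognizer.
def upper_keywords(context, get_tokens):
    return [t.upper() if t in {"if", "else"} else t for t in get_tokens()]

print(Scanner(upper_keywords).next_tokens({"input_str": "if x else y", "position": 0}))
# -> ['IF', 'x', 'ELSE', 'y']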
Compute the cosine similarity between each word in the vocab and each word in the source
def _pairwise_dot_product(self, src_embeds, vocab_embeds, cosine=False): if cosine: src_embeds = F.normalize(src_embeds, dim=-1, p=2) vocab_embeds = F.normalize(vocab_embeds, dim=-1, p=2) # dot product dot_product = torch.einsum("bij,kj->bik", (src_embeds, vocab_embeds)) return dot_product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])", "def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score", "def CosineSimilarity(test_vec, source_vecs):\n cos_dist = 0\n for source_vec in source_vecs:\n cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec)\n return cos_dist / len(source_vecs)", "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:\n return cosine_similarity(word2vec.wv.vectors)", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def 
cosine_similarity(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def calculate_cosine_dist(main_text, new_text):\n wordbag = set(\" \".join([main_text, new_text]).split(\" \"))\n dot_prod = 0\n main_text = main_text.split(\" \")\n new_text = new_text.split(\" \")\n\n for word in wordbag:\n if word in main_text and word in new_text:\n # only worth looking at if word is in both. Otherwise dot prod = 0\n count_A = sum(np.array(main_text) == word)\n count_B = sum(np.array(new_text) == word)\n dot_prod += count_A * count_B\n\n return float(dot_prod) / (len(main_text) * len(new_text))", "def cosine_similarity(x1, x2, dim=1, eps=1e-8):\r\n w12 = torch.sum(x1 * x2, dim)\r\n w1 = torch.norm(x1, 2, dim)\r\n w2 = torch.norm(x2, 2, dim)\r\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def cosine_similarity(vec1, vec2) -> float:\n return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))", "def cosine_similarity_tensorflow(tf_word_representation_A, tf_words_representation_B):\n a_normalized = tf.nn.l2_normalize(tf_word_representation_A, axis=-1)\n b_normalized = tf.nn.l2_normalize(tf_words_representation_B, axis=-1)\n similarity = tf.reduce_sum(\n tf.multiply(a_normalized, b_normalized), \n axis=-1\n )\n \n return similarity", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def get_cosine_similarities(keywords: List[str],\n matrices: Matrices,\n word2onehot: Dict[str, int]\n ) -> None:\n for i in combinations(keywords, 2):\n print(i[0], i[1], cosine_sim(\n matrices.embedding[\n word2onehot[i[0]]], matrices.embedding[word2onehot[i[1]]\n ]\n ))", "def cosine_similarity(a, b):\n cs = dot_product(a, b)/(norm(a) * norm(b))\n return cs", "def cosine_scoring(query, doc_lengths, index):\n idf_dict_vector = compute_idf_vector(len(doc_lengths), index)\n doc_scores = {}\n\n for q in query:\n if q in idf_dict_vector:\n wt_q = idf_dict_vector[q] * query[q]\n else:\n wt_q = 0\n\n for tup in index[q][1:]:\n wf_q = idf_dict_vector[q] * tup[1]\n if tup[0] in doc_scores:\n doc_scores[tup[0]] += wt_q * wf_q\n else:\n doc_scores[tup[0]] = wt_q * wf_q\n\n for doc in doc_scores:\n doc_scores[doc] = doc_scores[doc] / doc_lengths[doc]\n\n return doc_scores", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def cosine_similarity(v1, v2):\n v1_len = 0\n v2_len = 0\n dot_product = 0\n\n for context_id, count in v1.items():\n v1_len += count ** 2\n if context_id in v2:\n dot_product += count*v2[context_id]\n for 
count in v2.values():\n v2_len += count ** 2\n\n v1_len = math.sqrt(v1_len)\n v2_len = math.sqrt(v2_len)\n return dot_product/(v1_len * v2_len)", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def cosine_similarity(vector_x, vector_y):\n if(len(vector_x)!=len(vector_y)):\n raise Exception('Vectors must be the same dimensions')\n \n return 1-np.dot(vector_x,vector_y)/(np.linalg.norm(vector_x)*np.linalg.norm(vector_y))", "def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)", "def cosine_value(self,doc_vector,query_vector):\n\t\tvalue=0;i=0;\n\t\tunit_vector_query=self.unit_vector(query_vector);\n\t\tunit_vector_doc=self.unit_vector(doc_vector);\n\t\titerate=0\n\t\tfor word in query_vector:\n\t\t\tif word in doc_vector:\n\t\t\t\tvalue+=query_vector[word]*doc_vector[word]\n\t\tif unit_vector_query != 0:\n\t\t\tvalue = value/(unit_vector_query*unit_vector_doc)\n\t\telse:\n\t\t\tvalue = 0\n\t\treturn value", "def cosine_similarities(q, k = 10):\n\n query = []\n query.append(q)\n query = [word.lower() for word in query]\n table = str.maketrans('', '', string.punctuation)\n query = [word.translate(table) for word in query]\n query = [re.sub(r'\\d+', 'num', word) for word in query]\n query = [[word for word in data.split() if word not in stopwords] for data in query]\n query = [\" \".join([stemmer.stem(word) for word in data]) for data in query]\n queryVectorizer = vectorizer.transform(query).toarray()\n\n #cosine similarity formula below, not needed because importing cosine_similarity from sklearn\n #cx = lambda a,b : np.inner(a, b)/(np.linalg.norm(a)*np.linalg.norm(b)) \n \n cosine_similarities = cosine_similarity(queryVectorizer[:], tfidf)\n related_docs_indices = cosine_similarities.flatten().argsort()[-k][::-1]\n return (related_docs_indices)", "def fashion_similarity(input_txt, features, keys):\n feature_index = keys.index(input_txt)\n input_vector = features[feature_index]\n\n scores = [similarity_function(input_vector, partner) for partner in features]\n return scores", "def cosin_sim_pairs(a, b):\n wordsA = set(a.keys())\n wordsB = set(b.keys())\n inter = wordsA.intersection(wordsB)\n if(len(inter) == 0):\n return 0.0\n aa, bb, ab = 0, 0, 0\n for k in inter:\n aa += a[k] ** 2\n bb += b[k] ** 2\n ab += a[k] * b[k]\n for k in wordsA - inter:\n aa += a[k] ** 2\n for k in wordsB - inter:\n bb += b[k] ** 2\n return ab / float(math.sqrt(aa) * math.sqrt(bb))", "def cosine_similarity(y_true, y_pred, axis=-1):\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return -math_ops.reduce_sum(y_true * y_pred, axis=axis)", "def predict_cosine_similarities(sess, word_A, words_B):\n\n word_A_id, _ = 
sentence_to_word_ids(word_A, word_to_index)\n words_B_ids, split_sentence = sentence_to_word_ids(words_B, word_to_index)\n\n evaluated_cos_similarities = sess.run(\n cosine_similarities, \n feed_dict={\n tf_word_A_id: word_A_id,\n tf_words_B_ids: words_B_ids\n }\n )\n return evaluated_cos_similarities, split_sentence", "def compute_cosine(pred, obs):\n assert(pred.numel() == obs.numel()), \\\n 'Size of observation and prediction tensors much match. Received: pred %s, obs %s.'%(\n str(pred.size()), str(obs.size()))\n\n def normalise(x, dim=1):\n \"\"\" compute L2 norm and normalise x \"\"\"\n norm = torch.sqrt( torch.pow(x,2.).sum(dim) )\n if dim>0:\n x /= norm.unsqueeze(dim)\n return x\n\n # if we have one-dimensional tensors, compute cosine similarity along first dimension (0).\n # if we have two-dimensional tensors, compute cosine similarity along second dimension (1).\n # if we have three-dimensional tensors, compute cosine similarity along third dimension (2).\n # i.e. first dimension is considered the feature vector (will be reduced to a scalar, the cos.sim.)\n dim = len(pred.size()) - 1\n assert(dim>=0 and dim <=2), \\\n 'This function only computes cosine similarity between 1D, 2D or 3D tensors! Received dim==%i'%(dim)\n\n p_norm = normalise(pred, dim=dim)\n v_norm = normalise(obs, dim=dim)\n return torch.nn.functional.cosine_similarity( p_norm, v_norm, dim=dim )", "def cosineSimilarity(index, nPages, query): \n scores = defaultdict(int)\n terms = query.split()\n qw = {t: tf_idf(1, nPages, len(index[t])) for t in terms if t in index}\n query_len = np.linalg.norm(list(qw.values()))\n for term in qw:\n query_weight = qw[term] / query_len\n for url, weight in index[term]:\n scores[url] += weight * query_weight\n return sorted(scores.items(), key=lambda x: x[1], reverse=True)", "def cosine_similarity(X):\n matrix = X.dot(X.transpose()).todense()\n mat_len = len(matrix)\n norms = [0] * mat_len\n for i in range(0, mat_len):\n norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))\n norm_mat = np.matrix(norms)\n return np.multiply(norm_mat.transpose().dot(norm_mat), matrix)", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def similarity(self, query, documents):\n\n bow_query = self.dictionary.doc2bow(query)\n bow_docs = [self.dictionary.doc2bow(document) for document in documents]\n\n index = SoftCosineSimilarity(bow_docs, self.matrix)\n similarities = index[bow_query]\n\n return similarities", "def by_distance_vectors(self, string_1, string_2):\n string_1 = self.kywrds.by_frequency(string_1)\n string_2 = self.kywrds.by_frequency(string_2)\n model = self.doc2vec_model[0]\n doc_vec_1 = model.infer_vector(string_1)\n doc_vec_2 = model.infer_vector(string_2)\n return spatial.distance.cosine(doc_vec_1, doc_vec_2)", "def cosine_similarity(v1, v2):\n sim = np.sum(v1*v2)/np.sqrt(np.sum(v1**2))/np.sqrt(np.sum(v2**2))\n return sim", "def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))", "def cosine_sim_collections(a, b):\n setab = sorted(set(a) | set(b))\n countera, counterb = Counter(a), Counter(b)\n veca = [countera[element] if element in a else 0 for element in setab]\n vecb = [counterb[element] if element in b else 0 for element in setab]\n return dot(veca, vecb) / (norm(veca) * norm(vecb))", "def cosine_sim_counters(a, b):\n union_ab = sorted((a | 
b).keys())\n veca = np.array([a[element] if element in a else 0 for element in union_ab])\n vecb = np.array([b[element] if element in b else 0 for element in union_ab])\n return np.dot(veca, vecb) / (np.linalg.norm(veca) * np.linalg.norm(vecb))", "def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)", "def compute_cosine_sim(vec1, vec2):\n numer = np.dot(vec1.reshape((300,)), vec2.reshape((300,)))\n denom = np.sqrt(np.sum(np.square(vec1.reshape(300, )))) * np.sqrt(\n np.sum(np.square(vec2.reshape(300, ))))\n\n similarity = numer / denom\n\n return similarity", "def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))", "def getLexicalSimilarityScore(corpus:List[str]):\n\n # Generate the tf-idf vectors for the corpus\n tfidf = TfidfVectorizer()\n X = tfidf.fit_transform(corpus)\n\n # Compute cosine similarity score\n cosine_sim_score = cosine_similarity(X, X) # 2d matrix of scores, each vector against all other vector\n \n # return student's scores only\n return cosine_sim_score[0][1:]", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def get_cosine_similarity(self, query: list):\n question_vector = self.get_vector(query)\n\n return cosine_similarity(question_vector, self.vectors).flatten()", "def cosine_similarity(a, b):\n\n numerator = tf.reduce_sum(tf.multiply(a, b), axis=1)\n denominator = tf.multiply(tf.norm(a, axis=1), tf.norm(b, axis=1))\n cos_similarity = numerator/denominator\n return cos_similarity", "def compute_cosine_similarity(base_vector, target_vector):\n\n np.seterr(all='print')\n cosine_similarity = 0\n\n try:\n base_vector = np.longdouble(base_vector)\n target_vector = np.longdouble(target_vector)\n vector_dot_products = np.dot(base_vector, target_vector)\n vector_norms = np.linalg.norm(base_vector) * np.linalg.norm(target_vector)\n cosine_similarity = np.divide(vector_dot_products, vector_norms)\n\n if vector_norms == 0.0:\n print 'Error in vec in compute_cosine_similarity'\n print target_vector\n\n except Exception, e:\n print(str(e))\n\n return cosine_similarity", "def wordSimilarityRatio(sent_1,sent_2):", "def cosine_similarity(a, b):\n if a.ndim != 1 or b.ndim != 1:\n raise InvalidShapeException(a,b)\n\n if len(a) != len(b):\n raise InvalidLengthException(a,b)\n \n mag_a = np.linalg.norm(a)\n mag_b = np.linalg.norm(b)\n\n return np.dot(a,b)/(mag_a*mag_b)", "def cossim(corpus):\n files = os.listdir()\n vectorizer = TfidfVectorizer()\n trsfm = vectorizer.fit_transform(corpus)\n columns = vectorizer.get_feature_names()\n df_tfidf = pd.DataFrame(trsfm.toarray(), columns = columns, index = corpus)\n out = cosine_similarity(trsfm)\n df_result = pd.DataFrame(out, columns = files, index = files)\n return df_result", "def get_cosin_sim(question, contexts):\r\n cos_sim_for_question = []\r\n for context in contexts :\r\n cv = CountVectorizer(stop_words=MY_STOPWORDS, lowercase=False)\r\n matrix = 
cv.fit_transform(pd.DataFrame([question, context])[0]).toarray()\r\n cos_sim = dot(matrix[0], matrix[1])/(norm(matrix[0])*norm(matrix[1]))\r\n cos_sim_for_question.append(cos_sim)\r\n return pd.Series(cos_sim_for_question)", "def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims", "def get_similarity_score(text_vectors, X, factor=None):\n similarity_scores = cosine_similarity(X, text_vectors, dense_output=True)\n return similarity_scores * factor", "def cosine_distance(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return 1 - (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def cosine(arr1, arr2):\n\n if arr1 is None or arr2 is None:\n return np.NaN\n if not isinstance(arr1, list):\n arr1 = [arr1]\n if any(pd.isnull(arr1)):\n return np.NaN\n if not isinstance(arr2, list):\n arr2 = [arr2]\n if any(pd.isnull(arr2)):\n return np.NaN\n # Create cosine measure object\n measure = sm.Cosine()\n # Call the function to compute the cosine measure.\n return measure.get_raw_score(arr1, arr2)", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def cosine_similarity(document, cluster):\n num = dot(document.vector, cluster.centroid)\n den = linalg.norm(document.vector) * linalg.norm(cluster.centroid)\n\n return num / den", "def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0", "def cosine_collection_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x1` and columns of `x2` transpose\n cos_thetas = tf.linalg.matmul(x1, x2, transpose_b=True)\n pairwise_distances = 1 - cos_thetas\n\n # deal with numerical 
inaccuracies setting small negatives to zero\n pairwise_distances = tf.maximum(pairwise_distances, 0.0)\n\n return pairwise_distances", "def cosine_similarity(user, business):\n return len(user.intersection(business)) / (math.sqrt(len(user)) * math.sqrt(len(business)))", "def calculate_similarity(self, request_vocab, request_vocab_full, weighting, measure):\n n_q = 0\n nb_docs = len(self.documents)\n sim = {document_id: 0 for document_id in self.documents.keys()}\n n_d = weighting.nd(self.documents, request_vocab, self.index, self.terms)\n\n for request_term in request_vocab:\n try:\n term_id = self.terms[request_term]\n\n tf_q = term_frequency(request_term, request_vocab_full) # term frequency in request\n ptf_q = weighting.ptf(tf_q)\n\n df = document_frequency(term_id, self.index)\n pdf = weighting.pdf(df, nb_docs)\n\n w_t_q = ptf_q * pdf # tf*idf\n n_q += w_t_q * w_t_q\n\n posting_list = self.index[term_id]\n for doc in posting_list:\n doc_id = doc[0]\n tf_d = doc[1] # term frequency in document\n ptf_d = weighting.ptf(tf_d)\n w_t_d = n_d[doc_id] * ptf_d * pdf\n sim[doc_id] += w_t_q * w_t_d\n\n except KeyError:\n # We get here when the query has a term which is not in the index, so we ignore it.\n pass\n\n for j in self.documents.keys():\n # compute similarity between request vector and documents vectors\n if sim[j] != 0:\n measure = SimilarityMeasure(measure)\n sim[j] = measure.compute(sim[j], n_d[j], n_q)\n\n sorted_docs = []\n sorted_sim = []\n # sort documents by similarity (greatest to lowest)\n for doc_id, similarity in sorted(sim.items(), key=lambda x: x[1], reverse=True):\n sorted_docs.append(doc_id)\n sorted_sim.append(similarity)\n return sorted_docs, sorted_sim", "def spacy_similarity(docs: List[Doc],\n text: str,\n nlp: Language,\n norm: Union[None, str] = \"l2\") -> List[float]:\n\n def normalise(norm):\n def _normalise(v):\n # Numba linear algebra operations are only supported on\n # contiguous arrays\n v = np.ascontiguousarray(v)\n return normalise_vector(v, order)\n if norm == 'l1':\n order = 1\n if norm == 'l2':\n order = 2\n return _normalise\n\n query_doc = nlp(text) # Convert text into a spaCy Doc object\n u = query_doc.vector\n vectors = list(map(\n lambda doc: np.average([np.array(token.vector_) for token\n in doc._.filtered_matches], axis=0), docs))\n if norm:\n func = normalise(norm)\n u = func(u)\n vectors = list(map(lambda v: func(v), vectors))\n scores = list(map(lambda v: cosine_similarity(u, v)\n if not(np.isnan(v).any()) else 0, vectors))\n return scores", "def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)", "def cosine_similarity(u: np.ndarray, v: np.ndarray) -> np.float64:\n assert u.shape[0] == v.shape[0], \"Input vector must have same shape.\"\n uv = 0\n uu = 0\n vv = 0\n for i in range(u.shape[0]):\n uv += u[i] * v[i]\n uu += u[i] * u[i]\n vv += v[i] * v[i]\n cosine_score = 0\n if uu != 0 and vv != 0:\n cosine_score = uv / np.sqrt(uu * vv)\n return np.float64(cosine_score)", "def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))", "def WordSim(self,testDF,listCourse,inCourse):\r\n #Obtain a single vector embedding for each course description 
(calculated by taking an average of each word \r\n #embedding that makes up each description)\r\n \r\n #Get the embedding from the dictionary for the list (reference) course\r\n aVec = self.VDF[\"Word\"][listCourse]\r\n #Calculate the embedding with the doc2Vec model.\r\n bVec = self._WordSimAveVec(testDF,inCourse)\r\n #Convert vectors to column vectors to be fed into the cosine_similarity function.\r\n A = np.expand_dims(aVec,0)\r\n B = np.expand_dims(bVec,0)\r\n #Calculate the cosine similarity between the two vectors.\r\n sim = cosine_similarity(A,B)\r\n return float(sim)", "def cosine_sim(news_data_dict: dict, song_data_dict: dict):\n cosine_dict = defaultdict(list)\n\n for news_yr, news_txt in news_data_dict.items():\n news_txt_flat = []\n for nt in news_txt:\n news_txt_flat += nt\n news_tf = Counter(news_txt_flat)\n\n if news_yr <= 2011:\n song_txt_flat = []\n for i in range(5):\n song_yr = news_yr+i\n for st in song_data_dict[song_yr]:\n song_txt_flat += st\n song_tf = Counter(song_txt_flat)\n\n all_words = list(set(song_txt_flat + news_txt_flat))\n news_array = []\n song_array = []\n for aw in all_words:\n if aw in news_tf.keys():\n news_array.append(news_tf[aw])\n else:\n news_array.append(0)\n if aw in song_tf.keys():\n song_array.append(song_tf[aw])\n else:\n song_array.append(0)\n\n norm_news = norm(np.asarray(news_array))\n norm_song = norm(np.asarray(song_array))\n\n cosine = np.dot(news_array, song_array) / (norm_news * norm_song)\n cosine = round(cosine, 3)\n cosine_dict[news_yr].append(cosine)\n\n return cosine_dict", "def _cosine_similarity_update(preds: Tensor, target: Tensor) ->Tuple[Tensor, Tensor]:\n _check_same_shape(preds, target)\n preds = preds.float()\n target = target.float()\n return preds, target", "def evaluate_similarity(kv: KeyedVectors, X, y):\n mean_vector = np.mean(kv.vectors, axis=0, keepdims=True)\n missing_words = np.sum(np.isin(X, kv.index2word, invert=True))\n if missing_words > 0:\n logging.warning(\"Missing {} words. 
Will replace them with mean vector\".format(missing_words))\n get = np.vectorize(gensim_helper.get_vector, signature='(),(),(m)->(m)')\n timer = mytimer.Timer(\"getting vectors for words\")\n wv_x = get(X, kv, mean_vector)\n timer.stop()\n a = wv_x[:, 0]\n b = wv_x[:, 1]\n # timer = mytimer.Timer()\n # a = np_helper.normalize_over_cols_2d(a)\n # b = np_helper.normalize_over_cols_2d(b)\n # scores = np.diag(np.matmul(a, b.T))\n # timer.stop()\n # print(scores.shape)\n #\n # A = np.vstack(kv.get(word, mean_vector) for word in X[:, 0])\n # B = np.vstack(kv.get(word, mean_vector) for word in X[:, 1])\n timer = mytimer.Timer()\n scores = np.array([v1.dot(v2.T) / (np.linalg.norm(v1) * np.linalg.norm(v2)) for v1, v2 in zip(a, b)])\n timer.stop()\n # print(scores.shape)\n return scipy.stats.spearmanr(scores, y)", "def cosine_dist(d1, d2):\n suma=0\n for x in d1:\n if x in d2:\n suma+=(d1[x]*d2[x])\n sqrt1=0\n sqrt2=0\n for i in d1:\n sqrt1+=math.pow(d1[i],2)\n for i in d2:\n sqrt2+=math.pow(d2[i],2)\n return 1-suma/(math.sqrt(sqrt1)*math.sqrt(sqrt2))", "def cosine_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x_1` and rows of `x_2`\n # \"ij,ij->i\" := output[i] = sum_j x1[i, j] * x2[i, j]\n cos_thetas = tf.linalg.einsum(\"ij,ij->i\", x1, x2)\n cos_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n cos_distances = tf.maximum(cos_distances, 0.0)\n\n return cos_distances", "def test_cosine_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity(vector1, vector1)\n score12 = cosine_similarity(vector1, vector2)\n score22 = cosine_similarity(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def idf_modified_cosine(x, y, idf):\n result = 0\n try:\n tf_x = [dict([word, int(tf)] for word, tf in dict(\n np.array(np.unique(x, return_counts=True)).T).items())][0]\n tf_y = [dict([word, int(tf)] for word, tf in dict(\n np.array(np.unique(y, return_counts=True)).T).items())][0]\n result = sum([tf_x[w] * tf_y[w] * (idf[w]**2)\n\t\t for w in tf_x.keys() & tf_y.keys()]) / ((\n sum([(tf_x[w] * idf[w])**2\n for w in tf_x.keys()])**0.5) * (\n sum([(tf_y[w] * idf[w])**2\n for w in tf_y.keys()])**0.5))\n except:\n print(r'x:', x, r'y:', y)\n pass\n return result", "def cosineSimilarity(dict1,dict2):\n product1 = 0.0\n product2 = 0.0\n for key in dict1.keys():\n product1 += (dict1[key] * dict1[key])\n for key in dict2.keys():\n product2 += (dict2[key] * dict2[key])\n product1 = math.sqrt(product1)\n product2 = math.sqrt(product2)\n fenmu = product1 * product2\n fenzi = 0.0\n for key in dict1.keys():\n if key in dict2:\n fenzi += (dict1[key] * dict2[key])\n cosSim = fenzi / fenmu\n return cosSim", "def compute_cosine_sim(vec1, vec2):\r\n\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.dot(vec1, vec2)/(norm(vec1) * norm(vec2))", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def _cosine_matrix(self, x1, x2):\n # 
expand h1 shape to (batch_size, x1_timesteps, 1, embedding_size)\n x1 = K.expand_dims(x1, axis=2)\n # expand x2 shape to (batch_size, 1, x2_timesteps, embedding_size)\n x2 = K.expand_dims(x2, axis=1)\n # cosine matrix (batch_size, h1_timesteps, h2_timesteps)\n cos_matrix = self._cosine_similarity(x1, x2)\n return cos_matrix", "def cosine_sim(im, s):\n return im.mm(s.t()) #image.mm(sentence.t()) & mm() Performs a matrix multiplication of the matrices ", "def test_cosine_similarity():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = cosine_similarity.py_func(vector1, vector1)\n score12 = cosine_similarity.py_func(vector1, vector2)\n score22 = cosine_similarity.py_func(vector2, vector2)\n\n assert score12 == 2 / np.sqrt(2 * 4), \"Expected different score.\"\n assert score11 == score22 == 1.0, \"Expected different score.\"", "def cosine_sim(a: np.ndarray, \n b: np.ndarray \n ) -> float:\n return (\n 1 + a.dot(b) / \n (np.linalg.norm(a)*np.linalg.norm(b))\n ) / 2", "def cosine_similarity(u, v):\n\n distance = 0.0\n\n ### START CODE HERE ###\n # Compute the dot product between u and v (≈1 line)\n dot = np.dot(u, v)\n # Compute the L2 norm of u (≈1 line)\n norm_u = np.sqrt(np.dot(u, u))\n\n # Compute the L2 norm of v (≈1 line)\n norm_v = np.sqrt(np.dot(v, v)) ##np.linalg.norm(u)\n # Compute the cosine similarity defined by formula (1) (≈1 line)\n cosine_similarity = dot / (norm_u * norm_v)\n ### END CODE HERE ###\n\n return cosine_similarity", "def _do_action_calculate_similarity_cosine_express(self):\n self._run_express_job(\"com.directv.recommend.express.CosineCFTrainer\")\n self._scan_table(\"content/item_item_similarities\")", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def score(self, sentence):\n score = 0.0\n V = len(self.f1) # vocabulary size\n for token in sentence:\n if token in self.f1: score += self.f1[token]\n else: score -= math.log10(self.total + V)\t\t # OOV \n return score", "def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 
0 or sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))", "def cosine(X,Y=None,dense_output=True):\n #Reemplace NaN with Zero\n X[np.isnan(X)] = 0\n if Y is not None:\n Y[np.isnan(Y)] = 0\n \n return cosine_similarity(X,Y,dense_output)", "def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))", "def dist_cosine(src, dest, qgraml=None):\n\treturn 1 - sim_cosine(src, dest, qgraml)", "def cosineDistanceMatrix():\n\n\tmatrix = movieMatrix()\n\tsimilarity = np.dot(matrix, matrix.T)\n\tsquareMag = np.diag(similarity)\n\tinvSquareMag = 1/squareMag\n\tinvSquareMag[np.isinf(invSquareMag)]=0\n\tinvMag = np.sqrt(invSquareMag)\n\tcosine = similarity * invMag\n\tcosine = cosine.T * invMag\n\treturn cosine", "def embedding_similarity(model, validation_pairs):\n scores = dict()\n for pair in validation_pairs:\n author1 = pair[0]\n author2 = pair[1]\n scores[author1 + ' ' +\n author2] = cosine_similarity(model.wv[author1], model.wv[author2])\n return scores", "def similarity_vec(self, vec1: numpy.ndarray, vec2: numpy.ndarray, metric='cosine') -> float:\n if numpy.count_nonzero(vec1) == 0 or numpy.count_nonzero(vec2) == 0:\n if metric == 'cosine':\n return 0.\n else:\n return 0.\n\n vec1 = vec1.reshape((1, -1))\n vec2 = vec2.reshape((1, -1))\n if metric == 'cosine':\n return (1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1))[0]\n else:\n return distance.cdist(vec1, vec2, metric=metric).reshape(-1)[0]", "def similarity(query,id):\n similarity = 0.0\n for term in query:\n if term in dictionary:\n similarity += inverse_document_frequency(term)*imp(term,id)\n similarity = similarity / length[id]\n return similarity", "def getSemanticSimilarityScore(corpus:List[str]):\n\n model_ans = corpus[0]\n model_ans_embedding = model.encode(model_ans, convert_to_tensor=True)\n\n student_responses = corpus[1:]\n semantic_score = []\n\n for student_res in student_responses:\n student_res_embedding = model.encode(student_res, convert_to_tensor=True)\n\n #Compute semantic cosine-similarity\n cosine_score = util.pytorch_cos_sim(model_ans_embedding, student_res_embedding) # cosine_score is a 2D tensor object\n semantic_score.append(cosine_score.item())\n return semantic_score", "def find_nn_cos(self, v, Wv, nnk=10):\n Wv = Wv[:self.dataClass.vocab.size,:]\n dot_products = np.dot(Wv, v)\n l2norm_products = np.multiply(np.linalg.norm(Wv, axis=1),np.linalg.norm(v))\n cos_sim = np.divide(dot_products,l2norm_products)\n \n nns_idx = np.argsort(cos_sim)[-nnk:][::-1]\n \n similarities = np.take(cos_sim, nns_idx)\n nns_words = self.dataClass.vocab.ids_to_words(nns_idx)\n\n return nns_idx, nns_words, similarities" ]
[ "0.8221675", "0.7763575", "0.74975723", "0.7489508", "0.7445475", "0.7369543", "0.73244905", "0.728064", "0.7235221", "0.7210119", "0.7207516", "0.7196803", "0.71352804", "0.7082981", "0.7055071", "0.70352226", "0.69762695", "0.6966904", "0.69279665", "0.6920698", "0.69188845", "0.6914976", "0.691342", "0.68816966", "0.68555546", "0.6767521", "0.67609054", "0.67590904", "0.67478514", "0.6725601", "0.67243886", "0.67234236", "0.6721793", "0.6689478", "0.6687348", "0.6666908", "0.66511875", "0.6618218", "0.6608107", "0.66076314", "0.6594659", "0.6571447", "0.65589863", "0.65543824", "0.6548737", "0.65466386", "0.6536999", "0.6519954", "0.64639467", "0.64544034", "0.6451373", "0.64455837", "0.6436026", "0.64315027", "0.64235336", "0.64161366", "0.64112216", "0.63939875", "0.63870686", "0.6386919", "0.63868403", "0.6383936", "0.6367464", "0.63614416", "0.6346451", "0.63369346", "0.63361514", "0.63253963", "0.6312889", "0.6310572", "0.629859", "0.6294341", "0.6282764", "0.62804294", "0.6268411", "0.626184", "0.62356097", "0.6230095", "0.621914", "0.6217071", "0.62166286", "0.619805", "0.61838853", "0.61744964", "0.6155539", "0.615468", "0.6152895", "0.6150306", "0.614859", "0.6147206", "0.61458975", "0.6142963", "0.61280733", "0.6114867", "0.61130846", "0.6105478", "0.608348", "0.6074837", "0.60463375", "0.6041189", "0.6039977" ]
0.0
-1
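The row above pairs its docstring query with an einsum-based score: every source embedding (batch x seq_len x dim) is compared against the whole vocabulary table (vocab_size x dim), and L2-normalizing both operands first turns the raw dot products into cosine similarities. The next row's document reuses that same dot product to obtain squared Euclidean distances through the expansion ||s - v||^2 = ||s||^2 + ||v||^2 - 2<s, v>. Below is a minimal standalone PyTorch sketch of both computations; the function name mirrors the row's document, while the toy tensor shapes and variable names are assumptions chosen only for illustration.

import torch
import torch.nn.functional as F

def pairwise_dot_product(src_embeds, vocab_embeds, cosine=False):
    # src_embeds: (batch, seq_len, dim); vocab_embeds: (vocab_size, dim)
    if cosine:
        src_embeds = F.normalize(src_embeds, dim=-1, p=2)
        vocab_embeds = F.normalize(vocab_embeds, dim=-1, p=2)
    # scores[b, i, k] = <src_embeds[b, i], vocab_embeds[k]>
    return torch.einsum("bij,kj->bik", src_embeds, vocab_embeds)

batch, seq_len, vocab_size, dim = 2, 5, 11, 8
src = torch.randn(batch, seq_len, dim)
vocab = torch.randn(vocab_size, dim)

cos = pairwise_dot_product(src, vocab, cosine=True)
print(cos.shape)                       # torch.Size([2, 5, 11])
print(float(cos.max()) <= 1.0 + 1e-5)  # cosine scores never exceed 1

# Squared Euclidean distance from the same dot product, via the norm expansion.
sq_dist = (
    (src ** 2).sum(-1, keepdim=True)        # (batch, seq_len, 1)
    + (vocab ** 2).sum(-1).view(1, 1, -1)   # (1, 1, vocab_size)
    - 2 * pairwise_dot_product(src, vocab)
)
ref = ((src.unsqueeze(2) - vocab.view(1, 1, vocab_size, dim)) ** 2).sum(-1)
print(torch.allclose(sq_dist, ref, atol=1e-4))  # True up to float32 rounding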
Compute the euclidean distance between each word in the vocab and each word in the source.
def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False): # compute square norm to avoid compute all the directions vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2 src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2 # dot product dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds) # reshape for broadcasting vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1 # compute squared difference sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product if squared: return sq_norm else: # relu + epsilon for numerical stability sq_norm = F.relu(sq_norm) + 1e-20 # take the square root return sq_norm.sqrt()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def by_distance_vectors(self, string_1, string_2):\n string_1 = self.kywrds.by_frequency(string_1)\n string_2 = self.kywrds.by_frequency(string_2)\n model = self.doc2vec_model[0]\n doc_vec_1 = model.infer_vector(string_1)\n doc_vec_2 = model.infer_vector(string_2)\n return spatial.distance.cosine(doc_vec_1, doc_vec_2)", "def diff(self, word1, word2):\n v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]\n return v / np.linalg.norm(v)", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def wordMoversDistance(model, document1, document2):\n # If pyemd C extension is available, import it.\n # If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance\n from pyemd import emd\n # Remove out-of-vocabulary words.\n len_pre_oov1 = len(document1)\n len_pre_oov2 = len(document2)\n document1 = [token for token in document1 if token in model]\n document2 = [token for token in document2 if token in model]\n diff1 = len_pre_oov1 - len(document1)\n diff2 = len_pre_oov2 - len(document2)\n if diff1 > 0 or diff2 > 0:\n print('Remove ' + str(diff1) + ' and ' + str(diff2) + ' OOV words from document 1 and 2 ('\n 'respectively).')\n return float('inf')\n\n if not document1 or not document2:\n print(\"At least one of the documents had no words that were in the vocabulary. Aborting (returning \"\n \"inf).\")\n return float('inf')\n\n dictionary = Dictionary(documents=[document1, document2])\n vocab_len = len(dictionary)\n\n if vocab_len == 1:\n # Both documents are composed by a single unique token\n return 0.0\n\n # Sets for faster look-up.\n docset1 = set(document1)\n docset2 = set(document2)\n\n # Compute distance matrix.\n distance_matrix = zeros((vocab_len, vocab_len), dtype=double)\n for i, t1 in dictionary.items():\n if t1 not in docset1:\n continue\n\n for j, t2 in dictionary.items():\n if t2 not in docset2 or distance_matrix[i, j] != 0.0:\n continue\n\n # Compute Euclidean distance between word vectors.\n distance_matrix[i, j] = distance_matrix[j, i] = sqrt(np_sum((model[t1] - model[t2]) ** 2))\n\n if np_sum(distance_matrix) == 0.0:\n # `emd` gets stuck if the distance matrix contains only zeros.\n print('The distance matrix is all zeros. 
Aborting (returning inf).')\n return float('inf')\n\n def nbow(document):\n d = zeros(vocab_len, dtype=double)\n nbow = dictionary.doc2bow(document) # Word frequencies.\n doc_len = len(document)\n for idx, freq in nbow:\n d[idx] = freq / float(doc_len) # Normalized word frequencies.\n return d\n\n # Compute nBOW representation of documents.\n d1 = nbow(document1)\n d2 = nbow(document2)\n\n # Compute WMD.\n return emd(d1, d2, distance_matrix)", "def euclidean(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( np.sqrt( ( x.flat_cpt() - y.flat_cpt() )**2 ) )\n\treturn distance", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def euclidean_metric(x, y):\n if len(x) != len(y):\n raise ValueError(\"Incompatible dimensions.\")\n return np.linalg.norm(x - y)\n \n # Or a slightly longer way:\n return np.sqrt(np.sum(np.subtract(x, y)**2))\n # Or the longest/worst way:\n total = 0\n for i in xrange(len(x)):\n term = x[i] - y[i]\n term = term**2\n total += term\n total = np.sqrt(total)\n return total", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def _distance_from_weights(self, data):\n input_data = array(data)\n weights_flat = self._weights.reshape(-1, self._weights.shape[2])\n input_data_sq = power(input_data, 2).sum(axis=1, keepdims=True)\n weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n cross_term = dot(input_data, weights_flat.T)\n return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)", "def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def distance(dest_words, page_words):\n dest_hist = histogram(dest_words)\n page_hist = histogram(page_words)\n\n\n # positive difference means the word appears more on the destination\n difference_hist = {}\n for word in dest_hist:\n difference_hist[word] = dest_hist[word] - page_hist.get(word, 0.0)\n\n dist = 0.0\n for word in difference_hist:\n dist += abs(difference_hist[word])\n return dist", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = 
list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def test_distances(self):\n distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances('dog.n.01')\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def hellinger_distance(doca, docb, axis=1):\n return np.sum((doca**.5 - docb**.5)**2, axis=axis)", "def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def euclidean_distance(x1, x2):\n\tdistance = 0\n\t# Squared distance between each coordinate\n\tfor i in range(len(x1)):\n\t\tdistance += pow((x1[i], x2[i]), 2)\n\treturn math.sqrt(distance)", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def euclidean_distance(v, w):\n\n # Guard against empty lists.\n if len(v) is 0:\n return 0\n\n # Note that this is the same as vector subtraction.\n differences = [v[idx] - w[idx] for idx in range(len(v))]\n squares = [diff ** 2 for diff in differences]\n sum_of_squares = sum(squares)\n\n return 1 / (1 + math.sqrt(sum_of_squares))", "def euclidean_distance(x, y):\n distance = 0\n for i, j in zip(x, y):\n distance += (i - j) ** 2\n return math.sqrt(distance)", "def euclidean_dist(self, example1, example2, length):\n dist = 0\n for i in xrange(example1.size - 1):\n nominal = isinstance(self.attributes[i], NominalAttribute)\n if not nominal:\n dist += (example1[i] - example2[i])**2\n elif nominal and (example2[i] != example1[i]):\n dist += 1\n return sqrt(dist)", "def euclidean_distance(a, b, axis=1):\n return np.sum((a-b)**2, axis=axis)**.5\n #NOTE: the below be preferred for \"big\" comparisons in dim 1 of b\n #return np.apply_along_axis(np.linalg.norm, axis, doca-docb)", 
"def get_distance(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n for i in range(len(vec)):\r\n sum += (self.weights[i] - vec[i]) * (self.weights[i] - vec[i])\r\n return np.sqrt(sum)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def euclideanDistance(data1, data2):\n distance = 0\n for x in range(14):\n data1[x] = truncate(data1[x], 3)\n data2[x] = truncate(data2[x], 3)\n dist = truncate((data1[x] - data2[x]) ** 2, 3)\n distance = truncate(distance + dist, 3)\n\n # Final Euclidean distance between train poing and test point:\n distance = truncate(np.sqrt(distance), 3)\n return distance", "def embedding_distance(embedding_1: Embedding,\n embedding_2: Embedding,\n distance_metric: DistanceMetric) -> float:\n distance = embedding_distance_bulk(embedding_1.reshape(\n 1, -1), embedding_2.reshape(1, -1), distance_metric=distance_metric)[0]\n return distance", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j", "def euclidean_distance(vector_x, vector_y):\n if len(vector_x) != len(vector_y):\n raise Exception('Vectors must be same dimensions')\n return math.sqrt(sum((vector_x[dim] - vector_y[dim]) ** 2 for dim in range(len(vector_x))))", "def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def euclideanDistance(a, b):\n vec = [pow(a[i] - b[i], 2) for i in range(len(a)) if None not in [a[i],b[i]]]\n return (sum(vec) / len(vec)) if len(vec) > 0 else NaN", "def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))", "def wordSimilarityRatio(sent_1,sent_2):", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def euclidian_distance(x: np.arrays, y: np.arrays):\r\n diff = x - np.mean(y, axis=0)\r\n return np.sqrt(np.dot(diff.T, diff))", "def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)", "def euclidean_dist(X, y):\n return np.sqrt(np.sum((X - y) ** 2, 1)) # broadcasted calculations", "def pairwise_distances(embeddings, squared=False):\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = tf.expand_dims(square_norm, 1) - 2.0 * \\\n dot_product + tf.expand_dims(square_norm, 0)\n\n distances = tf.maximum(distances, 0.0)\n\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n\n return distances", "def euclidean_distances(X, Y):\r\n\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = 
np.sqrt(np.sum((X[X_idx,:]-Y[Y_idx,:])**2))\r\n \r\n return D", "def euclideanDistance(loc1, loc2):\n return math.sqrt(sum([(a - b) ** 2 for a, b in zip(loc1, loc2)]))", "def compute_one_tfidf_distance(arg):\n all_terms = arg.global_state['all_terms']\n idf_map = arg.global_state['idf_map']\n\n vec_a = get_tf_idf_vector(arg.tags_a, all_terms, idf_map)\n vec_b = get_tf_idf_vector(arg.tags_b, all_terms, idf_map)\n # Cosine of angle between term vectors.\n dot_product = sum(x * y for x, y in zip(vec_a, vec_b))\n norms_product = (math.sqrt(sum(x*x for x in vec_a)) *\n math.sqrt(sum(y*y for y in vec_b)))\n return (arg.shred_a_id, arg.shred_b_id, 1 - dot_product / norms_product)", "def calculate_cosine_dist(main_text, new_text):\n wordbag = set(\" \".join([main_text, new_text]).split(\" \"))\n dot_prod = 0\n main_text = main_text.split(\" \")\n new_text = new_text.split(\" \")\n\n for word in wordbag:\n if word in main_text and word in new_text:\n # only worth looking at if word is in both. Otherwise dot prod = 0\n count_A = sum(np.array(main_text) == word)\n count_B = sum(np.array(new_text) == word)\n dot_prod += count_A * count_B\n\n return float(dot_prod) / (len(main_text) * len(new_text))", "def euclidean_distance(arr1,arr2):\n distance = np.sqrt(np.sum((arr1 - arr2)**2))\n return distance", "def euclidean(x, y):\n ed = np.sqrt(np.sum((x-y)**2))\n # print ed\n return ed", "def calc_distance(sentence_1, sentence_2):\n print(sentence_1)\n print(sentence_2)\n # sentence_1 = sentence_1.replace(\"'\", \"\")\n # sentence_2 = sentence_2.replace(\"'\", \"\")\n sentence_1 = sentence_1.replace(\",\", \"\")\n sentence_2 = sentence_2.replace(\",\", \"\")\n sentence_1 = sentence_1.replace(\";\", \"\")\n sentence_2 = sentence_2.replace(\";\", \"\")\n print(sentence_1)\n print(sentence_2)\n sentence_1 = sentence_1.lower().split()\n sentence_2 = sentence_2.lower().split()\n sentence_1 = [w for w in sentence_1 if w not in stop_words]\n sentence_2 = [w for w in sentence_2 if w not in stop_words]\n return wordMoversDistance(trained_model, sentence_1, sentence_2)", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def euclidean_distance(p1, p2):\n distance = 0\n for i in range(len(p1)-1):\n distance += (p1[i]-p2[i])**(2)\n return sqrt(distance)", "def calc_distances_in_embedding(cluster, embedding, reference_name=None):\n\n if reference_name is None:\n reference_name = get_central_pc_name(embedding, cluster)\n\n assert reference_name in cluster\n\n ref_coords = embedding.loc[reference_name]\n w = embedding.copy()\n w = w-ref_coords\n return w.apply(eucl_norm, axis=1)", "def pairwise_euclidean_distance(x, y):\n m, n = x.size(0), y.size(0)\n dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(1, 
keepdim=True).expand(n, m).t() \\\n - 2 * torch.matmul(x, y.t())\n # for numerical stability\n dist_mat = dist_mat.clamp(min=1e-12).sqrt()\n return dist_mat", "def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score", "def euclidean_distance(vec):\n\n x, y = vec\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(x - y), axis=-1, keepdims=True))\n return distance", "def _dist(x, a, w):\n m_xa = 0\n for k in range(len(x)):\n m_xa += (x[k] - a[k])**2 * w[k]\n return m_xa", "def euclidean_distance(list1, list2):\n # Make sure we're working with lists\n # Sorry, no other iterables are permitted\n assert isinstance(list1, list)\n assert isinstance(list2, list)\n\n dist = 0\n\n # 'zip' is a Python builtin, documented at\n # <http://www.python.org/doc/lib/built-in-funcs.html>\n for item1, item2 in zip(list1, list2):\n dist += (item2 - item1)**2\n return math.sqrt(dist)", "def dist(self, a, b, l):\n # works for non-arrays\n return sum( ((i-j)/k)**2 for i,j,k in zip(a, b, l) )", "def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def closest_words(self, word, n):\n\n vector = self.get_vector(word)\n\n if vector is None:\n return None\n\n distances = [\n (w, torch.dist(vector, self.get_vector(w)).item())\n for w in self.glove.itos\n ]\n\n return [w for w, v in sorted(distances, key=lambda w: w[1])[:n]]", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]", "def distance(v, w):\n\treturn magnitude(vector_subtract(v, w))", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def test_distances_with_vector_input(self):\n input_vector = self.vectors['dog.n.01']\n distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances(input_vector)\n self.assertEqual(len(distances), len(self.vectors.vocab))\n 
self.assertTrue(np.allclose(distances[-1], 10.04756))", "def distance(v: Vector, w: Vector) -> float:\n return magnitude(subtract(v, w))", "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def edist(a, b):\n return euclidean(np.array(a), np.array(b))", "def cosine_distance(x1, x2, dim=1, eps=1e-8):\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return 1 - (w12 / (w1 * w2).clamp(min=eps)).squeeze()", "def euclidean(p1, p2):\n return p1.distance(p2)", "def euclidean_dist_vec(y1, x1, y2, x2):\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def euclidean_squared_distance(input1, input2):\n m, n = input1.size(0), input2.size(0)\n mat1 = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n)\n mat2 = torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat = mat1 + mat2\n distmat.addmm_(input1, input2.t(), beta=1, alpha=-2)\n return distmat", "def test_euclidean_distance(self):\n knn = Knn(n_neighbors=3)\n knn.fit(np.array(little_X), little_Y)\n d = knn._euclidean_distance(np.array([5, 6]))\n assert (d == [5,5]).all(), \"Euclidean Distance is not correct\"", "def edit_distance(left_word: str, right_word: str) -> int:\n if len(left_word) != len(right_word):\n raise ValueError(\"Word ladder words must be same length\")\n\n distance = 0;\n for i in range(len(left_word)):\n if left_word[i] != right_word[i]:\n distance += 1\n return distance", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound", "def node_distance(self, inputs):\n tmp = 0\n for i in len(self.inputs):\n tmp += np.power(data[i] - self.weights[i], 2)\n return np.sqrt(tmp)", "def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word 
embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def cosine_collection_distance(x1, x2):\n x1 = tf.cast(x1, dtype=tf.float32)\n x2 = tf.cast(x2, dtype=tf.float32)\n\n # dot product between rows of `x1` and columns of `x2` transpose\n cos_thetas = tf.linalg.matmul(x1, x2, transpose_b=True)\n pairwise_distances = 1 - cos_thetas\n\n # deal with numerical inaccuracies setting small negatives to zero\n pairwise_distances = tf.maximum(pairwise_distances, 0.0)\n\n return pairwise_distances", "def euclidean(x,y): \n\treturn np.sqrt(np.sum((x-y)**2))", "def distance(v, w):\n return magnitude_of_vector(vector_subtract(v, w))", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def squared_distance(v: Vector, w: Vector) -> float:\n return sum_of_squares(subtract(v, w))", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)", "def query(self, word):\n node = self\n sums = 0\n for c in word:\n node = node[c]\n sums += node.go\n return sums", "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. 
This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw", "def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:\n\n # Calculate distances to all other words\n\n word_embedding = embeddings[word_to_idx[target_word.lower()]]\n distances = []\n for word, index in word_to_idx.items():\n if word == \"<MASK>\" or word == target_word:\n continue\n distances.append((word, torch.dist(word_embedding, embeddings[index])))\n\n results = sorted(distances, key=lambda x: x[1])[1:n + 2]\n return results", "def euclidean_distance(cls, y, y_target):\n return np.linalg.norm(y - y_target)" ]
[ "0.7123041", "0.6798625", "0.6672034", "0.64250857", "0.6401553", "0.6378859", "0.63724816", "0.6371111", "0.6301999", "0.6265582", "0.6255312", "0.62346095", "0.62243164", "0.62200135", "0.62105745", "0.6194787", "0.6186329", "0.6180648", "0.6174867", "0.61723065", "0.61706024", "0.61403537", "0.60908735", "0.6083031", "0.60731995", "0.60549587", "0.6042237", "0.60385555", "0.6035777", "0.6027672", "0.6026617", "0.6021991", "0.60111797", "0.6000642", "0.6000549", "0.5985946", "0.598373", "0.59683734", "0.596283", "0.5952152", "0.5943208", "0.5917211", "0.5916726", "0.5915161", "0.59087217", "0.5883761", "0.5880354", "0.5879503", "0.58792555", "0.5863927", "0.5858227", "0.5851442", "0.58493894", "0.58481455", "0.58412945", "0.5837179", "0.582166", "0.5821548", "0.57990813", "0.5798835", "0.5796779", "0.57944715", "0.5793499", "0.5792241", "0.57746077", "0.57584566", "0.575799", "0.5751091", "0.57509446", "0.5739756", "0.5729299", "0.5728548", "0.5728072", "0.57270205", "0.5723192", "0.5719019", "0.57170063", "0.5715869", "0.5711535", "0.57112443", "0.5707308", "0.570199", "0.5701345", "0.5689651", "0.5689202", "0.5687206", "0.5683343", "0.5682952", "0.56799656", "0.56788427", "0.5677468", "0.5668401", "0.5667825", "0.56646323", "0.5664006", "0.56537026", "0.5650537", "0.564709", "0.5647007", "0.5645305" ]
0.747233
0
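
The `_pairwise_distance` document above rests on the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * dot(a, b), which trades an explicit (batch, seq, vocab, dim) difference tensor for a single matrix product. Below is a minimal standalone sketch of the same trick, assuming plain torch tensors and no surrounding class; the names are illustrative, not taken from the dataset record.

import torch
import torch.nn.functional as F

def pairwise_distance(src_embeds, vocab_embeds, squared=False):
    # src_embeds: (batch, seq_len, dim), vocab_embeds: (vocab, dim)
    dot_product = src_embeds @ vocab_embeds.t()                     # (batch, seq_len, vocab)
    src_sq_norm = src_embeds.pow(2).sum(dim=-1, keepdim=True)       # (batch, seq_len, 1)
    vocab_sq_norm = vocab_embeds.pow(2).sum(dim=-1).view(1, 1, -1)  # (1, 1, vocab)
    sq_dist = src_sq_norm + vocab_sq_norm - 2 * dot_product
    if squared:
        return sq_dist
    # clamp tiny negatives caused by floating-point error before the sqrt
    return (F.relu(sq_dist) + 1e-20).sqrt()

# quick sanity check against torch.cdist
src = torch.randn(2, 5, 8)
vocab = torch.randn(100, 8)
dist = pairwise_distance(src, vocab)             # (2, 5, 100)
ref = torch.cdist(src, vocab.expand(2, -1, -1))  # same shape
print(torch.allclose(dist, ref, atol=1e-4))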
If your model receives inputs in another way, or your computation differs from this example, simply override this method.
def forward_step(self, batch):
    input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1))
    attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1))
    outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]
    _, _, num_label = outputs.shape
    # flatten outputs from (batch, seq_length, num_label) to (batch * seq_length, num_label)
    # so that labels line up as (batch * seq_length,)
    outputs = outputs.view(-1, num_label)
    # no gold labels here: the model's own argmax predictions serve as targets
    labels = torch.argmax(outputs, dim=1)
    batch_losses = self.criterion(outputs, labels)
    loss = torch.mean(batch_losses)  # average over the sequence
    self.batch_output = [input_ids, outputs]
    return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dynamic_model(self, input_val: float) -> float:\n pass", "def inputs(self):\n return NotImplementedError", "def __call__(self, *inputs):\n raise NotImplementedError", "def processInputs(self):", "def __call__(self, n_input: int) -> Model:\n\n raise NotImplementedError()", "def process_inputs(self, inputs):", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['epsilon', 'dual', 'loss', 'tol', 'fit_intercept',\n 'intercept_scaling', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def reconstruct_input_ext(self, model_in):", "def __call__(self, inputs, states, **kwargs):\n raise NotImplementedError()", "def _TransformInputs(self, _):\n raise NotImplementedError()", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['C', 'dual', 'penalty', 'l1_ratio', 'tol', 'fit_intercept',\n 'solver','intercept_scaling', 'max_iter', 'multi_class',\n 'class_weight', 'random_state'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def n_inputs(self):", "def apply(self, inputs):\n raise NotImplementedError()", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def input(self):", "def build_model_fn(self):", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['nu','C', 'kernel', 'degree', 'gamma', 'coef0',\n 'tol', 'cache_size', 'shrinking', 'max_iter'])\n # notFound must be empty\n assert(not notFound)\n self.initializeModel(settings)", "def __calculation(self, *args, **kwargs):\n pass", "def inputs(self):\n pass", "def fit(self, input):\n raise NotImplementedError()", "def __call__(self, do_jacobian=True):\n raise NotImplementedError(\"The Model call is not implemented\")", "def predict_only(self):", "def fit(self):\n raise NotImplementedError('')", "def val_acc(self):\n raise Exception(\" not implemented in base model\")", "def modify_input(self, raw_input_par):\r\n\r\n return self.meta_model.modify_input(raw_input_par)", "def call(self, inputs):\n raise NotImplementedError", "def get_inputs(self):\r\n raise NotImplementedError", "def get_inputs(self):\r\n raise NotImplementedError", "def __set_inputs__(self):\n self.__set_in_out_var__(None, 0) # TODO: inspect None", "def impress(self):\n raise NotImplementedError", "def __call__(self, # pylint: disable=arguments-differ, useless-super-delegation\n features, labels, params=None, config=None):\n return super(BaseModel, self).__call__(features, labels, params, config)", "def fit(self):\n raise NotImplementedError # pragma: no cover", "def _train_model(self):\n raise NotImplementedError()", "def compute(self, **kwargs):\n raise NotImplementedError", "def fit(self):\n raise NotImplementedError", "def forward(self, inputs):\n raise NotImplementedError", "def input(self):\r\n pass", "def compute(self, **kwargs: Any) -> Any:\n raise NotImplementedError(\"Inheriting class must implement this method.\")", "def __init__(self,*args,**kwargs):\n super(CompositeModel1D,self).__init__(*args,**kwargs)\n for m in self._models:\n if not isinstance(m,FunctionModel1D):\n raise ModelTypeError('Input model %s is not a 1D model'%m)\n self._filters = None", "def __call__(self, X):\n return self.model(X)", "def 
forward(self, *inputs):\n raise NotImplementedError", "def __init__(self, inputs, model, low=-1.0, high=1.0):\n super().__init__(inputs, model)\n # Split inputs into mean and log(std).\n mean, log_std = torch.chunk(self.inputs, 2, dim=-1)\n # Clip `scale` values (coming from NN) to reasonable values.\n log_std = torch.clamp(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)\n std = torch.exp(log_std)\n self.dist = torch.distributions.normal.Normal(mean, std)\n assert np.all(np.less(low, high))\n self.low = low\n self.high = high", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def train(self):\n\t\traise NotImplementedError", "def _propagateInputClassification(self,input):\n Y,Z = self._propagateInputRegression(input)\n \n #apply softmax function\n try:\n \n expY = [_exp(y) for y in Y]\n \n #if the exp of the outputs starts getting too big just normalize the outputs\n except OverflowError: \n expY = Y\n sumExpY = sum(expY)\n \n Y = [y/sumExpY for y in Y]\n \n return Y,Z", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)", "def update(self, *inputs):\n raise NotImplementedError", "def d_input(self):\n pass", "def calculate(self):\r\n\r\n pass", "def do_manipulations(self, *args, **kwargs):\n pass", "def forward(self, *inputs) -> torch.Tensor:\n return self.model(*inputs)", "def apply(self):", "def calculate(self):", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def call(self, inputs, training=True):\n pass", "def train(self, ):\n raise NotImplementedError", "def call(self, model):\n raise NotImplementedError('Define your score here')", "def __init__(self):\n self.inputs = {}", "def __init__(self, inputs, outputs):\n super(ELM, self).__init__(inputs, outputs)", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def train(self)->None:", "def calculate(self):\r\n pass", "def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n raise NotImplementedError(\n f\"{self.__class__} is an abstract class. Only classes inheriting this class can be called.\"\n )", "def transform(self, original_input):\n raise NotImplementedError()", "def logic(self):\r\n raise NotImplementedError", "def __init__(self):\n # Initializing the Model with the class\n super(Model, self).__init__()\n # torch.nn.Linear applies a Linear transformation. The first parameter is the size of each input sample. 
The second is the size of the output sample\n self.linear = torch.nn.Linear(1, 1)", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n lambdaFind = paramInput.findFirst('lambda')\n if lambdaFind != None:\n self.lambdaVar = lambdaFind.value\n else:\n self.raiseAnError(IOError,'lambda (scale) value needed for Weibull distribution')\n kFind = paramInput.findFirst('k')\n if kFind != None:\n self.k = kFind.value\n else:\n self.raiseAnError(IOError,'k (shape) value needed for Weibull distribution')\n lowFind = paramInput.findFirst('low')\n if lowFind != None:\n self.low = lowFind.value\n else:\n self.low = 0.0\n self.initializeDistribution()", "def update(self, inputs): # pragma: no cover\n return inputs", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, x, **kwargs):\n return self.model(x)", "def compute(self, *args):\n\n pass", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def train(self):\n raise NotImplementedError()", "def apply(self): # pragma: no cover\n raise NotImplementedError", "def investigate(self, inputs):\r\n # Optional behavior\r\n return inputs", "def train(self):\n raise NotImplementedError", "def localGenerateInput(self, model, myInput):\n # create values dictionary\n weight = 1.0\n for key in sorted(self.distDict):\n # check if the key is a comma separated list of strings\n # in this case, the user wants to sample the comma separated variables with the same sampled value => link the value to all comma separated variables\n totDim = self.variables2distributionsMapping[key]['totDim']\n dist = self.variables2distributionsMapping[key]['name']\n reducedDim = self.variables2distributionsMapping[key]['reducedDim']\n weight = 1.0\n if totDim == 1:\n if self.samplingType == 'uniform':\n distData = self.distDict[key].getCrowDistDict()\n if ('xMin' not in distData.keys()) or ('xMax' not in distData.keys()):\n self.raiseAnError(IOError,\"In the Monte-Carlo sampler a uniform sampling type has been chosen;\"\n + \" however, one or more distributions have not specified either the lowerBound or the upperBound\")\n lower = distData['xMin']\n upper = distData['xMax']\n rvsnum = lower + (upper - lower) * randomUtils.random()\n # TODO (wangc): I think the calculation for epsilon need to be updated as following\n # epsilon = (upper-lower)/(self.limit+1) * 0.5\n epsilon = (upper-lower)/self.limit\n midPlusCDF = self.distDict[key].cdf(rvsnum + epsilon)\n midMinusCDF = self.distDict[key].cdf(rvsnum - epsilon)\n weight *= midPlusCDF - midMinusCDF\n else:\n rvsnum = self.distDict[key].rvs()\n for kkey in key.split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[0]\n self.inputInfo['SampledVarsPb'][key] = self.distDict[key].pdf(rvsnum)\n self.inputInfo['ProbabilityWeight-' + key] = 1.\n elif totDim > 1:\n if reducedDim == 1:\n if self.samplingType is None:\n rvsnum = self.distDict[key].rvs()\n coordinate = np.atleast_1d(rvsnum).tolist()\n else:\n coordinate = np.zeros(totDim)\n for i in range(totDim):\n lower = self.distDict[key].returnLowerBound(i)\n upper = self.distDict[key].returnUpperBound(i)\n coordinate[i] = lower + (upper - lower) * randomUtils.random()\n if reducedDim > len(coordinate):\n self.raiseAnError(IOError, \"The dimension defined for variables drew from the multivariate normal distribution is exceeded by the dimension used in Distribution (MultivariateNormal) \")\n probabilityValue = self.distDict[key].pdf(coordinate)\n 
self.inputInfo['SampledVarsPb'][key] = probabilityValue\n for var in self.distributions2variablesMapping[dist]:\n varID = utils.first(var.keys())\n varDim = var[varID]\n for kkey in varID.strip().split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[varDim-1]\n self.inputInfo[f'ProbabilityWeight-{dist}'] = 1.\n else:\n self.raiseAnError(IOError, \"Total dimension for given distribution should be >= 1\")\n\n if len(self.inputInfo['SampledVarsPb'].keys()) > 0:\n self.inputInfo['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())\n else:\n self.inputInfo['PointProbability'] = 1.0\n if self.samplingType == 'uniform':\n self.inputInfo['ProbabilityWeight' ] = weight\n else:\n self.inputInfo['ProbabilityWeight' ] = 1.0 # MC weight is 1/N => weight is one\n self.inputInfo['SamplerType'] = 'MonteCarlo'", "def call(self, inputs, training=None, mask=None):\n # pylint: disable=arguments-differ\n raise NotImplementedError()", "def __matmul__(self, other: 'ModelParameters') -> 'ModelParameters':\n raise NotImplementedError()", "def SetInput(self, , , p_float_6):\n ...", "def __call__(self, input: Union[np.ndarray, float]):\n if self.state is not None:\n self.state = self.alpha * input + (1 - self.alpha) * self.state\n else:\n self.state = input\n return self.state", "def apply(self) -> None:", "def apply(self) -> None:", "def _model_compute_all(self, inputs):\n\n return self.model.compute_all(inputs)", "def localGenerateInput(self, model, myInput):\n if self.counter < 2:\n MCMC.localGenerateInput(self, model, myInput)\n else:\n self._localReady = False\n for key, value in self._updateValues.items():\n # update value based on proposal distribution\n newVal = value + self._proposal[key].rvs() * self._scaling\n self.values[key] = newVal\n if key in self.distDict:\n ## check the lowerBound and upperBound\n lowerBound = self.distDict[key].lowerBound\n upperBound = self.distDict[key].upperBound\n if lowerBound is not None and self.values[key] < lowerBound:\n self.values[key] = lowerBound\n if upperBound is not None and self.values[key] > upperBound:\n self.values[key] = upperBound\n self.inputInfo['SampledVarsPb'][key] = self.distDict[key].pdf(newVal)\n else:\n self.inputInfo['SampledVarsPb'][key] = self._priorFuns[key].evaluate(\"pdf\", self.values)\n self.inputInfo['ProbabilityWeight-' + key] = 1.\n self.inputInfo['PointProbability'] = 1.0\n self.inputInfo['ProbabilityWeight' ] = 1.0\n self.inputInfo['SamplerType'] = 'Metropolis'\n self.inputInfo['LogPosterior'] = self.netLogPosterior\n self.inputInfo['AcceptRate'] = self._acceptRate", "def calculate(self):\n pass", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def op(self, *input_tensors, **constants):\n raise NotImplementedError(\"Abstract class\")", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def control_law( self, inputs ):", "def forward(self, x: Tensor) -> Any: # type: ignore[override]\n return self.model(x)" ]
[ "0.687905", "0.6863665", "0.6783461", "0.66524166", "0.6625633", "0.660951", "0.6529107", "0.65070593", "0.65055317", "0.6484396", "0.6459398", "0.64080626", "0.63803595", "0.636314", "0.636314", "0.636314", "0.636314", "0.6327075", "0.63064533", "0.62982094", "0.6289941", "0.62757826", "0.6249541", "0.6238206", "0.6220166", "0.6220087", "0.6218263", "0.62066686", "0.6196635", "0.6179733", "0.6179733", "0.6161424", "0.61528575", "0.61402756", "0.61393464", "0.61336017", "0.6098301", "0.609333", "0.6051246", "0.60398597", "0.60104525", "0.59948057", "0.5989933", "0.5973947", "0.5962339", "0.5957505", "0.5957505", "0.5957505", "0.5957505", "0.5957505", "0.59512365", "0.59428436", "0.59409344", "0.5919686", "0.5906201", "0.5898185", "0.5891819", "0.5882332", "0.588106", "0.5878153", "0.5872094", "0.586458", "0.5863425", "0.5855693", "0.58526826", "0.5843774", "0.584164", "0.5840761", "0.5833616", "0.58265495", "0.58203954", "0.58107615", "0.5802415", "0.58021224", "0.57975656", "0.57966435", "0.57949096", "0.5791037", "0.57910055", "0.5787179", "0.5787179", "0.5787179", "0.5782669", "0.5779292", "0.57675296", "0.5765087", "0.5764927", "0.57641935", "0.57623434", "0.5761178", "0.5760386", "0.57600904", "0.57600904", "0.5744841", "0.5741266", "0.57401806", "0.5735785", "0.5735173", "0.57334644", "0.573308", "0.5708112" ]
0.0
-1
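
The query in the record above tells readers to override `forward_step` when their batches are shaped differently. The following is a hedged sketch of what such an override might look like for batches that are plain dicts carrying gold labels; `MyTrainer`, its base class, and the `self.model` / `self.criterion` / `self.device` attributes are assumptions carried over from the document field, not a definitive API.

import torch

class MyTrainer(BaseTrainer):  # BaseTrainer: the (hypothetical) class that defines forward_step
    def forward_step(self, batch):
        # assumed batch layout: dict of padded tensors plus gold labels
        input_ids = batch["input_ids"].to(self.device)
        attention_mask = batch["attention_mask"].to(self.device)
        labels = batch["labels"].to(self.device)

        logits = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]
        num_labels = logits.shape[-1]

        # flatten to (batch * seq_length, num_labels) and train against the gold labels
        batch_losses = self.criterion(logits.view(-1, num_labels), labels.view(-1))
        loss = torch.mean(batch_losses)

        # keep the same side effect the original method relies on downstream
        self.batch_output = [input_ids, logits]
        return loss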
You can override this method if you want to change the format of outputs (e.g., storing gradients)
def update_output(self):
    input_ids, outputs, grads, adv_tokens = self.batch_output
    probs = softmax(outputs, dim=-1)
    probs, labels = torch.max(probs, dim=-1)
    tokens = [
        self.tokenizer.convert_ids_to_tokens(input_ids_)
        for input_ids_ in input_ids
    ]
    embedding_grads = grads.sum(dim=2)
    # L2 norm of the gradient for each sequence
    norms = torch.norm(embedding_grads, dim=1, p=2)
    # normalize each token's gradient by its sequence norm
    for i, norm in enumerate(norms):
        embedding_grads[i] = torch.abs(embedding_grads[i]) / norm
    batch_output = []
    # reshape so probs and labels can be iterated per sequence
    labels = torch.reshape(labels, (1, -1))
    probs = torch.reshape(probs, (1, -1))
    iterator = zip(tokens, probs, embedding_grads, labels)
    for example_tokens, example_prob, example_grad, example_label in iterator:
        example_dict = dict()
        # the batch is padded, so drop padding tokens before reporting
        example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]
        example_dict['tokens'] = example_tokens
        example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]
        example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)]
        example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)]
        batch_output.append(example_dict)
    return batch_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out(self, inputs):", "def _save_grad_output(self, mod, grad_input, grad_output):\n if mod.training:\n self.state[mod][\"gy\"] = grad_output[0] * grad_output[0].size(0)", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def outputs(self):\n return super().outputs", "def format_model_output(self, output, batch_size=1):\r\n return output", "def _model_output(inputs, data_format):\n if data_format == 'channels_first':\n return tf.transpose(a=inputs, perm=[0, 2, 3, 1])\n else:\n return inputs", "def output_shape(self):\n raise NotImplementedError", "def outputs(self):\n pass", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.incoming_shape == self.scale_size:\n self.out = incoming\n else:\n self.out = resize2d(incoming, size=self.scale_size, method=self.method,\n align_corners=self.align_corners)\n if self.method_name == 'AREA':\n self.out = tf.stop_gradient(self.out)\n \n return self.out", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ output_grad / node.inputs[0] ]", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.stop_gradient(incoming)\n return self.out", "def calculate_output(self):", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming[..., 0::3] * 0.299 + incoming[..., 1::3] * 0.587 + incoming[..., 2::3] * 0.114\n return self.out", "def convert_outputs(self):\n self.out('relaxed_structure', self.ctx.workchain.outputs.output_structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.output_parameters))\n self.out('forces', get_forces_from_trajectory(self.ctx.workchain.outputs.output_trajectory))\n self.out('stress', get_stress_from_trajectory(self.ctx.workchain.outputs.output_trajectory))", "def outputs(self, inputs):\n return inputs", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.reshape(incoming, self.shape)\n \n return self.out", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def gradient(self, node, output_grad):\r\n return [output_grad]", "def get_output_shape(self):\n return []", "def _create_outputs(self) -> ComponentOutputs:\n raise NotImplementedError", "def __call__(self, gradient):\n audio_out = self.modem.convert_data_to_audio(gradient.flatten())\n decoded_gradients = self.modem.convert_audio_to_floats(audio_out)\n\n # if you want to regret being alive,\n # self.stream.write(audio_out.tobytes())\n\n return 
decoded_gradients.reshape(gradient.shape)", "def gradient(self, inputs):\n raise NotImplementedError", "def outputs_convert_hook(\n self,\n outputs: Any,\n ) -> Any:\n outputs = self._maybe_mod_outputs_dtype_transform(outputs)\n return outputs", "def gradient(self, node, output_grad):\n return [output_grad]", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def _update_output_type(self):\n pass", "def add_output_ops(self, graph, output):\n return output", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad * exp(node.inputs[0])]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad / node.const_attr ]", "def output_data(self):\n pass", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def result(self) -> Dict[str, tf.Tensor]:\n return super().result()", "def gradient(self, node, output_grad):\r\n return [ - output_grad]", "def _format(self, state):\n x = state\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x,\n device=self.device,\n dtype=torch.float32)\n x = x.unsqueeze(0)\n return x", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [output_grad * node.const_attr ]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n hahaha233 = MatMulOp()\r\n return [ hahaha233( output_grad, node.inputs[1], False , True) , hahaha233( node.inputs[0] , output_grad , True , False ) ]\r\n #return [output_grad * node.inputs[1] , output_grad * node.inputs[0] ]\r", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def interpret_output(self, batch_output):\n raise NotImplementedError", "def _generate_output(self):\n raise NotImplementedError()", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(tf.concat(axis=len(self.incoming_shapes[0]) - 1, values=incomings))\n \n return self.out", "def __init__(self, inputs=[]):\n self.inputs = inputs # input_list <- C, Java <- 匈牙利命名法 -> Python 特别不建议\n # self.outputs = outputs # output_list\n self.value = None\n self.outputs = []\n self.gradients = {}\n\n for node in self.inputs:\n node.outputs.append(self) # build a connection relationship", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0]))]", "def gradient(self, node, output_grad):\n \"\"\"TODO: Your code here\"\"\"\n return [output_grad * node.const_attr]", "def gradients(self):\n return {}", "def output(self, inputs):\n self._in_j = self._input(inputs) #Previous weighted inputs\n return self._g(self._in_j)", "def _convert_raw_outputs(self, raw_output):\n outputs = [\n np.array(raw_output.getLayerFp16(self._output_layers[i]),\n dtype=np.float32).reshape((1, -1) + self._output_shape)\n for i in range(len(self._output_layers))\n ]\n return outputs", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [ - output_grad * 
node.const_attr / node.inputs[0] / node.inputs[0] ]", "def get_outputs(self):\n raise NotImplementedError", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0])), zeroslike_op(node.inputs[1])]", "def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad * node.inputs[1] , get_shape_op(node.inputs[0])), auto_sum_op(output_grad * node.inputs[0] , get_shape_op(node.inputs[1]))]", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n self.out = tf.tile(self.incoming, multiples=self.multiples)\n \n return self.out", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def gradient(self, node, output_grad):\n return [output_grad, output_grad]", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad / node.inputs[1] ,get_shape_op(node.inputs[0])), auto_sum_op(-output_grad * node.inputs[0] / node.inputs[1] / node.inputs[1] , get_shape_op(node.inputs[1]) ) ]", "def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):\n raise Exception(\n \"currently_unsupported: layer_output method is not yet supported for \"\n + \"graph neural networks in ktrain\"\n )", "def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):\n raise Exception(\n \"currently_unsupported: layer_output method is not yet supported for \"\n + \"graph neural networks in ktrain\"\n )", "def get_output(self, prev_layers=None, **kwargs):\n \n noise = self.noisefct(shape=tf.shape(self.incoming()), **self.noiseparams)\n \n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if self.backprop_noise:\n out = incoming * noise\n else:\n out = incoming * tf.stop_gradient(noise)\n self.out = self.a(out)\n \n return self.out", "def output_tensors(self):\r\n return self._output_tensors", "def outputDataType(self):\n raise NotImplementedError()", "def output(self, input, in_features, out_features,reuse=False):\n # with tf.variable_scope(self.name):\n # print('f'*20,input.get_shape().as_list(),in_features,out_features)\n w=self._create_weight([self.cnn_size,self.cnn_size,in_features,out_features],name='Wfn')\n out=self._conv2d(input,w,[1, self.cnn_stride, self.cnn_stride, 1],pre_name='convfn')\n return out", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def forward(self, outputs: Dict[str, Any], labels: Dict[str, Any]):\n utils.check_condition(self.output_name in outputs,\n \"output '%s' not found. Loss requires this output key\" % self.output_name)\n utils.check_condition(self.label_name in labels,\n \"label '%s' not found. 
Loss requires this label key\" % self.output_name)\n output = outputs[self.output_name]\n label = labels[self.label_name]\n return super().forward(output.astype(label, copy=False), label)", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def compute_output(self):\n x, y = self.input_nodes\n print(x.name, y.name)\n self.output_value = backend.dot(x.output_value, y.output_value)\n return self.output_value", "def _store_feats(layer, inp, output):\n _model_feats.append(output.cpu().numpy())", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is not None:\n self.prev_layers.extend(prev_layers)\n \n if self not in self.prev_layers:\n self.prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = tf.layers.batch_normalization(incoming, axis=self.axis, training=self.training,\n name=self.name)\n \n return self.out", "def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if (len(incoming.shape) > 2 and self.flatten_input) or (len(incoming.shape) > 3):\n # Flatten all but first dimension (e.g. flat seq_pos and features)\n X = tf.reshape(incoming, self.incoming_shape)\n else:\n X = incoming\n net = dot_product(X, self.W)\n if self.b is not None:\n net += self.b\n self.out = self.a(net)\n \n return self.out", "def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(incoming)\n \n return self.out", "def update_output(self, latent_mat, weight_mat, y_list):", "def to_tensor(self): \n raise NotImplementedError", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(conv2d_transpose2d(incoming, W=self.W, output_shape=self.output_shape,\n strides=self.strides, padding=self.padding,\n data_format=self.data_format) + self.b)\n return self.out", "def test_param_to_gradient(self):\n pass", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = avgpool2D(incoming, ksize=self.ksize, strides=self.strides, padding=self.padding,\n data_format=self.data_format)\n return self.out", "def inputs(self):\n pass", "def gradient(self, node, output_grad):\r\n return [relu_op(output_grad, node.inputs[1]) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def outputs(self):\r\n return self._outputs", "def __generate_output_data(self):\n if not len(self.output_data) == 0:\n return\n try:\n self.output_data = s.load(open('output/output_data.p', 'rb'))\n self.class_indices = s.load(open('output/class_indices.p', 
'rb'))\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n except:\n self.output_data = generate_output_for_test_data(image_data=self.image_data,\n binary_output=self.binary_output) if self.testing else generate_output_for_train_data(\n image_data=self.image_data, binary_output=self.binary_output)\n self.class_indices = get_all_class_indices(training=False) if self.testing else get_all_class_indices()\n if not self.classes_to_visualise == None:\n self.__filter_output_data(self.classes_to_visualise)\n s.dump([out.tolist() for out in self.output_data], open('output/output_data.p', 'wb'))\n s.dump(self.class_indices, open('output/class_indices.p', 'wb'))\n\n self.legend = get_class_names_for_class_indices(list(set(sorted(self.class_indices))))", "def gradient(self, node, output_grad):\r\n #return [output_grad]\r\n return [broadcast_mean_to(output_grad,get_shape_op(node.inputs[0]),node.const_attr)]", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n if np.all([i_s == self.incoming_shapes[0] for i_s in self.incoming_shapes]):\n out = tf.add_n(incomings)\n else:\n out = incomings[0]\n shape_len = len(out.shape)\n for incoming in incomings[1:]:\n while len(incoming.shape) < shape_len:\n incoming = tf.expand_dims(incoming, axis=-2)\n out += incoming\n self.out = self.a(out)\n return self.out", "def inputs(self):\n return NotImplementedError", "def build_outputs(self, **inputs):\n print(\"Building all outputs, \", self.name)\n# invscale, _ = self.build_output('invscale', **inputs)\n# loc, _ = self.build_output('loc', invscale=invscale, **inputs)\n# samp, _ = self.build_output('main', invscale=invscale, loc=loc)\n self.build_output('invscale', **inputs)\n self.build_output('loc', **inputs)\n self.build_output('main', **inputs)", "def gradient(self, node, output_grad):\r\n return [auto_broadcast_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def model_output(model, t, s, i):\n return 0, 0, 0, 0", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n \n if self not in prev_layers:\n prev_layers += [self]\n incomings = [incoming(prev_layers=prev_layers, **kwargs) for incoming in self.incomings]\n with tf.variable_scope(self.layer_scope):\n self.out = self.a(tf.add_n(incomings) / len(incomings))\n \n return self.out", "def get_output(self):\n raise NotImplementedError", "def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,\n tf.shape]]):\n self._outputs = []\n i = 0\n for (dtype, shape) in new_outputs:\n self._outputs.append(tensor.Tensor(self, i, dtype, shape))\n i += 1\n self._graph.increment_version_counter() # Just in case", "def op_output_values(self):\n return self.solid_output_values", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def get_output_shape(self):\n return self.output_shape", "def output(self):\n # print \"Neuron output\"\n\n if self.output_cache is not None:\n # print self, \"returning from cache\"\n return self.output_cache\n\n self.inputs_cache = []\n\n sum = 0\n for input_edge in self.inputs:\n 
input = input_edge.from_.output()\n self.inputs_cache.append(input)\n sum += input * input_edge.w\n\n self.output_cache = sigmoid(sum)\n # print \"node output:\", self.output_cache, sum\n return self.output_cache" ]
[ "0.6842611", "0.63906467", "0.62235004", "0.62234104", "0.62234104", "0.62234104", "0.62234104", "0.6200916", "0.61630815", "0.6138572", "0.6102943", "0.6075281", "0.6048927", "0.60099345", "0.60045785", "0.59973353", "0.5995878", "0.59723157", "0.5926936", "0.5917695", "0.5914432", "0.5914432", "0.58857656", "0.58500564", "0.5824264", "0.5800219", "0.57999325", "0.57863903", "0.5773613", "0.5773613", "0.5773016", "0.57678294", "0.57665503", "0.57539237", "0.5750291", "0.57384825", "0.57384825", "0.57310355", "0.5722749", "0.57198733", "0.5712609", "0.5706907", "0.5706432", "0.5700613", "0.56768346", "0.5665394", "0.5658785", "0.56586784", "0.5657553", "0.56450546", "0.5643102", "0.5642493", "0.56347245", "0.5632942", "0.5632138", "0.5629101", "0.5626614", "0.56186134", "0.5615739", "0.560224", "0.56013536", "0.56013536", "0.55942726", "0.55928427", "0.5584313", "0.5572084", "0.5567754", "0.5565766", "0.5564226", "0.5562288", "0.55591583", "0.55560386", "0.55517167", "0.5544731", "0.5535732", "0.55347335", "0.5533216", "0.5531994", "0.5528886", "0.5525778", "0.55193454", "0.55173814", "0.55162483", "0.5514704", "0.54962164", "0.54786855", "0.54748416", "0.5463207", "0.5457929", "0.54441845", "0.5443755", "0.544239", "0.5436623", "0.5427044", "0.542296", "0.5420061", "0.5420061", "0.5420061", "0.5420061", "0.5420061", "0.5409702" ]
0.0
-1
Convert a single position. This is done for easy code sharing with other tools. Skyfield does support arrays of positions.
def _convert_radec_to_altaz(ra, dec, lon, lat, height, time):
    radec = Star(ra=Angle(degrees=ra), dec=Angle(degrees=dec))
    earth = load(EPHEMERIS)['earth']
    location = earth + Topos(longitude_degrees=lon, latitude_degrees=lat, elevation_m=height * 1000.0)
    ts = load.timescale()
    obstime = ts.from_astropy(Time(time, scale='utc'))
    alt, az, _ = location.at(obstime).observe(radec).apparent().altaz(pressure_mbar=0)
    return dict(az=az.degrees, alt=alt.degrees)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def get_position(self, position):", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def positions_to_coords(self, positions):\n return [self.to_coords(px, py) for (px, py) in positions]", "def coords_to_structure(self) -> None:\n ...", "def world_coord(self, position, len):\n\n if len > 1:\n x_world = []\n y_world = []\n\n for item in position:\n x_world.append(self.cell_size*item[0]+self.cell_size/2-2)\n y_world.append(self.cell_size*item[1]+self.cell_size/2-6)\n\n else:\n x_world = self.cell_size*position[0]+self.cell_size/2-2\n y_world = self.cell_size*position[1]+self.cell_size/2-6\n\n\n return np.array([x_world, y_world])", "def normalize(position):\n x, y, z = position\n x, y, z = (int(round(x)), int(round(y)), int(round(z)))\n return (x, y, z)", "def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()", "def get_position(pos):\n if type(pos) is str:\n return list(map(lambda x: float(x),pos.split(\",\")))\n return pos", "def transform_coordinates(self, p):\n if type(p) == PhysicalObject:\n return self.transform_coordinates(p.position)\n elif type(p) == Vector:\n return tuple(map(\n lambda x: int(x),\n (p / SCALE_FACTOR - self.pos_shift))\n )", "def translate(self, pos):\n if isinstance(pos, list) and len(pos) == 3:\n # Nur wenn das Array drei werte hat, weitermachen\n if pos[0] >= 0:\n self.position[0] += pos[0]\n if pos[1] >= 0:\n self.position[1] += pos[1]\n if pos[2] >= 0:\n self.position[2] += pos[2]\n else:\n raise TypeError(\"Array with len 3 needed\")", "def make_simple_coords():\n \n x = np.array([144, 124, 97, 165, 114, 60, 165, 0, 76, 50, 147])\n y = np.array([ 0, 3, 21, 28, 34, 38, 51, 54, 58, 56, 61])\n coords = np.vstack((x,y)).T\n return coords", "def ConvertToCoordinate(pos):\n\n return pos // BOARD_DIMENSION, pos % BOARD_DIMENSION", "def _get_current_pos_in_1d(self):\n pos = self.network.get_current_pos()\n pos = np.squeeze(np.reshape(pos, (self.input_shape[0]*self.input_shape[1],1)))\n\n return pos", "def _getitem_2d(self, pos):\n # If pos contains multiple coordinates (or objects), convert recursively.\n if isinstance(pos, list):\n if isinstance(pos[0], (int, np.integer)): # It's actually a single coordinate.\n return self[pos[0], pos[1]]\n else:\n return [self[p] for p in pos]\n elif isinstance(pos, np.ndarray):\n if isinstance(pos[0], (int, np.integer)): # It's actually a single coordinate.\n return np.array(self[pos[0], pos[1]])\n else:\n return np.array([self[p] for p in pos])\n # If pos contains only one physical object, convert its bounding box to abstract coordinates\n if (pos.__class__.__name__ == 'PhysicalObject') or (issubclass(pos.__class__, laygo2.object.PhysicalObject)):\n return self.bbox(pos)\n # If pos contains only one coordinate, convert it to abstract grid.\n m = self.master.x == pos[0]\n n = self.master.y == pos[1]\n # refactor the following code to avoid the use of double for-loops and list comprehensions.\n if (not isinstance(m, np.ndarray)) and (not isinstance(n, np.ndarray)): # x and y are scalars.\n return np.array([m, n])\n if not isinstance(m, np.ndarray): # x is a scalar.\n return np.array([np.array([m, _n]) for _n in n])\n elif not isinstance(n, np.ndarray): # y is a scalar.\n return np.array([np.array([_m, n]) for _m in m])\n else:\n mn = []\n for _m in m: # vectorize this operation.\n row = []\n for _n in n:\n row.append(np.array([_m, _n]))\n mn.append(np.array(row))\n 
return np.array(mn)", "def s(self, position: Vector) -> float:\n return self.local_coordinates(position)[0]", "def convert_coords(self, coords):\n xPos = int(int(coords[0]) / 8)\n yPos = int(coords[1])\n zPos = int(int(coords[2]) / 8)\n return list(xPos, yPos, zPos)", "def convert_letter_coord(player_1: bool, position: str) -> Any:\r\n if player_1:\r\n x_axis = letter_coordinates.get(position[0])[0]\r\n y_axis = number_to_coordinate.get(position[1])\r\n return (x_axis, y_axis)\r\n\r\n else:\r\n x_axis = letter_coordinates.get(position[0])[1]\r\n y_axis = number_to_coordinate.get(position[1])\r\n return (x_axis, y_axis)", "def prepare_coords(coords):\n coords = np.asarray(coords).astype(np.float)\n if coords.ndim == 1:\n coords = np.array([coords])\n return coords", "def point(self) -> Point:\n return Point.from_np_array(self.position)", "def coords_to_point(\n self, *coords: float | Sequence[float] | Sequence[Sequence[float]] | np.ndarray\n ) -> np.ndarray:\n coords = np.asarray(coords)\n origin = self.x_axis.number_to_point(\n self._origin_shift([self.x_axis.x_min, self.x_axis.x_max]),\n )\n\n # Is coords in the format ([[x1 y1 z1] [x2 y2 z2] ...])? (True)\n # Or is coords in the format (x, y, z) or ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...])? (False)\n # The latter is preferred.\n are_coordinates_transposed = False\n\n # If coords is in the format ([[x1 y1 z1] [x2 y2 z2] ...]):\n if coords.ndim == 3:\n # Extract from original tuple: now coords looks like [[x y z]] or [[x1 y1 z1] [x2 y2 z2] ...].\n coords = coords[0]\n # If there's a single coord (coords = [[x y z]]), extract it so that\n # coords = [x y z] and coords_to_point returns a single point.\n if coords.shape[0] == 1:\n coords = coords[0]\n # Else, if coords looks more like [[x1 y1 z1] [x2 y2 z2] ...], transform them (by\n # transposing) into the format [[x1 x2 ...] [y1 y2 ...] [z1 z2 ...]] for later processing.\n else:\n coords = coords.T\n are_coordinates_transposed = True\n # Otherwise, coords already looked like (x, y, z) or ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...]),\n # so no further processing is needed.\n\n # Now coords should either look like [x y z] or [[x1 x2 ...] [y1 y2 ...] [z1 z2 ...]],\n # so it can be iterated directly. Each element is either a float representing a single\n # coordinate, or a float ndarray of coordinates corresponding to a single axis.\n # Although \"points\" and \"nums\" are in plural, there might be a single point or number.\n points = self.x_axis.number_to_point(coords[0])\n other_axes = self.axes.submobjects[1:]\n for axis, nums in zip(other_axes, coords[1:]):\n points += axis.number_to_point(nums) - origin\n\n # Return points as is, except if coords originally looked like\n # ([x1 x2 ...], [y1 y2 ...], [z1 z2 ...]), which is determined by the conditions below. 
In\n # that case, the current implementation requires that the results have to be transposed.\n if are_coordinates_transposed or points.ndim == 1:\n return points\n return points.T", "def ConvertToPosition(coor):\n\n return coor[0] * BOARD_DIMENSION + coor[1]", "def __init__(self, direction, position):\n self.direction = normalize(direction)\n self.position = np.array(position)", "def convert_coords(self):\n if self.coordsys in ['image', 'physical']:\n coords = self._convert_pix_coords()\n else:\n coords = self._convert_sky_coords()\n\n if self.region_type == 'line':\n coords = [coords[0][0], coords[0][1]]\n\n if self.region_type == 'text':\n coords.append(self.meta['text'])\n\n return coords", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos", "def position_px4_to_gazebo(position_x, position_y, position_z):\n return (position_y, position_x, -position_z)", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def getPoint(self):\n return Point(*self.position)", "def target_position(self, time):\n\n x_pos = self.r*sin(self.w*time)+self.ar_tag_pos[0]\n y_pos = self.r*cos(self.w*time)+self.ar_tag_pos[1]\n z_pos = self.ar_tag_pos[2]\n # print(x_pos,y_pos)\n # raise NotImplementedError\n return np.array([x_pos,y_pos,z_pos])", "def __convert_position(self, row_position: int = None, col_position: int = None) -> int:\n if row_position is None or col_position is None:\n return self.__row_position * len(self.__labyrinth[0]) + self.__col_position\n\n return row_position * len(self.__labyrinth[0]) + col_position", "def convert_to_pygame(pos):\n return int(pos.x), int(-pos.y+600)", "def position_to_index(self, position, grid_size):\n x, y = position\n return x * grid_size + y", "def _getitem_1d(self, pos):\n # Check if pos has multiple elements.\n if isinstance(pos, OneDimGrid):\n return self._getitem_1d(pos=pos.elements)\n elif isinstance(pos, slice):\n return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n elif isinstance(pos, np.ndarray):\n return self._getitem_1d(pos.tolist())\n elif isinstance(pos, list):\n return np.array([self._getitem_1d(p) for p in pos])\n elif pos is None:\n raise TypeError(\"_AbsToPhyConverter._getitem_1d does not accept None as its input.\")\n else:\n # pos is a single element.\n for i, e in np.ndenumerate(self.master.elements):\n if (pos - e) % self.master.width == 0:\n return int(round((pos - e) / self.master.width)) * self.master.elements.shape[0] + i[0]\n return None # no matched coordinate", "def coordinates(self):", "def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + 
y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def ion1_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion1 = x*self.a + y*self.b + z*self.c\n self.position['1A'] = np.dot(self.position_map[1],axes_vector) + self.ion1\n self.position['2A'] = np.dot(self.position_map[2],axes_vector) + self.ion1\n self.position['3A'] = np.dot(self.position_map[3],axes_vector) + self.ion1\n self.position['4A'] = np.dot(self.position_map[4],axes_vector) + self.ion1\n self.position['5A'] = np.dot(self.position_map[5],axes_vector) + self.ion1\n self.position['6A'] = np.dot(self.position_map[6],axes_vector) + self.ion1\n self.position['7A'] = np.dot(self.position_map[7],axes_vector) + self.ion1\n self.position['8A'] = np.dot(self.position_map[8],axes_vector) + self.ion1", "def position_gazebo_to_px4(position_x, position_y, position_z):\n return (position_y, position_x, -position_z)", "def to_world(self, x, y, **kwargs):", "def ndarray_to_location(array: np.ndarray) -> carla.Location: # pylint: disable=no-member\n return carla.Location(*list(map(float, array))) # pylint: disable=no-member", "def marker_at_position(self, position):\n\n return self._slice(position, (1,1))[0][0]", "def position_trajectory(self):\n return self._read(MX_POSITION_TRAJECTORY)", "def position(self, value):\n if type(value) is not tuple or len(value) != 2 \\\n or type(value[0]) is not int or type(value[1]) is not int \\\n or value[0] < 0 or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value", "def set_position(self, position):\n self.position = tuple(position)", "def position_from_basis_coords(self, basis_coords: CoordT) -> PositionT:\n ids = self.id_from_basis_coords(basis_coords)\n return self.positions[ids]", "def position(self):\n return self.atoms.reshape((1,-1))", "def position(self, position):\n if type(position) is not tuple or len(position) is not 2\\\n or type(position[0]) is not int or position[0] < 0\\\n or type(position[1]) is not int or position[1] < 0:\n\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n self.__position = position", "def convert_pose(coordinates, cartesian=True):\n if not cartesian:\n coordinates = convert(coordinates)\n else:\n coordinates[:, 0] = normalize(\n coordinates[:, 0].reshape(1, -1), norm='max')\n coordinates[:, 1] = normalize(\n coordinates[:, 1].reshape(1, -1), norm='max')\n\n return coordinates", "def _get_plunger_position(self, position):\n try:\n value = self.positions[position]\n if isinstance(value, (int, float, complex)):\n return value\n else:\n raise RuntimeError(\n 'Plunger position \"{}\" not yet calibrated'.format(\n position))\n except KeyError:\n raise RuntimeError(\n 'Plunger position \"{}\" does not exist'.format(\n position))", "def position(self, value):\n if type(value) is not tuple or len(value) != 2 or \\\n type(value[0]) is not int or value[0] < 0 or \\\n type(value[1]) is 
not int or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value", "def maze_position(self):\n pos = self._env.observations()['DEBUG.POS.TRANS']\n x, y = self._to_maze_coord(pos[0], pos[1])\n return np.array([x, y])", "def to_pygame(point):\n return int(point.x), int(-point.y+500)", "def get_position():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.OUT, \n description = \"The current position vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def set_position():\n\n global character\n return character['Position']", "def readXYZPos(self,phys,xyzname):\r\n XYZReader.XYZReader(self.checkPath(xyzname)).read(phys.myXYZ)\r\n\tphys.posvec.resize(phys.myXYZ.coords.size())\r\n for ii in range(0, phys.myXYZ.coords.size()*3, 3):\r\n phys.positions[ii] = phys.myXYZ.coords[ii]\r\n phys.positions[ii+1] = phys.myXYZ.coords[ii+1]\r\n phys.positions[ii+2] = phys.myXYZ.coords[ii+2]", "def get_position(self, units=\"bohr\"):\n from numpy import array\n\n # Grab the position from the store\n if 'r' in self.store:\n pos = self.store['r']\n else:\n pos = self.store[self.sym]\n pos = array([float(x) for x in pos])\n\n # Make sure the units are correct\n if _IsAngstroem(self):\n pos /= AU_to_A\n if _IsAngstroem(units):\n pos *= AU_to_A\n\n return [float(x) for x in pos]", "def position_to_stone(self, position):\n if len(position) != 2:\n stone = -1\n return stone\n h = position[0]\n w = position[1]\n stone = h * self.width + w\n if stone not in self.blanks:\n stone = -1 # -1 means the current position is blank.\n return stone", "def position(self, array):\n self.app.position = array", "def _fixupPosition(self, position):\n if \"latitudeI\" in position:\n position[\"latitude\"] = position[\"latitudeI\"] * 1e-7\n if \"longitudeI\" in position:\n position[\"longitude\"] = position[\"longitudeI\"] * 1e-7", "def get(name):\n position = Position.objects.get_or_create(name=name.lower())\n if isinstance(position, tuple):\n position = position[0]\n position.auto_title()\n return position", "def _transform_point(self, x, y):\n return (x, y)", "def xyz_position_creator(shape, verbose = True):\n # type: (object) -> object\n if shape[0] == \"square\" or shape[0] == 'doublesquare':\n if verbose:\n print(\"\")\n print(\"Creating x- y- z-positions of a square array\")\n x_coordinates = numpy.arange(-shape[1], shape[1], shape[2])\n y_coordinates = numpy.arange(-shape[1], shape[1], shape[2])\n\n block1 = numpy.zeros((len(x_coordinates) * len(y_coordinates), 4))\n k = 0\n for i in range(len(x_coordinates)):\n for j in range(len(y_coordinates)):\n block1[k, 0] = 1001 + k\n block1[k, 1] = x_coordinates[i]\n block1[k, 2] = y_coordinates[j]\n block1[k, 3] = 0\n k += 1\n if shape[0] == 'square':\n block1[:, 1] += shape[3]\n block1[:, 2] += shape[4]\n xyz_coordinates = block1.copy()\n elif shape[0] == 'doublesquare':\n block2 = block1.copy()\n\n block2[:, 0] += 1000 + len(block1[:, 0])\n block2[:, 1] += shape[3]\n block2[:, 2] += shape[4]\n xyz_coordinates = numpy.vstack((block1, block2))\n\n elif shape[0] == 'hex' or shape[0] == 'doublehex':\n if verbose:\n print(\"\")\n print(\"Creating x- y- z-positions of a \" + 
shape[0] + \" array\")\n\n dx = shape[1]\n dy = dx * numpy.sqrt(3.) / 2.\n\n line1 = numpy.array([numpy.arange(4) * dx, numpy.zeros(4), numpy.zeros(4)]).transpose()\n\n # define the second line\n line2 = line1[0:3, :].copy()\n line2[:, 0] += dx / 2.\n line2[:, 1] += dy\n # define the third line\n line3 = line1[0:3].copy()\n line3[:, 1] += 2 * dy\n # define the fourth line\n line4 = line2[0:2, :].copy()\n line4[:, 1] += 2 * dy\n\n block1 = numpy.vstack((line1[1:], line2, line3, line4))\n\n block2 = numpy.vstack((line1[1:], line2, line3[1:], line4))\n block2[:, 0] *= -1\n\n block3 = numpy.vstack((line2, line3, line4))\n block3[:, 1] *= -1\n\n block4 = numpy.vstack((line2, line3[1:], line4))\n block4[:, 0] *= -1\n block4[:, 1] *= -1\n hex_block = numpy.vstack((block1, block2, block3, block4))\n\n if shape[0] == 'hex':\n hex_block[:, 0] += shape[2]\n hex_block[:, 1] += shape[3]\n antenna_numbers = numpy.arange(len(hex_block[:, 0])) + 1001\n xyz_coordinates = numpy.vstack((antenna_numbers, hex_block.T)).T\n elif shape[0] == 'doublehex':\n antenna_numbers = numpy.arange(len(hex_block[:, 0])) + 1001\n first_hex = numpy.vstack((antenna_numbers, hex_block.T)).T\n\n second_hex = first_hex.copy()\n\n first_hex[:, 1] += shape[2]\n first_hex[:, 2] += shape[3]\n\n second_hex[:, 0] += 1000 + len(first_hex[:, 0])\n second_hex[:, 1] += shape[4]\n second_hex[:, 2] += shape[5]\n xyz_coordinates = numpy.vstack((first_hex, second_hex))\n\n elif shape[0] == 'linear':\n if verbose:\n print(\"\")\n print(\"Creating x- y- z-positions of a \" + str(shape[2]) + \" element linear array\")\n xyz_coordinates = numpy.zeros((shape[2], 4))\n xyz_coordinates[:, 0] = numpy.arange(shape[2]) + 1001\n xyz_coordinates[:, 1] = numpy.linspace(-shape[1], shape[1], shape[2])\n elif shape[0] == 'file':\n xyz_coordinates = antenna_table_loader(shape[1])\n\n return xyz_coordinates", "def __get_position(self, value, state):\n coords = np.argwhere(state == value).flatten()\n return coords", "def position(self, value):\n if (not isinstance(value, tuple) or\n len(value) != 2 or\n not all(isinstance(num, int) for num in value) or\n not all(num >= 0 for num in value)):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value", "def loc_from_tuple(self, coords):\n self.x, self.y = coords", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def set_position():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The new position vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def position(self):\n return pm.datatypes.Point(self.transform.getTranslation(ws=True))", "def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x", "def _parse_coordinate(c):\n if hasattr(c, \"center\"):\n return c.center\n elif np.array(c).size == 2:\n return c\n else:\n raise ValueError(\n \"Could not parse coordinate, input should be array-like (e.g. 
[1.5,2.3] or a Port\"\n )", "def __get_position(self, value):\r\n if len(self.__matrix) > 5:\r\n number = self.AminoAcids()\r\n else:\r\n number = self.Bases()\r\n\r\n if value.upper() == self.A:\r\n return number.A\r\n\r\n elif value.upper() == self.R:\r\n return number.R\r\n\r\n elif value.upper() == self.N:\r\n return number.N\r\n\r\n elif value.upper() == self.D:\r\n return number.D\r\n\r\n elif value.upper() == self.C:\r\n return number.C\r\n\r\n elif value.upper() == self.Q:\r\n return number.Q\r\n\r\n elif value.upper() == self.E:\r\n return number.E\r\n\r\n elif value.upper() == self.G:\r\n return number.G\r\n\r\n elif value.upper() == self.H:\r\n return number.H\r\n\r\n elif value.upper() == self.I:\r\n return number.I\r\n\r\n elif value.upper() == self.L:\r\n return number.L\r\n\r\n elif value.upper() == self.K:\r\n return number.K\r\n\r\n elif value.upper() == self.M:\r\n return number.M\r\n\r\n elif value.upper() == self.F:\r\n return number.F\r\n\r\n elif value.upper() == self.P:\r\n return number.P\r\n\r\n elif value.upper() == self.S:\r\n return number.S\r\n\r\n elif value.upper() == self.T:\r\n return number.T\r\n\r\n elif value.upper() == self.W:\r\n return number.W\r\n\r\n elif value.upper() == self.Y:\r\n return number.Y\r\n\r\n elif value.upper() == self.V:\r\n return number.V\r\n\r\n else:\r\n return number.Star", "def atm241(coord: numpy.array) -> numpy.array:\n ...", "def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]", "def toPosition(self, pos):\n return [ord(pos[0])-ord('a'), int(pos[1])]", "def roi2point(self, msg):\n p = Vector([0,0,0])\n if self.camerainfo.width > 0:\n p.x = 0.5 - (msg.x_offset+(msg.width/2.0))/self.camerainfo.width\n if self.camerainfo.height > 0:\n p.z = 0.5 - (msg.y_offset+(msg.height/2.0))/self.camerainfo.height\n return p", "def positional_encoding(position, d_model):\r\n def get_angles(position, i, d_model):\r\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\r\n return position * angle_rates # (position, d_model)\r\n\r\n angle_rads = get_angles(position=np.arange(position)[:, np.newaxis],\r\n i=np.arange(d_model)[np.newaxis, :],\r\n d_model=d_model)\r\n\r\n # Apply sin to even indices in the array\r\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\r\n\r\n # Apply cos to odd indices in the array\r\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\r\n\r\n pos_encoding = angle_rads[np.newaxis, ...]\r\n return tf.cast(pos_encoding, dtype=tf.float32) # (1, position, d_model)\r", "def xy_to_XYZ(xy: ArrayLike) -> NDArrayFloat:\n\n return xyY_to_XYZ(xy_to_xyY(xy))", "def change_position(board: Board, position: Position, character: str) -> Board:\n board = list(board)\n \n row = board[position[0]]\n new_row = row[:position[-1]] + character + row[position[-1] + 1:]\n board[position[0]] = new_row\n\n board = tuple(board) \n\n return board", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def switch_playing_direction(position_coords):\n # just mirrors the x-coordinate in place\n position_coords[:,0::2] *= -1", "def XYZ_to_xy(XYZ: ArrayLike) -> NDArrayFloat:\n\n return xyY_to_xy(XYZ_to_xyY(XYZ))", "def _getitem_1d(self, pos):\n # Check if pos has multiple elements.\n if isinstance(pos, slice):\n return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n elif isinstance(pos, np.ndarray):\n return self._getitem_1d(pos.tolist())\n elif isinstance(pos, list):\n return np.array([self._getitem_1d(p) for p in pos])\n elif pos is None:\n raise TypeError(\"_AbsToPhyConverter._getitem_1d does not accept None as its input.\")\n else:\n # pos is a single element. Compute quotient and modulo for grid extension.\n quo = 0\n mod = int(round(pos))\n if pos >= self.master.shape[0]:\n mod = int(round(pos % self.master.shape[0]))\n quo = int(round((pos-mod) / self.master.shape[0]))\n elif pos < 0:\n mod = int(round(pos % self.master.shape[0]))\n quo = int(round((pos-mod)) / self.master.shape[0])\n return quo * self.master.range[1] + self.master.elements[mod]\n # the following command cannot handle the size extension of the grid, disabled.\n # return self.master.elements.take(pos, mode='wrap')", "def test_first_pos() -> None:\n assert sw.walk_to(1) == sw.Coordinate(0, 0)", "def coords_to_positions(self, coords):\n return [self.to_position(x, y, i, j) for (x, y, i, j) in coords]", "def coordinate_point_to_coordinate(self, point):\n return self.gen(self._point_to_ray[point])", "def get_position(self, message):\n #print('**************** pos ')\n self.position = message.data\n self.state[self.ndegres:] = self.position[0:self.ndegres]", "def _pose_from_odom(self, odom): \n pose = odom.pose.pose.position\n return [pose.x, pose.y, pose.z]", "def data_xy(position) -> dict:\n\n return {\"x\": position[0], \"y\": position[1]}", "def create_position(self):\n raise NotImplementedError" ]
[ "0.6353608", "0.62130916", "0.6069126", "0.60215694", "0.6008328", "0.59653753", "0.59585685", "0.59379125", "0.59244585", "0.5815883", "0.5807676", "0.5784127", "0.57744235", "0.5750637", "0.57444346", "0.5740382", "0.5734146", "0.57155126", "0.5699236", "0.5683348", "0.5664236", "0.564457", "0.56331265", "0.56086266", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.5553832", "0.55479425", "0.5546355", "0.5541547", "0.55351526", "0.5513552", "0.551054", "0.55059403", "0.5503833", "0.5491592", "0.5488986", "0.54845214", "0.5452893", "0.5445071", "0.5445071", "0.5441704", "0.5430838", "0.5427175", "0.54194224", "0.54019636", "0.53936917", "0.53871405", "0.53817445", "0.5381399", "0.53772295", "0.5374526", "0.53729427", "0.53726125", "0.53725123", "0.5366584", "0.53649765", "0.53606564", "0.53548765", "0.5347061", "0.53332317", "0.53328586", "0.5332554", "0.53290415", "0.5326907", "0.532664", "0.53191775", "0.5317937", "0.53062093", "0.52986324", "0.5296806", "0.52883667", "0.5286003", "0.5281783", "0.52759695", "0.5273254", "0.5270444", "0.5270444", "0.52607304", "0.5259274", "0.5257297", "0.52555746", "0.5252064", "0.52455723", "0.5245566", "0.5240812", "0.5238173", "0.5238076", "0.5226728", "0.52229834", "0.5222819", "0.5219418", "0.5217049" ]
0.0
-1
If TASK_USE_PATH is set, rely on PATH to look for task binaries. Otherwise ../src/ is used by default.
def task_binary_location(cmd="task"):
    return binary_location(cmd, TASK_USE_PATH)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_task_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')", "def test_taskmod_no_taskfile(modpath):\n sys.meta_path.append(TaskImporter())\n task = import_module(modpath)\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == []", "def get_celery_path():\n\n return get_executable_path('celery')", "def TaskRelativeName(cls, task):\n if not task: return None\n return os.path.relpath(cls.TaskNormalizedName(task),\n PipelineConfig.Instance().pipeline_base_dir())", "def task(path, **kwargs):\n\n # Get model configuration\n config = None\n if isinstance(path, (list, tuple)) and hasattr(path[0], \"config\"):\n config = path[0].config\n elif isinstance(path, str):\n config = AutoConfig.from_pretrained(path, **kwargs)\n\n # Attempt to resolve task using configuration\n task = None\n if config:\n architecture = config.architectures[0] if config.architectures else None\n if architecture:\n if any(x for x in [\"LMHead\", \"CausalLM\"] if x in architecture):\n task = \"language-generation\"\n elif \"QuestionAnswering\" in architecture:\n task = \"question-answering\"\n elif \"ConditionalGeneration\" in architecture:\n task = \"sequence-sequence\"\n\n return task", "def find_taskfile(self):\n filename = self.cmdline.file\n curdir = self.cmdline.dir\n\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Taskrun search directory: {0}\".format(curdir))\n self.env.errorln(\"Taskrun search filename: {0}\".format(filename))\n self.env.errorln(\"Taskrun walk path: {0}\".format(str(self.cmdline.walk)))\n\n self.taskfile = None\n while True:\n taskfile = os.path.join(curdir, filename)\n if os.path.isfile(taskfile):\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Task file found: {0}\".format(taskfile))\n self.taskfile = taskfile\n return\n\n if not self.cmdline.walk:\n return\n\n (head, _) = os.path.split(curdir)\n if head and head != curdir:\n curdir = head\n else:\n break", "def test_taskmod_taskfiles_only(monkeypatch, modpath):\n\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n task = import_module(modpath)\n\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == pypath\n for n in names:\n assert hasattr(task, n)\n assert getattr(task, n).TEST == '{}.{}'.format(modpath, n)", "def get_python():\n return path.join(TaskCreator.bin_dir, \"python\")", "def prepare_taskfile(taskfile):\n path = os.path.dirname(taskfile)\n taskmodulename = os.path.splitext(os.path.basename(taskfile))[0]\n logging.info(\"Loading task file %s from %s\", taskmodulename, path)\n fp, pathname, description = imp.find_module(taskmodulename, [path])\n try:\n return imp.load_module(taskmodulename, fp, pathname, description)\n finally:\n if fp: \n fp.close()", "def test_findtasks_none(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n # monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n # FakeModuleWithTasks)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = 
import_module(modpath)\n\n assert hasattr(taskmod, '__tasks__')\n assert taskmod.__tasks__ == []", "def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in [\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles 
= [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )", "def app_tasks(name, path):\n @task(pre=reset_project.pre, name=\"reset_project\")\n def _reset_project(ctx):\n reset_project(ctx, path)\n\n _reset_project.__doc__ = \"Reset Mynewt project files for {}\".format(name)\n\n @task(pre=install_project.pre, name=\"install_project\")\n def _install_project(ctx):\n install_project(ctx, path)\n\n _install_project.__doc__ = \"Install Mynewt project dependencies for {}\".format(name)\n\n @task(pre=build.pre, name=\"build\")\n def _build(ctx, export_path=None, board=None):\n build(ctx, name, path, export_path, board)\n\n _build.__doc__ = \"Build {} for Pylon\".format(name)\n\n @task(pre=run.pre, name=\"run\")\n def _run(ctx, sn=None, board=None): # pylint: disable=C0103\n run(ctx, name, path, sn, board)\n\n _run.__doc__ = \"Flash and run {} on Pylon\".format(name)\n\n @task(pre=debug.pre, name=\"debug\")\n def _debug(ctx, sn=None, port=None, 
board=None): # pylint: disable=C0103\n debug(ctx, name, path, sn, port, board)\n\n _debug.__doc__ = \"Debug {} on Pylon\".format(name)\n\n return _install_project, _reset_project, _build, _run, _debug", "def default_tasks():\n tasks = {'run': run, 'bash': bash}\n for entry_point in pkg_resources.iter_entry_points('jarbas_task'):\n tasks[entry_point.name] = entry_point.load()\n return tasks", "def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)", "def modpath():\n return 'loadlimit.task'", "def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')", "def task(ctx, config):\n pass", "def generate_tasks(self, task):", "def test_findtasks_found(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeModuleWithTasks)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = import_module(modpath)\n\n assert len(taskmod.__tasks__) == 1\n task = taskmod.__tasks__[0]\n assert task.__name__ == 'TestTask'\n assert isinstance(task, type)\n assert issubclass(task, TaskABC)", "def GetTaskOutputRelativeDir(cls, task):\n task = os.path.dirname(cls.TaskRelativeName(task))\n if not task: return ''\n\n parts = task.split(os.sep)\n res_parts = []\n for part in parts:\n priority_name = part.split('_', 1)\n res_parts += [priority_name[1]]\n return os.sep.join(res_parts)", "def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath", "def set_executable_options(self, task):\n pass", "def addTask(self, task):\n if isinstance(task, ShREEKTask):\n self._ShREEKConfig.addTask(task)\n return \n if type(task) == type(\"string\"):\n dirname = os.path.dirname(task)\n exename = os.path.basename(task)\n taskObject = ShREEKTask(Directory = dirname,\n Executable = exename)\n \n self._ShREEKConfig.addTask(taskObject)\n return \n \n msg = \"Unknown Task type added to ShREEKInterface\\n\"\n msg += \"\\t%s\\n\" % task\n msg += \"Argument must be a ShREEKTask Object or a path to\\n\"\n msg += \"an executable script\\n\"\n raise ShREEKException(msg, ClassInstance = self,\n BadObject = task)", "def ns_foreach_task_subdir(c):\n from slugify import slugify\n from metapack_build.tasks.package import make_ns\n\n for d in _build_order(c):\n print(\"⏩ \", d)\n incl_path = d.joinpath('tasks.py')\n\n if not incl_path.exists():\n continue\n\n module_name = f'tasks.{slugify(d.name)}'\n\n make_ns() # Reset the package namespace\n\n spec = 
importlib.util.spec_from_file_location(module_name, incl_path)\n sp_tasks = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(sp_tasks)\n\n curdir = os.getcwd()\n\n os.chdir(d)\n\n try:\n yield sp_tasks.ns\n except AttributeError as e:\n if module_name not in str(e):\n raise\n finally:\n os.chdir(curdir)", "def TaskDirName(cls, task):\n if not task: return None\n return os.path.dirname(task)", "def task_4_2_1():\n # TODO Task 4.2.1: Your code goes here\n pass", "def get_pytest():\n return path.join(TaskCreator.bin_dir, \"py.test\")", "def test_py_task_config(exopy_qtbot, task_workbench):\n plugin = task_workbench.get_plugin('exopy.tasks')\n\n root = RootTask()\n config = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n assert config.task_name\n assert config.ready\n assert config.task_doc\n\n config.task_name = ''\n assert not config.ready\n\n config.task_name = 'Test'\n assert config.ready\n task = config.build_task()\n assert task.name == 'Test'\n\n root.add_child_task(0, task)\n config2 = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n config2.task_name = 'Test'\n assert not config2.ready\n\n config2.task_name = 'ADifferentName'\n assert config2.ready\n\n plugin.auto_task_names = []\n config = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n assert not config.task_name\n assert not config.ready\n\n show_and_close_widget(exopy_qtbot, PyConfigView(config=config))\n show_and_close_widget(exopy_qtbot, PyConfigView(config=config, loop=True))", "def task(*args, **kwargs):\n print(f\"task declared, args: {args}, kwargs:{kwargs}\")\n return FalseCeleryApp", "def task_4_3_2():\n # TODO Task 4.3.2: Your code goes here\n pass", "def task():\n pass", "def task():\n pass", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n self.__pythonExecutable\n )", "def task_4_3_1():\n # TODO Task 4.3.1: Your code goes here\n pass", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n os.environ.get(\n 'KOMBI_PYTHON2_EXECUTABLE',\n 'python2'\n )\n )", "def _run_system(task):\n\n cmd = task.task.format(*task.get_args(), **task.get_kwargs())\n\n print(\"Running: {}\".format(cmd))\n os.system(cmd)", "def add_task_to_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n child_label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [child_label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could use.\" % child_label)\n return\n # get task label from user\n responses = accept_inputs([\"New parent task label\"])\n parent_label = responses[\"New parent task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [parent_label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could use.\" % parent_label)\n return\n # update the task to remove the parent\n query_no_results(\"update task set parent = ? 
where label = ?\", [parent_label, child_label])\n print(\"Set parent of task with label '%s' to task with label '%s'.\" % (child_label, parent_label))", "def task(self):\n return import_path_to_callable(self.func)", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "def update_taskset(\n source,\n target,\n config,\n force=True,\n):\n\n source = Path(source)\n target = Path(target)\n\n managed_files = (\n '__init__.py',\n 'sysconfig.py',\n 'modules',\n )\n\n if (any([osp.exists(target / f)\n for f in managed_files])\n and not force):\n\n raise OSError(\"Project taskset file exists, not overwriting\")\n\n elif force:\n for f in managed_files:\n f = target / f\n if osp.isdir(f):\n print(f\"Cleaning {f}\")\n shutil.rmtree(f)\n elif osp.isfile(f):\n print(f\"Cleaning {f}\")\n os.remove(f)\n\n # then get modules we need and replace the ones in this project\n # with them\n print(\"Updating tasks/sysconfig.py\")\n shutil.copyfile(\n source / \"sysconfig.py\",\n target / \"sysconfig.py\"\n )\n\n print(\"Updating tasks/__init__.py\")\n shutil.copyfile(\n source / \"__init__.py\",\n target / \"__init__.py\"\n )\n\n print(\"Updating tasks/modules\")\n shutil.copytree(\n source / \"modules\",\n target / \"modules\",\n )", "def setup_task(self, *args, **kwargs):\n pass", "def _run_single_task(task_id, run_task, task_info, sys_params):\n import numpy, random\n import traceback, tempfile\n if 'seed' in task_info:\n numpy.random.seed(int(task_info['seed']))\n random.seed(int(task_info['seed']))\n\n tempdir_original = sys_params['tempdir_original']\n tempdir_task = sys_params['tempdir_task']\n PYTHONCOMPILED_original = sys_params['PYTHONCOMPILED_original']\n PYTHONCOMPILED_subproc = sys_params['PYTHONCOMPILED_subproc']\n\n cwd = os.getcwd()\n\n exception_info = ''\n result = {}\n try:\n # Change the temp directories used to build the weave stuff.\n # Without this the build will fail, due to weave_imp.o being accessed\n # by multiple processes.\n # if os.path.exists(tempdir_task):\n # shutil.rmtree(tempdir_task)\n # os.mkdir(tempdir_task)\n assert os.path.exists(tempdir_task), f\"The temp dir {tempdir_task} does not exist for task {task_id}\"\n tempfile.tempdir = tempdir_task\n os.environ['PYTHONCOMPILED'] = PYTHONCOMPILED_subproc\n except:\n traceback.print_exc()\n exception_info += f'\\nEXCEPTION SETTING TEMPDIRS: {traceback.format_exc()}'\n\n t_start = time.time()\n try:\n result = run_task(task_info, sys_params['taskdir'], sys_params['tempdir_task'])\n except:\n traceback.print_exc()\n exception_info += f'\\nEXCEPTION IN RUN: {traceback.format_exc()}'\n status = 'error'\n else:\n status = 'finished'\n t_end = time.time()\n tasktime = t_end - t_start\n\n os.chdir(cwd) # Make sure we restore the original CWD\n\n try:\n if PYTHONCOMPILED_original:\n os.environ['PYTHONCOMPILED'] = PYTHONCOMPILED_original\n elif 'PYTHONCOMPILED' in os.environ:\n del os.environ['PYTHONCOMPILED']\n except:\n traceback.print_exc()\n exception_info += f'\\nEXCEPTION RESETTING PYTHONCOMPILED: {traceback.format_exc()}'\n\n try:\n tempfile.tempdir = tempdir_original\n # Do not delete tmp directory, it will be deleted after entire job is done\n # shutil.rmtree(tempdir_task)\n except:\n traceback.print_exc()\n exception_info += f'\\nEXCEPTION RESETTING TEMPDIR: {traceback.format_exc()}'\n\n if exception_info != '':\n result.setdefault('log_file', {'exc_info': ''})['exc_info'] += exception_info\n result = SimulationResult(task_id, result, 
status, tasktime)\n # try:\n # result_q.put(simresult, block=True)\n # except IOError:\n # traceback.print_exc()\n # simresult.status = 'error_ipc'\n # log_file = simresult.finaldata.setdefault('log_file', {})\n # exc_info = log_file.setdefault('exc_info', '')\n # exc_info += traceback.format_exc()\n # result_q.put(simresult, block=True)\n result_file_path = sys_params['result_file_path']\n\n with open_data_file(result_file_path) as f:\n f.store_data_root(result.to_dict())\n\n # with open(result_file_path, 'wb') as f:\n # # cPickle.dump(result, f, cPickle.HIGHEST_PROTOCOL)\n # p = pickle.Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)\n # p.dump(result)\n return # Don't return anything", "def test_taskfile_taskmod_loaded(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n import_module(modpath)\n\n # Forcibly remove the generated taskfile\n sys.modules.pop(randpath)\n\n import_module(randpath)", "def create_task():", "def task_4_3_3():\n # TODO Task 4.3.3: Your code goes here\n pass", "def generate_curriculum(self,target_task, sourceFolder,workFolder):\n self.target_task = target_task\n self.usedTask = False", "def get_task(self, code: str) -> \"Task\": # noqa: F821\n if code not in self.tasks:\n raise PyDSTaskNoFoundException(\n \"Task with code %s can not found in process definition %\",\n (code, self.name),\n )\n return self.tasks[code]", "def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n os.environ.get(\n 'KOMBI_PYTHON3_EXECUTABLE',\n 'python3'\n )\n )", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def run(self, task):\n\n self._setup()\n\n runnable = load_from_module(task.task)\n runnable(*task.get_args(), **task.get_kwargs())", "def assert_task_args(cls, args: \"DictConfig\", shared_state: \"SharedTaskState\") -> None:\n assert isinstance(\n shared_state, SharedStaticTaskState\n ), \"Cannot assert args on a non-static state\"\n super().assert_task_args(args, shared_state)\n\n found_task_source = args.blueprint.task_source\n assert (\n found_task_source is not None\n ), \"Must provide a path to a javascript bundle in `task_source`\"\n\n found_task_path = os.path.expanduser(found_task_source)\n assert os.path.exists(\n found_task_path\n ), f\"Provided task source {found_task_path} does not exist.\"\n\n link_task_source = args.blueprint.link_task_source\n current_architect = args.architect._architect_type\n allowed_architects = [\"local\"]\n assert link_task_source == False or (\n link_task_source == True and current_architect in allowed_architects\n ), f\"`link_task_source={link_task_source}` is not compatible with architect type: {args.architect._architect_type}. 
Please check your task configuration.\"\n\n if link_task_source == False and current_architect in allowed_architects:\n logger.info(\n \"If you want your server to update on reload whenever you make changes to your webapp, then make sure to set \\n\\nlink_task_source: [blue]true[/blue]\\n\\nin your task's hydra configuration and run \\n\\n[purple]cd[/purple] webapp [red]&&[/red] [green]npm[/green] run dev:watch\\n\\nin a separate terminal window. For more information check out:\\nhttps://mephisto.ai/docs/guides/tutorials/custom_react/#12-launching-the-task\\n\",\n extra={\"markup\": True},\n )", "def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))", "def task():", "def task_test(argv):\n run_tests(\"python2\", argv)\n run_tests(\"python3\", argv)", "def task_prepare_build():\n\n import sys\n\n python_path = sys.executable.split(os.sep)\n venv_path = str(Path(os.sep.join(python_path[:-2])))\n\n def get_dst_path():\n import platform\n\n print(f\"Going on with {venv_path} as the virtual environment exclusively used for using pyinstaller.\")\n arch = platform.system()\n if arch == \"Windows\":\n return Path(venv_path) / \"Lib/site-packages/mad_gui/qt_designer/build/\"\n if arch in [\"Linux\", \"Darwin\"]:\n python_dirs = os.listdir(Path(venv_path) / \"lib/\")\n warnings.warn(\n f\"dodo.py: Assuming your python 3.7 installation is in {Path(venv_path)}/lib/{python_dirs[0]}\"\n )\n return Path(venv_path) / \"lib\" / python_dirs[0] / \"site-packages/mad_gui/qt_designer/build/\"\n raise ValueError(\"What operating system is this?!\")\n\n def set_up_paths():\n if not os.path.exists(get_dst_path().parent):\n raise FileNotFoundError(\n \"Apparently mad_gui is not installed in this environemnt. Use `pip install . 
` to do so.\"\n )\n dst_path = get_dst_path()\n os.makedirs(dst_path, exist_ok=True)\n\n def convert_ui_to_py():\n dst_path = get_dst_path()\n ui_files = [file for file in os.listdir(dst_path.parent) if \".ui\" in file]\n print(\"\\n\")\n for file in ui_files:\n print(f\"Converting from: {dst_path.parent}{os.sep}{file}\")\n print(f\"To: {dst_path}{os.sep}{file.split('.')[0]}.py\\n\")\n os.popen(f\"pyside2-uic -o {dst_path}{os.sep}{file.split('.')[0]}.py {dst_path.parent}{os.sep}{file}\")\n\n print(\n \"Info: These conversion should take place in the virutal environment you are going to use with \"\n \"pyinstaller.\"\n )\n\n return {\n \"actions\": [set_up_paths, convert_ui_to_py],\n \"verbosity\": 2,\n }", "def binary_location(cmd, USE_PATH=False):\n if USE_PATH:\n return cmd\n else:\n return os.path.join(BIN_PREFIX, cmd)", "def task2_3():", "def task():\n\n\tprint('Example task executed.')", "def activateLocalFastPath() -> None:\n global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n # Try to fix pathing issues in Windows.\n if os.name == \"nt\":\n APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n _FAST_PATH = os.path.join(\n APP_DATA,\n \"{}{}-{}\".format(\n MPI_RANK,\n os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"), # for parallel unit testing,\n datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n ),\n )\n\n _FAST_PATH_IS_TEMPORARY = True", "def test_not_github(self):\n project_src_path = 'project-src'\n os.environ['PROJECT_SRC_PATH'] = project_src_path\n generic_ci_env = platform_config.BasePlatformConfig()\n self.assertEqual(generic_ci_env.project_src_path, project_src_path)", "def run_task(self) -> Task:", "def task(func=None, *args, **kwargs):\n if not func:\n return partial(task, *args, **kwargs)\n\n try:\n name = kwargs.pop('name').lower()\n except KeyError:\n name = func.__name__.lower()\n\n # Extract docs for the given func\n help = inspect.getdoc(func)\n add_task(name, func, help, kwargs.get('default'))\n\n # If task has args store it in TASK_WITH_ARGS\n # Todo: Move this logic also to `click`\n\n if is_args_in_task(func):\n TASKS_WITH_ARGS.add(name)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n return wrapper", "def load_org_task_file():\n nodelist = orgnode.makelist(TASKS_ORG_FILE)\n return nodelist", "def load_task_specific_eval(task):\n if task == 'vrp':\n from evaluation.eval_VRP import eval_google_or,eval_Clarke_Wright\n\n return [(eval_google_or.EvalGoogleOR,'or_tools'), (eval_Clarke_Wright.EvalClarkeWright,'Clarke_Wright')]\n\n elif task == 'vrptw':\n from evaluation.eval_VRPTW import eval_tw_google_or,eval_I1_heuristics\n\n return [(eval_tw_google_or.EvalTWGoogleOR,'or_tools_tw'),(eval_I1_heuristics.EvalI1Heuristics,'I1_heuristic')]\n\n else:\n raise Exception('Task is not implemented')", "def __init__(self, includes=None, excludes=None):\n super(PackageTask, self).__init__()\n if includes is None:\n self.includes = [\n '*.py',\n 'base.txt',\n '*.xml'\n ]\n else:\n self.includes = includes\n if excludes is None:\n self.excludes = [\n 'package/**/*',\n 'tests/**/*',\n ]\n else:\n self.excludes = excludes", "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def tasks():", "def extract_autotvm_tasks(mod, target):\n return tvm.autotvm.task.extract_from_program(mod, target=target, params=None)", "def get_pip():\n return path.join(TaskCreator.bin_dir, \"pip\")", "def task_run(taskname,mynodes):\n print \"FULLRUN\"\n 
task = task_self()\n print \"Booting task: \" , taskname\n \n # first initiate environment to run our python+java\n os.chdir(CASSANDRA_HOME)\n \n #FIXME: set init_environment to actually work\n #task.shell(\"cluster_config/init_environment.sh\",nodes=mynodes)\n cmdenv = \"export PYTHONHOME=/opt/python2.7.2; \\\n export JAVA_HOME=/opt/jdk1.6.0_27; \\\n export PYTHONPATH=/opt/python2.7.2/lib; \\\n export \\\n PATH=/opt/python2.7.2/lib:/opt/python2.7.2/bin:/opt/jdk1.6.0_27/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin;\"\n \n\n \n task.run(cmdenv+taskname,nodes=mynodes)\n print \":\\n\".join([\"%s=%s\" % (i,j) for j,i in task.iter_buffers()])", "def task_gen(self):\n pass", "def task_stagnant(task):", "def test_remote_sys_path(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_sys_path():\n assert \"\" not in sys.path\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0", "def get_path_to_pyflow() -> Path:\n return Path(os.environ[\"PYFLOW\"])", "def _get_classpath(self, executor, workunit_factory):\r\n if not self._classpath:\r\n self._classpath = self._bootstrap_ivy_classpath(executor, workunit_factory)\r\n return self._classpath", "def test_get_task_output(self):\n pass", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def task_test():\n return {\n 'actions': ['py.test tests/'],\n }", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. 
\" % task)", "def testnodes_path() -> str:\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"..\", \"test\", \"testnodes\")\n )", "def get_task(option_set):\n return option_set & TASK_MASK", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def _processCmdLine(ctx, taskParams):\n\n startNode = ctx.getStartDirNode(taskParams['$startdir'])\n\n bconf = ctx.getbconf(startNode)\n btypeDir = bconf.selectedBuildTypeDir\n startdir = bconf.startdir\n cmdArgs = taskParams['run']\n\n # By default 'shell' is True to get rid of some problems with Waf and Windows\n shell = cmdArgs.get('shell', True)\n\n cmdline = cmdArgs.get('cmd', '').strip()\n if not cmdline and taskParams['$runnable']:\n cmdline = taskParams['$real.target']\n\n if not cmdline:\n return cmdline, shell\n\n extraArgs = cmdArgs.get('extra-args')\n if extraArgs:\n cmdline = '%s %s' % (cmdline, ' '.join(extraArgs))\n\n shell = cmdHasShellSymbols(cmdline) if not shell else shell\n cmdSplitted = _splitCmdLine(cmdline, taskParams['name'], bconf.path)\n if not shell:\n # Waf cannot work correctly with paths with whitespaces when\n # 'shell' is False.\n # TODO: try to make solution for 'shell' == False\n if any(' ' in s for s in cmdSplitted):\n shell = True\n\n paths = [cmdArgs['cwd'], startdir, btypeDir]\n paths.extend(os.environ.get('PATH', '').split(os.pathsep))\n fkw = {\n 'path_list' : paths, 'quiet' : True,\n 'exts' : EXE_FILE_EXTS, 'mandatory' : False\n }\n\n partsCount = len(cmdSplitted)\n launcher = cmdSplitted[0]\n cmdExt = os.path.splitext(launcher)[1]\n if partsCount == 1 and cmdExt:\n find = lambda x: ctx.find_program(x, **fkw)\n result = _makeCmdForScript(cmdline, cmdExt, find)\n if result:\n cmdline = result\n\n elif partsCount > 1 and not shell and not _RE_STARTS_WITH_SUBST.match(launcher):\n # Waf raises exception in verbose mode with 'shell' == False if it\n # cannot find full path to executable and on windows cmdline\n # like 'python file.py' doesn't work.\n # So here is the attempt to find full path for such a case.\n result = ctx.find_program(launcher, **fkw)\n if result:\n launcher = result[0]\n cmdSplitted[0] = launcher\n cmdSplitted = [ x.replace(r'\"', r'\\\"') for x in cmdSplitted]\n cmdline = ' '.join('\"%s\"' % s if ' ' in s else s for s in cmdSplitted)\n\n return cmdline, shell", "def calltask(self, name, **vars):\n if name in self._tasks:\n for entry in self._tasks[name]:\n entry.execute(vars)\n else:\n raise Error(\"No such task: {0}\".format(name))", "def _is_python_task(task, pidstr):\n if str(task.pid) != pidstr:\n return False\n else:\n return True", "def task(self, name):\n pass", "def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))", "def task_generate_job_batch():\n return {\n # force doit to always mark the task\n # as not up-to-date (unless target removed)\n 'uptodate': [False],\n 'file_dep': ['generate_job_batch.py'],\n 'task_dep': ['create_folders'],\n #'targets': ['.running_jobs/list_of_jobs.txt'],\n 'actions': ['python generate_job_batch.py'],\n }", "def main(self):\n\n env = self.env\n\n # Initial setup\n self.parse_args()\n self.find_taskfile()\n\n if self.taskfile is None:\n env.abort(\"Unable to find {0}\".format(self.cmdline.file))\n\n # Set command line NAME=VALUE variables 
before loading the file\n (tasks, params) = self.get_tasks_params()\n env.update(**params)\n\n env[\"_TOP_\"] = os.path.dirname(self.taskfile)\n env[\"_ABSTOP_\"] = os.path.abspath(env[\"_TOP_\"])\n env[\"_CWD_\"] = os.path.abspath(self.cwd)\n env._load(self.taskfile)\n\n # Print tasks list if requested\n if self.cmdline.list:\n for name in env._tasks:\n if not name.startswith(\"_\"):\n env.outputln(name)\n env.exit()\n\n # Print var/task help if requested\n if self.cmdline.varhelp:\n self.show_varhelp()\n\n if self.cmdline.taskhelp:\n names = sorted(set([i[0] for i in tasks]))\n self.show_taskhelp(names)\n\n if self.cmdline.taskhelp or self.cmdline.varhelp:\n env.exit()\n\n # Execute the requested tasks setting task specific variables\n for (task, params) in tasks:\n env.calltask(task, **params)", "def prepare_task(self,\n config=None,\n args=None,\n targets=None,\n build_graph=None,\n build_file_parser=None,\n address_mapper=None,\n console_outstream=None,\n workspace=None):\n\n task_type = self.task_type()\n assert issubclass(task_type, Task), 'task_type must be a Task subclass, got %s' % task_type\n\n config = create_config(config or '')\n workdir = os.path.join(config.getdefault('pants_workdir'), 'test', task_type.__name__)\n\n bootstrap_options = OptionsBootstrapper().get_bootstrap_options()\n\n options = Options(env={}, config=config, known_scopes=['', 'test'], args=args or [])\n # A lot of basic code uses these options, so always register them.\n register_bootstrap_options(options.register_global)\n\n # We need to wrap register_global (can't set .bootstrap attr on the bound instancemethod).\n def register_global_wrapper(*args, **kwargs):\n return options.register_global(*args, **kwargs)\n\n register_global_wrapper.bootstrap = bootstrap_options.for_global_scope()\n register_global_options(register_global_wrapper)\n\n task_type.options_scope = 'test'\n task_type.register_options_on_scope(options)\n\n run_tracker = create_run_tracker()\n\n context = Context(config,\n options,\n run_tracker,\n targets or [],\n build_graph=build_graph,\n build_file_parser=build_file_parser,\n address_mapper=address_mapper,\n console_outstream=console_outstream,\n workspace=workspace)\n return task_type(context, workdir)", "def __init__(self, conf, python, requirements, tagged_env_vars):\n self._env_dir = conf.env_dir\n self._repo_subdir = conf.repo_subdir\n self._install_timeout = conf.install_timeout # gh-391\n self._default_benchmark_timeout = conf.default_benchmark_timeout # gh-973\n self._tagged_env_vars = tagged_env_vars\n self._path = os.path.abspath(os.path.join(\n self._env_dir, self.dir_name))\n self._project = conf.project\n\n self._is_setup = False\n\n self._cache = build_cache.BuildCache(conf, self._path)\n self._build_root = os.path.abspath(os.path.join(self._path, 'project'))\n\n self._requirements = requirements\n # These are needed for asv to build and run the project, not part of\n # benchmark name mangling\n self._base_requirements = {}\n # gh-1314\n asv_runner_path = os.getenv(\"ASV_RUNNER_PATH\", \"\")\n module_path = Path(asv_runner_path) / \"asv_runner\"\n\n # Check if the path points to a directory containing the \"asv_runner\" module\n if module_path.is_dir() and (module_path / \"__init__.py\").is_file():\n spec = importlib.util.spec_from_file_location(\"asv_runner\",\n module_path / \"__init__.py\")\n # Attempt to load the module\n asv_runner_module = importlib.util.module_from_spec(spec)\n try:\n spec.loader.exec_module(asv_runner_module)\n 
self._base_requirements[\"pip+asv_runner\"] = asv_runner_path\n except Exception as e:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n log.warning(f\"Failed to load module from ASV_RUNNER_PATH: {e}\")\n else:\n self._base_requirements[\"pip+asv_runner\"] = \"\"\n if asv_runner_path:\n log.warning(\"ASV_RUNNER_PATH does not point\"\n \"to a directory containing the 'asv_runner' module\")\n if not util.ON_PYPY:\n # XXX: What if pypy installed asv tries to benchmark a cpython\n # python?\n self._base_requirements[\"pip+pympler\"] = \"\"\n if (Path.cwd() / \"poetry.lock\").exists():\n self._base_requirements[\"poetry-core\"] = \"\"\n\n if (Path.cwd() / \"pdm.lock\").exists():\n self._base_requirements[\"pdm\"] = \"\"\n\n # Update the _base_requirements if needed\n for key in list(self._requirements.keys()):\n if key in self._base_requirements:\n self._base_requirements[key] = self._requirements[key]\n del self._requirements[key]\n\n self._build_command = conf.build_command\n self._install_command = conf.install_command\n self._uninstall_command = conf.uninstall_command\n\n self._global_env_vars = {}\n self._global_env_vars['ASV'] = 'true'\n self._global_env_vars['ASV_PROJECT'] = conf.project\n self._global_env_vars['ASV_CONF_DIR'] = os.path.abspath(os.getcwd())\n self._global_env_vars['ASV_ENV_NAME'] = self.name\n self._global_env_vars['ASV_ENV_DIR'] = self._path\n self._global_env_vars['ASV_ENV_TYPE'] = self.tool_name\n\n installed_commit_hash = self._get_installed_commit_hash()\n self._set_commit_hash(installed_commit_hash)", "def add_task(name, func, help, is_default=False):\n cmd = click.Command(name=name, callback=func, help=help)\n cli.add_command(cmd)\n\n if is_default:\n # Store all functions here without name.\n DEFAULT_TASKS_KEY.append(func)\n\n return cli", "def load_task(self, task):\n with self._driver.session() as session:\n session.write_transaction(tx.create_task, task=task)\n session.write_transaction(tx.create_task_hint_nodes, task=task)\n session.write_transaction(tx.create_task_requirement_nodes, task=task)\n session.write_transaction(tx.create_task_input_nodes, task=task)\n session.write_transaction(tx.create_task_output_nodes, task=task)\n session.write_transaction(tx.create_task_metadata_node, task=task)\n session.write_transaction(tx.add_dependencies, task=task)", "def main():\n\n parser = argparse.ArgumentParser(description=\"generateTestStubs\")\n\n parser.add_argument(\"taskFile\",\n help=\"Path for assignment file.\")\n\n args = parser.parse_args()\n\n if not os.path.exists(args.taskFile):\n print(\"Task file does not exist.\")\n sys.exit(1)\n\n taskMgr = EEWebLPProject()\n taskMgr.initLP()\n\n #taskMgr.listProjects()\n #taskMgr.loadTree([\"project_id=8008922\"])\n tasks = taskMgr.getTasks([\"project_id=6890048\"],parent_id=8008922)\n\n fileByAssignee = taskMgr.getTaskOwners(args.taskFile)\n taskMgr.updateTaskOwners(fileByAssignee,tasks)", "def run_task(task, *args, **kwargs):\n ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)\n grep1 = subprocess.Popen(['grep', '-v', 'grep'], stdin=ps.stdout, stdout=subprocess.PIPE)\n grep2 = subprocess.Popen(['grep', 'celery.*worker'], stdin=grep1.stdout, stdout=subprocess.PIPE)\n\n if len(list(grep2.stdout)) > 0 and not in_tests():\n run_on_commit(lambda: task.delay(*args, **kwargs))\n else:\n run_on_commit(lambda: task.run(*args, **kwargs))", "def get_task_output_path(request_id: str, task_id: str) -> str:\n request_output_path = get_request_output_path(request_id)\n if task_id:\n try:\n request_dirs = 
os.listdir(request_output_path)\n except FileNotFoundError as exception:\n message = 'Output path {0:s} for task {1:s} could not be found.'.format(\n request_output_path, task_id)\n log.error(message)\n raise HTTPException(status_code=404, detail=message) from exception\n\n for request_dir in request_dirs:\n if task_id in request_dir:\n request_output_path = os.path.join(request_output_path, request_dir)\n break\n return request_output_path", "def init_package(self, pkg_name, version=None):\n\n with self._lock:\n pkg_dir = os.path.join(self.tasks_dir_internal, pkg_name)\n if os.path.isdir(pkg_dir):\n versions = os.listdir(pkg_dir)\n if not versions:\n raise TaskNotFoundException(pkg_name)\n\n elif version:\n # load specified version if available\n if not version in versions:\n raise TaskNotFoundException(pkg_name)\n v = version\n elif len(versions) != 1:\n # load the newest version\n logger.warn('Internal task cache contains more than one version of the task')\n v = versions[0]\n for dir in versions:\n if os.path.getmtime('%s/%s' % (pkg_dir,dir)) > \\\n os.path.getmtime('%s/%s' % (pkg_dir,v)):\n v = dir\n else:\n v = versions[0]\n\n # load this version\n full_pkg_dir = os.path.join(pkg_dir, v)\n pkg = packaging.TaskPackage(pkg_name, full_pkg_dir, v)\n if pkg.version != v:\n # verification\n logger.warn('Invalid package %s:%s' % (pkg_name, v))\n self._add_package(pkg)\n\n # invoke attached task callbacks\n callbacks = self._task_callbacks[pkg_name]\n module_path, cycle = self._compute_module_search_path(pkg_name)\n while len(callbacks):\n task_key, d = callbacks.pop(0)\n if cycle:\n d.errback((task_key, pkg.version,\n 'Cycle detected in dependency'))\n else:\n d.callback((task_key, pkg.version, pkg.tasks[task_key],\n module_path))\n return pkg\n return None", "def _get_current_task():\r\n return current_task", "def _get_current_task():\r\n return current_task", "def GetPublishCurrentDirForTask(cls, task):\n if not PipelineConfig.Instance().pipeline_publish_dir(): return ''\n\n out_dir = cls.GetOutDirForTask(task)\n if not out_dir: return ''\n out_dir = out_dir.replace(cls.GetOutSubDir(), PipelineConfig.Instance().pipeline_publish_dir())\n return os.path.join(os.path.dirname(out_dir), 'current')" ]
[ "0.6477157", "0.57612556", "0.5689633", "0.56765836", "0.55889344", "0.55805033", "0.55210114", "0.5507863", "0.5440326", "0.5436243", "0.54087734", "0.5403501", "0.5345792", "0.5335856", "0.53229564", "0.5279437", "0.5270501", "0.5263024", "0.5223266", "0.52160585", "0.5170007", "0.5158954", "0.5104897", "0.509643", "0.50870895", "0.50826484", "0.5057847", "0.5048503", "0.5047269", "0.5045533", "0.5033358", "0.5033358", "0.5032264", "0.50032306", "0.49988335", "0.49966368", "0.49861997", "0.4985928", "0.49829796", "0.49626586", "0.49541417", "0.49509227", "0.49494547", "0.49489152", "0.4946229", "0.49406365", "0.493897", "0.4925766", "0.49146938", "0.49133044", "0.49116287", "0.49063736", "0.48949516", "0.48855224", "0.48836437", "0.48830095", "0.4878693", "0.48786366", "0.48573032", "0.48522034", "0.48506364", "0.48490047", "0.48344743", "0.48273808", "0.48164606", "0.4814589", "0.48127553", "0.48114115", "0.48100546", "0.48090628", "0.47927663", "0.47878844", "0.4777407", "0.47662964", "0.47613505", "0.47608674", "0.47605148", "0.47583088", "0.4747053", "0.47454184", "0.47333488", "0.4732156", "0.47266826", "0.4725472", "0.47239944", "0.47224584", "0.47182697", "0.47119743", "0.47011247", "0.4697929", "0.4693273", "0.46773517", "0.46771064", "0.46699017", "0.46674123", "0.46660265", "0.46659213", "0.4665226", "0.4665226", "0.4664357" ]
0.71928936
0
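The bare value lines that close each row of this dump are easy to misread: after the list of candidate negative passages comes an apparently parallel list of scores stored as strings, then a single score and an integer rank for the positive document. Below is a minimal Python sketch of how such a row could be consumed; the dict keys, the hand-built toy row, and the reading that a higher score means a harder (more similar) negative are assumptions made for illustration, not facts stated in the dump.

# Illustrative sketch only: the field names below are assumed, and the toy
# row is hand-built from the kinds of values visible above.
def hardest_negatives(row, k=3):
    # Pair each negative passage with its score (stored as a string in the
    # dump) and return the k highest-scoring, i.e. presumably hardest, ones.
    scored = [
        (float(score), text)
        for score, text in zip(row["negative_scores"], row["negatives"])
    ]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:k]

toy_row = {
    "negatives": ["def run_task(task): ...", "def binary_location(cmd): ..."],
    "negative_scores": ["0.6477157", "0.57612556"],  # strings, as in the dump
    "document_score": "0.71928936",
    "document_rank": 0,
}
print(hardest_negatives(toy_row, k=1))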
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False): if USE_PATH: return cmd else: return os.path.join(BIN_PREFIX, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def set_path():\n import os\n import sys\n\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))", "def set_proto_src(path):\n if sys.path.count(path) == 0:\n sys.path.append(path)", "def load_libsrc():\n import sys\n ops_dir = os.path.dirname(os.path.realpath(__file__))\n fst_package = ops_dir + '/../lib_src/fst_pipeline'\n sys.path.append(fst_package)\n return", "def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)", "def path_which(args):\n print(header(\"$PATH Lookup: {}\".format(args.look)))\n loop_fmt = \"{color}{path}\"\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n\n msg = check_exec(part, args.look, args.version)\n if msg:\n print(header(loop_fmt.format(color=color, path=part), '-'))\n print(msg)\n cnt = (cnt + 1) % len(CODES)", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def main():\n arg0 = sys.argv[0]\n if not os.path.isfile(arg0):\n sys.exit(\"sys.argv[0] is not a path to a file: \\\"\" + str(arg0) + \"\\\". Exiting now.\")\n absolute_path_to_file = os.path.realpath(arg0) # realpath follows symlinks, which is what we want in this case.\n absolute_path_to_src = os.path.dirname(absolute_path_to_file)\n (absolute_path_to_repo, src_dirname) = os.path.split(absolute_path_to_src)\n if src_dirname != \"src\":\n sys.exit(\"The driver script should be located in directory \\\"src\\\". It is instead in \\\"\" + src_dirname + \"\\\". 
Exiting now.\")\n os.chdir(absolute_path_to_repo)", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def test_add_srcdirs_to_syspath(root_path: Path) -> None:\n add_srcdirs_to_syspath()\n\n # Test to see if runtime_syspath's 'src' directory in now in sys.path\n src_path: Path = root_path / \"src\"\n src_path_str: str = os.fspath(src_path)\n sys_paths: List[str] = list()\n found_src_path: bool = False\n syspath_member: str\n for syspath_member in sys.path:\n sys_paths.append(syspath_member)\n if src_path_str == syspath_member:\n found_src_path = True\n break\n\n if not found_src_path:\n msg: str = f\"{src_path.as_posix()} is not in:\"\n syspath_mem: str\n for syspath_mem in sorted(sys_paths):\n msg += f\"\\n\\t{Path(syspath_mem).as_posix()}\"\n pytest.fail(msg)", "def syspath():\n import sys\n pprint(sys.path)", "def activateLocalFastPath() -> None:\n global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n # Try to fix pathing issues in Windows.\n if os.name == \"nt\":\n APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n _FAST_PATH = os.path.join(\n APP_DATA,\n \"{}{}-{}\".format(\n MPI_RANK,\n os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"), # for parallel unit testing,\n datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n ),\n )\n\n _FAST_PATH_IS_TEMPORARY = True", "def getPythonPath():\n python_path = os.environ.get(\"PYTHONPATH\",\"\")\n \n if os.path.basename(os.path.abspath(os.curdir)) == \"Test\":\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"../Lib/external/SQLObject-compat\"),\n os.path.normpath(\"../Lib/external\"),\n os.path.normpath(\"../Lib\"),\n ])\n else:\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"./Lib/external/SQLObject-compat\"),\n os.path.normpath(\"./Lib/external\"),\n os.path.normpath(\"./Lib\"),\n ])\n \n return new_python_path", "def add_path(package):\n\n path_file_name = '../{0}/test/path.txt'.format(package)\n\n if os.path.exists(path_file_name):\n with open(path_file_name, 'r') as path_file:\n for directory in path_file.readlines():\n sys.path.insert(0, os.path.abspath(\n '../{0}/{1}'.format(package, directory.strip('\\n'))\n ))", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';", "def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary 
)\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None", "def where(self, exe, path=None):\n if exe is None:\n return None\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n\n def is_executable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(exe)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n exe_name = exe + ext\n for p in paths:\n exe_path = os.path.join(p, exe_name)\n if is_executable(exe_path):\n return exe_path\n\n return None", "def test_remote_sys_path(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_sys_path():\n assert \"\" not in sys.path\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0", "def shared_binary_location(cmd=\"shared\"):\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)", "def module_path() -> Path:\n if hasattr(sys, \"frozen\"):\n return Path(sys.executable).resolve().parent\n else:\n return (Path(__file__) / \"..\").resolve().parent", "def init_env_path(path=None) -> None:\n if path is None:\n sys.path.insert(1, file_dir_dir())\n else:\n sys.path.insert(1, path)", "def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))", "def check_module_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking module path\")\n check_module_name = os.system('find %s -mindepth 2 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_module_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4module(pkg, ROOT_SOURCES)\n if src_dir_root != None:\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n else:\n print(\"Package not present in rootbase.\")\n print(\"Please provide manifest file path, else enter 'NA'\")\n p_manifest = raw_input()\n if p_manifest != 'NA':\n value = yaml_validator(p_manifest)\n if value == 1:\n print(\"Not a valid yml. Please provide valid yml. Exiting now.\")\n else:\n print(\"Downloading package using url.\")\n dn_path = downloader(p_manifest)\n #get path for downloaded directory\n filepath = Path(dn_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dn_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n else:\n print(\"Can you provide package path..(if available)\")\n dir_path = raw_input()\n filepath = Path(dir_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dir_path\n else:\n print(\"No CMakeLists.txt present. 
Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n return src_dir_root", "def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None", "def find_executable(binary):\n\n\tfor syspath in os.environ.get('PATH', default_path).split(':'):\n\t\tif os.path.exists(os.path.join(syspath, binary)):\n\t\t\treturn os.path.join(syspath, binary)\n\n\treturn None", "def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)", "def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))", "def GetPath(path_from_src):\n path = os.path.join(os.path.dirname(__file__), '../..', path_from_src)\n if not os.path.isfile(path):\n print 'WARNING: %s does not exist. Maybe moved or renamed?' % path\n return path", "def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)", "def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n return location", "def cwd_in_path():\n ...", "def get_pythonpath(working_set, buildout, prefixes):\n\n # get all paths available in the current working set\n paths = list(working_set.entries)\n\n if hasattr(zc.buildout.easy_install, 'distribute_loc'):\n prepend_path(zc.buildout.easy_install.distribute_loc, paths)\n elif hasattr(zc.buildout.easy_install, 'setuptools_loc'):\n prepend_path(zc.buildout.easy_install.setuptools_loc, paths)\n else:\n prepend_path(zc.buildout.easy_install.setuptools_path, paths)\n\n return [k for k in working_set.entries \\\n if os.path.realpath(k) not in site_paths(buildout, prefixes)]", "def get_lex_path(env, append_paths: bool=False) -> Optional[str]:\n for prog in BINS:\n bin_path = SCons.Tool.find_program_path(\n env,\n prog,\n default_paths=DEFAULT_PATHS,\n add_path=append_paths,\n )\n if bin_path:\n return bin_path\n\n SCons.Warnings.warn(\n SCons.Warnings.SConsWarning,\n 'lex tool requested, but lex or flex binary not found in ENV PATH'\n )", "def AddScriptDirToPath():\n path = os.path.abspath(__file__)\n\n for _ in range(3):\n path, _ = os.path.split(path)\n\n if not path in sys.path:\n sys.path.append(path)", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def sublime_haskell_package_path():\n return os.path.dirname(os.path.realpath(__file__))", "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def _env_with_python_module_search_path():\n e = os.environ\n module_search_path = os.path.join(vmcheckerpaths.root, 'bin')\n if 'PYTHONPATH' in e.keys():\n module_search_path = os.pathsep.join(\n e['PYTHONPATH'], module_search_path)\n 
e['PYTHONPATH'] = module_search_path\n return e", "def Which(binary, path=None):\n if path is None:\n path = os.environ.get('PATH', '')\n for p in path.split(':'):\n p = os.path.join(p, binary)\n if os.access(p, os.X_OK):\n return p\n return None", "def relative_path(__file__, path):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), path))", "def task_binary_location(cmd=\"task\"):\n return binary_location(cmd, TASK_USE_PATH)", "def find_executable(name, paths):\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n # If not found, just assume it's in the PATH.\n return name", "def test():\n # usage()\n path_obj = Env('PATH')\n path_obj.pclean()\n path_obj.padd('/home/mahmud/downloads///')\n path_obj.padd('/home/mahmud/apps//', -1)\n path_obj.premove('/abcd')\n path_obj.premove('/cad/tools/platform/lsf/7.0/linux2.6-glibc2.3-x86_64/etc')\n path_obj.premove('/cad/tools/platform/lsf/7.0/linux2.6-glibc2.3-x86_64/bin')\n path_obj.plist()\n cmd = 'add /usr/bin/'\n cmd = 'clean abcd'\n cmd = 'ld_clean'\n cmd = 'lic_add /bin /tmp'\n cmd = ''\n cmd = 'env_remove CADENCE_PATH /some/arbitrary/dir'\n cmd = 'env_list CADENCE_PATH'\n cmd = 'ld_remove /cad/tools/cliosoft/sos_5.31_linux/lib /cad/tools/cadence/soc/SOC71/tools/lib'\n (cmd, var, args) = process_options(cmd.split())\n print(\"Executing: \", cmd, var, args)\n execute (cmd, var, args)", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def patch_sys_path():\n this_dir = os.path.dirname(__file__)\n to_add = os.path.join(this_dir, \"..\")\n to_add = os.path.abspath(to_add)\n sys.path.insert(0, to_add)", "def _rel_path(fn):\n return os.path.join('./eng-edu/ml/cc/src', fn)", "def get_tools_path(work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.TOOLS_DIR)", "def fix_sys_path():\n sys.path = EXTRA_PATHS + sys.path", "def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def localPython ( localPath ) :\r\n\r\n if not type( localPath ) == str : 
return\r\n\r\n if not localPath.endswith( os.sep ) : localPath = localPath + os.sep\r\n\r\n # reads the paths to add to sys.path\r\n \r\n try :\r\n\r\n handler = open( localPath + \"sysPath.txt\", \"r\" )\r\n\r\n text = handler.read()\r\n\r\n handler.close()\r\n\r\n items = text.splitlines()\r\n\r\n except Exception, exception :\r\n\r\n items = [ ]\r\n\r\n\r\n # places the local paths before the previous search paths. only those that exist\r\n\r\n sysPath = [ ]\r\n\r\n for item in items :\r\n\r\n item = item.strip().replace( \"\\\\\", os.sep ).replace( \"/\", os.sep )\r\n\r\n if len( item ) == 0 : continue\r\n\r\n item = item.strip( os.sep )\r\n\r\n item = localPath + item\r\n\r\n if item in sysPath : continue\r\n\r\n if not os.path.exists( item ) : continue\r\n\r\n sysPath.append( item )\r\n\r\n # places the previous paths. only those that exist\r\n\r\n\r\n for item in sys.path :\r\n\r\n if item in sysPath : continue\r\n\r\n if not os.path.exists( item ) : continue\r\n\r\n sysPath.append( item )\r\n\r\n sys.path = sysPath", "def which(file, env=os.environ):\n if file is None:\n return None\n for path in env.get('PATH', '').split(os.pathsep):\n if path:\n result = os.path.join(path, file)\n if os.path.exists(result):\n return os.path.realpath(result)\n return None", "def add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)", "def initLibPath():\n libHash = {\n 'Framework': 1,\n 'UserControlleLib': 1,\n 'CaseLib': 1\n }\n\n binPath = os.path.split(os.path.realpath(__file__))[0]\n\n for key in libHash:\n sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key))", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def _which(self, program):\n\n def is_exe(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n os.environ[\"PATH\"] += os.pathsep + '%s/bin/' % basedir\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n print 'ishakesumd not found, build it or place it in the PATH before using this tool.'\n exit(1)", "def find_example_dir(python):\n # Replace %s with directory to check for shoebot menus.\n paths = [\n path.format(sys_prefix=sys.prefix, cwd=os.getcwd())\n for path in [\n 'share/shoebot/examples', # default\n 'examples/', # user installed shoebot with -e\n ]\n ]\n code = textwrap.dedent(\"\"\"\n from os.path import isdir\n from pkg_resources import resource_filename, Requirement, DistributionNotFound\n \n for path in {paths}:\n try:\n res_path = resource_filename(Requirement.parse('shoebot'), path)\n if isdir(res_path):\n print(res_path)\n break\n except DistributionNotFound:\n pass\n \"\"\".format(paths=paths))\n\n # Needs to run in same python env as shoebot (may be different to gedits)\n cmd = [python, \"-c\", code]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, errors = p.communicate()\n if errors:\n sys.stderr.write('Shoebot experienced errors searching for install and examples.\\n')\n 
sys.stderr.write('Errors:\\n{0}'.format(errors.decode('utf-8')))\n return None\n else:\n examples_dir = output.decode('utf-8').strip()\n if os.path.isdir(examples_dir):\n return examples_dir\n\n if examples_dir:\n sys.stderr.write('Shoebot could not find examples at: {0}\\n'.format(examples_dir))\n else:\n sys.stderr.write('Shoebot could not find install dir and examples.\\n')", "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def setSysPath():\n c = os.path.abspath(os.path.dirname(__file__))\n\n add = [\n ['lib'],\n ]\n\n for item in add:\n p = os.path.join(c, *item)\n if not p in sys.path:\n sys.path[1:1] = [p]\n\n remove = ['django', 'simplejson']\n\n # Remove unwanted paths\n for item in sys.path:\n for r in remove:\n if item.find(r) > 0:\n sys.path.remove(item)", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")", "def set_search_path(fname, *args):\n \n # Prepare\n args = [arg.lstrip('/') for arg in args if arg]\n args = [arg for arg in args if arg != '.'] # Because we add empty dir anyway\n args.append('') # make libs search next to themselves\n command = get_command_to_set_search_path()\n \n if sys.platform.startswith('linux'):\n # Create search path value\n rpath = ':'.join( ['$ORIGIN/'+arg for arg in args] )\n # Modify rpath using a call to patchelf utility\n cmd = [command, '--set-rpath', rpath, fname]\n subprocess.check_call(cmd)\n print('Set RPATH for %r' % os.path.basename(fname))\n #print('Set RPATH for %r: %r' % (os.path.basename(fname), rpath))\n \n elif sys.platform.startswith('darwin'):\n # ensure write permissions\n mode = os.stat(fname).st_mode\n if not (mode & stat.S_IWUSR):\n os.chmod(fname, mode | stat.S_IWUSR)\n # let the file itself know its place (simpyl on rpath)\n name = os.path.basename(fname)\n subprocess.call(('install_name_tool', '-id', '@rpath/'+name, fname))\n # find the references: call otool -L on the file\n otool = subprocess.Popen(('otool', 
'-L', fname),\n stdout = subprocess.PIPE)\n references = otool.stdout.readlines()[1:]\n \n # Replace each reference\n rereferencedlibs = []\n for reference in references:\n # find the actual referenced file name\n referencedFile = reference.decode().strip().split()[0]\n if referencedFile.startswith('@'):\n continue # the referencedFile is already a relative path\n # Get lib name\n _, name = os.path.split(referencedFile)\n if name.lower() == 'python':\n name = 'libpython' # Rename Python lib on Mac\n # see if we provided the referenced file\n potentiallibs = [os.path.join(os.path.dirname(fname), arg, name) \n for arg in args]\n # if so, change the reference and rpath\n if any([os.path.isfile(p) for p in potentiallibs]):\n subprocess.call(('install_name_tool', '-change',\n referencedFile, '@rpath/'+name, fname))\n for arg in args:\n mac_add_rpath(fname, '@loader_path/' + arg)\n mac_add_rpath(fname, '@executable_path/') # use libpython next to exe\n rereferencedlibs.append(name)\n if rereferencedlibs:\n print('Replaced refs for \"%s\": %s' % \n (os.path.basename(fname), ', '.join(rereferencedlibs)) )\n \n elif sys.platform.startswith('win'):\n raise RuntimeError('Windows has no way of setting the search path on a library or exe.')\n else:\n raise RuntimeError('Do not know how to set search path of library or exe on %s' % sys.platform)", "def test_get_pyrin_main_package_path():\n\n pyrin_root_path = application_services.get_pyrin_root_path()\n pyrin_main_package_path = os.path.abspath(os.path.join(pyrin_root_path, 'pyrin'))\n assert application_services.get_pyrin_main_package_path() == pyrin_main_package_path", "def find_binary(binary: str, paths=None, fallback=None) -> str:\n\n if os.path.isabs(binary):\n if not (os.path.isfile(binary) and access(binary, os.X_OK)):\n raise CommandNotFound(binary)\n return binary\n\n if paths is None:\n paths = os.environ.get(\"PATH\", \"\").split(\":\")\n\n for path in paths:\n filename = os.path.join(os.path.abspath(path), binary)\n if access(filename, os.X_OK) and os.path.isfile(filename):\n return filename\n\n if fallback is not None:\n return fallback\n\n raise CommandNotFound(binary)", "def windows_dll_path_setup():\n global WINDOWS_PATH_SET\n if IS_WINDOWS and not WINDOWS_PATH_SET:\n try:\n out = subprocess.run(\n [\"where.exe\", \"tbb.dll\"], check=True, capture_output=True\n )\n tbb_path = os.path.dirname(out.stdout.decode().splitlines()[0])\n os.add_dll_directory(tbb_path)\n except:\n try:\n tbb_path = os.path.abspath(\n os.path.join(\n get_bridgestan_path(), \"stan\", \"lib\", \"stan_math\", \"lib\", \"tbb\"\n )\n )\n os.environ[\"PATH\"] = tbb_path + \";\" + os.environ[\"PATH\"]\n os.add_dll_directory(tbb_path)\n WINDOWS_PATH_SET = True\n except:\n warnings.warn(\n \"Unable to set path to TBB's DLL. Loading BridgeStan models may fail. \"\n f\"Tried path '{tbb_path}'\",\n RuntimeWarning,\n )\n WINDOWS_PATH_SET = False\n try:\n out = subprocess.run(\n [\n \"where.exe\",\n \"libwinpthread-1.dll\",\n \"libgcc_s_seh-1.dll\",\n \"libstdc++-6.dll\",\n ],\n check=True,\n capture_output=True,\n )\n mingw_dir = os.path.abspath(\n os.path.dirname(out.stdout.decode().splitlines()[0])\n )\n os.add_dll_directory(mingw_dir)\n WINDOWS_PATH_SET &= True\n except:\n # no default location\n warnings.warn(\n \"Unable to find MinGW's DLL location. 
Loading BridgeStan models may fail.\",\n RuntimeWarning,\n )\n WINDOWS_PATH_SET = False", "def _Which(program, paths):\n if sys.platform == 'win32' and not program.lower().endswith('.exe'):\n program += '.exe'\n\n for path in paths:\n candidate = os.path.join(os.path.normpath(path), program)\n if os.path.isfile(candidate):\n return candidate\n\n return None", "def SetToolPaths(toolpaths):\n global tool_search_paths\n\n tool_search_paths = toolpaths", "def add_project_path() -> bool:\n project_path = Path('.')\n cur_path = Path(project_path.absolute())\n for parent in cur_path.parents:\n if 'Pipfile' in [obj.name for obj in parent.glob('*')]:\n project_path = Path(parent.absolute())\n break\n\n src_path = project_path.joinpath('src')\n\n if project_path == '.':\n LOGGER.warning(\"Can't find project_path\")\n return False\n\n if src_path not in sys.path:\n sys.path.append(str(src_path.absolute()))\n return project_path", "def get_python_path():\n\n return get_executable_path('python')", "def check_package_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking package path\")\n check_package_name = os.system('find %s -maxdepth 1 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_package_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4pkg(pkg, ROOT_SOURCES)\n print(\"[root-get] We would use a package from {0:s}\".format(src_dir_root))\n return src_dir_root", "def test_find_in_current_path(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n result = steptest.find_project_directory(directory)\n self.assertEqual(directory, result)", "def get_package_dir():\n return Path(__file__).parent", "def modify_path():\r\n currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n parentdir = os.path.dirname(currentdir)\r\n sys.path.insert(0,parentdir)", "def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. 
Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def check_path():\n print('[GenHub] Checking PATH for executables and scripts.')\n\n execs = ['gt', 'cd-hit', 'tidygff3', 'locuspocus', 'xtractore',\n 'canon-gff3', 'pmrna', 'lpdriver.py', 'uloci.py', 'seq-reg.py']\n paths = list()\n for exe in execs:\n try:\n proc = subprocess.Popen(['which', exe], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n resultcode = proc.wait()\n if resultcode == 0:\n procpath = next(proc.stdout)\n procpath = str(procpath).rstrip()\n paths.append((exe, procpath))\n else:\n paths.append((exe, None))\n except subprocess.CalledProcessError:\n paths.append((exe, None))\n\n missing = False\n for exe, path in paths:\n char = '+'\n if path is None:\n char = '-'\n path = '???'\n missing = True\n print('%s %-20s: %s' % (char, exe, path))\n if missing:\n print('Executables / scripts cannot be found in your PATH.', end='')\n print(' Certain build commands will not work.')", "def constrain_path_relative_to(path):\n environ_backup = os.environ\n environ = os.environ\n\n if path:\n environ = os.environ.copy()\n environ[\"PATH\"] = path\n\n os.environ = environ\n\n try:\n yield\n finally:\n os.environ = environ_backup", "def update_path():\n\timport sys\n\tsys.path.append(directory_root())", "def fix_sys_path(extra_extra_paths=()):\n sys.path[1:1] = EXTRA_PATHS\n fix_google_path()", "def here(*args):\n return os.path.join(os.path.dirname(__file__), *args)", "def _fix_sys_path():\n global _fix_sys_path_done\n\n if _fix_sys_path_done:\n return\n _fix_sys_path_done = True\n if not (sys.argv and sys.path):\n # Not enough information\n return\n d = os.path.dirname(os.path.realpath(sys.argv[0]))\n if sys.path[0] == d:\n sys.path.pop(0)", "def set_include_path(space, paths):\n interp = space.ec.interpreter\n old = os.pathsep.join(interp.include_path)\n interp.include_path = []\n for p in paths.split(os.pathsep):\n interp.include_path.append(p)\n return space.newstr(old)", "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def dir_bin():\n return abspath('bin')", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def get_relative_source_path(self, source_path=None):\r\n if not source_path:\r\n source_path = self.source_path\r\n if source_path is None:\r\n return None\r\n\r\n return os.path.relpath(\r\n os.path.abspath(os.path.join(self.settings['PATH'], source_path)),\r\n os.path.abspath(self.settings['PATH'])\r\n )", "def get_relative_source_path(self, 
source_path=None):\n if not source_path:\n source_path = self.source_path\n if source_path is None:\n return None\n\n return posixize_path(\n os.path.relpath(\n os.path.abspath(os.path.join(\n self.settings['PATH'],\n source_path)),\n os.path.abspath(self.settings['PATH'])\n ))", "def _path(self):\n if self.target[-1] != \"/\":\n self.target += \"/\"\n\n if \"/\" in self.source:\n self.path = self.target + self.source.split(\"/\")[-1]\n else:\n raise NotImplementedError(\"This software is not done for Windows\")\n if self.method == \"git\":\n self.path = self.path.replace(\".git\", \"\")", "def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)", "def determine_python_path():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml),\n 'lib/python2.7/site-packages')\n else:\n return None", "def run_import(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--no-input\", path.parent.as_posix()],\n stdout=subprocess.DEVNULL,\n )\n if (path / \"__main__.py\").exists():\n subprocess.check_call(\n [sys.executable, \"-c\", f\"import {path.name}\"],\n stdout=subprocess.DEVNULL,\n )\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"--no-input\", \"-y\", path.name],\n stdout=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None", "def which(cmd, path=None):\n if path is None:\n path = os.environ[\"PATH\"].split(os.pathsep)\n\n for prefix in path:\n filename = os.path.join(prefix, cmd)\n executable = os.access(filename, os.X_OK)\n is_not_directory = os.path.isfile(filename)\n if executable and is_not_directory:\n return True\n\n return False", "def include_path():\n include_dir = os.path.dirname(os.path.dirname(numba.__file__))\n path = os.path.abspath(include_dir)\n return path", "def get_relative_path(path, start_path=\"\"):\r\n if start_path:\r\n rel_path = lib_path.relpath(path, start_path)\r\n else:\r\n rel_path = lib_path.relpath(path)\r\n return rel_path", "def set_output_path(path):\n\n if not os.path.exists(path):\n cmdline_main.message(\"Creating %s\",path)\n try:\n os.makedirs(path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n cmdline_main.warning(\"Unable to set output path %s\",path)\n\n param.normalize_path.prefix=path\n\n if not path in param.resolve_path.search_paths:\n param.resolve_path.search_paths+=[path]" ]
[ "0.64583826", "0.6119704", "0.60500836", "0.5732794", "0.5658576", "0.56554246", "0.5512849", "0.5505444", "0.54935056", "0.5483672", "0.5481498", "0.5455168", "0.5439266", "0.5433778", "0.54287785", "0.5424196", "0.5423426", "0.5394256", "0.53781176", "0.5329684", "0.5325822", "0.52742755", "0.52715653", "0.52624583", "0.5246451", "0.5242646", "0.52409595", "0.52304184", "0.5229665", "0.5225051", "0.52033633", "0.5201149", "0.5197319", "0.519542", "0.51940525", "0.5182969", "0.5170419", "0.5169428", "0.51537764", "0.5149106", "0.5148469", "0.51409435", "0.5134104", "0.5123738", "0.5110238", "0.51027423", "0.5095024", "0.50935596", "0.5091986", "0.5090286", "0.50842994", "0.50825363", "0.5076648", "0.5071093", "0.506935", "0.5067634", "0.5065574", "0.5060455", "0.50602055", "0.50593334", "0.5059273", "0.5048375", "0.50456494", "0.5044838", "0.5043009", "0.5038083", "0.5030513", "0.50158584", "0.5011537", "0.5007856", "0.50030893", "0.5002455", "0.49968073", "0.49913317", "0.49834475", "0.49799818", "0.49773663", "0.49772424", "0.49733955", "0.496623", "0.49623707", "0.4952024", "0.49502504", "0.49447647", "0.49397078", "0.49392042", "0.4923048", "0.4923018", "0.49152303", "0.49084365", "0.49080262", "0.4903503", "0.49026972", "0.48919156", "0.48878804", "0.48876777", "0.48875", "0.48855394", "0.48821542", "0.4880285" ]
0.6302872
1
Wait for condition to return anything other than None
def wait_condition(cond, timeout=1, sleeptime=.01):
    # NOTE Increasing sleeptime can dramatically increase testsuite runtime
    # It also reduces CPU load significantly
    if timeout is None:
        timeout = 1

    if timeout < sleeptime:
        print("Warning, timeout cannot be smaller than", sleeptime)
        timeout = sleeptime

    # Max number of attempts until giving up
    tries = int(timeout / sleeptime)

    for i in range(tries):
        val = cond()

        if val is not None:
            break

        sleep(sleeptime)

    return val
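A minimal usage sketch for the function above (illustrative only; it assumes `from time import sleep` is in scope, and `flaky_lookup` is a made-up callable that becomes non-None after a few polls):

from time import sleep   # wait_condition calls sleep() directly

calls = []

def flaky_lookup():
    # Hypothetical condition: becomes available on the fourth poll
    calls.append(1)
    return "ready" if len(calls) >= 4 else None

value = wait_condition(flaky_lookup, timeout=1, sleeptime=.01)
print(value)  # "ready" if the condition was met within ~1 s, otherwise None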
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def waitUntilSuccess():", "def _wait_for(self, check_func, desc, result=False, timeout=200):\r\n if result:\r\n return Promise(check_func, desc, timeout=timeout).fulfill()\r\n else:\r\n return EmptyPromise(check_func, desc, timeout=timeout).fulfill()", "def await_condition(condition, timeout=2000):\n\n for _ in range(timeout):\n if condition():\n return True\n time.sleep(0.001)\n return False", "def wait_fluently(condition: Callable, timeout: TimeoutType, err_msg: str):\n if timeout is None:\n timeout = 0\n start_time = time.time()\n while True:\n res = condition()\n if res:\n return res\n if time.time() - start_time >= timeout:\n raise TimeoutException(err_msg)\n time.sleep(0.3)", "def __bool__(self):\n return self.wait(0)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def wait():\n pass", "def wait_for(self, condition, *args):\n start_time = int(time.time())\n while True:\n try:\n condition(*args)\n except Exception:\n pass\n else:\n return\n if int(time.time()) - start_time >= self.build_timeout:\n condition(*args)\n return\n time.sleep(self.build_interval)", "def _wait_on_condition(self, timeout):\n self.__condition.wait(timeout)", "def until_true(condition, timeout, error_msg):\n timeout = timestr_to_secs(timeout)\n max_wait = time.time() + timeout\n while True:\n if condition():\n break\n if time.time() > max_wait:\n raise AssertionError(error_msg)\n time.sleep(0.1)", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def wait_until(self, condition, timeout=None):\n if condition():\n return True\n t_start = time.time()\n while not condition():\n if timeout is not None and time.time() > t_start + timeout:\n return False\n if threading.current_thread() is self.kernel.parent.control_thread:\n # Wait for a reply on the comm channel.\n self.poll_one()\n else:\n # Wait 10ms for a reply\n time.sleep(0.01)\n return True", "def do_wait(self):\n pass", "def _wait_until(self, condition, timeout: TimeoutType = DEFAULT_TIMEOUT):\n if not timeout:\n timeout = 0\n return wait.WebDriverWait(self._webdriver, timeout).until(condition)", "async def _do_if_possible(self, coroutine: Awaitable[None]) -> None:\n try:\n await coroutine\n except IncorrectStateException:\n pass", "def wait(self, timeout):\n if not hasattr(self, '_value'):\n try:\n value = self.broker.pop_result(self, timeout=timeout)\n except KeyError:\n return False\n except TaskExpired as err:\n value = err\n self._value = value\n return hasattr(self, '_value')", "async def wait_until_done(self) -> None:\n ...", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def _wait_until(cond: Callable[[], bool], timeout: float = 15, interval: float = 0.1):\n start = time()\n end = start + timeout\n while time() <= end:\n if cond() is True:\n return\n sleep(interval)\n\n raise AssertionError(\"Condition not true in {} seconds\".format(timeout))", "def wait_until(func, 
wait_for=None, sleep_for=0.5):\n res = func()\n\n if res:\n return res\n\n if wait_for:\n deadline = time.time() + wait_for\n while not res and time.time() <= deadline:\n gevent.sleep(sleep_for)\n res = func()\n\n else:\n while not res:\n gevent.sleep(sleep_for)\n res = func()\n\n return res", "def wait_until(self, func_condition: (), timeout_sec: int=WAIT_TIMEOUT_SEC):\n return WebDriverWait(self.driver, timeout_sec).until(lambda driver: func_condition())", "def ensure_condition(callback, *args,\n sleep_step=0.001, max_wait_time=30, **kwargs):\n event = Event()\n condition = ConditionWaiter(event, callback, *args,\n sleep_step=sleep_step, **kwargs)\n condition.start()\n result = event.wait(max_wait_time)\n condition.stop()\n\n return result", "def wait_for(predicate_func, **kwargs):\n if len(kwargs) == 0:\n while not predicate_func():\n pass\n else:\n while not predicate_func(**kwargs):\n pass", "def _blocking(self, timeout, func):\n ret = func(True)\n if ret is not None or self._in_transaction:\n return ret\n if timeout:\n deadline = time.time() + timeout\n else:\n deadline = None\n while True:\n timeout = deadline - time.time() if deadline is not None else None\n if timeout is not None and timeout <= 0:\n return None\n # Python <3.2 doesn't return a status from wait. On Python 3.2+\n # we bail out early on False.\n if self._db.condition.wait(timeout=timeout) is False:\n return None # Timeout expired\n ret = func(False)\n if ret is not None:\n return ret", "def fake_poll_until(retriever, condition=lambda value: value,\n sleep_time=1, time_out=0):\n from trove.common import exception\n slept_time = 0\n while True:\n resource = retriever()\n if condition(resource):\n return resource\n fake_sleep(sleep_time)\n slept_time += sleep_time\n if time_out and slept_time >= time_out:\n raise exception.PollTimeOut()", "def wait(self):\n pass", "def wait(self):\n pass", "def take_until(condition):\n return partial(takewhile, pipe | condition | operator.not_)", "def wait_for_event(event):\r\n return event.accept()", "def wait(self, timeout=None):\n with self.condition:\n if not self.ready:\n self.condition.wait(timeout)", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def _wait_for_element(self, element_selector, promise_desc):\r\n\r\n def _is_element_present():\r\n \"\"\"\r\n Check if web-element present in DOM.\r\n\r\n Returns:\r\n bool: Tells elements presence.\r\n\r\n \"\"\"\r\n return self.q(css=element_selector).present\r\n\r\n EmptyPromise(_is_element_present, promise_desc, timeout=200).fulfill()", "def wait():\n time.sleep(1)", "def _wait_for(\n func: Callable,\n expected_result: Any = True,\n timeout: int = 10,\n print_error: bool = True,\n sleep_for: int = 1,\n **kwargs,\n) -> None:\n end = time() + timeout\n\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err: # pylint: disable=broad-except\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )", "def Blocking(self) -> bool:", "def wait(self):\n return (self.status == self.STATUS_WAIT)", "def functionThatShouldNotTimeout():\n return None", "def test_success_result(self):\n dr = EventualResult(succeed(123), None)\n self.assertEqual(dr.wait(0.1), 123)", "def wait_on_function ( 
func, func_args, func_wait_value, sleep_time, max_wait ) :\n func_response = func( *func_args )\n while func_response != func_wait_value :\n if max_wait <= 0 :\n break\n time.sleep( sleep_time )\n max_wait -= sleep_time\n func_response = func( *func_args )\n\n return func_response == func_wait_value", "def wait(result):\n if is_result_proxy(result):\n result.__wrapped__ # force the evaluation", "async def wait_value(self, value_or_predicate, *, held_for=0) -> VT:\n predicate = _ValueWrapper(value_or_predicate)\n while True:\n if not predicate(self._value):\n value = await self._wait_predicate(self._level_results, predicate)\n else:\n value = self._value\n await trio.sleep(0)\n if held_for > 0:\n with trio.move_on_after(held_for):\n await self.wait_value(lambda v: not predicate(v))\n continue\n break\n return value", "def wait_for_func_status(self, result):\n try:\n for res in self:\n if result == res:\n return True\n\n except self.timeout_exc_cls:\n log.error(\n f\"({self.func.__name__}) return incorrect status after timeout\"\n )\n return False", "def wait_or_fail(self, evt, timeout=2.0, msg=''):\n res = evt.wait(timeout)\n if not res:\n self.fail(msg)", "def wait(self, timeoout=None, state=\"C-completed\"):", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def answer_waiting_call(self) -> None:", "def wait_for(self, condition, timeout_message='', time_for_stop=None):\n\n if self._loaded:\n time_for_stop = time_for_stop or self.operate_timeout\n else:\n time_for_stop = time_for_stop or self.loading_timeout\n\n started_at = time.time()\n while not condition():\n if time_for_stop != -1 and time.time() > (started_at + time_for_stop):\n if self._loaded:\n raise OperateTimeout, timeout_message\n else:\n # raise LoadingTimeout, timeout_message\n self.trigger_action('Stop') #QWebPage::Stop\n self._loaded = True\n logger.warning(\"Page loading timeout.Force to stop the page\")\n break\n\n gevent.sleep(2)", "def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT):\n start = time.time()\n while True:\n if test():\n return True\n if time.time() - start > timeout_seconds:\n return False\n time.sleep(0.5)", "def is_waitable(self):\n return self._condition is not None", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_any_message(self, timeout=None):\n self._wait_in_process_loop(lambda: (True,None),timeout=timeout)", "def wait_until_predicate(\n predicate: Callable[[], bool], timeout: int, interval: float = 1.0\n):\n try:\n res = wait(predicate, timeout, interval, raise_on_timeout=True)\n except TimeoutException:\n return\n else:\n raise RuntimeError(\n \"Early finish of wait(), predicate: {}.\".format(res)\n )", "def test_wait_race(self):\n mock_handler = mock.Mock()\n async_result = self._makeOne(mock_handler)\n\n async_result.set(\"immediate\")\n\n cv = threading.Event()\n\n def wait_for_val():\n # NB: should not sleep\n async_result.wait(20)\n cv.set()\n th = threading.Thread(target=wait_for_val)\n th.daemon = True\n th.start()\n\n # if the wait() didn't sleep (correctly), cv will be set quickly\n # if it did sleep, the cv will not be set yet and this will timeout\n cv.wait(10)\n eq_(cv.is_set(), True)\n th.join()", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "async def 
wait_for(self, predicate, timeout=None):\n await self._event.acquire()\n await asyncio.wait_for(\n self._event.wait_for(lambda: predicate(self)),\n timeout=timeout,\n loop=self._loop,\n )\n self._event.release()", "def _wait_until(predicate: Callable, timeout_in_seconds: int, *args, **kwargs):\n\n # Invoke the method once first before starting any countdown, to guarantee that we can call the method at\n # least twice before timing out with an error.\n # (Handle cases in which the duration to execute the given predicate is longer than the allotted timeout.)\n result = predicate(*args, **kwargs)\n if bool(result):\n return result\n\n # Begin to repeatedly invoke the predicate within a countdown.\n end_time = time.time() + timeout_in_seconds\n while time.time() < end_time:\n result = predicate(*args, **kwargs)\n if bool(result):\n return result\n else:\n time.sleep(0.25)\n raise TimeoutError()", "def wait_element(self, wait_time):\n time.sleep(wait_time)\n if self.is_exist():\n return True\n else:\n return False", "def wait_until_not_raised(condition, delay, max_attempts):\n def wrapped_condition():\n try:\n result = condition()\n except:\n return False, None\n\n return True, result\n\n attempt = 0\n while attempt < (max_attempts-1):\n attempt += 1\n success, result = wrapped_condition()\n if success:\n return result\n\n time.sleep(delay)\n\n # last attempt, let the exception raise\n return condition()", "async def test_wait_for(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death)\n\n def do_nothing(_: auraxium.event.Event) -> None:\n pass\n\n trigger.action = do_nothing\n\n await self.client.wait_for(trigger, timeout=-1.0)\n\n with self.assertRaises(TimeoutError):\n await self.client.wait_for(trigger, timeout=0.00001)", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()", "def wait_for(self, timeout):\n ready = False\n # Dividing sleep time by 300 instead of 30 double CPU load but cuts\n # IMU timestamp variation from about 20% to less than 1%\n sleep_time = (timeout / 1000.0) / 30\n stop_time = time.monotonic_ns() + (timeout * 1000000.0)\n while not ready and time.monotonic_ns() < stop_time:\n ready = GPIO.input(self.gpio_pin)\n time.sleep(sleep_time)\n return ready", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def _check_result(self, fut, *data):\n return fut", "def waituntil(condition, timeout_in_seconds,\n period_in_seconds=DEFAULT_PERIOD, raise_on_timeout=False):\n end_time = time.time() + timeout_in_seconds\n\n while time.time() < end_time:\n try:\n if condition():\n return True\n except:\n pass\n time.sleep(period_in_seconds)\n\n if raise_on_timeout:\n raise RuntimeError(\"Condition was never met\")\n return False", "def __await__(self):\n return self.waiter.__await__()", "def __await__(self):\n return self.waiter.__await__()", "def _wait_until_not(self, condition, timeout: TimeoutType = DEFAULT_TIMEOUT):\n if not timeout:\n timeout = 0\n return wait.WebDriverWait(self._webdriver, timeout).until_not(condition)", "def wait_for_task(task):\n task_done = False\n while not task_done:\n if task.info.state == 
'success':\n return task.info.result\n\n if task.info.state == 'error':\n print \"there was an error\"\n #task_error = task.info.error.msg\n task_done = True", "def waiting(self) -> bool: # pylint: disable=W0221\n return True", "def _selenium_wait_for(fn):\n start_time = time.time()\n while True:\n try:\n return fn()\n except (AssertionError, WebDriverException) as e:\n if time.time() - start_time > 10:\n raise e\n time.sleep(0.5)", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def wait(self, task: RemoteTask) -> None:\n raise NotImplementedError()", "def eventually(condition, timeout=30.0, catch_assertions=False):\n start_time = time()\n lastValue = None\n while time() - start_time < timeout:\n if catch_assertions:\n try:\n lastValue = condition()\n except AssertionError as e:\n lastValue = e\n else:\n lastValue = condition()\n if lastValue is True:\n return\n sleep(0.01)\n if isinstance(lastValue, AssertionError):\n raise lastValue\n else:\n raise AssertionError(\n \"Test failed due to timeout after %g sec, with last condition returning: %s\"\n % (timeout, lastValue)\n )", "def result(self, wait=0):\n while True:\n # TODO: I don't like polling, we could use LISTEN here, even\n # globally so that any waiters would check if their future was\n # complete. Even if all were awakened for each completed future, it\n # would be more efficient than polling.\n result = get_result(self.uid)\n if result is not None:\n return result\n if wait == 0:\n break\n if wait > 0:\n wait = max(0, wait - 0.5)\n time.sleep(wait if wait > 0 else 0.5)", "def await_condition(description, condition_eval_callable, on_failure=lambda: True, timeout=10, poll_s=0.1):\n start_time = time.time()\n\n def should_continue():\n return time.time() - start_time < timeout\n\n while not condition_eval_callable():\n if not should_continue():\n on_failure()\n raise AssertionError(\n \"Awaiting condition {0} has timed out after {1} seconds\".format(description, timeout)\n )\n time.sleep(poll_s)", "def wait_for(self, state, predicate, timeout=None):\n\n self.__lock.acquire()\n try:\n result = self.__state.value == state or predicate()\n if result:\n return result\n end_time = None if timeout is None else monotonic() + timeout\n wait_time = 1\n while not result:\n if end_time is not None:\n wait_time = min(end_time - monotonic(), 1)\n if wait_time <= 0:\n break\n result = self.__lock.wait_for(lambda: self.__state.value == state, wait_time) or predicate()\n return result\n finally:\n self.__lock.release()", "def wait_until(self, callback, timeout=10):\n from selenium.webdriver.support.wait import WebDriverWait\n\n WebDriverWait(self.selenium, timeout).until(callback)", "def wait(self, time):\n self._wait = Event()\n return not self._wait.wait(time)", "def wait(self, timeout=600):\n s = datetime.datetime.now()\n status = json.loads(self.get())\n while status['status'] != 'COMPLETE':\n status = self.get()\n e = datetime.datetime.now()\n if (e - s).seconds > timeout:\n raise RuntimeError('timeout')\n return status", "def wait(self):\n if self._thrd is not None:\n self._thrd.join()\n\n return self.__exit_code", "def wait_until_empty(self):\n while not self.is_empty():\n self.sleep(10)", "def wait_to_be_ready(self):\n count = 0\n while count < 6:\n try:\n line = self.stdout_reader.get(timeout=10)\n if \"waiting for input\" in line:\n self.cec_logger.info('CEC is ready')\n break\n except Empty:\n self.cec_logger.warning(\"haven't received a line from CEC\")\n count += 3", "def 
test_success_result_twice(self):\n dr = EventualResult(succeed(123), None)\n self.assertEqual(dr.wait(0.1), 123)\n self.assertEqual(dr.wait(0.1), 123)", "def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def wait_until(condition, delay, max_attempts):\n attempt = 0\n while not condition() and attempt < max_attempts:\n attempt += 1\n time.sleep(delay)\n\n if attempt >= max_attempts:\n raise Exception(\"Condition is still False after {} attempts.\".format(max_attempts))", "def wait(self):\n time.sleep(self.next())" ]
[ "0.7600536", "0.71974915", "0.69235563", "0.6832884", "0.68265367", "0.66899425", "0.66899425", "0.66899425", "0.66899425", "0.6640798", "0.66129583", "0.655346", "0.6533913", "0.6519288", "0.6512367", "0.6503648", "0.6491821", "0.6419775", "0.6390421", "0.63593", "0.6358844", "0.63544", "0.63519055", "0.6324726", "0.6315711", "0.6300969", "0.62290096", "0.6213684", "0.61915016", "0.61775106", "0.6175457", "0.6174626", "0.6174626", "0.6134105", "0.6121241", "0.6110393", "0.6101476", "0.6101166", "0.6071322", "0.6066186", "0.60460377", "0.6041609", "0.60152656", "0.5999887", "0.59957385", "0.5988882", "0.5984719", "0.5977278", "0.5971873", "0.596993", "0.596628", "0.594576", "0.5941489", "0.5931268", "0.5924822", "0.5922725", "0.59192854", "0.5918916", "0.5918916", "0.5918916", "0.59051377", "0.590474", "0.5902006", "0.59006286", "0.58745474", "0.587109", "0.5857352", "0.5851978", "0.5850194", "0.58447355", "0.5839828", "0.5838834", "0.5829177", "0.5825803", "0.58166367", "0.57854974", "0.57854974", "0.5781036", "0.5775597", "0.57686186", "0.57441956", "0.574287", "0.5738821", "0.5735731", "0.5733843", "0.57268935", "0.5724846", "0.57235426", "0.57218134", "0.57190436", "0.5716305", "0.5716035", "0.57101125", "0.57087284", "0.5708427", "0.57045025", "0.5690048", "0.5685169", "0.5679293" ]
0.66056544
11
Wait for process to finish
def wait_process(pid, timeout=None):
    def process():
        try:
            os.kill(pid, 0)
        except OSError:
            # Process is dead
            return True
        else:
            # Process is still ticking
            return None

    return wait_condition(process, timeout)
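An illustrative sketch (assumes a POSIX system with a `sleep` binary; `child` and `other` are made-up names). Note that `os.kill(pid, 0)` still succeeds for an exited-but-unreaped child, so the child is reaped with `wait()` before probing:

import os          # needed by wait_process itself
import subprocess

child = subprocess.Popen(["sleep", "0.1"])
child.wait()                                  # reap it so the PID really disappears
print(wait_process(child.pid))                # True: os.kill() now raises OSError

other = subprocess.Popen(["sleep", "5"])
print(wait_process(other.pid, timeout=0.2))   # None: still running when the timeout elapsed
other.terminate()
other.wait()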
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_finish(self):\r\n self.proc.join()", "def wait(self):\n self.Popen.wait()", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def wait_all_process_done(self) -> None:\n while len(self.process_queue) > 0:\n self.check_process_done()", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "async def wait_until_done(self) -> None:\n ...", "def wait_process_running(process):\n assert process.is_running()", "def wait():\n pass", "def wait_complete(self):\n self.join()", "async def wait(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.wait()", "def wait_to_complete(self, timeout: float = 5) -> None:\n if self.proc.poll() is not None: # type: ignore\n return\n\n start_time = time.time()\n\n while start_time + timeout > time.time() and self.proc.poll() is None: # type: ignore\n time.sleep(0.001)\n\n if self.proc.poll() is None: # type: ignore\n self.terminate(force=True)\n self.wait()\n self.exitstatus = \"Terminated!\" # type: ignore", "def wait(self):\r\n self.jobs.join()", "def poll_process_done(self) -> None:\n while len(self.process_queue) >= self.max_processes:\n self.check_process_done()", "def wait():\r\n win32event.WaitForSingleObject(hProcess,\r\n win32event.INFINITE)\r\n returncode = win32process.GetExitCodeProcess(hProcess)\r\n return returncode", "def wait(self):\n self.queue.join()", "def wait(self):\n pass", "def wait(self):\n pass", "def do_wait(self):\n pass", "def wait_process_completion(remote_command_executor, pid):\n logging.info(\"Waiting for performance test to complete\")\n command = f\"\"\"\n ps --pid {pid} > /dev/null\n [ \"$?\" -ne 0 ] && echo \"COMPLETE\" || echo \"RUNNING\"\n \"\"\"\n result = remote_command_executor.run_remote_command(command)\n if result.stdout == \"RUNNING\":\n raise Exception(\"The process is still running\")\n else:\n return result.stdout.strip()", "def loop_wait(self):\n self.log_debug(\"Waiting for loop to finish\")\n if self.loop_state() != LState.Stopped:\n self.event_loop_proc.Wait()\n self.log_debug(\"Loop finished\")", "def wait_until_finished(self) -> None:\n if not self._parent_signal_conn:\n raise ValueError(\"Process not started.\")\n if self._async_mode:\n raise RuntimeError(\"wait_until_finished should only be called in sync_mode\")\n while self._parent_signal_conn.poll(timeout=None):\n try:\n result = self._parent_signal_conn.recv()\n except EOFError:\n return\n self._process_message(result)\n if isinstance(result, DagParsingStat):\n # In sync mode (which is the only time we call this function) we don't send this message from\n # the Manager until all the running processors have finished\n return", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "def finalize(self):\n self.busy = False\n self.pipe_start.send((\"FINISH\",None))\n self.process.join()\n if self.process.is_alive():\n self.process.terminate()", "def wait(self):\n self.mainloop().wait()", "def wait():\n time.sleep(1)", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n 
results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "async def wait(self):\n if self.poll() is None:\n await wait_child_exiting(self)\n self._proc.wait()\n else:\n await _core.checkpoint()\n return self.returncode", "def wait(self):\n try:\n self.relay.wait()\n self.responder.wait()\n except KeyboardInterrupt:\n print_notification(\"Stopping\")\n finally:\n self.terminate_processes()", "def check_process_full(self) -> None:\n if len(self.process_queue) >= self.max_processes:\n task_name, sp = self.process_queue.pop()\n sp.wait()", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def wait_for_processing(self, task):\n DesktopBrowser.wait_for_processing(self, task)", "def wait(self, stdin=None):\n if not self._process:\n raise JubaTestFixtureFailedError('this instance has not been started yet')\n\n log.debug('waiting for process to complete: %s', self.args)\n (self.stdout, self.stderr) = self._process.communicate(stdin)\n returncode = self._process.returncode\n self._process = None\n log.debug('process completed: %s with status %d', self.args, returncode)\n return returncode", "def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with 
self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()", "def wait_completion(self):\r\n self.tasks.join()", "def wait(self):\n logger.debug(\"Tracing...\")\n self.tracerProcess.wait()\n logger.debug(\"Post processing...\")\n self.postThread.join()\n logger.debug(\"Post processing done!\")", "def _poll_process(self, box_config):\n try:\n p = psutil.Process(box_config.pid)\n\n return_code = p.wait(timeout=0.01)\n if return_code is None:\n # process is already terminated\n self.logger.info(f'Process {box_config.process_name} is terminated')\n return\n else:\n # process is terminated; possibly by OS\n box_config.pid = None\n self.bc_dao.update(box_config)\n self.logger.info(f'Process {box_config.process_name} got terminated. Cleaning up')\n except TimeoutExpired:\n # process is alive and OK\n pass\n except Exception:\n self.logger.error(f'Exception on polling: {box_config.process_name}', exc_info=True)", "def check_process_done(self):\n for task_name, sp in self.process_queue:\n if sp.poll() is not None:\n self.process_queue.remove((task_name, sp))\n print(definitions.PRINT_CODES[0] + blue(\"Query done: \"), blue(task_name))\n if self.handler is not None:\n self.handler(task_name, sp.returncode)", "def wait_for_goma_ctl(self):\n if self._goma_ctl_process is None:\n return\n sleep_count = 0\n while self._goma_ctl_process.poll() is None:\n time.sleep(0.1)\n sleep_count += 1\n if sleep_count > 50:\n print 'killing goma_ctl because it took too long at shutdown'\n self._goma_ctl_process.kill()\n return\n\n # Note that it is safe to wait a subprocess multiple times.\n if self._goma_ctl_process.wait():\n print self._goma_ctl_process.stdout.read()\n print 'goma_ctl %s failed!' % self._get_goma_ensure_start_command()\n sys.exit(1)", "def wait(self):\n if self._thrd is not None:\n self._thrd.join()\n\n return self.__exit_code", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait(self):\n self.drain_call_queue()", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def wait_for_completion(self):\n self.logger.debug(\"Waiting for completion\")\n finished = False\n while not finished:\n if self._all_workers_are_idle():\n self.logger.info(\"Finished\")\n finished = True", "def waitUntilSuccess():", "def wait(self):\n self.event.wait()", "def wait(self) -> None:\n\n self.event_.wait()", "async def async_wait_for_process(loop, process: psutil.Process, timeout):\n try:\n await asyncio.wait_for(loop.run_in_executor(None, process.wait), timeout=timeout)\n except asyncio.exceptions.TimeoutError as e:\n raise e", "def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()", "def wait(self, timeout):\n if not self._is_active:\n raise StopIteration\n try:\n self._pid.wait(timeout=timeout)\n self._is_active = False\n except subprocess.TimeoutExpired:\n pass", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def wait() -> None:\n\n process_input(input())", "def 
wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def wait(self):\n time.sleep(0.010)", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def ensure_process_results(self, process_object, label):\n self.logger.debug(\"Waiting for %s to complete.\", label)\n process_object.wait()\n\n self.logger.debug(\"%s completed with return code: %d.\", label, process_object.returncode)\n\n if process_object.returncode != 0:\n # This is an error condition.\n raise CronException(\"Process did not finish with a non-error (0) return code.\")", "def waitFinish(self):\n while self.job_queue_count > 0:\n sleep(0.5)\n\n # If there was a failure, we don't want to wait for possibly halted threads\n # while performing a 'join'. So just exit now with a failure.\n if self.failure:\n sys.exit(1)\n\n self.worker_pool.close()\n self.worker_pool.join()\n self.status_pool.close()\n self.status_pool.join()", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def finish(self, pid, sts):\r\n retval = Subprocess.finish(self, pid, sts)\r\n self.after_finish()\r\n return retval", "def _try_finish(self, wait):\n counter = 0\n while not self._stop_signal and counter < wait:\n time.sleep(1)\n counter += 1\n res = self.process.poll()\n res_bsr = self.process_bsr.poll() if self.process_bsr else True\n if res is not None and res_bsr is not None:\n logger.info('Finish try succeeded')\n self.return_code = res\n time.sleep(15)\n return True\n else:\n logger.warning('Killing scrapy process manually, task id is %s',\n self.task_data.get('task_id', 0))\n # kill process group, if not finished in allowed time\n if self.process_bsr:\n try:\n self.process_bsr.terminate()\n except OSError as e:\n logger.error('Kill process bsr error in task #%s: %s',\n self.task_data.get('task_id', 0), e)\n try:\n self.process.terminate()\n except OSError as e:\n logger.error('Kill process error in task #%s: %s',\n self.task_data.get('task_id', 0), e)\n return False", "def wait(self) -> None:\n self._executor.shutdown(wait=True)", "def wait(self, timeout=0):\n if timeout:\n self._finished.wait(timeout=timeout)\n else:\n self._finished.wait()", "def finishWait(self):\r\n self.scheduler.finishWait()", "def ServeUntilSubprocessDies(self, process):\n child_result = 0\n try:\n while True:\n if process.poll() is not None:\n child_result = 0\n break\n if self.conn.poll():\n child_result = self.conn.recv()\n break\n time.sleep(0)\n except KeyboardInterrupt:\n pass\n finally:\n self.Shutdown()\n return child_result", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def wait_timeout(proc, seconds):\n start = time.time()\n end = start + seconds\n interval = 0.01\n\n while True:\n result = proc.poll()\n #print \"waiting\"\n if result is not None:\n return result\n if time.time() >= end:\n\n os.killpg(proc.pid, signal.SIGTERM)\n raise RuntimeError(\"Process timed out\")\n time.sleep(interval)", "def wait(self, timeoout=None, state=\"C-completed\"):", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def compute(self):\n parfile = self.create_parfile()\n self._command = [self.class_exe, parfile]\n process = subprocess.Popen(self._command)\n try:\n # process.wait(timeout=300)\n process.wait()\n # except (KeyboardInterrupt, subprocess.TimeoutExpired) as e: # TimeoutExpired only in Python >= 3.3\n except Exception as e:\n process.kill()\n raise e\n 
return", "def _execute(self):\n LOG.info(\"Waiting for a message...\")", "def wait_until_done(self, timeout=10.0):\r\n cfunc = lib_importer.windll.DAQmxWaitUntilTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle, ctypes.c_double]\r\n\r\n error_code = cfunc(self._handle, timeout)\r\n check_for_error(error_code)", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def wait_process_termination(p_pid):\n try:\n _, stdout, _ = run_cmd(\"ps --pid {} -o comm=\".format(p_pid))\n except ChildProcessError:\n return\n raise Exception(\"{} process is still alive: \".format(stdout.strip()))", "def wait(self, timeout=None):\n assert type(timeout) in (\n int, type(None)), 'Wrong type for [timeout], should be an int or None [was {0}]'.format(type(timeout))\n\n self._process.join(timeout)", "def wait(self):\n\n self.sem = threading.Semaphore(0)\n self.sem.acquire()", "def waitForThreadCompletion(self) -> None:\n self.writer.waitForThreadCompletion()", "def wait_for_shutdown(self, timeout=5):\n # pylint: disable=E1101\n self._process.join(timeout=timeout) # type: ignore\n # pylint: enable=E1101", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def wait(self):\n self.drain_call_queue()\n DaskWrapper.wait(self._data)", "def on_timeout_cb(self):\n returncode = self.process.poll()\n if returncode is None:\n self.progress_bar.pulse()\n return True\n\n self.response(gtk.RESPONSE_ACCEPT)\n return False", "def wait(self):\n self.__prcs.wait()\n return self.poll()", "def wait(self):\n num_pings = 0\n # Some streams seem to start fine with up to 4 pings before beginning download?\n # More investigation is needed\n max_pings = 1 + self._pingouts\n # timeout after 1 minute\n timeout = datetime.datetime.now() + datetime.timedelta(minutes=1)\n try:\n for line in self._process.stderr:\n # TODO: add mpegts or other variants depending on the container settings? or no?\n # if \"Output #0, mp4\" in line:\n if \"Output #0\" in line:\n self._process.communicate()\n self.move_to_dest()\n self._pingouts = 0\n break\n elif \"HandleCtrl, Ping\" in line:\n num_pings += 1\n if num_pings > max_pings:\n # The main issue with this is that the slain processes will not have their files moved\n # But I think this is preferable to the other solutions I've come up with.\n # For future reference, those were:\n #\n # 1) Sending SIGINT then continuing to read stderr until it exited (sometimes it doesn't)\n # 2) Sending SIGINT, storing a reference to the process, then restarting the download.\n # This prevents the process from being garbage collected until the Watcher is\n # 3) Sending SIGINT, then storing info about src and dest paths for the stopped download.\n # If a reference to the process is NOT stored, there's no way to be sure it has finished writing\n # (if it's writing at all). 
The only way was to give them a grace period and then just start\n # moving, but this adds undesirable time to the cleanup phase, when we may want to restart\n # a falsely completed Watcher asap.\n # 4) Just moving the file straightaway. This is obviously bad since ffmpeg takes a few moments to\n # finish.\n # NOTE: only option #1 was actually tried, the others were partially written before being\n # abandoned as their problems became clear\n #\n # Two additional options exist (not mutually exclusive):\n # 1) Passing the dead processes off to a queue and having another thread clean up.\n # 2) Having regular maintenance sweep the active folder and move files it can be sure are done\n # to their proper folders.\n #\n # I *probably* need to use 1) eventually, especially once I figure out how to actually end\n # stuck processes without killing the parent. But it requires a lot more code.\n # Until then let's just see how this works.\n #\n # When that time does come, a Downloader copy constructor may be useful.\n download_logger.debug(\"Download pinged {} times: Stopping\".format(num_pings))\n self._pingouts += 1\n self.stop()\n\n # close stderr to force the loop to exit\n time.sleep(0.1)\n self._process.stderr.close()\n time.sleep(0.1)\n # process will be garbage collected when the next one is started, or the Watcher dies\n # self._process = None\n # This *should* work for newer builds of FFmpeg without librtmp.\n # Only question is whether 1 minute is too long (or too short).\n # UPDATE: Why doesn't this ever seem to work?\n # is it because FFmpeg freezes output and hangs now? so we're never getting another line to iterate over\n # elif datetime.datetime.now() > timeout:\n # download_logger.debug(\"Download of {} timed out\".format(self.outfile))\n # self.stop()\n # time.sleep(0.1)\n # self._process.stderr.close()\n # time.sleep(0.1)\n else:\n time.sleep(0.2)\n\n except ValueError:\n download_logger.debug('ffmpeg stderr closed unexpectedly')\n\n # Is it possible for the process to end prematurely?\n return self._process.returncode", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait(self):\r\n self.scheduler.wait()", "def Finish(self):\n\t\n self.queue.join()", "async def wait_async(self):\n await self._future" ]
[ "0.8497128", "0.7932346", "0.7532554", "0.7532554", "0.7532554", "0.7532554", "0.7501503", "0.73955196", "0.72686297", "0.72191155", "0.7064616", "0.7028927", "0.701474", "0.69731134", "0.6924637", "0.68755513", "0.68720436", "0.6823285", "0.6817343", "0.68091136", "0.68091136", "0.6790015", "0.6769383", "0.67475235", "0.67407924", "0.6739854", "0.6736196", "0.6730194", "0.66544366", "0.66501474", "0.6616263", "0.6601879", "0.6562054", "0.65561056", "0.6547476", "0.65279496", "0.6514303", "0.6507609", "0.6495829", "0.6481494", "0.6454554", "0.6453204", "0.6447288", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.64468205", "0.6439614", "0.6438415", "0.642916", "0.642916", "0.6412113", "0.6411221", "0.6408346", "0.63958824", "0.6395198", "0.63873255", "0.63571197", "0.6353189", "0.6335358", "0.6333562", "0.6328127", "0.6324782", "0.63225466", "0.63144666", "0.63047016", "0.62888473", "0.6283269", "0.6282621", "0.6276761", "0.62759244", "0.6263007", "0.6232277", "0.62258625", "0.6225654", "0.62230873", "0.620402", "0.62035084", "0.61940163", "0.61779237", "0.61687803", "0.6141398", "0.6137184", "0.61278903", "0.6126179", "0.6101305", "0.6100833", "0.60956174", "0.6090459", "0.6066241", "0.60599583", "0.60570747", "0.60507125", "0.6047171", "0.60470194", "0.6045672" ]
0.0
-1
Read/write the output/input of a given process. This function is meant to be executed in a thread as it may block
def _queue_output(arguments, pidq, outputq):
    kwargs = arguments["process"]
    input_data = arguments["input"].encode("utf-8") if arguments["input"] else None

    try:
        proc = Popen(**kwargs)
    except OSError as e:
        # pid None is read by the main thread as a crash of the process
        pidq.put(None)

        outputq.put((
            "",
            ("Unexpected exception caught during execution of taskw: '{0}' . "
             "If you are running out-of-tree tests set TASK_USE_PATH=1 "
             "in shell env before execution and add the "
             "location of the task(d) binary to the PATH".format(e)),
            255))  # false exitcode

        return

    # Put the PID in the queue for main process to know.
    pidq.put(proc.pid)

    # Send input and wait for finish
    out, err = proc.communicate(input_data)

    if sys.version_info > (3,):
        out, err = out.decode('utf-8'), err.decode('utf-8')

    # Give the output back to the caller
    outputq.put((out, err, proc.returncode))
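A hedged driving sketch (the `arguments` layout — "process" holding Popen kwargs and "input" holding a str or None — is inferred from the function body; `cat` is assumed to be available, and the module defining `_queue_output` is assumed to import `sys` and `Popen`):

from queue import Queue
from subprocess import PIPE, Popen   # _queue_output calls Popen directly
from threading import Thread

arguments = {
    "process": {"args": ["cat"], "stdin": PIPE, "stdout": PIPE, "stderr": PIPE},
    "input": "hello\n",
}
pidq, outputq = Queue(), Queue()

worker = Thread(target=_queue_output, args=(arguments, pidq, outputq))
worker.start()

pid = pidq.get()                  # None here would mean Popen itself failed
out, err, exitcode = outputq.get()
worker.join()
print(pid, repr(out), exitcode)   # e.g. 12345 'hello\n' 0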
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(process, line_handler):\n\n io_q = queue.Queue(5)\n threads = {\n \"stdout\": threading.Thread(\n target=read_stream, args=(\"stdout\", process.stdout, io_q)\n ),\n \"stderr\": threading.Thread(\n target=read_stream, args=(\"stderr\", process.stderr, io_q)\n ),\n }\n # Unfortunately, stdout and stderr are not synchronised with each other.\n # This makes capturing both for real-time processing useless. So it is\n # currently all captured under stdout. Even more unfortunately, stderr\n # comes through first before stdout. This means writes that are made first\n # to stdout will not be first through the pipe if there is stderr output.\n #\n # This lack of sychronisation between stdout and stderr output makes\n # real-time display useless because they aren't captured and passed\n # through to the handler as they are encountered.\n #\n # Worse still, there appear to be issues with subprocess output capture on\n # Windows.\n #\n # A proper resolution would be to provide a custom subprocess module but\n # since the common usage does not require real-time capture of\n # stdout/stderr, this is not worth the effort. Manually running whatever\n # was intended for the subprocess outside ttt is the only recourse.\n #\n for thread in threads.values():\n thread.start()\n\n stdout = []\n stderr = []\n while threads:\n try:\n item = io_q.get(True, 1)\n except queue.Empty:\n if process.poll() is not None:\n break\n else:\n outstream, message = item\n if message == \"EXIT\":\n threads[outstream].join()\n del threads[outstream]\n else:\n message = message.rstrip(os.linesep)\n channel = sys.stdout if outstream == \"stdout\" else sys.stderr\n (stdout if outstream == \"stdout\" else stderr).append(message)\n if line_handler is not None:\n line_handler(channel, message)\n else:\n channel.write(message)\n channel.flush()\n\n for t in threads.values():\n t.join()\n process.wait()\n return (process.returncode, stdout, stderr)", "def running_output(process, outputs):\n state = type(\"State\",\n (object, ),\n {\n \"printed_message\": False,\n \"read_first_byte\": False\n })\n\n def output_printer(file_handle):\n \"\"\"Thread that prints the output of this process.\"\"\"\n character = bytearray()\n while True:\n character += file_handle.read(1)\n try:\n if character:\n if not state.read_first_byte:\n state.read_first_byte = True\n\n if character != \"\\n\":\n IndentedLogger.message(\"\\n\")\n\n # If this fails, then we will just read further characters\n # until the decode succeeds.\n IndentedLogger.message(character.decode(\"utf-8\"))\n state.printed_message = True\n character = bytearray()\n else:\n return\n except UnicodeDecodeError:\n continue\n\n stdout = threading.Thread(target=output_printer, args=(outputs[0], ))\n\n stdout.start()\n stderr_lines = list(outputs[1])\n\n try:\n status = process.wait()\n finally:\n stdout.join()\n\n # Print a new line before printing any stderr messages\n if len(stderr_lines):\n IndentedLogger.message(\"\\n\")\n\n for line in stderr_lines:\n IndentedLogger.message(line.decode(\"utf-8\"))\n state.printed_message = True\n\n if state.printed_message:\n print_message(\"\\n\")\n\n return status", "def _read_rs(self, process, append):\n print('read_rs thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_up()\n if 'value -1' in line.decode('utf-8'):\n self.vol_down()\n print('read_rs thread stopped')", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n 
process.communicate()", "def _read_pb(self, process, append):\n print('read_pb thread started')\n for line in iter(process.stdout.readline, \"\"):\n if 'value 1' in line.decode('utf-8'):\n self.vol_mute()\n print('read_pb thread stopped')", "def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def run(self):\n def target():\n # Pass these inputs to STDIN with delays\n for i in self.delayed_inputs:\n if type(i) is int or type(i) is float:\n time.sleep(i)\n elif type(i) is bytes:\n try:\n self.process.stdin.write(i) \n except IOError as e:\n lg.info(\n \"Input: {} failed to write to stdin due to\\n{}\".format(i, e)\n )\n break\n if self.disable_communicate:\n self.process.wait()\n else:\n self.stdout_res, self.stderr_res = self.process.communicate(\n input=self.inputs)\n\n try:\n self.process = Popen(self.command, stdin=self.stdin,\n stdout=self.stdout, stderr=self.stderr,\n start_new_session=True, cwd=self.cwd, env=self.env)\n except OSError:\n lg.error(\"Couldn't Popen command {}\".format(self.command))\n raise\n self.thread = Thread(target=target)\n self.thread.start()", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def atomic_io(cmd, in_file, out_file, err_file, prog=None):\n with open(in_file, 'r') as inp, open(out_file, 'w') as out, open(err_file, 'w') as err:\n p = subprocess.Popen(\n cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=err)\n while True:\n line = inp.readline()\n if not line:\n break\n p.stdin.write(line)\n out.write(p.stdout.readline())\n out.flush()\n if prog:\n prog.inc()\n p.stdin.close()\n p.wait()", "def run(self, arguments=None, debug=False):\n\n # kill the child process if we receive a terminate signal\n def terminate_child_process(child, signum, frame):\n try:\n if child and signum != signal.SIGINT:\n child.terminate()\n child.wait()\n finally:\n sys.exit()\n\n # poll the pty for available data to read, then push to a queue and signal ready\n def produce_queue(queue, master_fd, slave_fd, evt, 
proc):\n with os.fdopen(master_fd, 'rb', 0) as task_stream:\n while 1:\n ready = select.select([master_fd], [], [], 0)[0]\n\n # exit if our process has terminated and no more input\n if not ready and proc.poll() is not None:\n os.close(slave_fd)\n evt.set()\n break\n\n if master_fd in ready:\n # POSIX.1 requires PIPE_BUF to be at least 512 bytes, but Linux uses 4096 bytes\n data = os.read(master_fd, 4096)\n\n if not data:\n # reached EOF, signal data ready in case the queue is not empty, then exit\n evt.set()\n break\n else:\n # put data in the queue and signal the consumer thread\n queue.put(data)\n evt.set()\n\n # wait for ready signal, then read data from queue and save to a buffer\n # once the buffer contains an end of line, send that to a callback if defined,\n # then send the line to a file for later processing\n def consume_queue(queue, filename, evt, proc, callback=None):\n streambuffer = []\n with open(filename, 'w+') as fileobj:\n while 1:\n # wait for a signal at most one second at a time so we can check the child process status\n evt.wait(1)\n if queue.empty() and proc.poll() is not None:\n # make sure the last part of the buffer is written out\n if streambuffer:\n if callback:\n callback(streambuffer[0])\n\n fileobj.write(streambuffer[0])\n fileobj.flush()\n break\n elif queue.empty():\n # the queue is empty, but our child process has not exited yet, so data may show up still\n continue\n\n data = queue.get_nowait()\n streambuffer.append(data)\n queue.task_done()\n\n # As soon as we see an end of line from the stream, we should write.\n # Since we could receive many lines per queue chunk, we want to pass\n # a line at a time to our callback.\n if '\\n' in data:\n merged = \"\".join(streambuffer)\n lines = merged.split('\\n')\n\n if len(lines) > 1 and '\\n' not in lines[-1]:\n streambuffer = [lines[-1]]\n lines.pop()\n else:\n streambuffer = []\n\n if callback:\n for x in lines:\n if not x:\n continue\n callback(x)\n\n fileobj.write(\"\".join(lines))\n fileobj.flush()\n\n command_list = self._build_command_list(arguments,debug)\n\n self.logger.info(\"Executing {0}\".format(\" \".join(command_list)))\n\n stdout_name = 'task_stdout_{}'.format(datetime.datetime.utcnow().isoformat())\n stderr_name = 'task_stderr_{}'.format(datetime.datetime.utcnow().isoformat())\n\n stderr = open(stderr_name, 'w+')\n\n # Use pty to provide a workaround for buffer overflow in stdio when monitoring stdout\n master_stdout_fd, slave_stdout_fd = pty.openpty()\n #master_stderr_fd, slave_stderr_fd = pty.openpty()\n #task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=slave_stderr_fd, close_fds=True)\n task = subprocess.Popen(command_list, stdout=slave_stdout_fd, stderr=stderr.fileno(), close_fds=True)\n\n # force termination signal handling of the child process\n signal_handler = functools.partial(cleanup, task)\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n stdout_queue = Queue.Queue()\n stdout_data_ready = threading.Event()\n\n t1 = threading.Thread(target=produce_queue, args=(stdout_queue, master_stdout_fd, slave_stdout_fd, stdout_data_ready, task))\n t1.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=consume_queue, args=(stdout_queue, stdout_name, stdout_data_ready, task, self.callback))\n t2.daemon = True\n t2.start()\n\n #stderr_queue = Queue.Queue()\n #stderr_data_ready = threading.Event()\n\n #t3 = threading.Thread(target=produce_queue, args=(stderr_queue, master_stderr_fd, slave_stderr_fd, stderr_data_ready, task))\n 
#t3.daemon = True\n #t3.start()\n\n #t4 = threading.Thread(target=consume_queue, args=(stderr_queue, stderr_name, stderr_data_ready, task))\n #t4.daemon = True\n #t4.start()\n\n task.wait()\n\n t1.join()\n t2.join()\n #t3.join()\n #t4.join()\n\n stdout = open(stdout_name, 'rb')\n #stderr = open(stderr_name, 'rb')\n stderr.seek(0)\n\n task_output = {}\n task_output[\"stdout\"] = \"\".join(stdout.readlines())\n task_output[\"stderr\"] = \"\".join(stderr.readlines())\n\n stdout.close()\n stderr.close()\n os.remove(stdout_name)\n os.remove(stderr_name)\n\n if task.returncode != 0:\n self.logger.error(task.returncode)\n raise Exception(task_output[\"stdout\"], task_output[\"stderr\"])\n else:\n return task_output", "def process():", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n except:\n LOG.exception('uncaught exception in stderr read')\n\n # This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def run(self):\n out_fd = self.output()\n out_dir = os.path.join(self.LOCAL_ROOT, self.SHARED_RELATIVE_PATH)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n in_fd = self.input()\n\n with in_fd.open('r') as fd:\n result = fd.read()\n with out_fd.open('w') as o_fd:\n o_fd.write(result)", "def process():\n pass", "def read_stream(self, output_queue, stream_type):\n output = []\n\n # Get all available output off the queue.\n try:\n while 1:\n output.append(output_queue.get_nowait())\n except Empty:\n pass\n\n # If we read any output, toss it out to the logger\n if len(output):\n logger = logging.getLogger('taskmaster.processes.{}'.format(self.process_index))\n\n if stream_type == StreamType.Stdout:\n for line in output:\n logger.info(line)\n elif stream_type == StreamType.Stderr:\n for line in output:\n logger.error(line)\n\n # Get the current status to determine if we should try to read more or stop.\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n if current_status != psutil.STATUS_DEAD:\n # Process still alive, schedule the call to read more output.\n self.ioloop.call_later(0.1, self.read_stream, *[output_queue, stream_type])\n else:\n # Process has died. 
Flush the iostreams so the BlockingStreamReader triggers one last time and\n # nicely exits.\n self.process.stdout.flush()\n self.process.stderr.flush()", "async def communicate(self):\n assert self._input.is_file()\n self._output.open(\"w\").close()\n return (None, None)", "def output_on_fail(process, outputs):\n status = _maybe_use_running_output(process, outputs)\n if status is not None:\n return status\n\n def reader(handle, input_queue):\n \"\"\"Thread which reads handle, until EOF.\"\"\"\n input_queue.put(handle.read())\n\n with thread_output(target=reader, args=(outputs[0], )) as stdout_queue:\n with thread_output(target=reader,\n args=(outputs[1], )) as stderr_queue:\n stdout = stdout_queue.get()\n stderr = stderr_queue.get()\n\n status = process.wait()\n\n if status != 0:\n IndentedLogger.message(\"\\n\")\n IndentedLogger.message(stdout.decode(\"utf-8\"))\n IndentedLogger.message(stderr.decode(\"utf-8\"))\n\n return status", "def __readStdout(self):\n if self.process is not None:\n self.process.setReadChannel(QProcess.StandardOutput)\n \n while self.process.canReadLine():\n s = str(self.process.readLine(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n if (\n self.currentChangelist != \"\" and\n self.rx_status.exactMatch(s)\n ):\n file = self.rx_status.cap(5).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif (\n self.currentChangelist != \"\" and\n self.rx_status2.exactMatch(s)\n ):\n file = self.rx_status2.cap(2).strip()\n filename = file.replace(self.path + os.sep, \"\")\n if filename not in self.changeListsDict[\n self.currentChangelist\n ]:\n self.changeListsDict[self.currentChangelist].append(\n filename)\n elif self.rx_changelist.exactMatch(s):\n self.currentChangelist = self.rx_changelist.cap(1)\n if self.currentChangelist not in self.changeListsDict:\n self.changeListsDict[self.currentChangelist] = []", "def run_cmd(command, inputStream = \"\"):\n timeoutSecs = 3600\n timePassed = 0.0\n increment = 0.01\n\n stderrFD, errFile = tempfile.mkstemp()\n stdoutFD, outFile = tempfile.mkstemp()\n\n process = Popen(command, shell=True, stdin=PIPE, stdout=stdoutFD, \n stderr=stderrFD, close_fds=False)\n\n if process == None:\n print \"Could not create process\"\n sys.exit(1)\n\n try:\n if inputStream != \"\":\n for line in inputStream:\n process.stdin.write(line)\n process.stdin.flush()\n\n while True:\n status = process.poll()\n if status != None:\n # Process terminated succesfully.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n return (False, stdoutContents, stderrContents, process.returncode)\n\n if timePassed < timeoutSecs:\n time.sleep(increment)\n timePassed = timePassed + increment\n else:\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP)\n return (True, stdoutContents, stderrContents, 
process.returncode)\n except Exception, e:\n # if something threw exception (e.g. ctrl-c)\n print e\n os.kill(process.pid, signal.SIGTSTP)\n try:\n # time out, kill the process.\n # time out, kill the process.\n stdoutSize = os.lseek(stdoutFD, 0, 2)\n stderrSize = os.lseek(stderrFD, 0, 2)\n\n os.lseek(stdoutFD, 0, 0)\n os.lseek(stderrFD, 0, 0)\n\n stdoutContents = os.read(stdoutFD, stdoutSize)\n stderrContents = os.read(stderrFD, stderrSize)\n\n os.close(stdoutFD)\n os.remove(outFile)\n os.close(stderrFD)\n os.remove(errFile)\n os.kill(process.pid, signal.SIGTSTP) \n except:\n pass\n\n return (False, stdoutContents, stderrContents, process.returncode)", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output", "def pipestring_process(cmd_string, stdin_string=''):\n f=SpooledTemporaryFile()\n f.write(stdin_string)\n f.seek(0)\n results=process(cmd_string, stdin=f)\n f.close()\n return results", "def process_output(self, stdout=True, final_read=False):\n if stdout:\n pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee\n else:\n pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee\n\n if final_read:\n # read in all the data we can from pipe and then stop\n data = []\n while select.select([pipe], [], [], 0)[0]:\n data.append(os.read(pipe.fileno(), 1024))\n if len(data[-1]) == 0:\n break\n data = \"\".join(data)\n else:\n # perform a single read\n data = os.read(pipe.fileno(), 1024)\n buf.write(data)\n tee.write(data)", "def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line 
in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or \\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def watch(self):\n reader, writer = os.pipe2(0)\n\n pid = os.fork()\n\n # In the child\n if pid == 0:\n tty.setraw(0)\n os.close(reader)\n os.close(2)\n\n os.dup2(writer, 1)\n\n os.execlp(self.__program, self.__program, *self.__args)\n\n sys.exit(1)\n else:\n os.close(writer)\n\n while True:\n result = os.read(reader, 1024)\n if len(result) == 0:\n break\n sys.stdout.write(result.decode('utf-8'))\n\n os.waitpid(pid, 0)", "def ServeUntilSubprocessDies(self, process):\n child_result = 0\n try:\n while True:\n if process.poll() is not None:\n child_result = 0\n break\n if self.conn.poll():\n child_result = self.conn.recv()\n break\n time.sleep(0)\n except KeyboardInterrupt:\n pass\n finally:\n self.Shutdown()\n return child_result", "def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()", "def __init__(self):\n # Open stata as pipe; make a queue for non-blocking. Start the thread.\n self.proc = sp.Popen(['stata-mp'], stdin=sp.PIPE, stdout=sp.PIPE, bufsize=1)\n\n self.qu = Queue()\n\n self.thread = Thread(target = self.enqueue_output, args = (self.proc.stdout,\n self.qu))\n self.thread.daemon = True\n self.thread.start()\n\n # Read the initial stdout content.\n self.genout()", "def hook() -> None:\n real_recv = process.recv_raw\n\n def recv(self: process, numb: int) -> bytes:\n data = real_recv(self, numb)\n # Sometimes the returned data is of type str\n # Accept them by converting them to bytes\n if type(data) == str:\n data = data.encode()\n try:\n stdout_all = self.stdout_all\n except Exception: # pylint: disable=broad-except\n stdout_all = b\"\"\n stdout_all += data\n self.stdout_all = stdout_all\n return data\n\n process.recv_raw = recv", "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def run_process(cmd, out_log=None, err_log=None):\r\n return run_multi_processes([cmd], out_log=out_log, err_log=err_log)", "def get_value(Runner, input_str):\n Runner.stdin.write(input_str)\n output = Runner.stdout.readline() \n return output", "def __init__(self):\n self.write_queue = Manager().Queue()\n\n BaseManager.register('Arduino',Arduino)\n BaseManager.register('Algorithm',Algorithm)\n BaseManager.register('Android',Android)\n BaseManager.register('ImageCV', ImageCV)\n manager = BaseManager()\n manager.start()\n shared_ard = manager.Arduino()\n shared_alg = manager.Algorithm()\n shared_and = manager.Android()\n shared_icv = manager.ImageCV()\n \n p1 = Process(target=self.read_algorithm, args=(shared_alg, shared_icv))\n p1.start()\n p2 = Process(target=self.read_arduino, args=[shared_ard])\n p2.start()\n p3 = Process(target=self.read_android, args=[shared_and])\n p3.start()\n p4 = 
Process(target=self.read_imagecv, args=[shared_icv])\n p4.start()\n p5 = Process(target=self.write_target, args=(shared_ard, shared_alg, shared_and, shared_icv))\n p5.start()\n p5.join()", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def clean_output(self, process, queue):\n while True:\n try:\n dirty = process.getline()\n clean = self.parse(dirty)\n except Queue.Empty:\n process.queueHasData.wait()\n except ValueError as inst:\n print(\"Error: \" + str(inst))\n else:\n if clean != None:\n self.cleanOutput.append(clean)", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and 
couldn't be killed\")", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "def main(input_dir, output_dir):\n\n process(input_dir, output_dir)", "def call_and_feed(cmd, data):\n p = Popen(cmd, shell=True, stdin=PIPE)\n p.stdin.write(data)\n p.stdin.close()\n return p.wait()", "def poll_process(process, suppress_errors=False):\n\n while True:\n data_to_stdout(\".\")\n time.sleep(1)\n\n returncode = process.poll()\n\n if returncode is not None:\n if not suppress_errors:\n if returncode == 0:\n data_to_stdout(\" done\\n\")\n elif returncode < 0:\n data_to_stdout(\" process terminated by signal %d\\n\" % returncode)\n elif returncode > 0:\n data_to_stdout(\" quit unexpectedly with return code %d\\n\" % returncode)\n\n break", "def write( shell, data ):\n #print 'cmd: ' + data\n global waiting\n os.write( shell.stdin.fileno(), data )\n waiting = True", "async def read_console(self):\n while self.proc is not None and self.proc.poll() is None:\n line = await self.loop.run_in_executor(None, self.proc.stdout.readline) # Async readline\n # Parse the command output and get the time in epoch format\n match = re.match(r'\\[([0-9]{2}):([0-9]{2}):([0-9]{2})\\] \\[([^][]*)\\]: (.*)$', line.decode())\n if match is None:\n return\n h, m, s, log, text = match.groups()\n local = time.localtime()\n if h == 23 and local.tm_hour == 0: # In case a line from 23:59 gets parsed at 00:00\n local = time.localtime(time.time()-3600)\n log_t = list(local)\n log_t[3:6] = map(int, (h, m, s))\n log_time = time.mktime(tuple(log_t))\n self.loop.create_task(self.on_line(log_time, log, text))", "def read(self):\n if self.alive:\n with self._register_poll():\n with _unblock_read(self._proc):\n return self._yield_ready_read()\n else:\n raise ProcessIsDeadError('Can not read. 
The process is already dead.')", "def spinupoutputprocess():\n if __name__ == '__main__':\n _hwmgr = HardwareController(OUTPUT_SETTINGS)\n PROCESSES.append(_hwmgr)\n _hwmgr.start()", "def main():\n\tports = glob.glob(\"/dev/tty.wchusbserial*\") + glob.glob(\"/dev/tty.usbserial*\") + glob.glob(\"COM3\") + glob.glob(\"COM4\")\n\tBAUDRATE = 9600\n\tchoice = int(input((str(ports) + \" enter numerical index for port: \")))\n\tportname = ports[choice]\n\tport = None\n\tsending_queue = None\n\treceiving_process_on = None\n\treceiving_process = None\n\ttry:\n\t\tsending_queue = multiprocessing.Queue()\n\t\treceiving_process_on = multiprocessing.Value(c_bool,False)\n\t\treceiving_process = multiprocessing.Process(target = communication, args = (portname,BAUDRATE,sending_queue,receiving_process_on))\n\t\treceiving_process.start()\n\t\twhile True:\n\t\t\tword = input(\"Enter a message: \")\n\t\t\tsending_queue.put(create_chunk(word)) #sending 32 bytes to the process queue\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\tfinally:\n\t\treceiving_process_on.value = False\n\t\tfor i in range(10): #wait for the process to stop\n\t\t\tpass\n\t\tif receiving_process != None:\n\t\t\treceiving_process.join()\n\t\t\n\t\tif sending_queue != None:\n\t\t\tsending_queue.close()", "def communicate(self, process, timeout):\n\n self.timeout = timeout\n self.process = process\n self.start() # Start watchdog\n result = self.process.communicate()\n if self.finished.is_set():\n raise TestException('Test timed out')\n else:\n self.finished.set() # Stop watchdog\n\n if self.process.poll():\n # Non-zero return code. Probably target program crash.\n raise TestException(\n 'Process returned error: ' + result[0].decode())\n\n return result", "def _IterProcessStdoutFcntl(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=too-many-nested-blocks\n import fcntl\n try:\n # Enable non-blocking reads from the child's stdout.\n child_fd = process.stdout.fileno()\n fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)\n fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n end_time = (time.time() + timeout) if timeout else None\n iter_end_time = (time.time() + iter_timeout) if iter_timeout else None\n\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n if iter_end_time and time.time() > iter_end_time:\n yield None\n iter_end_time = time.time() + iter_timeout\n\n if iter_end_time:\n iter_aware_poll_interval = min(poll_interval,\n max(0, iter_end_time - time.time()))\n else:\n iter_aware_poll_interval = poll_interval\n\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if not data:\n break\n yield data\n\n if process.poll() is not None:\n # If process is closed, keep checking for output data (because of timing\n # issues).\n while True:\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if data:\n yield data\n continue\n break\n break\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()", "def popenCommunicate(args, data='', outputs=None, ignoreErrors=False, poll_interval=0.01):\n stdError = None\n if not ignoreErrors:\n stdError = subprocess.STDOUT\n p = subprocess.Popen(args, 
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stdError)\n fcntl.fcntl(p.stdin, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n fcntl.fcntl(p.stdout, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n\n bytesTotal = len(data)\n bytesWritten = 0\n while bytesWritten < bytesTotal:\n try:\n # p.stdin.write() doesn't return anything, so use os.write.\n bytesWritten += os.write(p.stdin.fileno(), data[bytesWritten:])\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_write(p.stdin.fileno())\n\n p.stdin.close()\n\n if outputs is not None:\n while True:\n try:\n chunk = p.stdout.read(4096) \n if not chunk:\n break\n for output in outputs:\n output.write(chunk)\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_read(p.stdout.fileno()) \n\n p.stdout.close()\n\n length = None\n try:\n length = len(outputs[0])\n except:\n length = 0\n\n logging.getLogger().debug(\"popenCommunicate() finished. Args: %s, Output Length: %d\" % (args,length))", "def generate_stream(processArgs):\n process = subprocess.Popen(processArgs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n stdout, _ = process.communicate()\n return StringIO(stdout), process.returncode", "def stdin_read(self, data):\n self.write_master(data)", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return proc1.wait(), proc2.wait(), bytes_piped", "def __init__(self, status_in, data_out):\n Process.__init__(self)\n self.input_stream = status_in\n self.data_out = data_out\n self._UPDATE_INTERVAL_MS = 10\n self._status_labels = {} # A dictionary, whose keys are strings and whose values are Tkinter label variables", "def Start(self):\n self.CallClient(standard.ReadBuffer, next_state=\"WrongProcess\")", "def run(self):\r\n self.env.process(self.rw_pifo_sm())", "def _IterProcessStdoutQueue(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=unused-argument\n if six.PY3:\n import queue\n else:\n import Queue as queue\n import threading\n\n stdout_queue = queue.Queue()\n\n def read_process_stdout():\n # TODO(jbudorick): Pick an appropriate read size here.\n while True:\n try:\n output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)\n except IOError:\n break\n stdout_queue.put(output_chunk, True)\n if not output_chunk and process.poll() is not None:\n break\n\n reader_thread = threading.Thread(target=read_process_stdout)\n reader_thread.start()\n\n end_time = (time.time() + timeout) if timeout else None\n\n try:\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n try:\n s = stdout_queue.get(True, iter_timeout)\n if not s:\n break\n yield s\n except queue.Empty:\n yield None\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()\n reader_thread.join()", "def run(self, filename):\n\n # make a tempdir so I can use a 
file-like object\n # instead of an OS-level handle (like with mkstemp)\t\t\n tdir = make_tempdir()\n tname = os.path.join(tdir,'process.out')\n\n #print \"TEMPDIR = \",tdir\n #print \"TEMPFILE = \",tname\n \n f_out = open(tname,'wb')\n f_in = open(filename,'rb')\n\n # process in->out\n self.process(f_out, f_in)\n\n del f_out\n del f_in\n\n # copy tempfile -> filename\n\n #print \"COPY %s -> %s\" % (tname,filename)\n \n # I think this is secure ... since caller owns filename\n # there isn't a race, right? (unlike writing into a tempdir\n # which could have malicious symlinks in it)\n copy2( tname, filename )\n\n #print \"RMDIR %s\" % tname\n \n # clean up tempdir\n unlink(tname)\n os.rmdir(tdir)", "def process(q, results, iolock, func, args, kwargs):\n\n kwargs[\"iolock\"] = iolock\n\n while True:\n\n line = q.get()\n\n if line is None:\n break\n\n result = func(line, *args, **kwargs)\n results.put(result)\n\n return", "def wait() -> None:\n\n process_input(input())", "def main():\n os.write(2,bytes(process_infos(sys.argv[0])+\"\\n\",'utf-8'))\n # Some signals of interest. Extend the list at will...\n signal.signal(signal.SIGALRM,sigtrace_handler)\n signal.signal(signal.SIGUSR1,sigtrace_handler)\n signal.signal(signal.SIGUSR2,sigtrace_handler)\n signal.signal(signal.SIGINT,sigtrace_handler)\n signal.signal(signal.SIGTERM,sigtrace_handler)\n signal.signal(signal.SIGCONT,sigtrace_handler)\n signal.signal(signal.SIGTSTP,sigtrace_handler)\n signal.signal(signal.SIGTTOU,sigtrace_handler)\n signal.signal(signal.SIGTTIN,sigtrace_handler)\n signal.signal(signal.SIGHUP,sigtrace_handler)\n\n\n while True:\n # Receiving a signal while in select will raise an exception\n try:\n # Wait for new input for at most one second\n rrdy,wrdy,erdy = select.select([sys.stdin],[],[],1.0)\n if len(rrdy) > 0:\n # select returns with stdin ready (before one second)\n buf = os.read(0,1024)\n os.write(1, bytes(sys.argv[0]+\":\",'utf-8')+buf)\n else:\n # select return with nothing to read on stdin\n os.write(1,bytes(sys.argv[0]+\"\\n\",\"utf-8\"))\n except InterruptedError:\n # select interrupted by signal: simply ignore and proceed\n pass", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def run(self,input=None,stdin=None,stdout=None,stderr=None,timeout=None,\n check=False,encoding=None):\n for name,f in self.files.items():\n mode=f.get('mode','wt')\n logger.info(f'{f[\"name\"]}: write mode {mode}')\n with open(f['name'],mode) as fd:\n fd.write(str(f['content']))\n\n env=None\n if self.env:\n env=dict(os.environ)\n env.update(self.env)\n\n logger.info(f'Popen {repr(self.command)}')\n pipe=Popen(args=self.command,stdin=stdin,stdout=stdout,\n stderr=stderr,encoding=encoding,\n cwd=self.cwd,env=env)\n (stdout, stderr) = pipe.communicate(input=input,timeout=timeout)\n cp=CompletedProcess(self.command,pipe.returncode,stdout,stderr)\n if check:\n cp.check_returncode()\n return cp", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = 
Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def RunExternal(command, str_stdin=\"\"):\n\n logging.info(\"Running external command: %s\" % command)\n popen_inst = Popen3(command, True)\n logging.debug(\"stdin = %s\" % str_stdin)\n str_stdout = str_stderr = \"\"\n while 1:\n read_from_child = -1\n if not popen_inst.tochild.closed:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], \\\n [popen_inst.tochild], [])\n else:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], [], [])\n\n if popen_inst.fromchild in rlist:\n tmpread = popen_inst.fromchild.read(4096)\n read_from_child = len(tmpread)\n str_stdout += tmpread\n \n if popen_inst.childerr in rlist:\n tmpread = popen_inst.childerr.read(4096)\n read_from_child += len(tmpread)\n str_stderr += tmpread\n \n if popen_inst.tochild in wlist and len(str_stdin) > 0:\n popen_inst.tochild.write(str_stdin[:min( [ len(str_stdin), 4096])])\n str_stdin = str_stdin[min( [ len(str_stdin), 4096]):]\n read_from_child += 1\n elif popen_inst.tochild in wlist:\n popen_inst.tochild.close()\n\n #logging.debug(\"len(str_stdin) = %i, read_from_child = %i, rlist = %s, wlist = %s\", len(str_stdin), read_from_child, rlist, wlist)\n if popen_inst.poll() != -1 and len(str_stdin) == 0 and (read_from_child == -1 or read_from_child == 0):\n break\n \n logging.debug(\"Exit code: %i\", popen_inst.wait())\n logging.debug(\"stdout: %s\", str_stdout)\n logging.debug(\"strerr: %s\", str_stderr)\n return str_stdout, str_stderr", "def process_input_in_python(inputf, outputf):\n value=-1\n with open(inputf, 'r') as f:\n value = int(f.readline().strip())\n print(outputf)\n time.sleep(5)\n with open(outputf, 'w') as f:\n f.write(str(value**2))", "def readOutput(self):\n while True:\n char = os.read(self.pipe_out, 1).decode(self.encoding)\n if not char or self.escape_char in char:\n break\n self.capturedtext += char", "def run(self, process=lambda frm: None, quit_key='q'):\n while True:\n _, self.frm = self.cap.read()\n process(self.frm)\n if waitKey(1) & 0xFF == ord(quit_key):\n break", "def process_results(process_object):\n (stdout, stderr)=process_object.communicate()\n return (process_object.returncode, stdout, stderr)", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n 
thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data", "def start_console_reader():\n\n def console_reader():\n global console_input\n console_input = None\n\n while console_input is not False:\n sys.stderr.write(\"reading\\n\")\n if console_input is None:\n console_input = sys.stdin.readline()\n else:\n time.sleep(1)\n\n console_reader_thread = threading.Thread(target=console_reader)", "def inout(input_, output_):\n while True:\n chunk = input_.read(1024)\n if not chunk:\n break\n output_.write(chunk)", "def open_sku_stream(args, verbose=False, dip_home=None, add_env={}):\n logging.info(\"Running SKU with %s\" % args)\n process = _run_sku(args, verbose, dip_home, add_env=add_env)\n stderr_chunks = deque(maxlen=100)\n\n # we need to have a thread read stderr to make sure that\n # the child process does not block when OS buffer is full.\n def read_stderr():\n while True:\n # Read should be nice enough to block.\n data = process.stderr.read()\n if not data:\n # child process closed stderr\n return\n stderr_chunks.append(data)\n stderr_thread = threading.Thread(target=read_stderr)\n # our process shouldn't wait for this thread to terminate.\n stderr_thread.daemon = True\n stderr_thread.start()\n error = None\n error_details = None\n exited_regularly = False\n try:\n yield process.stdout\n exited_regularly = True\n except Exception as e:\n error = e\n error_details = sys.exc_info()\n except GeneratorExit as e:\n # generator exit is considered regular.\n # it happens for with the timeout exception.\n exited_regularly = True\n finally:\n #\n # Either something bad happened while reading the pipe,\n # or we finished reading SKU's stdout.\n #\n # Let's check for it terminate and check for some\n # possible errors.\n #\n # Give 10s to the process to put sensible stuff on the stderr\n # terminate and close it.\n #\n # Is the process finished\n return_code = process.poll()\n logging.info(\"Returned with %s\" % return_code)\n if return_code is None:\n # the process is still running.\n # this happens when someone does not consume the process entirely before\n # releasing its stdout.\n #\n # Either explicitly, or because he reached a timeout.\n try:\n process.stdout.close()\n except:\n pass\n return_code = process.poll()\n if return_code is None:\n pass\n # wait 1sec for the program to terminate\n time.sleep(1)\n return_code = process.poll()\n if return_code is None:\n # still not terminated ? 
kill\n # the subprocess and all of its children.\n os.killpg(process.pid, signal.SIGTERM)\n\n stderr_thread.join(2)\n stderr_join = \"\".join(stderr_chunks)\n\n if exited_regularly and return_code == 0:\n logging.info(\"Exit OK\")\n return\n msg = []\n if return_code:\n msg.append(\"+ SKU Process did not end properly (retcode=%s)\" % return_code)\n if not exited_regularly:\n msg.append(\"+ Reading SKU Process encountered the following Exception.\")\n msg.append(str(error.__class__.__name__) + \": \" + str(error))\n if error_details is not None:\n msg.append(\"Stack\")\n msg.append(traceback.format_exc(error_details))\n else:\n msg.append(\"No stack available ?\")\n msg.append(\"Command was :\")\n msg.append(\" \" + str(args))\n if return_code is not None:\n msg.append(\"Return code %i\" % return_code)\n else:\n msg.append(\"Had to kill the process.\")\n msg.append(\"Std err %s\" % stderr_join)\n raise SKUProcessException(\"\\n\".join(msg), args, return_code, stderr_join, cause=error)", "def run(self):\n while True:\n cmd, flag = self.Q['in'].get()\n if flag == 'stop':\n break\n try:\n if flag == 'process':\n sshCmd = \"ssh -q %s \\\"cd %s; %s\\\"\" % (self.host, self.cwd, cmd)\n fp = os.popen(sshCmd)\n output = fp.read()\n #output = fp.readlines()\n fp.close()\n else:\n raise ValueError, 'Unknown flag %r' % flag\n except:\n # unconditional except is right, since we report *all* errors\n self.reportError()\n else:\n if output:\n self.Q['out'].put(output)", "def read_pipe(self, read_data):\n self.logger.info(read_data)", "def run(outs, ins_filter='/dev/ttyUSB.*', newport=lambda conn: None, write_queue=None):\r\n data_queue = multiprocessing.Queue()\r\n\r\n multiprocessing.Process(\r\n target=writer,\r\n args=(data_queue, write_queue, outs)\r\n ).start()\r\n\r\n readers = {}\r\n\r\n while True:\r\n\r\n for (path, _, _) in serial.tools.list_ports.grep(ins_filter):\r\n\r\n if path not in readers.keys() or not readers[path].is_alive():\r\n\r\n readers[path] = multiprocessing.Process(\r\n target=reader, args=(data_queue, path, newport)\r\n )\r\n readers[path].start()", "def master_read(self, data):\n self.write_stdout(data)", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def recieve(self):\n return self.__proc.stdout.readline().strip('\\n')", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def communicate(self, std_in=None, timeout=0):\n if timeout <= 0:\n return super(Popen, self).communicate(input=std_in)\n\n fds = []\n stdout = []\n stderr = []\n\n if self.stdout is not None:\n set_file_nonblock(self.stdout)\n fds.append(self.stdout)\n if self.stderr is not None:\n set_file_nonblock(self.stderr)\n fds.append(self.stderr)\n\n if std_in is not None and sys.stdin is not None:\n sys.stdin.write(std_in)\n\n returncode = None\n inactive = 0\n while returncode is None:\n (rlist, dummy_wlist, dummy_xlist) = select.select(\n fds, [], [], 1.0)\n\n if not rlist:\n inactive += 1\n if inactive >= timeout:\n raise TimeoutError\n else:\n inactive = 0\n for fd in rlist:\n if fd is self.stdout:\n stdout.append(fd.read())\n elif fd is self.stderr:\n stderr.append(fd.read())\n\n returncode = self.poll()\n\n if self.stdout is not None:\n stdout = ''.join(stdout)\n 
else:\n stdout = None\n if self.stderr is not None:\n stderr = ''.join(stderr)\n else:\n stderr = None\n\n return (stdout, stderr)", "def execute_process(uuid, output, hadoop):\n # Ruta del proceso\n backend_path = \"/home/bigdata07/backend\"\n # Path para el proceso de log\n path = \"%s/logs/%s.txt\" % (backend_path, uuid)\n # Comando para crear la carpeta para guardar los resultados del proceso de Hadoop\n backend_output_dir = \"%s/output/%s\" % (backend_path, uuid)\n mkdir_output = \"mkdir -p %s\" % (backend_output_dir)\n # Comando para hacer get de HDFS al home\n get_output = \"hdfs dfs -get %s/* %s/\" % (output, backend_output_dir)\n with open(path, \"w\") as file:\n # Ejecutar Hadoop\n subprocess.run(hadoop.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(mkdir_output.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(get_output.split(\" \"), check=True, stdout=file, stderr=file)\n # Resolve() de una promesa en JS\n return backend_output_dir", "def _read_output(self, node_name, server, startup_event):\n while True:\n l = server.stdout.readline().decode(\"utf-8\")\n if len(l) == 0:\n break\n l = l.rstrip()\n\n if l.find(\"Initialization Failed\") != -1:\n startup_event.set()\n\n logger.info(\"%s: %s\" % (node_name, l.replace(\"\\n\", \"\\n%s (stdout): \" % node_name)))\n if l.endswith(\"started\") and not startup_event.isSet():\n startup_event.set()\n logger.info(\"%s: started\" % node_name)", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. 
\"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def _wait_with_timeout(process, displayName, timeout, read):\n\t# read output from the command, with a timeout\n\t# returns (out, err, timedout) if read else returncode; out/err are byte buffers\n\t\n\t_processCleanupMonitor.add(process)\n\ttimedOut = [False]\n\t\n\tparentThreadName = threading.currentThread().name\n\t\n\tdef kill_proc(): # executed on a background thread\n\t\ttry:\n\t\t\tthreading.currentThread().name = parentThreadName+'-timer'\n\t\t\tlog.info('Process timeout handler for %s invoked after %s s; still running=%s', displayName, timeout, process.poll()==None)\n\t\t\tif process.poll()!=None: return # has terminated, so nothing to do - this happen on loaded machines sometimes\n\t\t\t\n\t\t\t# this will cause us to throw an exception\n\t\t\ttimedOut[0] = True\n\t\t\ttry:\n\t\t\t\tprocess.kill()\n\t\t\t\tlog.info('Process kill completed successfully for %s'%displayName)\n\t\t\texcept Exception as e:\n\t\t\t\t# only log if process is still running (Windows Access Denied 5 are seen occasionally in kill()) - should not happen\n\t\t\t\ttime.sleep(2)\n\t\t\t\tif process.poll() == None:\n\t\t\t\t\tlog.error('Failed to kill process %s (pid %s) after %d second timeout: %s', displayName, process.pid, timeout, e)\n\t\t\t\telse:\n\t\t\t\t\tlog.debug('Process kill failed but process is now stopped anyway: %s', e)\n\t\texcept Exception as e: # should never happen but make sure we notice if it does\n\t\t\tlog.exception('Unexpected error in process timeout monitoring thread for %s: '%displayName)\n\t\t\t\n\ttimer = threading.Timer(timeout, kill_proc, [])\n\ttimer.start()\n\ttry:\n\t\tif read:\n\t\t\tstdout, stderr = process.communicate()\n\t\telse:\n\t\t\trv = process.wait()\n\tfinally:\n\t\ttimer.cancel()\n\t\t_processCleanupMonitor.remove(process)\n\t\t\n\tif timedOut[0]:\n\t\tif read:\n\t\t\treturn stdout, stderr, True\n\t\telse:\n\t\t\traise BuildException('Terminating process %s after hitting %d second timout' % (displayName, timeout))\n\telse:\n\t\tif read:\n\t\t\treturn stdout, stderr, False\n\t\telse:\n\t\t\treturn rv", "def readProcessStdoutLog(self, name, offset, length):\r\n self._update('readProcessStdoutLog')\r\n return self._readProcessLog(name, offset, length, 'stdout')", "def process_run(cmd_string, stdin=None):\n process_object=subprocess.Popen(shlex.split(cmd_string),\n stdin=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return process_object", "def run_mndo(stdin):\n\n # TODO diskless mndo\n # TODO From procs write \"heredocs\" << EOF EOF to mndo?\n\n cmd = MNDOCMD\n\n stdin = stdin.encode()\n\n cmd += \" << EOF\\nPM3 \\nEOF\"\n\n print(cmd)\n print()\n\n\n # a = \"A String of Text\"\n # p = subprocess.Popen(\"wc <<DATA\\n\" + a + \"\\nDATA\", shell=True)\n # stdout, stderr = p.communicate()\n\n\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n # proc.stdin.close()\n # stdout, stderr = proc.communicate(input=\"PM3\")\n # stdout, stderr = proc.communicate(input=stdin)\n stdout, stderr = proc.communicate()\n # proc.stdin.close()\n\n print(stdout)\n\n return stdout, stderr", "def run(self):\n self.read_from_serial()", "def readProcessJson(args: ExecArgs) -> Any:\n proc = Popen(args, stdout=PIPE, stderr=PIPE, 
cwd='/')\n stdout, _ = proc.communicate()\n if proc.wait():\n return None\n return json.loads(stdout)", "def enqueue_output(self, out, queue):\n\n started = False\n finished = False\n\n while not self.stop:\n line = out.readline()\n queue.put(line)\n # Test if we have reached the end of the output\n if started and IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n finished = True\n if IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n started = True\n if finished and self.comms_lock.locked():\n self.comms_lock.release()\n started = False\n finished = False\n\n time.sleep(QUEUE_THREAD_SLEEP_TIME)", "async def open_process(\r\n cls, args: \"Union[str, List[str]]\", env_additions: Dict[str, str] = {}\r\n ) -> \"AsyncIterator[Expect]\":\r\n printer_channels: (\r\n \"Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]\"\r\n ) = trio.open_memory_channel(1)\r\n printer_send_channel, printer_receive_channel = printer_channels\r\n notifier_channels: (\r\n \"Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]\"\r\n ) = trio.open_memory_channel(0)\r\n notifier_send_channel, notifier_receive_channel = notifier_channels\r\n\r\n async with notifier_receive_channel:\r\n\r\n with patch.dict(\"os.environ\", values=env_additions) as patched_env:\r\n async with await trio.open_process(\r\n args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env\r\n ) as process:\r\n async with trio.open_nursery() as nursery:\r\n expect = cls(\r\n process=process,\r\n printer_send_channel=printer_send_channel,\r\n printer_receive_channel=printer_receive_channel,\r\n notifier_send_channel=notifier_send_channel,\r\n opened_notifier_receive_channel=notifier_receive_channel,\r\n )\r\n nursery.start_soon(expect.copier_recorder)\r\n nursery.start_soon(expect.printer)\r\n\r\n yield expect\r\n\r\n # print(\"waiting for process\") # debug\r\n await expect.process.wait()", "def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r", "def communicate(args, **kwargs):\n stdin = None\n # When stdin is passed as an argument, use it as the actual input data and\n # set the Popen() parameter accordingly.\n if 'stdin' in kwargs and isinstance(kwargs['stdin'], basestring):\n stdin = kwargs['stdin']\n kwargs['stdin'] = PIPE\n\n proc = Popen(args, **kwargs)\n return proc.communicate(stdin), proc.returncode" ]
[ "0.6465506", "0.6412228", "0.62609875", "0.6253763", "0.61260885", "0.6119722", "0.60304314", "0.59641206", "0.59629256", "0.5924618", "0.59051806", "0.5897396", "0.5815422", "0.57975465", "0.57944274", "0.578926", "0.57733274", "0.5732314", "0.5685502", "0.56787324", "0.5650317", "0.5623363", "0.56068045", "0.5589298", "0.5583329", "0.5580154", "0.55209124", "0.551534", "0.550309", "0.5436409", "0.5434123", "0.5422263", "0.541854", "0.5402434", "0.54022044", "0.5372", "0.53695047", "0.53675467", "0.53639597", "0.535803", "0.53470784", "0.53409433", "0.5339122", "0.5329098", "0.5307848", "0.5294649", "0.5289709", "0.5289602", "0.52883214", "0.5280609", "0.527968", "0.52772856", "0.52715516", "0.5270544", "0.5257652", "0.5244141", "0.5236895", "0.5230647", "0.5190702", "0.51866555", "0.51864177", "0.5182326", "0.5180791", "0.5179724", "0.51795876", "0.5165802", "0.51644653", "0.5154809", "0.51391876", "0.51391876", "0.5138205", "0.51162857", "0.5106704", "0.510461", "0.5099088", "0.5081786", "0.507421", "0.50713456", "0.50680614", "0.50668055", "0.5063061", "0.5059704", "0.5055023", "0.50519663", "0.50489527", "0.5047353", "0.5031848", "0.50307983", "0.5002247", "0.49824628", "0.4976617", "0.4969294", "0.49679527", "0.49660242", "0.49628332", "0.49573994", "0.49437156", "0.49426985", "0.49376556", "0.4934129" ]
0.6050411
6
Fetch output from taskw subprocess queues
def _retrieve_output(thread, timeout, queue, thread_error):
    # Try to join the thread on failure abort
    thread.join(timeout)
    if thread.is_alive():
        # Join should have killed the thread. This is unexpected
        raise TimeoutWaitingFor(thread_error + ". Unexpected error")

    # Thread died so we should have output
    try:
        # data = (stdout, stderr, exitcode)
        data = queue.get(timeout=timeout)
    except Empty:
        data = TimeoutWaitingFor("streams from TaskWarrior")

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def _get_output_queue(self):\n return self.__output_queue", "def get_results_from_message_queue():\n message_queue.get_result_length()\n logger.info(\"get task results from task queue\")", "def main():\n output_queue = Queue()\n\n out_list = list()\n\n logging.info('Retrieving news...')\n download = DownloadNewsWorker(output_queue)\n download.retrieve_news()\n\n while not output_queue.empty():\n item = output_queue.get()\n out_list.append(item)\n\n return out_list", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def test_get_task_output(self):\n pass", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input_data = arguments[\"input\"].encode(\"utf-8\") if arguments[\"input\"] else None\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution of taskw: '{0}' . \"\n \"If you are running out-of-tree tests set TASK_USE_PATH=1 \"\n \"in shell env before execution and add the \"\n \"location of the task(d) binary to the PATH\".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input_data)\n\n if sys.version_info > (3,):\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def _pull_batch_from_queue(self):\n rollout = self.explorer.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.explorer.queue.get_nowait() )\n except queue.Empty:\n break\n print(rollout.size())\n return rollout", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". 
Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data", "def running_jobs_sherlock():\n user = os.environ['USER']\n\n return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def check_plugin(work_queue, result_queue):\n while work_queue.qsize():\n host = work_queue.get()\n result = commands.getoutput(plugin_cmd + \" -H \" + host)\n result_queue.put([host, result])", "def task_stdout(self, task_id):\n result, _ = self.task_collect(task_id, wait=False)\n return result['shards'][0]['output']", "def get(self):\n return dumps(AQ.queue()), 200", "def local_job(cmd, pollpath, name, queue):\r\n to_submit = '%s; echo $? > %s' % (cmd, pollpath)\r\n\r\n return to_submit", "def get_current_buffer(raw_output):\n\n # get the ouptut from the system call\n output = os.popen(\"sudo tc -s qdisc ls dev enp3s0\").read()\n has_skipped_netem = False\n print(output, file=raw_output)\n queue = get_current_queue(output)\n (sent, drops) = get_current_drops(output)\n return (queue, sent, drops)", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and couldn't be killed\")", "def control(batch_info):\n \n sbatch, script, cps, mem = batch_info\n qsub_name = qsub_prep(sbatch, script, cps, mem)\n\n \n batch_id = os.popen(\"sbatch \" + qsub_name).read()\n batch_id = batch_id.strip().split()[-1]\n\n \n while True:\n output = os.popen(\"squeue -u koerstz\").read()\n \n if batch_id in output:\n time.sleep(5)\n continue \n \n else:\n break\n\n # create data frame:\n df = pd.read_pickle(sbatch + \".pkl\")\n \n return df", "def get_external_result(self):\n while True:\n if len(self.result_queue) > 0:\n result = copy.deepcopy(self.result_queue[0])\n del self.result_queue[0]\n return result", "def run(self):\n proc_name = self.name\n while True:\n next_task 
= self.task_queue.get()\n if next_task is None:\n debug('{}: Exiting'.format(proc_name))\n self.task_queue.task_done()\n break\n debug('{}: {}'.format(proc_name, next_task))\n answer = next_task()\n self.task_queue.task_done()\n self.result_queue.put(answer)\n return", "def PthreadShowKsynQueue(cmd_args=None):\n\tif not cmd_args:\n\t\traise ArgumentError(\"No arguments passed\")\n\n\tkwq = kern.GetValueFromAddress(cmd_args[0], \"ksyn_wait_queue_t\")\n\tprint GetKwqSummary.header\n\tprint GetKwqSummary(kwq)", "def qoutput(self):\n jobid = self.jobid()\n ou = os.path.join(self.directory, jobid + '.OU')\n if not self.in_queue() and os.path.exists(ou):\n with open(ou) as f:\n return f.read()\n else:\n return \"In queue or no output found.\"", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def torque_job(cmd, pollpath, name, queue):\r\n qsub_call = \"qsub -k oe -N %s -q %s\" % (\"MOTU\", queue)\r\n to_submit = 'echo \"%s; echo $? > %s\" | %s' % (cmd, pollpath, qsub_call)\r\n\r\n return to_submit", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def _getqueue(self):\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty(): return self.outqueues[index]", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def worker_function(ident, work):\n\n\t\tdef exec_debug(command_string) :\n\t\t\t\"\"\" \n\t\t\twhen the --debug option is set this outputs the command string rather than execute the command\n\t\t\t\n\t\t\t\tArgs:\n\t\t\t\t\tcommand_string (string) : the command and all args as a simple string\n\t\t\t\t\n\t\t\t\tOuter scope access:\n\t\t\t\t\tnone\n\n\t\t\t\tReturns:\n\t\t\t\t\tstring\n\t\t\t\"\"\"\n\t\t\tline += cmd_string + \"\\n\"\n\t\t\treturn line\n\n\t\tdef exec_lines(command_list, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen the --lines option is set this function outputs every line of output from the command to the output_queue as soon as it is avaliable\n\t\t\trather then wait for the command to complete and puts the command with all options on the fron of each outout\n\t\t\tline so it can be reconciles with the command that generated it. 
\n\n\t\t\tArgs:\n\t\t\t\tcommand list (dictionary) \t: the result of applying shlex.split() to command_string\n\t\t\t\tmark_flag(bool)\t\t\t\t: if true adds \n\n\t\t\tReturns:\n\t\t\t\tNothing\n\n\t\t\tOuter scope access:\n\t\t\t\toutput_queue\n\n\t\t\t\"\"\"\t\n\n\t\t\toutput = \"\"\n\t\t\tcommand_string = \" \".join(command_list)\n\t\t\ttry:\n\t\t\t\tprocess = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n\t\t\t\tpipe = process.stdout\n\t\t\t\toutput = \"\"\n\n\t\t\t\twhile True:\n\n\t\t\t\t\toutput = pipe.readline()\n\t\t\t\t\tif len(output) == 0 : #and (proc.process.poll() is not None ):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif mark_flag:\n\t\t\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\t\t\toutput = mark + output\n\t\n\t\t\t\t\toutput_queue.put(output)\n\t\n\t\t\t\t# while\n\t\n\t\t\t\tprocess.wait()\n\t\t\t\treturn\n\t\t\t#\n\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += \"LINES \"+cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\n\t\t\tif mark_flag:\n\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\toutput = mark + output + \"\\n\"\n\n\t\t\toutput_queue.put(output)\n\n\n\t\t# def exec_and_output_each_line\n\n\t\tdef exec_not_lines(command_string, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen neither the --debug or the --lines options are set this function runs the command and collects all the output\n\t\t\twaits for the command to complete and then returns all the output as a single string\n\n\t\t\tArgs:\n\t\t\t\tcommand_string (string) - \tthe complete command to be executed\n\t\t\t\tmark_flag(bool)\t\t\t- \twhen true the output has additional text on the start and end of the\n\t\t\t\t\t\t\t\t\t\t\toutput so that \n\n\t\t\t\t\t\t\t\t\t\t\t-\tthe start of command execution is marked\n\t\t\t\t\t\t\t\t\t\t\t-\tthe begionning and end of command output is marked\n\t\t\tReturns:\n\t\t\t\tall output as a single string\n\n\t\t\tOuter scope access:\n\t\t\t\tnone\n\n\t\t\t\"\"\"\n\t\t\ttry:\n\t\t\t\toutput = \"\"\n\t\t\t\tif mark_flag:\n\t\t\t\t\tmarker = \"\\nMARK \" + command_string + \"================================\\n\"\n\t\t\t\t\toutput_queue.put(marker)\n\n\t\t\t\t# subprocess.check_output returns a single string with all the output\n\t\t\t\t# if its multi line output there are line breaks in the string\n\t\t\t\toutput += subprocess.check_output(command_string, shell=True)\n\t\t\t\t#\n\t\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\t\t\t\n\t\t\tif mark_flag:\n\t\t\t\toutput = output.replace(\"\\n\", \"\\n\\t\")\n\t\t\t\toutput = \"OUTPUT START[\" + command_string + \"]: \\n\" + output + \"\\nOUTPUT END[\" + command_string + \"]\" \n\n\t\t\treturn output\n\n\t\t# def exec_and_output_each_line\n\n\n\t\t#\n\t\t# we are going to exec the command with subprocess.check_output\n\t\t# this is best 
done with a single command string holding\n\t\t# the command opetions and all args\n\t\t#\n\t\tcmd_string = \" \".join([cmd] + work)\n\t\tcmd_list = shlex.split(cmd_string)\n\t\tline = \"\"\n\n\t\tif input_options['debug']:\n\n\t\t\toutput = exec_debug(cmd_string)\n\t\t\toutput_queue.put(output)\n\n\t\telif input_options['lines']:\n\n\t\t\toutput = exec_lines(cmd_list, input_options['mark'])\n\t\t\t# output_queue.put() not required it is done line by line inside exec_lines()\n\n\t\telse:\n\n\t\t\toutput = exec_not_lines(cmd_string, input_options['mark'])\n\t\t\toutput_queue.put(output)\n\n\t\treturn\n\n\t\t# semaphore.acquire()\n\t\t# print \"do_work:: {id} {work}\".format(id=ident, work=work)\n\t\t# semaphore.release()", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def _getqueue(self):\n\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty():\n return self.outqueues[index]", "def ls(self):\n server = jenkins_server.get_jenkins_server()\n queue = server.get_queue_info()\n print('任务ID\\t%s\\t原因' % '任务链接'.ljust(50))\n for q in queue:\n print('%d\\t%s\\t%s' % (q['id'], q['task']['url'].ljust(50), q['why']))", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . 
\".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def task():", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def tasks():", "def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 
'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n # Query all repos with repo url of given task\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['github_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'pull_requests':\n self.pull_requests_model(message, repo_id)\n elif message['models'][0] == 'pull_request_commits':\n self.pull_request_commits_model(message, repo_id)\n elif message['models'][0] == 'pull_request_files':\n self.pull_requests_graphql(message, repo_id)\n except Exception as e:\n register_task_failure(self, message, repo_id, e)\n pass", "def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue", "def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict", "def enqueue_output(self, out, queue):\n\n started = False\n finished = False\n\n while not self.stop:\n line = out.readline()\n queue.put(line)\n # Test if we have reached the end of the output\n if started and IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n finished = True\n if IPMITOOL_SHELL_PROMPT in line.decode('ascii'):\n started = True\n if finished and self.comms_lock.locked():\n self.comms_lock.release()\n started = False\n finished = False\n\n time.sleep(QUEUE_THREAD_SLEEP_TIME)", "def run_tool(args, quiet=False):\n pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n result = \"\"\n for line in iter(pipe.stdout.readline, \"\"):\n if not line and pipe.poll() is not None:\n break\n output = line.decode(encoding='UTF-8').rstrip()\n if output != \"\":\n if not quiet:\n print(\"\\t * \" + output)\n result = output\n return result", "def get_output(self, *args, **kwargs):\n self.send(*args, **kwargs)\n return self.process.before.split(\"\\r\\n\")", "def task_parse_results():\n pass", "def __call__(self) -> buffer.Buffer:\n processed_buffer = self.output_queue.get()\n\n return processed_buffer", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def _get_output(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n\n try: # Default case: Return the command output\n self.outputs[idx] = subprocess.check_output(self.commands[idx], stderr=subprocess.STDOUT).splitlines()\n except subprocess.CalledProcessError as e: # Command error: Return error message\n self.outputs[idx] = e.output.splitlines()\n\n self.updated = True\n time.sleep(self.interval)", "def serial_worker(jobs_queue):\n return (get_and_format(**job) for job in jobs_queue)", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = 
subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def collect(self):\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)", "async def get_jobs(): \n return mngr.getAllJobs()", "def output_function(**kwargs):\n\n\t\toutput_queue = kwargs['q']\n\t\twhile True:\n\t\t\titem = output_queue.get()\n\t\t\t# expects to get a string or None\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\toutfile.write(item)\n\t\t\t# outfile.write(\"output_function:: {item}\".format(item=item)+\"\\n\")\n\t\t\toutput_queue.task_done()", "def read_stream(self, output_queue, stream_type):\n output = []\n\n # Get all available output off the queue.\n try:\n while 1:\n output.append(output_queue.get_nowait())\n except Empty:\n pass\n\n # If we read any output, toss it out to the logger\n if len(output):\n logger = logging.getLogger('taskmaster.processes.{}'.format(self.process_index))\n\n if stream_type == StreamType.Stdout:\n for line in output:\n logger.info(line)\n elif stream_type == StreamType.Stderr:\n for line in output:\n logger.error(line)\n\n # Get the current status to determine if we should try to read more or stop.\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n if current_status != psutil.STATUS_DEAD:\n # Process still alive, schedule the call to read more output.\n self.ioloop.call_later(0.1, self.read_stream, *[output_queue, stream_type])\n else:\n # Process has died. 
Flush the iostreams so the BlockingStreamReader triggers one last time and\n # nicely exits.\n self.process.stdout.flush()\n self.process.stderr.flush()", "def processTask(self):\n #Util.set_color(Util.FOREGROUND_YELLOW | Util.FOREGROUND_INTENSITY)\n #logging.info(\"cmd : %s\", self.ExecutionTask.get_cmd())\n #logging.info(\"param : %s\", self.ExecutionTask.get_param())\n #logging.info(\"ret : %s\", str(self.ExecutionTask.get_ret()))\n #logging.info(\"ipport : %s\", self.ExecutionTask.get_ipport())\n #Util.set_color(Util.FOREGROUND_WHITE)\n\n ##############################################################\n # Process for any commands without received messages.....\n ##############################################################\n if self.ExecutionTask.get_cmd() == 'PASS' or self.ExecutionTask.get_cmd() == 'FAIL':\n logging.debug(\"result is %s\", self.ExecutionTask.get_cmd())\n self.setStatus('STOP')\n self.setTestResult(self.ExecutionTask.get_cmd())\n return\n\n if self.ExecutionTask.get_cmd() == 'r_info':\n rinfo_result = self.ExecutionTask.get_param().split('!')\n\n if len(rinfo_result) > 1:\n msg = rinfo_result[1]\n logging.debug(\"%s\", msg)\n\n self.setStatus('STOP')\n self.setTestResult(rinfo_result[0])\n return\n\n if self.ExecutionTask.get_cmd() == 'ResultCheck':\n time.sleep(5)\n self.process_ResultCheck()\n return\n\n if self.ExecutionTask.get_cmd() == 'CheckThroughput':\n time.sleep(5)\n throughputChk = StreamHandler(self.test_mngr_initr)\n chk_result = throughputChk.processStreamResults(self.ExecutionTask.get_param())\n self.setCheckResult(chk_result)\n #if 'FAIL' in chk_result:\n # self.setStatus('STOP')\n return\n\n if self.ExecutionTask.get_cmd() == 'config_multi_subresults':\n self.process_config_multi_subresults()\n return\n\n ##############################################################\n # Process for any commands with received messages......\n ##############################################################\n status = \"\"\n retDict = self.ExecutionTask.get_ret()\n recvStr = \"\"\n if self.ExecutionTask.recv:\n recvStr = self.ExecutionTask.recv.rstrip('\\r\\n')\n #print \"recv : \" + recvStr\n \n if GlobalConfigFiles.curr_prog_name == \"WMMPS\" and \"sniffer_control_subtask\" in self.ExecutionTask.get_cmd():\n logging.debug('In WMMPS, before parsing the recvStr: %s' % recvStr)\n lines = re.split('\\n', recvStr)\n for line in lines:\n if re.search(\"RESULT\", line, re.I):\n if \"FAIL\" in line:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n logging.debug('set test result to FAIL')\n return\n if \"PASS\" in line:\n self.setTestResult('PASS')\n logging.debug('set test result to Pass')\n return\n return\n \n stitems = recvStr.split(',') \n if len(stitems) < 2:\n #logging.debug(\"Bypassing this cmd..\")\n return\n\n status = stitems[1]\n iDNB = TestScriptSymbolTable.get_value_from_sym_tab(\"iDNB\", TestScriptSymbolTable.test_script_sym_tab)\n iINV = TestScriptSymbolTable.get_value_from_sym_tab(\"iINV\", TestScriptSymbolTable.test_script_sym_tab) \n \n if iINV is None:\n iINV = 0\n \n if 'ERROR' in recvStr or 'INVALID' in recvStr and (iDNB == 0 or iDNB is None) and (iINV == 0 or iINV is None):\n #error case...\n logging.debug(\"Return ERROR or INVALID---> STOP process \")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n elif status != 'COMPLETE' and iDNB == 0 and iINV == 0:\n #incomplete case...(running?)\n logging.debug(\"Command %s not completed\", self.ExecutionTask.get_cmd())\n else:\n displayname = \"\"\n for tbd in 
self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport():\n displayname = tbd.displayname\n break\n \n if \"FAIL\" in recvStr and (iINV == 0 or iINV is None):\n if \"SNIFFER\" in displayname or \"sniffer\" in self.ExecutionTask.get_cmd():\n logging.info(\"Test Case Criteria Failure - Command returned FAIL\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n\n elif self.ExecutionTask.get_cmd() == 'device_get_info':\n try:\n if displayname == '':\n self.tmsPacket.setDutDeviceInfo(recvStr)\n else:\n self.tmsPacket.setTestbedInfo(displayname, recvStr)\n\n #for validation\n self.setValidationInfo(displayname, recvStr)\n\n except OSError:\n logging.debug(\"exception -- device_get_info capi call\")\n elif self.ExecutionTask.get_cmd() == 'ca_get_version':\n self.setValidationInfo(displayname, recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sniffer_get_info':\n self.setValidationInfo('sniffer', recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sta_associate':\n time.sleep(10)\n\n if len(stitems) > 2:\n retParam = self.ExecutionTask.get_param().split(',')\n streamFlag = \"\"\n if len(retParam) > 4:\n streamFlag = retParam[3]\n\n if stitems[2] == 'streamID':\n streamHndler = StreamHandler(self.test_mngr_initr)\n logging.debug(\"stream config - streamID : %s\", stitems[3])\n if streamFlag == 'send':\n logging.debug(\"traffic config - send : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'send',\n retParam[15], retParam[17], streamHndler.running_phase, streamHndler.RTPCount)\n streamHndler.add_streamInfo(streamPacket)\n streamHndler.RTPCount = streamHndler.RTPCount + 1\n\n elif streamFlag == 'receive':\n logging.debug(\"traffic config - receive : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'receive',\n -1, -1, streamHndler.running_phase, -1)\n streamHndler.add_streamInfo(streamPacket)\n\n else:\n logging.debug(\"traffic config - else : \")\n\n\n\n if retParam[1] == 'Multicast':\n logging.debug(\"----MULTICAST----\")\n streamHndler.multicast = 1\n\n if self.ExecutionTask.get_cmd() != \"traffic_agent_send\":\n ret_val = \"%s\" %(stitems[3].strip())\n logging.debug(\"traffic config - ret_val : %s\", ret_val)\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfacetype':\n ret_val = (\"%s\" %(stitems[5]))\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfaceid':\n ret_val = stitems[3].split('_')[0]\n setRetVal(getRetKey(retDict), ret_val)\n\n elif self.ExecutionTask.get_cmd() == 'traffic_stop_ping':\n\n keyVal = retParam[1]\n #\"%s;%s\"%(retParam[1], self.ExecutionTask.get_ipport())\n setRetVal(keyVal, stitems[5])\n #print(\"%s = %s\" % (retParam[1], stitems[5]))\n pinginternalchk = TestScriptSymbolTable.get_value_from_sym_tab(\"PingInternalChk\", TestScriptSymbolTable.test_script_sym_tab)\n temp_key = getRetKey(self.ExecutionTask.get_ret())\n \n if \"$\" in temp_key:\n sent_reply = temp_key.split(',')\n #print \"SLIM==> ping result save...\"\n #print sent_reply[0]\n #print sent_reply[1]\n setRetVal(sent_reply[0], stitems[3])\n setRetVal(sent_reply[1], stitems[5]) \n\n setRetVal(\"$pingResp\", stitems[5])\n if pinginternalchk == '0':\n logging.debug(\"Ping Internal Check\")\n \n elif stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if 
stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if len(retDict) > 0:\n tempKey = getRetKey(retDict)\n temp_val = tempKey.split(',')\n count = 0\n item_len = len(stitems)\n for i in temp_val:\n if item_len > count + 3:\n setRetVal(i, stitems[3+count])\n count = count + 2\n\n if self.__status == 'STOP':\n logging.debug(\"generate final result if task stops.\")\n #self.generateFinalResult()\n else:\n pass\n #logging.debug(\"Continue---\")\n return", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def launcher(i,q,cmd):\n while True:\n #grabs ip,cmd from queue\n ip = q.get()\n print \"Thread %s: Running %s to %s\" % (i,cmd,ip)\n host = \"root@%s\"%ip\n subprocess.call([\"ssh\", host, cmd])\n q.task_done()", "def pull_batch_from_queue(self):\n # get top rollout from queue (FIFO)\n rollout = self.global_runner.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.global_runner.queue.get_nowait() )\n except queue.Empty:\n break\n return rollout", "async def list_tasks():", "def on_get_result(self):\n if not self.queue.empty():\n self.update_status(self.queue.get(0))\n if self.worker_process.is_alive():\n self.master.after(self.query_delay, self.on_get_result)\n return\n else:\n self.exec_btn.config(state=Tkinter.NORMAL)", "def getoutput(cmd):\n return getstatusoutput(cmd)[1]", "def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))", "def get(self):\n return self._out_queue.get()", "def recieve(self):\n return self.__proc.stdout.readline().strip('\\n')", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def run_all(self):\n results = []\n # Keep a loop going until all the tasks are gone:\n i = 0\n while self.tasks:\n i += 1\n time.sleep(0.0)\n print(f\"\\nOuter loop count: {i}\")\n # pop a task off the end\n task = self.tasks.pop()\n # run that task:\n try:\n res = task.send(None) # TaskLoop.run_all() - do_a_few_things() - count() - yield\n print(\"returned from send:\", res)\n self.tasks.insert(0, task) # move task to the begining of the list\n except StopIteration as si: # task completed yield return StopIteration exception\n results.append(si.args[0])\n print(\"task: {} result >>> {}\".format(task, si.args[0]))\n return results", "def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in 
data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result", "def _execute(self):\n # Collect the results.\n results, _ = asyncio.run(\n apd.async_retrieve(\n self.args['pages'],\n self.args['from_'],\n self.args['to'],\n self.args['attempts'],\n self.args['backoff'],\n self.args['dump'],\n ))\n result_count = len(results)\n logger.info(f'Total: {result_count}')\n\n # Get the format and print the results.\n format_ = self.args['format_'].lower()\n formatter = Formatter(format_)\n formatter.print(results)", "def monitor_function(launcher, pid_file, frequency, queue):\n cmd = [\n launcher,\n pid_file,\n '',\n str(frequency)\n ]\n process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n # The 'output' is a string printed out by a resource monitor\n # script. It's a whitespace separated string of numbers.\n queue.put(output.strip())", "def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)", "def get_results():\n result = self._recv_result() # blocks\n del self._tasks_in_progress[result.task_id]\n del self._task_results_waiting[result.task_id]\n yield result.value", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return", "def wm(self):\n return self.get_par(\"readback\")", "def GetPublishedProcesses():\r\n pass", "def outputRetrieved(self, blTaskName, rng):\n return self._genericCommand('outputRetrieved', blTaskName, rng)", "async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download", "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def job_thread(argv):\n #pylint: disable=lost-exception\n\n try:\n exitcode = pfwdefs.PF_EXIT_FAILURE\n pid = os.getpid()\n stdp = None\n stde = None\n stdporig = None\n stdeorig = None\n wcl = WCL()\n wcl['wrap_usage'] = 0.0\n jobfiles = {}\n task = {'wrapnum':'-1'}\n try:\n # break up the input data\n (task, jobfiles, jwcl, ins, outq, errq, multi) = argv\n stdp = WrapOutput(task['wrapnum'], outq)\n stdporig = sys.stdout\n sys.stdout = stdp\n stde = WrapOutput(task['wrapnum'], errq)\n stdeorig = sys.stderr\n sys.stderr = stde\n\n # print machine status information\n exechost_status()\n\n wrappercmd = \"%s %s\" % (task['wrapname'], task['wclfile'])\n\n if not os.path.exists(task['wclfile']):\n print \"Error: 
input wcl file does not exist (%s)\" % task['wclfile']\n return (1, jobfiles, jwcl, 0, task['wrapnum'], pid)\n\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n sys.stdout.flush()\n\n # set up the working directory if needed\n if multi:\n workdir = \"fwtemp%04i\" % (int(task['wrapnum']))\n else:\n workdir = None\n setup_wrapper(wcl, task['logfile'], workdir, ins)\n\n print \"Running wrapper: %s\" % (wrappercmd)\n sys.stdout.flush()\n starttime = time.time()\n try:\n exitcode = pfwutils.run_cmd_qcf(wrappercmd, task['logfile'],\n wcl['execnames'])\n except:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n print \"%s: %s\" % (extype, str(exvalue))\n\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n sys.stdout.flush()\n if exitcode != pfwdefs.PF_EXIT_SUCCESS:\n print \"Error: wrapper %s exited with non-zero exit code %s. Check log:\" % \\\n (wcl[pfwdefs.PF_WRAPNUM], exitcode),\n logfilename = miscutils.parse_fullname(wcl['log'], miscutils.CU_PARSE_FILENAME)\n print \" %s/%s\" % (wcl['log_archive_path'], logfilename)\n print \"DESDMTIME: run_wrapper %0.3f\" % (time.time()-starttime)\n\n print \"Post-steps (exit: %s)\" % (exitcode)\n post_wrapper(wcl, ins, jobfiles, task['logfile'], exitcode, workdir)\n\n if exitcode:\n miscutils.fwdebug_print(\"Aborting due to non-zero exit code\")\n except:\n print traceback.format_exc()\n exitcode = pfwdefs.PF_EXIT_FAILURE\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n finally:\n if stdp is not None:\n sys.stdout = stdporig\n if stde is not None:\n sys.stderr = stdeorig\n sys.stdout.flush()\n sys.stderr.flush()\n\n return (exitcode, jobfiles, wcl, wcl['wrap_usage'], task['wrapnum'], pid)\n except:\n print \"Error: Unhandled exception in job_thread.\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n return (1, None, None, 0.0, '-1', pid)", "def _obtain(self):\n\n while True:\n # make sure we're observing load maximums\n if self.max_load is not None:\n try:\n load = os.getloadavg()\n if jobserver_running_jobs() > 0 and load[1] > self.max_load:\n time.sleep(0.01)\n continue\n except NotImplementedError:\n pass\n\n # make sure we're observing memory maximum\n if self.max_mem is not None:\n mem_used, mem_total = memory_usage()\n mem_percent_used = 100.0 * float(mem_used) / float(mem_total)\n if jobserver_running_jobs() > 0 and mem_percent_used > self.max_mem:\n time.sleep(0.01)\n continue\n\n # get a token from the job pipe\n try:\n token = os.read(self.job_pipe[0], 1)\n return token\n except OSError as e:\n if e.errno != errno.EINTR:\n raise", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def run(outfile, nprocs, cmd, arg_list, input_options):\n\tnum_worker_threads = nprocs\n\tworker_queue = Queue.Queue()\n\tthreads = []\n\toutput_queue = Queue.Queue()\n\n\tdef output_function(**kwargs):\n\t\t\"\"\"\n\t\toutput_function take 'output' from the output_queue and writes it to outfile\n\t\tsince there is nly one thread running this function do not\n\t\tneed any kind of lock/semaphore to protect it\n\t\t\"\"\"\n\n\t\toutput_queue = kwargs['q']\n\t\twhile True:\n\t\t\titem = 
output_queue.get()\n\t\t\t# expects to get a string or None\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\toutfile.write(item)\n\t\t\t# outfile.write(\"output_function:: {item}\".format(item=item)+\"\\n\")\n\t\t\toutput_queue.task_done()\n\n\t# def output_function\n\n\tdef worker_function(ident, work):\n\t\t\"\"\"\n\t\tworker_function - called by a worker thread with 'work'.\n\t\tThe work is a shell command and arguments. Executes that command and passes the output to the output_queue\n\t\tDetailed behaviour is modified by input_options\n\n\t\tArgs:\n\t\t\tident (int)\t\t\t\t:\tthe index into the threads table of the thread that is running this worker\n\t\t\twork (list of strings)\t:\tthe arguments for this invocation\n\t\t\n\t\tOuter scope access:\n\t\t\tinput_options (dictionary):\tread only modified details of behaviour\n\t\t\toutput_queue (Queue.Queue):\tread only - where output text goes\n\n\t\t\"\"\"\n\n\t\tdef exec_debug(command_string) :\n\t\t\t\"\"\" \n\t\t\twhen the --debug option is set this outputs the command string rather than execute the command\n\t\t\t\n\t\t\t\tArgs:\n\t\t\t\t\tcommand_string (string) : the command and all args as a simple string\n\t\t\t\t\n\t\t\t\tOuter scope access:\n\t\t\t\t\tnone\n\n\t\t\t\tReturns:\n\t\t\t\t\tstring\n\t\t\t\"\"\"\n\t\t\tline += cmd_string + \"\\n\"\n\t\t\treturn line\n\n\t\tdef exec_lines(command_list, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen the --lines option is set this function outputs every line of output from the command to the output_queue as soon as it is avaliable\n\t\t\trather then wait for the command to complete and puts the command with all options on the fron of each outout\n\t\t\tline so it can be reconciles with the command that generated it. \n\n\t\t\tArgs:\n\t\t\t\tcommand list (dictionary) \t: the result of applying shlex.split() to command_string\n\t\t\t\tmark_flag(bool)\t\t\t\t: if true adds \n\n\t\t\tReturns:\n\t\t\t\tNothing\n\n\t\t\tOuter scope access:\n\t\t\t\toutput_queue\n\n\t\t\t\"\"\"\t\n\n\t\t\toutput = \"\"\n\t\t\tcommand_string = \" \".join(command_list)\n\t\t\ttry:\n\t\t\t\tprocess = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n\t\t\t\tpipe = process.stdout\n\t\t\t\toutput = \"\"\n\n\t\t\t\twhile True:\n\n\t\t\t\t\toutput = pipe.readline()\n\t\t\t\t\tif len(output) == 0 : #and (proc.process.poll() is not None ):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif mark_flag:\n\t\t\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\t\t\toutput = mark + output\n\t\n\t\t\t\t\toutput_queue.put(output)\n\t\n\t\t\t\t# while\n\t\n\t\t\t\tprocess.wait()\n\t\t\t\treturn\n\t\t\t#\n\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += \"LINES \"+cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"LINES command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\n\t\t\tif mark_flag:\n\t\t\t\tmark = \"OUTPUT[\" + cmd_string + \"]: \"\n\t\t\t\toutput = mark + output + \"\\n\"\n\n\t\t\toutput_queue.put(output)\n\n\n\t\t# def exec_and_output_each_line\n\n\t\tdef exec_not_lines(command_string, mark_flag):\n\t\t\t\"\"\" \n\t\t\twhen neither the --debug or the --lines options are set this function runs the command and collects all the output\n\t\t\twaits for the command to complete and then returns all the output as a single 
string\n\n\t\t\tArgs:\n\t\t\t\tcommand_string (string) - \tthe complete command to be executed\n\t\t\t\tmark_flag(bool)\t\t\t- \twhen true the output has additional text on the start and end of the\n\t\t\t\t\t\t\t\t\t\t\toutput so that \n\n\t\t\t\t\t\t\t\t\t\t\t-\tthe start of command execution is marked\n\t\t\t\t\t\t\t\t\t\t\t-\tthe begionning and end of command output is marked\n\t\t\tReturns:\n\t\t\t\tall output as a single string\n\n\t\t\tOuter scope access:\n\t\t\t\tnone\n\n\t\t\t\"\"\"\n\t\t\ttry:\n\t\t\t\toutput = \"\"\n\t\t\t\tif mark_flag:\n\t\t\t\t\tmarker = \"\\nMARK \" + command_string + \"================================\\n\"\n\t\t\t\t\toutput_queue.put(marker)\n\n\t\t\t\t# subprocess.check_output returns a single string with all the output\n\t\t\t\t# if its multi line output there are line breaks in the string\n\t\t\t\toutput += subprocess.check_output(command_string, shell=True)\n\t\t\t\t#\n\t\t\t\t# trying to catch some helpful output if the command fails\n\t\t\t\t#\n\t\t\texcept (subprocess.CalledProcessError) as cperror:\n\t\t\t\toutput += cperror.output\n\t\t\t\t# retcode = cperror.returncode\n\t\t\texcept (exceptions.OSError) as err:\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, str(err))\n\t\t\texcept: # npqa E722\n\t\t\t\toutput += \"command : {0} gave error {1} \".format(command_string, sys.exc_info()[0])\n\t\t\t\n\t\t\tif mark_flag:\n\t\t\t\toutput = output.replace(\"\\n\", \"\\n\\t\")\n\t\t\t\toutput = \"OUTPUT START[\" + command_string + \"]: \\n\" + output + \"\\nOUTPUT END[\" + command_string + \"]\" \n\n\t\t\treturn output\n\n\t\t# def exec_and_output_each_line\n\n\n\t\t#\n\t\t# we are going to exec the command with subprocess.check_output\n\t\t# this is best done with a single command string holding\n\t\t# the command opetions and all args\n\t\t#\n\t\tcmd_string = \" \".join([cmd] + work)\n\t\tcmd_list = shlex.split(cmd_string)\n\t\tline = \"\"\n\n\t\tif input_options['debug']:\n\n\t\t\toutput = exec_debug(cmd_string)\n\t\t\toutput_queue.put(output)\n\n\t\telif input_options['lines']:\n\n\t\t\toutput = exec_lines(cmd_list, input_options['mark'])\n\t\t\t# output_queue.put() not required it is done line by line inside exec_lines()\n\n\t\telse:\n\n\t\t\toutput = exec_not_lines(cmd_string, input_options['mark'])\n\t\t\toutput_queue.put(output)\n\n\t\treturn\n\n\t\t# semaphore.acquire()\n\t\t# print \"do_work:: {id} {work}\".format(id=ident, work=work)\n\t\t# semaphore.release()\n\n\t# def worker_function\n\n\tdef worker(**kwargs):\n\t\t\"\"\"\n\t\ttarget function for worker threads. Takes 'work' from the worker queue and\n\t\tpasses that to `worker_function`. 
When `work == None` return\n\t\tand terminate the worker thread.\n\n\t\tArgs:\n\t\t\tkwargs['ident'] (int)\t- the index of the thread running this worker\n\n\t\tOuter scope access:\n\t\t\tworker_queue (Queue.Queue) - multiple worker processes (and hence worker functions) take work from this queue\n\n\t\t@return nothing\n\t\t\"\"\"\n\t\tident = kwargs[\"ident\"]\n\t\twhile True:\n\t\t\titem = worker_queue.get()\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\tworker_function(ident, item)\n\t\t\tworker_queue.task_done()\n\n\t# def worker\n\n\t# def run - body\n\n\tfor i in range(num_worker_threads):\n\t\tkwargs = {\"ident\": i}\n\t\tt = threading.Thread(target=worker, kwargs=kwargs)\n\t\tt.start()\n\t\tthreads.append(t)\n\n\tfor item in arg_list:\n\t\tworker_queue.put(item)\n\n\toutput_thread = threading.Thread(target=output_function, kwargs={'q': output_queue})\n\toutput_thread.start()\n\n\t# block until all tasks are done\n\tworker_queue.join()\n\n\t# stop workers\n\tfor i in range(num_worker_threads):\n\t\tworker_queue.put(None)\n\n\tfor t in threads:\n\t\tt.join()\n\n\toutput_queue.put(None)\n\toutput_thread.join()", "def run(self):\n results = self.fetch()\n return results", "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def get_results():\n self._recv_result() # blocks\n tasks = self._tasks_in_progress\n results = self._task_results_waiting\n\n for task_id in tasks.keys():\n if task_id not in results:\n break\n\n del tasks[task_id]\n result = results.pop(task_id)\n yield result.value", "def process(self):\n while not self.halted:\n self.step()\n return self.outputs", "def check_task(self): \n return self.buffer[0]", "def _submit_to_queue(self, script_file):\n if sys.version_info[0] < 3:\n process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)\n else:\n # need string not bytes so must use universal_newlines\n process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)\n\n out, err = process.communicate()\n # grab the return code. PBS returns 0 if the job was successful\n queue_id = None\n if process.returncode == 0:\n try:\n # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id\n queue_id = int(out.split('.')[0])\n except:\n # probably error parsing job code\n logger.critical(\"Could not parse job id following qsub...\")\n return SubmitResults(qid=queue_id, out=out, err=err, process=process)" ]
[ "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.68057764", "0.6712736", "0.6354121", "0.6280556", "0.626264", "0.6234594", "0.6165189", "0.608507", "0.6033077", "0.59824157", "0.5962959", "0.59550273", "0.5916649", "0.59026563", "0.584164", "0.5835302", "0.5831745", "0.5818945", "0.58054334", "0.5790826", "0.57872194", "0.5781094", "0.5770582", "0.5765791", "0.5763648", "0.57568556", "0.57535577", "0.57373714", "0.5736213", "0.5734112", "0.572691", "0.56962985", "0.568767", "0.5675142", "0.56706554", "0.56706554", "0.56572765", "0.5649644", "0.56406707", "0.56406707", "0.56406707", "0.56406707", "0.56406707", "0.56406707", "0.56274384", "0.56199205", "0.5617631", "0.56050587", "0.560364", "0.5596633", "0.5590621", "0.5586244", "0.55807143", "0.5577207", "0.557674", "0.5560534", "0.5560495", "0.5552012", "0.5544122", "0.5535331", "0.55343854", "0.55337816", "0.551067", "0.5507938", "0.54998577", "0.54964375", "0.54921925", "0.5488203", "0.54863757", "0.5483803", "0.54675204", "0.546707", "0.546612", "0.5460934", "0.54604834", "0.5455643", "0.5439319", "0.54315513", "0.5428045", "0.5425179", "0.5421964", "0.54189616", "0.54149234", "0.5414595", "0.5413839", "0.54027265", "0.54019696", "0.5400893", "0.53993803", "0.5398493", "0.53972024", "0.53957766", "0.53830606", "0.5382801", "0.5368275" ]
0.6057725
13
Collect output from the subprocess without blocking the main process if subprocess hangs.
def _get_output(arguments, timeout=None): # NOTE Increase this value if tests fail with None being received as # stdout/stderr instead of the expected content output_timeout = 0.1 # seconds pidq = Queue() outputq = Queue() t = Thread(target=_queue_output, args=(arguments, pidq, outputq)) t.daemon = True t.start() try: pid = pidq.get(timeout=timeout) except Empty: pid = None # Process crashed or timed out for some reason if pid is None: return _retrieve_output(t, output_timeout, outputq, "TaskWarrior to start") # Wait for process to finish (normal execution) state = wait_process(pid, timeout) if state: # Process finished return _retrieve_output(t, output_timeout, outputq, "TaskWarrior thread to join") # If we reach this point we assume the process got stuck or timed out for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL): # Start with lower signals and escalate if process ignores them try: os.kill(pid, signal.SIGABRT) except OSError as e: # ESRCH means the process finished/died between last check and now if e.errno != errno.ESRCH: raise # Wait for process to finish (should die/exit after signal) state = wait_process(pid, timeout) if state: # Process finished return _retrieve_output(t, output_timeout, outputq, "TaskWarrior to die") # This should never happen but in case something goes really bad raise OSError("TaskWarrior stopped responding and couldn't be killed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def non_blocking_streamlit(process: psutil.Popen) -> None:\n while process.is_running():\n process.communicate()", "def check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.STDOUT\n\n p = subprocess.Popen(*args, **kwargs)\n\n try:\n while p.poll() is None:\n time.sleep(0.002)\n return p.poll(), p.stdout.read().decode('utf-8', 'ignore')\n finally:\n if p.poll() is None: # pragma: no cover\n p.kill()", "def ServeUntilSubprocessDies(self, process):\n child_result = 0\n try:\n while True:\n if process.poll() is not None:\n child_result = 0\n break\n if self.conn.poll():\n child_result = self.conn.recv()\n break\n time.sleep(0)\n except KeyboardInterrupt:\n pass\n 
finally:\n self.Shutdown()\n return child_result", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def wait(self, timeout: float = None) -> CompletedProcess: # type: ignore\n if self.stdout is None:\n return CompletedProcess(self.args, returncode=super().wait(timeout=timeout), stdout=None)\n else:\n stdout = []\n while self.poll() is None:\n stdout.append(line := self.stdout.readline())\n\n if self.verbose:\n print(line, end=\"\")\n\n return CompletedProcess(self.args, returncode=self.poll(), stdout=\"\".join(stdout))", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_queue.put_nowait(None) # Stop queue consumers", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def poll_process(process, suppress_errors=False):\n\n while True:\n data_to_stdout(\".\")\n time.sleep(1)\n\n returncode = process.poll()\n\n if returncode is not None:\n if not suppress_errors:\n if returncode == 0:\n data_to_stdout(\" done\\n\")\n elif returncode < 0:\n data_to_stdout(\" process terminated by signal %d\\n\" % returncode)\n elif returncode > 0:\n data_to_stdout(\" quit unexpectedly with return code %d\\n\" % returncode)\n\n break", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def wait(self):\n num_pings = 0\n # Some streams seem to start fine with up to 4 pings before beginning download?\n # More investigation is needed\n max_pings = 1 + self._pingouts\n # timeout after 1 minute\n timeout = datetime.datetime.now() + datetime.timedelta(minutes=1)\n try:\n for line in self._process.stderr:\n # TODO: add mpegts or other variants depending on the container settings? 
or no?\n # if \"Output #0, mp4\" in line:\n if \"Output #0\" in line:\n self._process.communicate()\n self.move_to_dest()\n self._pingouts = 0\n break\n elif \"HandleCtrl, Ping\" in line:\n num_pings += 1\n if num_pings > max_pings:\n # The main issue with this is that the slain processes will not have their files moved\n # But I think this is preferable to the other solutions I've come up with.\n # For future reference, those were:\n #\n # 1) Sending SIGINT then continuing to read stderr until it exited (sometimes it doesn't)\n # 2) Sending SIGINT, storing a reference to the process, then restarting the download.\n # This prevents the process from being garbage collected until the Watcher is\n # 3) Sending SIGINT, then storing info about src and dest paths for the stopped download.\n # If a reference to the process is NOT stored, there's no way to be sure it has finished writing\n # (if it's writing at all). The only way was to give them a grace period and then just start\n # moving, but this adds undesirable time to the cleanup phase, when we may want to restart\n # a falsely completed Watcher asap.\n # 4) Just moving the file straightaway. This is obviously bad since ffmpeg takes a few moments to\n # finish.\n # NOTE: only option #1 was actually tried, the others were partially written before being\n # abandoned as their problems became clear\n #\n # Two additional options exist (not mutually exclusive):\n # 1) Passing the dead processes off to a queue and having another thread clean up.\n # 2) Having regular maintenance sweep the active folder and move files it can be sure are done\n # to their proper folders.\n #\n # I *probably* need to use 1) eventually, especially once I figure out how to actually end\n # stuck processes without killing the parent. But it requires a lot more code.\n # Until then let's just see how this works.\n #\n # When that time does come, a Downloader copy constructor may be useful.\n download_logger.debug(\"Download pinged {} times: Stopping\".format(num_pings))\n self._pingouts += 1\n self.stop()\n\n # close stderr to force the loop to exit\n time.sleep(0.1)\n self._process.stderr.close()\n time.sleep(0.1)\n # process will be garbage collected when the next one is started, or the Watcher dies\n # self._process = None\n # This *should* work for newer builds of FFmpeg without librtmp.\n # Only question is whether 1 minute is too long (or too short).\n # UPDATE: Why doesn't this ever seem to work?\n # is it because FFmpeg freezes output and hangs now? so we're never getting another line to iterate over\n # elif datetime.datetime.now() > timeout:\n # download_logger.debug(\"Download of {} timed out\".format(self.outfile))\n # self.stop()\n # time.sleep(0.1)\n # self._process.stderr.close()\n # time.sleep(0.1)\n else:\n time.sleep(0.2)\n\n except ValueError:\n download_logger.debug('ffmpeg stderr closed unexpectedly')\n\n # Is it possible for the process to end prematurely?\n return self._process.returncode", "def _cleanup_proc(self):\n logger.debug(\"{}: Cleaning up and waiting for process to exit\".format(\n self))\n try:\n self._loop.remove_reader(self._proc.stdout)\n self._proc.stdout.close()\n self._proc.stdin.close()\n except Exception:\n # Log errors, but otherwise ignore.\n logger.error(\"{}: Failed cleaning up process\".format(self),\n exc_info=True)\n finally:\n # If the wait fails, the sub-process will appear in the process\n # tree (labelled defunct). 
This is mostly harmless so just log a\n # warning.\n try:\n self._proc.wait(0)\n except subprocess.TimeoutExpired:\n logger.warning(\"{}: Wait failed\".format(self),\n exc_info=True)", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def run_command(self):\n\n while True:\n current_line = self.process.stdout.readline().rstrip()\n\n if not current_line:\n break\n\n yield self.decode_output(current_line)", "def _stream(cmd):\n # color_print(getuser() + '$ ' + cmd, COLOR.BLUE)\n output = [] # used to collect o/p from both stdout and stderr\n\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, shell=True)\n except subprocess.CalledProcessError as ex:\n print(\"Status : FAIL\", ex.returncode, ex.output)\n else:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n # print(line)\n output.append(line)\n\n # Note: output is streamed to the user as and when it occurs.\n with proc.stderr:\n for line in iter(proc.stderr.readline, b''):\n # print(line)\n output.append(line)\n\n return output", "async def checked_run(*cmd):\n\n # Start the subprocess.\n logging.info('Running: %s', expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n chunks = []\n while True:\n chunk = await p.stdout.read(16 * 1024)\n if not chunk:\n break\n chunks.append(chunk)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n stdout = b''.join(chunks).decode()[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, expand_cmd_str(cmd), stdout))\n\n return stdout", "def _defunct(self):\n while self._popen.poll() is None:\n time.sleep(0.1)", "def _handleBlockingProcess(self):\n # add into container of running processes, should process hang\n # for ever on wait()\n if self.caller is not None:\n self.caller.addExecutor(self)\n\n m = \"Waiting for '%s' (PID: %s) to finish ...\" % (self.command,\n self.proc.pid)\n self.logger.info(m)\n # waits here\n # however, when the client process gets killed upon a\n # CleanupProcessesAction this call would fail with\n # OSError: [Errno 10] No child processes\n # check #8 description\n try:\n self.returncode = self.proc.wait()\n except OSError as ex:\n self.logger.error(\"Waiting for process to complete failed \"\n \"(crashed/killed?), reason: %s\" % ex)\n self.returncode = str(ex)\n\n # process finished here, remove it from executors container\n # do not remove itself from executors, subsequent\n # CleanupProcessesAction may\n # be left out of context and this executor instance gets orphaned\n # if self.caller != None:\n # self.caller.removeExecutor(self)\n\n logs = 
self.getLogs()\n\n # considered returncodes for a synchronous call\n if self.returncode == 0:\n m = (\"Command '%s' finished, no error raised, return code: \"\n \"'%s'\\nlogs:\\n%s\" % (self.command, self.returncode, logs))\n # comment out now: e.g. in case of FDT Java client error, the\n # process output logs are logged 3 times. the only issue is\n # when fdtcp initiator crashes, there is nowhere to send logs then\n # the same comment applies below ...\n # self.logger.info(m) # log locally at fdtd side\n return m\n else:\n m = (\"Command '%s' failed, return code: \"\n \"'%s'\\nlogs:\\n%s\" % (self.command, self.returncode, logs))\n # self.logger.error(m) # log locally at fdtd side\n raise ExecutorException(m)", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def watch(self):\n reader, writer = os.pipe2(0)\n\n pid = os.fork()\n\n # In the child\n if pid == 0:\n tty.setraw(0)\n os.close(reader)\n os.close(2)\n\n os.dup2(writer, 1)\n\n os.execlp(self.__program, self.__program, *self.__args)\n\n sys.exit(1)\n else:\n os.close(writer)\n\n while True:\n result = os.read(reader, 1024)\n if len(result) == 0:\n break\n sys.stdout.write(result.decode('utf-8'))\n\n os.waitpid(pid, 0)", "def run_command(cmd, print_output=True):\n def enqueue_output(out, queue):\n for line in iter(out.readline, b''):\n queue.put(line.decode(\"utf-8\"))\n out.close()\n\n print(\" -> {}\".format(cmd))\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n q_stdout = Queue()\n q_stderr = Queue()\n t_stdout = Thread(target=enqueue_output, args=(proc.stdout, q_stdout))\n t_stderr = Thread(target=enqueue_output, args=(proc.stderr, q_stderr))\n t_stderr.daemon = True # thread dies with the program\n t_stdout.daemon = True\n t_stdout.start()\n t_stderr.start()\n stdout = \"\"\n stderr = \"\"\n\n # read stdout and stderr without blocking\n finished = False\n while True:\n done = proc.poll()\n try:\n line_stdout = \"\"\n while True:\n line_stdout += q_stdout.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stdout and print if we should\n stdout += line_stdout\n if print_output and line_stdout != \"\":\n sys.stdout.write(bcolors.COLOR_CYAN)\n for line in line_stdout.splitlines():\n sys.stdout.write(\"\\t{}\\n\".format(line))\n sys.stdout.write(bcolors.COLOR_NC)\n sys.stdout.flush()\n\n try:\n line_stderr = \"\"\n while True:\n line_stderr += q_stderr.get(timeout=0.01)\n except Empty:\n pass\n # accumilate stderr and print if we should\n stderr += line_stderr\n if print_output and line_stderr != \"\":\n sys.stderr.write(bcolors.COLOR_RED)\n for line in line_stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n # check if we're done and the finished flag is set\n if finished:\n if done != 0 and print_output is False:\n sys.stderr.write(bcolors.COLOR_RED)\n for line in stderr.splitlines():\n sys.stderr.write(\"\\t{}\\n\".format(line))\n sys.stderr.write(bcolors.COLOR_NC)\n sys.stderr.flush()\n\n return stdout, stderr, done\n\n # check if the process is done...\n if done is not None:\n finished = True\n # give the process's stdout and stderr time to flush\n time.sleep(0.25)", "def wait_finish(self):\r\n self.proc.join()", "def collect_output(self):\n pass", "def collect_output(self):\n 
pass", "def ServeForever(self):\n child_result = 0\n try:\n # Block on this pipe, waiting for a response from the child process.\n child_result = self.conn.recv()\n except KeyboardInterrupt:\n pass\n finally:\n self.Shutdown()\n return child_result", "def sync_after_subprocess_completion(self):\n\n # First, let's flush the stored entries in cache accessors. This is to prevent cases\n # like current process contains an outdated stored entries.\n #\n # For example: In assisted mode, main process checks for versioning error which can\n # populate the stored entries as entries without an artifact. Now subprocess can\n # update the stored entries after computing them but it won't be communicated back.\n # This allows the main process to fetch fresh entries which might have been updated.\n for accessor in self._cache_accessors:\n accessor.flush_stored_entries()\n\n # Then, populate the value hash.\n if self._result_value_hashes_by_name is None:\n self._result_value_hashes_by_name = {}\n for accessor in self._cache_accessors:\n value_hash = accessor.load_result_value_hash()\n name = accessor.query.dnode.to_entity_name()\n if value_hash is None:\n raise AssertionError(\n oneline(\n f\"\"\"\n Failed to load cached value (hash) for entity {name!r};\n this suggests we did not successfully completed the entity\n in subprocess or the entity wasn't cached which should not\n happen.\"\"\"\n )\n )\n self._result_value_hashes_by_name[name] = value_hash\n\n # Lastly, we can mark the process as complete.\n self.is_complete = True", "def popenCommunicate(args, data='', outputs=None, ignoreErrors=False, poll_interval=0.01):\n stdError = None\n if not ignoreErrors:\n stdError = subprocess.STDOUT\n p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stdError)\n fcntl.fcntl(p.stdin, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n fcntl.fcntl(p.stdout, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking\n\n bytesTotal = len(data)\n bytesWritten = 0\n while bytesWritten < bytesTotal:\n try:\n # p.stdin.write() doesn't return anything, so use os.write.\n bytesWritten += os.write(p.stdin.fileno(), data[bytesWritten:])\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_write(p.stdin.fileno())\n\n p.stdin.close()\n\n if outputs is not None:\n while True:\n try:\n chunk = p.stdout.read(4096) \n if not chunk:\n break\n for output in outputs:\n output.write(chunk)\n except IOError, ex:\n if ex[0] != errno.EAGAIN:\n raise\n sys.exc_clear()\n socket.wait_read(p.stdout.fileno()) \n\n p.stdout.close()\n\n length = None\n try:\n length = len(outputs[0])\n except:\n length = 0\n\n logging.getLogger().debug(\"popenCommunicate() finished. 
Args: %s, Output Length: %d\" % (args,length))", "def wait_rc(popen, timeout=30):\n stop = False\n end_time = time.time() + timeout\n rc = None\n while not stop:\n rc = popen.poll()\n if time.time() > end_time:\n stop = True\n return rc\n if rc is not None:\n stop = True\n return rc\n else:\n time.sleep(0.5)", "def _wait_for_output(self):\n # Here we should get an empty list or list with a tuple [(fd, event)]\n # When we get list with a tuple we can use readline method on\n # the file descriptor.\n poll_result = self.poll_obj.poll(0)\n\n if poll_result:\n line = self.output().readline()\n if self._banner.match(line):\n return True\n\n return False", "def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)", "def wait(self):\n self.Popen.wait()", "def hook() -> None:\n real_recv = process.recv_raw\n\n def recv(self: process, numb: int) -> bytes:\n data = real_recv(self, numb)\n # Sometimes the returned data is of type str\n # Accept them by converting them to bytes\n if type(data) == str:\n data = data.encode()\n try:\n stdout_all = self.stdout_all\n except Exception: # pylint: disable=broad-except\n stdout_all = b\"\"\n stdout_all += data\n self.stdout_all = stdout_all\n return data\n\n process.recv_raw = recv", "def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break", "def _run_command(self,command,uid):\n\t\tcontrol = popen2.Popen3(command,1)\n\t\twhile control.poll() == -1 :\n\t\t\tt = select.select([control.fromchild,control.childerr],[],[],0.05)\n\t\tif control.fromchild in t[0]:\n\t\t\ts = os.read(control.fromchild.fileno(),16384)\n\t\t\tself.lock.acquire()\n\t\t\tself.data[uid]['data'] = self.data[uid]['data'] + s\n\t\t\tself.lock.release()\n\t\t\tself.data[uid]['all'] = self.data[uid]['all'] + s\n\t\tif control.childerr in t[0]:\n\t\t\ts = os.read(control.childerr.fileno(),16384)\n\t\t\tself.lock.acquire()\n\t\t\tself.data[uid]['err'] = self.data[uid]['err'] + s\n\t\t\tself.lock.release()\n\t\t\tself.data[uid]['allerr'] = self.data[uid]['allerr'] + s\n\t\t# check to see if we should kill this process \n\t\tif self.data[uid]['quit']: \n\t\t\tprint \"process %d killed\" % control.pid\n\t\t\tos.kill(control.pid,signal.SIGTERM)\n\t\ttime.sleep(0.01)\n\n\t\tprint \"done with popen\"\n\t\ts1 = os.read(control.fromchild.fileno(),16384)\n\t\ts2 = os.read(control.childerr.fileno(),16384)\n\t\tself.lock.acquire()\n\t\tself.data[uid]['data'] = self.data[uid]['data'] + s1\n\t\tself.data[uid]['err'] = self.data[uid]['err'] + s2\n\t\tself.lock.release()\n\t\tself.data[uid]['all'] = self.data[uid]['all'] + s1\n\t\tself.data[uid]['allerr'] = self.data[uid]['allerr'] + s2\n\t\tcontrol.fromchild.close()\n\t\tcontrol.tochild.close()\n\t\treturn control.sts/256", "def run_tool(args, quiet=False):\n pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n result = \"\"\n for line in iter(pipe.stdout.readline, \"\"):\n if not line and pipe.poll() is not None:\n break\n output = line.decode(encoding='UTF-8').rstrip()\n if output != \"\":\n if not quiet:\n print(\"\\t * \" + output)\n result = output\n return result", "def test_poll_processes(self):\n message='abcdefg'\n response_type = 'TEST'\n t = 
threading.Thread(target=self.handle_process_pipes, args=(message, response_type))\n t.start()\n\n self.dut._poll_processes(message=message,\n timeout=2,\n response_type=response_type)\n\n t.join()", "def runCommand(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)\n for line in p.stdout:\n print (line.decode(\"utf-8\"),end=\"\") # the end=\"\" argument to print prevents unwanted newlines after each line\n p.wait()", "async def _run(self):\n self.sub_process.start()\n log.info('Started sub process (pid {}).'.format(self.sub_process.pid))\n\n # Wait until the process is actually started to not consider it dead when it's not even born yet\n while not self.sub_process.is_alive():\n try:\n # Wtb milliseconds async sleep omg\n await asyncio.wait_for(asyncio.sleep(1), 0.1)\n except asyncio.TimeoutError:\n pass\n\n # ERMAHGERD ! MAH FRAVRIT LERP !\n while True:\n try:\n data = self.mp_queue.get(False) # Do not block\n except QueueEmpty:\n if not self.sub_process.is_alive():\n log.warning('Sub process (pid {}) appears dead.'.format(self.sub_process.pid))\n asyncio.ensure_future(self.stop())\n\n # Arbitrary sleep time after an unsuccessful poll\n await asyncio.sleep(4)\n except Exception as e:\n # Might be triggered when the sub_process is terminated while putting data in the queue\n log.error('Queue polling error: ' + str(e))\n break\n else:\n if data is not None:\n # Process the data sent by the subprocess\n self.on_data(data)", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def process_output(self, stdout=True, final_read=False):\n if stdout:\n pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee\n else:\n pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee\n\n if final_read:\n # read in all the data we can from pipe and then stop\n data = []\n while select.select([pipe], [], [], 0)[0]:\n data.append(os.read(pipe.fileno(), 1024))\n if len(data[-1]) == 0:\n break\n data = \"\".join(data)\n else:\n # perform a single read\n data = os.read(pipe.fileno(), 1024)\n buf.write(data)\n tee.write(data)", "def capture_output():\r\n stdout, stderr = sys.stdout, sys.stderr\r\n sys.stdout, sys.stderr = StringIO(), StringIO()\r\n out, err = [], []\r\n try:\r\n yield out, err\r\n finally:\r\n out.extend(sys.stdout.getvalue().splitlines())\r\n err.extend(sys.stderr.getvalue().splitlines())\r\n sys.stdout, sys.stderr = stdout, stderr", "def reader_thread(self, q):\r\n try:\r\n with self.process.stdout as pipe:\r\n for line in iter(pipe.readline, b''):\r\n q.put(line)\r\n finally:\r\n q.put(None)", "def run(self):\n logger.info(\"Running...\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def run_command(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print 
output.strip()\n\n rc = process.poll()\n return rc", "def tail(self):\n for line in iter(self.proc.stdout.readline, ''):\n if len(line) == 0:\n break\n if self.log_filter(line.decode('ASCII')):\n continue\n if self.verbose:\n logging.debug(f\"{self.prefix}: {line.decode().rstrip()}\")\n with self.logs_cond:\n self.logs.append(str(line.rstrip()))\n self.logs_cond.notifyAll()\n self.running = False\n self.proc.stdout.close()\n if self.proc.stderr:\n self.proc.stderr.close()", "def poll(self):\n\n self.output += utils.eintr_retry(os.read, self.fd, 1048576)\n result = utils.eintr_retry(self.proc.poll)\n\n if result is not None:\n self.ended = time.time()\n\n return result", "def unchecked_call(self, cmd):\n args = self._parse_command(cmd)\n\n if self.proc is not None:\n nullbyte = struct.pack('B', 0)\n for arg in args:\n self.proc.stdin.write(b'ARG')\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.write(arg.encode())\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.write(b'RUN')\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.flush()\n reply = {\n 'STDOUT': '',\n 'STDERR': '',\n 'STATUS': '',\n }\n for _ in range(0, 3):\n fieldname = Herbstluftwm._read_text_until_null_byte(self.proc.stdout)\n if fieldname is None:\n raise Exception('herbstclient did non print a full reply')\n fieldvalue = Herbstluftwm._read_text_until_null_byte(self.proc.stdout)\n if fieldvalue is None:\n raise Exception('herbstclient did non print a full reply')\n reply[fieldname] = fieldvalue\n #\n complete_proc = subprocess.CompletedProcess(args, int(reply['STATUS']))\n complete_proc.stdout = reply['STDOUT']\n complete_proc.stderr = reply['STDERR']\n else:\n complete_proc = subprocess.run(\n [self.herbstclient_path, '-n'] + args,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=self.env,\n universal_newlines=True,\n # Kill hc when it hangs due to crashed server:\n timeout=2)\n\n return complete_proc", "def spawn(stdout, command, **options):\n # grab arguments that we care about\n stderr = options.pop('stderr', None)\n daemon = options.pop('daemon', True)\n\n # empty out the first generator result if a coroutine is passed\n if hasattr(stdout, 'send'):\n res = six.next(stdout)\n res and P.write(res)\n if hasattr(stderr, 'send'):\n res = six.next(stderr)\n res and P.write(res)\n\n # spawn the sub-process\n return process(command, stdout=stdout, stderr=stderr, **options)", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def stdout_read(self, timeout):\n chan = self._chan\n now = datetime.datetime.now()\n timeout_time = now + datetime.timedelta(seconds=timeout)\n output = \"\"\n while not _SHELL_PROMPT.search(output):\n rd, wr, err = select([chan], [], [], _SELECT_WAIT)\n if rd:\n data = chan.recv(_RECVSZ)\n output += data.decode()\n if datetime.datetime.now() > timeout_time:\n raise TimeoutError\n return output", "async def checked_run(cmd, env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n 
stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await expand_cmd_str(cmd), output))\n\n return output", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def wait_for_iree_benchmark_module_start(process: subprocess.Popen,\n verbose: bool = False) -> None:\n\n while True:\n line = process.stdout.readline() # pytype: disable=attribute-error\n if line == \"\" and process.poll() is not None: # Process completed\n raise ValueError(\"Cannot find benchmark result line in the log!\")\n if verbose:\n print(line.strip())\n # Result available\n if re.match(r\"^BM_.+/real_time\", line) is not None:\n break", "async def copier_recorder(\r\n self,\r\n ) -> None:\r\n if not self.process:\r\n raise Exception(\"missing process; was this called inside a with statement?\")\r\n\r\n assert (\r\n self.process.stdout is not None\r\n ), \"process must be opened with stdout=PIPE and stderr=STDOUT\"\r\n\r\n async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel:\r\n async for chunk in self.process.stdout:\r\n # print(f\"seen chunk: '{chunk!r}'\", flush=True) # debug\r\n self.stdout += chunk\r\n await self.printer_send_channel.send(chunk)\r\n\r\n # send notification\r\n # if it's full, that's fine: if expect() is run, it'll see\r\n # there's a \"pending\" notification and check stdout, then wait\r\n # for another notification\r\n try:\r\n self.notifier_send_channel.send_nowait(b\"\")\r\n except trio.WouldBlock:\r\n pass\r\n except trio.BrokenResourceError as err:\r\n print(f\"cause '{err.__cause__}'\")\r\n raise err", "def collect_data(endless):\r\n click.echo(\"start collecting data ...\")\r\n _collect_data(endless)", "def subproc(self,line):\n self.set_stdout()\n proc = subprocess.Popen(line.split(),stdout=self.stdout)\n proc.wait() #ensures that the subprocess executes and terminates before returning to the shell", "async def test_subprocess_forbid(event_loop):\n proc = await asyncio.subprocess.create_subprocess_exec(\n sys.executable, '--version', stdout=asyncio.subprocess.PIPE,\n loop=event_loop)\n await proc.communicate()", "def _IterProcessStdoutFcntl(process,\n iter_timeout=None,\n timeout=None,\n buffer_size=4096,\n poll_interval=1):\n # pylint: disable=too-many-nested-blocks\n import fcntl\n try:\n # Enable non-blocking reads from the child's stdout.\n child_fd = process.stdout.fileno()\n fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)\n fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n\n end_time = (time.time() + timeout) if timeout else None\n iter_end_time = (time.time() + iter_timeout) if iter_timeout else None\n\n while True:\n if end_time and time.time() > end_time:\n raise TimeoutError()\n if iter_end_time and time.time() > iter_end_time:\n yield None\n iter_end_time = time.time() + iter_timeout\n\n if iter_end_time:\n iter_aware_poll_interval = min(poll_interval,\n max(0, iter_end_time - time.time()))\n else:\n iter_aware_poll_interval = poll_interval\n\n read_fds, _, _ = select.select([child_fd], 
[], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if not data:\n break\n yield data\n\n if process.poll() is not None:\n # If process is closed, keep checking for output data (because of timing\n # issues).\n while True:\n read_fds, _, _ = select.select([child_fd], [], [],\n iter_aware_poll_interval)\n if child_fd in read_fds:\n data = _read_and_decode(child_fd, buffer_size)\n if data:\n yield data\n continue\n break\n break\n finally:\n try:\n if process.returncode is None:\n # Make sure the process doesn't stick around if we fail with an\n # exception.\n process.kill()\n except OSError:\n pass\n process.wait()", "def _ffmpeg_loop(cls, ffmpeg: subprocess.Popen) -> Iterable[Progress]:\n while ffmpeg.poll() is None:\n rlist, _, _ = select((ffmpeg.stderr, ffmpeg.stdout), (), ())\n # Read logs from stdin\n if ffmpeg.stderr in rlist:\n status = cls.process_logs(ffmpeg.stderr.read().splitlines())\n if status:\n yield status\n # ignore stdout\n if ffmpeg.stdout in rlist:\n ffmpeg.stdout.read()", "def _get_output(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n\n try: # Default case: Return the command output\n self.outputs[idx] = subprocess.check_output(self.commands[idx], stderr=subprocess.STDOUT).splitlines()\n except subprocess.CalledProcessError as e: # Command error: Return error message\n self.outputs[idx] = e.output.splitlines()\n\n self.updated = True\n time.sleep(self.interval)", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def _run(proc: Popen, timeout):\n try:\n return proc.wait(timeout=timeout)\n except TimeoutExpired:\n pass\n if sys.platform != 'win32':\n proc.send_signal(signal.SIGINT)\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.terminate() # SIGTERM\n try:\n return proc.wait(timeout=5)\n except TimeoutExpired:\n pass\n\n proc.kill() # SIGKILL\n return proc.wait(timeout=5)", "def clean_output(self, process, queue):\n while True:\n try:\n dirty = process.getline()\n clean = self.parse(dirty)\n except Queue.Empty:\n process.queueHasData.wait()\n except ValueError as inst:\n print(\"Error: \" + str(inst))\n else:\n if clean != None:\n self.cleanOutput.append(clean)", "def _exec_cmd_helper(self, cmd: str, nvim_ipc: str):\n assert self.busy is False\n\n self.shared_status.set_running()\n self.busy = True\n os.system(\"clear\")\n logging.info(\"Executing cmd {0}\".format(cmd))\n\n start = time.time()\n\n success = False\n if self.command_group.is_cmd_runner_command(cmd):\n for runner in self.runners:\n if runner.config.name == cmd:\n success = runner.run_all()\n break\n else:\n # The code block below essentially just \"tees\" the stdout and\n # stderr to a log file, while still preserving the terminal\n # output (inclusive colors).\n # Using subprocess.PIPE does not seem possible under Darwin,\n # since the pipe does not have the isatty flag set (the isatty\n # flag affects the color output).\n # Note that the file is only written at the end and not streamed.\n master, slave = pty.openpty()\n\n # This prevents LF from being converted to CRLF\n attr = termios.tcgetattr(slave)\n attr[1] = attr[1] & ~termios.ONLCR\n termios.tcsetattr(slave, termios.TCSADRAIN, attr)\n\n proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=False)\n\n # Close the write end of the pipe in this process, since we don't need it.\n # Otherwise we would not get 
EOF etc.\n os.close(slave)\n\n read_stdout_stderr = os.fdopen(master, 'rb', buffering=0)\n complete_output = \"\"\n\n try:\n while proc.poll() is None:\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n\n # Read the last line\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n # This error is \"expected\" under Linux systems.\n # readline() doesn't seem to behave properly there.\n # The exception does not occur on MacOS.\n except OSError as oserr:\n if oserr.errno != errno.EIO or proc.poll() is None:\n logging.critical(\"Unexpected OS error: {0}\".format(oserr))\n except:\n logging.critical(\"Unexpected error while reading from process\")\n\n os.close(master)\n proc.wait()\n\n if proc.returncode == 0:\n success = True\n\n logfile, logfilename = tempfile.mkstemp(dir=cybld_helpers.get_base_path(),\n prefix=cybld_helpers.NVIM_LOG_PREFIX)\n\n # strip color codes from logfile\n # complete_output = re.sub(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]', '', complete_output)\n complete_output = re.sub(r'\\x1b(\\[.*?[@-~]|\\].*?(\\x07|\\x1b\\\\))', '', complete_output)\n\n with open(logfile, 'w+') as logfile_opened:\n logfile_opened.write(complete_output)\n\n CyBldIpcNeovim(True, nvim_ipc, logfilename, cmd)\n\n end = time.time()\n\n self.busy = False\n cybld_helpers.print_seperator_lines()\n\n timediff_in_seconds = str(int(end - start))\n\n if success:\n cybld_helpers.print_centered_text(\"SUCCESS: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), True)\n self.shared_status.set_success()\n else:\n cybld_helpers.print_centered_text(\"FAIL: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), False)\n self.shared_status.set_fail()\n\n if self.settings.print_stats:\n cybld_helpers.print_centered_text(self.stats.get_command_stats(cmd), None)\n\n if success:\n self.talker.say_success()\n else:\n self.talker.say_fail()\n\n cybld_helpers.print_seperator_lines()\n self.stats.update_command_stats(cmd, success, int(timediff_in_seconds))\n\n if success:\n self.success_callback(cmd)\n else:\n self.fail_callback(cmd)", "def bg(self):\n p = None\n self.processes = []\n self._stderr = None\n stdin = sys.stdin\n cmds = self.commands\n\n if [c for c in cmds if c._cmd_args[:1] == ['sudo']]:\n check_sudo()\n\n for cmd in cmds:\n if isinstance(cmd, Stdin):\n stdin = cmd.iter_stdout\n elif isinstance(cmd, PyPipe):\n cmd.stdin = p.stdout\n stdin = cmd.iter_stdout\n p = cmd\n else:\n args = cmd.command_line(cmd.kwargs.get('shell', False))\n\n kwargs = dict(\n stdin=stdin, stderr=PIPE,\n stdout=PIPE\n )\n kwargs.update(cmd.kwargs)\n env_ = kwargs.pop('env', env)\n\n log.debug('Popen(%r, **%r)', args, kwargs)\n\n kwargs['env'] = env_\n\n try:\n p = Popen(args, **kwargs)\n except OSError:\n self._raise()\n\n self.processes.append(p)\n stdin = p.stdout\n return p", "def read(self):\n # now read stderr for log messages, we could buffer here but since\n # we're just logging the messages, I don't care to\n try:\n out = self.proc.stderr.read()\n if out:\n LOG.debug('reading %s got %d bytes on stderr', self.name,\n len(out))\n for line in out.splitlines():\n LOG.warning('%s: %s', self.name, line)\n except IOError as err:\n if err.errno != errno.EAGAIN:\n # allowing a caller to handle the exception as well\n raise\n except:\n LOG.exception('uncaught exception in stderr read')\n\n # This read call is non-blocking\n try:\n self.buffer += self.proc.stdout.read()\n if len(self.buffer):\n LOG.debug('reading %s, buffer now %d bytes',\n 
self.name, len(self.buffer))\n except IOError as err:\n if err.errno != errno.EAGAIN:\n raise\n except:\n # sometimes the process goes away in another thread and we don't\n # have it anymore\n LOG.exception('uncaught exception in stdout read')\n return\n\n # iterate for each line we have\n while self.buffer:\n idx = self.buffer.find('\\n')\n if idx == -1:\n break\n\n line = self.buffer[0:idx].strip()\n if line:\n self.datalines.append(line)\n self.buffer = self.buffer[idx+1:]", "def subprocess_wait_all(procs, poll=True):\n # type: (list, bool) -> list\n if procs is None or len(procs) == 0:\n raise ValueError('procs is invalid')\n rcodes = [None] * len(procs)\n stdout = [None] * len(procs)\n stderr = [None] * len(procs)\n while True:\n for i in range(0, len(procs)):\n if rcodes[i] is None:\n if poll:\n if procs[i].poll() is not None:\n rcodes[i] = procs[i].returncode\n else:\n stdout[i], stderr[i] = procs[i].communicate()\n rcodes[i] = procs[i].returncode\n if all(x is not None for x in rcodes):\n break\n time.sleep(0.1)\n return rcodes, stdout, stderr", "def _process_until_raw_prompt(self, capture_output=False):\n # TODO: experiment with Ctrl+C, Ctrl+D, reset\n eot_count = 0\n value = None\n done = False\n out = b\"\"\n err = b\"\"\n\n while not done:\n if (self._connection.num_bytes_received == 0\n and time.time() - self._startup_time > 2):\n self._send_output(\"[Device seems to be busy. Use Ctrl+C to interrupt.]\\n\", \"stdout\")\n \n # There may be an input submission waiting\n # and we can't progress without resolving it first\n self._check_for_side_commands()\n\n # Process input in chunks (max 1 parsing marker per chunk).\n # Prefer whole lines (to reduce the number of events),\n # but don't wait too long for eol.\n output = self._connection.soft_read_until(BLOCK_CLOSERS, timeout=0.01)\n stream_name = \"stderr\" if eot_count == 1 else \"stdout\"\n\n if output.endswith(THONNY_MSG_START):\n debug(\"MSGSTA: \" + str(output))\n output = output[: -len(THONNY_MSG_START)]\n\n # Low chance of failure (eg. 
because of precisely timed reboot),\n # therefore it's safe to use big timeout\n temp = self._connection.soft_read_until(THONNY_MSG_END, timeout=3)\n if temp.endswith(THONNY_MSG_END):\n value = temp[: -len(THONNY_MSG_END)]\n debug(\"GOTVALUE: \" + str(value))\n else:\n # failure, restore everything to help diagnosis\n output = output + THONNY_MSG_START + temp\n\n elif output.endswith(EOT):\n debug(\"EOT: \" + str(output))\n output = output[: -len(EOT)]\n eot_count += 1\n if eot_count == 2:\n # Normal completion of the command\n # big chance of being at the raw prompt\n temp = self._connection.soft_read_until(RAW_PROMPT, timeout=0.1)\n if temp == RAW_PROMPT and self._connection.incoming_is_empty():\n done = True\n elif temp:\n # Failure, temp needs to be parsed again\n self._connection.unread(temp)\n\n elif output.endswith(FIRST_RAW_PROMPT) and self._connection.incoming_is_empty():\n debug(\"FIRAPRO: \" + str(output))\n output = output[: -len(FIRST_RAW_PROMPT)]\n done = True\n\n elif (\n output.endswith(NORMAL_PROMPT)\n and self._connection.peek_incoming() == b\"\\r\\n\" + FIRST_RAW_PROMPT\n ):\n debug(\"NOPRO: \" + str(output))\n output = output + self._connection.read_until(FIRST_RAW_PROMPT)\n # skip both normal and raw prompt together\n # (otherwise they get processed separately)\n output = output[: -len(NORMAL_PROMPT + b\"\\r\\n\" + FIRST_RAW_PROMPT)]\n done = True\n\n elif output.endswith(NORMAL_PROMPT) and self._connection.incoming_is_empty():\n debug(\"NOPRO2: \" + str(output))\n output = output[: -len(NORMAL_PROMPT)]\n # switch to raw mode and continue\n self._connection.write(RAW_MODE_CMD)\n\n if capture_output:\n if stream_name == \"stdout\":\n out += output\n else:\n assert stream_name == \"stderr\"\n err += output\n else:\n # TODO: deal with partial UTF-8 chars\n self._send_output(output.decode(ENCODING), stream_name)\n\n debug(\"doneproc\")\n return (\n out.decode(ENCODING),\n err.decode(ENCODING),\n None if value is None else value.decode(ENCODING),\n )", "def compute(self):\n parfile = self.create_parfile()\n self._command = [self.class_exe, parfile]\n process = subprocess.Popen(self._command)\n try:\n # process.wait(timeout=300)\n process.wait()\n # except (KeyboardInterrupt, subprocess.TimeoutExpired) as e: # TimeoutExpired only in Python >= 3.3\n except Exception as e:\n process.kill()\n raise e\n return", "def _get_childs_data(child):\n (stdout, stderr) = child.communicate()\n ecode = child.poll()\n\n if ecode != 0:\n raise CmdError('Command %r returned %d' % (child.cmd, ecode))\n\n return stdout", "def subprocess_nowait(cmd, shell=False, cwd=None, env=None):\n # type: (str, bool, str, dict) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)", "def collect_output_from_command(cmd):\n \n try:\n # print \"Trying %s\" % ' '.join(sys.argv[1:])\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, err_msgs = p.communicate()\n except OSError as err:\n print(\"Failed running '%s' (%d - %s)\" %\n (sys.argv, err.errno, err.strerror))\n raise\n else:\n return output, err_msgs", "def finish_subprocess(proc, cmdline, cmd_input=None, ok_exit_codes=None):\n if ok_exit_codes is None:\n ok_exit_codes = [0]\n out, err = proc.communicate(cmd_input)\n\n ret = proc.returncode\n if ret not in ok_exit_codes:\n LOG.error(\"Command '%(cmdline)s' with process id '%(pid)s' expected \"\n \"return code in '%(ok)s' but got '%(rc)s': %(err)s\" %\n {'cmdline': cmdline, 'pid': proc.pid, 'ok': ok_exit_codes,\n 'rc': ret, 'err': err})\n 
raise SubprocessException(' '.join(cmdline), ret, out, err)\n return out", "def _retrieve_output(thread, timeout, queue, thread_error):\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.is_alive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from TaskWarrior\")\n\n return data", "def _pipe_monitor(self):\n while self._pipe_thread.isAlive() and not self._is_closed:\n if self._pipe.poll(1):\n recv_obj = self._pipe.recv()\n self.processKey(recv_obj)", "def test_reactor_stop_unblocks_EventualResult_in_threadpool(self):\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet import reactor\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n reactor.callLater(0.1, reactor.stop)\n return Deferred()\n\nresult = [13]\ndef inthread():\n er = run()\n try:\n er.wait(timeout=10)\n except crochet.ReactorStopped:\n result[0] = 23\nreactor.callInThread(inthread)\ntime.sleep(1)\nsys.exit(result[0])\n\"\"\"\n process = subprocess.Popen([sys.executable, \"-c\", program],\n cwd=crochet_directory)\n self.assertEqual(process.wait(), 23)", "def _handleNonBlockingProcess(self):\n # register newly created process with the caller, even if it fails\n # so that subsequent CleanupProcesses action knows about it\n if self.caller is not None:\n self.caller.addExecutor(self)\n\n # if provided, check self.logOutputToWaitFor for non-blocking\n # process output indicating that the process has really reliably\n # started\n if self.logOutputToWaitFor:\n logs = self._handleLogOutputWaiting()\n else:\n time.sleep(1)\n logs = self.getLogs()\n\n # in case of failure - does return the same returncode as above\n # this next .poll() calls has to be here, above condition might have\n # escaped through log check (i.e. 
before .poll() call)\n self.returncode = self.proc.poll()\n\n if self.returncode is None:\n m = (\"Command '%s' is running (PID: %s) ...\\nlogs:\\n%s\" %\n (self.command, self.proc.pid, logs))\n return m\n else:\n m = (\"Command '%s' failed, returncode: '%s'\\nlogs:\\n%s\" %\n (self.command, self.returncode, logs))\n # self.logger.error(m) # log locally\n raise ExecutorException(m)", "def _serverThreadRunner(self):\n args = self._argsForSubprocess()\n logging.info(\"Test server popen() args: %s\" % str.join(\" \", args))\n self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n with self._has_launched_cv:\n self._has_launched = True\n self._has_launched_cv.notify_all()\n stdout, stderr = self._server_process.communicate()\n logging.info(\"Process stdout: %s\", stdout.decode(\"utf-8\"))\n logging.info(\"Process stderr: %s\", stderr.decode(\"utf-8\"))\n return stdout, stderr", "def supercall(command):\n p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)\n retval = p.wait()\n \n if retval != 0:\n get_logger().critical('error calling {}'.format(command))\n for line in p.stderr.readlines():\n get_logger().critical(line.decode('utf8').replace('\\n', ''))\n\n return retval", "def runSubprocessInThread(command, resultQ, verbose=False):\n result = runSubProcess(command, verbose=verbose)\n resultQ.put(result)", "def start(self):\n if self.SILENT_TIMEOUT < self.MINIMUM_SILENT_TIMEOUT:\n raise AssertionError('Maximum recursion depth exceeded in %r' % self)\n\n sys.stdout.flush()\n sys.stderr.flush()\n self._output = tempfile.NamedTemporaryFile(delete=False, bufsize=0,\n prefix='chromite-parallel-')\n self._parent_pid = os.getpid()\n return multiprocessing.Process.start(self)", "def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))", "def _run_extractor(self):\n \n # create the command to run.\n cli_args = [self.extractor_path, self.account_name, self.pst_file, self.output_path]\n if self.use_mono:\n cli_args.insert(0, \"mono\")\n self.logger.debug(\"Running command: {}\".format(\" \".join(cli_args)))\n \n # if @self.use_mono is False (i.e. 
Windows), hide the console window per:\n # https://stackoverflow.com/a/1016651\n # See also: https://docs.python.org/3/library/subprocess.html#windows-popen-helpers\n startup_info = None\n if not self.use_mono:\n startup_info = subprocess.STARTUPINFO()\n startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n # run @self.extractor_app; based on: https://stackoverflow.com/a/803396\n process = subprocess.Popen(cli_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n startupinfo=startup_info, universal_newlines=True)\n \n # prepare to capture each character outputted from @self.extractor_app.\n line_parts = []\n\n while process.poll() is None:\n \n # save output to @line_parts as long as the output is not a line break.\n # if the output is a line break, @line_parts is converted to a string and logged\n # and @line_parts is cleared.\n for std_out in process.stdout.read(1):\n if std_out != \"\\n\":\n std_out = std_out.encode(self.charset).decode(self.charset, \n errors=\"replace\")\n line_parts.append(std_out)\n process.stdout.flush()\n else:\n line = \"\".join(line_parts)\n line_parts[:] = []\n self._log_subprocess_line(line)\n\n # raise an exception if @process returns a positive integer (i.e. fails).\n if process.returncode > 0:\n self.logger.debug(\"Child process stderr: {}\".format(process.stderr.readlines()))\n msg = \"Command failed with return code: {}\".format(process.returncode)\n raise ChildProcessError(msg)\n\n return", "def waitUntilSubprocessLaunched(self):\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched", "def runSubProcess(command, verbose=0):\n if verbose:\n print(command)\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = process.communicate()\n status = process.poll()\n \n return output.decode('utf-8'), stderr.decode('utf-8'), status", "def Cleanup(self, silent=False):\n if os.getpid() != self._parent_pid or self._output is None:\n return\n try:\n # Print output from subprocess.\n if not silent and logging.getLogger().isEnabledFor(logging.DEBUG):\n with open(self._output.name, 'r') as f:\n for line in f:\n logging.debug(line.rstrip('\\n'))\n finally:\n # Clean up our temporary file.\n osutils.SafeUnlink(self._output.name)\n self._output.close()\n self._output = None", "def subprocess_with_output(\n cmd, shell=False, cwd=None, env=None, suppress_output=False):\n # type: (str, bool, str, dict, bool) -> int\n _devnull = None\n try:\n if suppress_output:\n _devnull = open(os.devnull, 'w')\n proc = subprocess.Popen(\n cmd, shell=shell, cwd=cwd, env=env, stdout=_devnull,\n stderr=subprocess.STDOUT)\n else:\n proc = subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)\n proc.wait()\n finally:\n if _devnull is not None:\n _devnull.close()\n return proc.returncode", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return proc1.wait(), proc2.wait(), bytes_piped", "def run_with_output(self, cmd, end_strs=None, 
timeout=301, timeout_exception=True, api_call='write'):\n if api_call == 'write':\n self.write(cmd)\n out = ''\n else:\n out = self.runsingle(cmd)\n time.sleep(1)\n out += self.gather_output(cmd, out, end_strs, timeout, timeout_exception) # gather last of data buffer\n return out", "def call_subprocess(poutput, data=None):\n try:\n output = poutput.communicate(input=data)\n LOG.debug(\"Exit status: \" + str(poutput.returncode))\n if poutput.returncode != 0:\n LOG.warning(\"Process returned non-zero exit code: \" + str(poutput.returncode))\n LOG.warning(\"Process STDOUT: \" + output[0])\n LOG.warning(\"Process STDERR: \" + output[1])\n return output[0].strip(), output[1].strip()\n except Exception as e:\n LOG.exception(\"Command failed!\")\n raise e", "def get_stdout(self):\n _ = self.get() # force finished wait\n if self._stdout is not None:\n if wait_until_exists(self._stdout):\n with open(self._stdout) as f:\n self._out = f.read()\n return self._out" ]
[ "0.7034326", "0.66883934", "0.63439405", "0.6276364", "0.6119922", "0.6063825", "0.5989354", "0.588833", "0.5796989", "0.578598", "0.56847566", "0.5676479", "0.5609866", "0.5598541", "0.55334884", "0.55334884", "0.55334884", "0.55334884", "0.55307305", "0.5524578", "0.5516227", "0.5508247", "0.5507495", "0.5468501", "0.5460452", "0.545493", "0.54549164", "0.54335576", "0.54335576", "0.5429713", "0.54293483", "0.54288495", "0.54270655", "0.5387969", "0.5386026", "0.53807676", "0.5367811", "0.53628266", "0.5353012", "0.53479105", "0.53399074", "0.5323306", "0.5307976", "0.53023016", "0.5288623", "0.5287926", "0.52876234", "0.5285963", "0.52841693", "0.5267603", "0.52650595", "0.52524996", "0.5248692", "0.5239093", "0.5226585", "0.52184904", "0.5213783", "0.5213783", "0.5209777", "0.52088857", "0.52051157", "0.5195968", "0.519105", "0.51884234", "0.5182406", "0.5170411", "0.51629883", "0.51602066", "0.5158481", "0.5149543", "0.51242214", "0.51109344", "0.5105196", "0.5100315", "0.5088953", "0.50868815", "0.5080896", "0.5078591", "0.50755", "0.5073186", "0.50701785", "0.506447", "0.50633186", "0.5062903", "0.50612885", "0.5050606", "0.50484896", "0.503978", "0.50325173", "0.50266355", "0.501961", "0.50186634", "0.50167423", "0.5011476", "0.5010288", "0.5009476", "0.500439", "0.500298", "0.50021076", "0.49952215" ]
0.553726
14
Keep an in-memory cache of a function's results given its inputs
def memoize(obj):
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
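For illustration, a short usage sketch of the decorator above; the `slow_add` function and its call values are invented for the example, and the decorator body is repeated so the sketch runs on its own:

# Usage sketch for the memoize decorator shown above (illustrative only).
import functools

def memoize(obj):
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer

@memoize
def slow_add(a, b):
    # stand-in for an expensive computation
    return a + b

print(slow_add(2, 3))   # computes 5 and stores it under the key "(2, 3){}"
print(slow_add(2, 3))   # returns the cached 5 without re-running slow_add
print(slow_add.cache)   # the cache dict is exposed on the decorated function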
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache", "def _memoize_return_values(func):\n cache= {}\n @wraps(func)\n def memf(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return memf", "def memorize(func):\n cache = {}\n\n @wraps(func)\n def cached_function(*args, **kwargs):\n if args not in cache:\n cache[args] = func(*args, **kwargs)\n return cache[args]\n\n return cached_function", "def memoize(func):\n cache = {}\n # Store results in a dict that maps arguments to results\n def wrapper(*args, **kwargs):\n if(args, kwargs) not in cache:\n # call func() and store the result.\n cache[(args,kwargs)] = func(*args,**kwargs)\n return cache[(args,kwargs)]\n return wrapper", "def cache(func):\n storage = {}\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if storage.get(key):\n return storage[key]\n else:\n result = func(*args, **kwargs)\n storage[key] = result\n return result\n\n return wrapper", "def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper", "def memoize(f):\n cache = OrderedDict({})\n\n def wrapper(*keys, **kwargs):\n \"\"\" Search for invoker function's return value in cache for given arguments,\n if found then return else store function parameters as key\n and function return value as value in cache\n If cache size exceeds 2, delete the oldest used key value record\n \"\"\"\n key = str(keys) + str(kwargs)\n if key in cache:\n value = cache.pop(key)\n cache[key] = value\n return cache[key]\n while len(cache)>1:\n cache.popitem(False)\n cache[key] = f(*keys, **kwargs)\n return cache[key]\n return wrapper", "def cached(func):\n return _lru_cache(None)(func)", "def memoize(func):\n cache = {}\n @wraps(func)\n def wrap(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return wrap", "def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper", "def memorized(f):\n cache = {}\n @wraps(f)\n def wrapped(*args):\n try:\n result = cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n return wrapped", "def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + 
str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn", "def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache", "def memo(func):\n cache = {}\n\n def wrapper(*args, **kwargs):\n update_wrapper(wrapper, func)\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return wrapper", "def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper", "def instance_cache(func):\n def _wrapper(self, *args, **kwargs):\n key = (func.__name__,) + args\n for pair in sorted(kwargs.items()):\n key += pair\n if key in self._cache:\n return self._cache[key]\n data = func(self, *args, **kwargs)\n self._cache[key] = data\n return data\n return _wrapper", "def test_cache():\n\n def func(arg1, arg2):\n return arg1 * arg2\n\n first = cache(func)(100, 200)\n second = cache(func)(100, 200)\n assert first is second", "def cache_result(func):\n def cache_set(key, value):\n cache.set(key, value, CACHE_TIMEOUT)\n return value\n\n def cached_func():\n prefix = func.__name__\n cached_funcs.add(prefix)\n key = get_cache_key(prefix=prefix)\n return cache.get(key) or cache_set(key, func())\n return cached_func", "def memoize_by_args(func):\n memory = {}\n\n @functools.wraps(func)\n def memoized(*args):\n if args not in memory.keys():\n value = func(*args)\n memory[args] = value\n\n return memory[args]\n\n return memoized", "def memoize(function):\r\n cache = {}\r\n def decorated_function(*args):\r\n if args in cache:\r\n return cache[args]\r\n else:\r\n val = function(*args)\r\n cache[args] = val\r\n return val\r\n return decorated_function", "def memoize(f):\n cache = {}\n\n def memf(*x):\n if x not in cache:\n cache[x] = f(*x)\n return cache[x]\n return memf", "def _memoize(func, *args, **opts):\r\n if opts: # frozenset is used to ensure hashability\r\n key = args, frozenset(opts.items())\r\n else:\r\n key = args\r\n cache = func.cache # attribute added by memoize\r\n try:\r\n result = cache[key]\r\n except KeyError:\r\n result = cache[key] = func(*args, **opts)\r\n return result", "def memoize(func):\r\n cache = {}\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key not in cache:\r\n cache[key] = func(*args, **kwargs)\r\n return cache[key]\r\n return wrapper", "def __call__(self, *args):\n if args not in self.memo:\n self.memo[args] = self.f(*args)\n return self.memo[args]", "def cached_func(*args):\n try: # fails if cache is not instantiated\n return 
self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value", "def memoize(func):\n tbl = {}\n\n def helper(args):\n if args not in tbl:\n tbl[args] = func(args)\n return tbl[args]\n return helper", "def memo(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = str(args) + str(kwargs)\n try:\n return cache[key]\n except KeyError:\n rc = func(*args, **kwargs)\n cache[key] = rc\n return rc\n return wrapper", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n return f(args)\n return _f", "def memoize(f):\n cache = {}\n @functools.wraps(f)\n def g(*args):\n ret = cache.get(args, cache)\n if ret is cache:\n ret = cache[args] = f(*args)\n return ret\n return g", "def memoization(func):\n cache = {}\n\n @wraps(func)\n def _wrap(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n result = cache.get(key, None)\n if result:\n print(\"It's cached\")\n return result\n\n result = func(*args, **kwargs)\n cache[key] = result\n return result\n\n return _wrap", "def unbound_cache(func):\n\n cache = {}\n\n @functools.wraps(func)\n def caching_wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n\n return caching_wrapper", "def cache_result(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n assert len(args) == 0 and len(kwargs) == 0, \"Wrapped call must be empty\"\n if not hasattr(f, \"cached_result\"):\n f.cached_result = f()\n return f.cached_result\n return wrapper", "def memoize(f):\r\n cache = {}\r\n\r\n def rval(*args, **kwargs):\r\n kwtup = tuple(kwargs.items())\r\n key = (args, kwtup)\r\n if key not in cache:\r\n val = f(*args, **kwargs)\r\n cache[key] = val\r\n else:\r\n val = cache[key]\r\n return val\r\n\r\n return rval", "def run(self, func, *args):\n @wraps(func)\n def cached_func(*args):\n \"\"\"Run wise cache function\"\"\"\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value\n return cached_func", "def cache(self, *args, **kwargs):\n\n default_fn = kwargs.pop('default_fn', None)\n\n def _run(*args, **kwargs):\n \"\"\"\n :param: *args\n :param: **kwargs (fname, force, verbose)\n \"\"\"\n\n fname = kwargs.pop('fname', None)\n force = kwargs.pop('force', False)\n verbose = kwargs.pop('verbose', True)\n copy = kwargs.get('copy', False)\n\n callback = None\n if len(args) > 1:\n callback, *args = args\n\n if len(args) > 0:\n adata = args[0] if isinstance(args[0], anndata.AnnData) else kwargs.get('adata')\n else:\n adata = kwargs.get('adata')\n\n assert isinstance(adata, anndata.AnnData), f'Expected `{adata}` to be of type `anndata.AnnData`.'\n\n if callback is None:\n callback = (lambda *_x, **_y: None) if default_fn is None else default_fn\n\n assert callable(callback), f'`{callblack}` is not callable.'\n\n if force:\n if verbose:\n print('Recomputing values.')\n res = callback(*args, **kwargs)\n cache_fn(res if copy else adata, fname, True, verbose, *args, **kwargs)\n return res\n\n # when loading to cache and copy is true, modify the copy\n if copy:\n adata = adata.copy()\n\n # we need to pass the *args and **kwargs in order to\n # get the right field when using regexes\n if not cache_fn(adata, fname, False, verbose, *args, **kwargs):\n if verbose:\n print('Computing 
values.')\n res = callback(*args, **kwargs)\n ret = cache_fn(res if copy else adata, fname, True, False, *args, **kwargs)\n\n assert ret, 'Caching failed.'\n\n return res\n\n # if cache was found and not modifying inplace\n return adata if copy else None\n\n cache_fn = self._create_cache_fn(*args, **kwargs)\n\n return _run", "def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)", "def eval(self, func, *args, **kwargs):\r\n if self.cachedir is None:\r\n return func(*args, **kwargs)\r\n return self.cache(func)(*args, **kwargs)", "def wrapper(*keys, **kwargs):\n key = str(keys) + str(kwargs)\n if key in cache:\n value = cache.pop(key)\n cache[key] = value\n return cache[key]\n while len(cache)>1:\n cache.popitem(False)\n cache[key] = f(*keys, **kwargs)\n return cache[key]", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.cache = cache\n return _f", "def memoize(cls, func, *args, **kw):\n with cls._locks[func], cls._lock:\n if not isinstance(args, collections.Hashable):\n result = func(*args, **kw)\n return result\n if kw:\n # frozenset is used to ensure hashability\n key = args, frozenset(kw.items())\n else:\n key = args\n # func.cache attribute added by memoize\n cache = cls._caches[func]\n try:\n if key in cache:\n result = cache[key].result\n cls.shrink_cache()\n return result\n except TypeError:\n result = func(*args, **kw)\n return result\n\n start = time.time()\n result = func(*args, **kw)\n end = time.time()\n duration = end - start\n\n cache[key] = CacheEntry(func, key, duration, result,\n kw.get('expiration'), *args, **kw)\n cls.shrink_cache()\n cls._cache.append(cache[key])\n return result", "def memoize(function):\n\tdef wrapper(*Args, **KWArgs):\n\n\t\tkey = _getMemoizeKey(function, *Args, **KWArgs)\n\n\t\tif key in Memory:\n\t\t\treturn Memory[key]\n\n\t\tRet = function(*Args, **KWArgs)\n\n\t\tMemory[key] = Ret\n\n\t\treturn Ret\n\n\twrap(function, wrapper)\n\n\treturn wrapper", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def cached(function):\n\t@wraps(function)\n\tdef check_cache_first(cls, *args):\n\t\tif not args in cls._cache:\n\t\t\tcode = function(cls, *args)\n\t\t\tif code:\n\t\t\t\tcls._cache[args] = code\n\t\t\t\treturn code\n\t\telse:\n\t\t\treturn cls._cache[args]\n\t\treturn None\n\treturn check_cache_first", "def cached(key):\n def wrapper(function):\n def wrapped(d,g,i):\n if key not in d:\n d[key] = function(d,g,i)\n return d[key]\n return wrapped\n return wrapper", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n 
except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "def test_argument_change():\r\n mem = Memory(cachedir=env['dir'], verbose=0)\r\n func = mem.cache(count_and_append)\r\n # call the function for the first time, is should cache it with\r\n # argument x=[]\r\n assert func() == 0\r\n # the second time the argument is x=[None], which is not cached\r\n # yet, so the functions should be called a second time\r\n assert func() == 1", "def memoize(func):\n @wraps(func)\n def memoizer(self):\n if not hasattr(self, '_cache'):\n self._cache = {}\n if func.__name__ not in self._cache:\n self._cache[func.__name__] = func(self)\n return self._cache[func.__name__]\n return memoizer", "def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator", "def dynCache(*args, **kwargs)->None:\n pass", "def memo(f):\n # Peter Norvig's\n cache = {}\n\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.cache = cache\n return _f", "def _cached_call(self, args, kwargs):\r\n # Compare the function code with the previous to see if the\r\n # function code has changed\r\n output_dir, argument_hash = self._get_output_dir(*args, **kwargs)\r\n metadata = None\r\n # FIXME: The statements below should be try/excepted\r\n if not (self._check_previous_func_code(stacklevel=4) and\r\n os.path.exists(output_dir)):\r\n if self._verbose > 10:\r\n _, name = get_func_name(self.func)\r\n self.warn('Computing func %s, argument hash %s in '\r\n 'directory %s'\r\n % (name, argument_hash, output_dir))\r\n out, metadata = self.call(*args, **kwargs)\r\n if self.mmap_mode is not None:\r\n # Memmap the output at the first call to be consistent with\r\n # later calls\r\n out = _load_output(output_dir, self.func,\r\n timestamp=self.timestamp,\r\n mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n else:\r\n try:\r\n t0 = time.time()\r\n out = _load_output(output_dir, _get_func_fullname(self.func),\r\n timestamp=self.timestamp,\r\n metadata=metadata, mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n if self._verbose > 4:\r\n t = time.time() - t0\r\n _, name = get_func_name(self.func)\r\n msg = '%s cache loaded - %s' % (name, format_time(t))\r\n print(max(0, (80 - len(msg))) * '_' + msg)\r\n except Exception:\r\n # XXX: Should use an exception logger\r\n self.warn('Exception while loading results for '\r\n '(args=%s, kwargs=%s)\\n %s' %\r\n (args, kwargs, traceback.format_exc()))\r\n\r\n shutil.rmtree(output_dir, ignore_errors=True)\r\n out, metadata = self.call(*args, **kwargs)\r\n argument_hash = None\r\n return (out, argument_hash, metadata)", "def cached(\n inputs: INPUTS = None,\n params: PARAMETERS = None,\n outputs: OUTPUTS = None,\n) -> Callable[..., Callable[..., Optional[T]]]:\n\n def wrapper_builder(f: Callable[..., Optional[T]]) -> Callable[..., Optional[T]]:\n @functools.wraps(f)\n def wrapper(*args, **kw) -> Optional[T]:\n resolved_parameters = resolve_cache_parameters(params, args, kw)\n hash_key = current_cache.get_hash_key(f, inputs, resolved_parameters)\n if current_cache.use_cached(hash_key):\n return None\n\n result = f(*args, **kw)\n outputs_names = get_output_names(outputs, args, 
kw)\n current_cache.cache_outputs(hash_key, outputs_names)\n\n return result\n\n return wrapper\n\n return wrapper_builder", "def _proxy_cache(from_func, to_func):\n to_func.cache_info = from_func.cache_info\n to_func.cache_clear = from_func.cache_clear", "def memoize(func):\n mem = {}\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in mem:\n mem[key] = func(*args, **kwargs)\n return mem[key]\n return memoizer", "def memoize(f):\n class MemoDict(dict):\n def __init__(self, func):\n self.func = func\n\n def __call__(self, *args):\n return self[args]\n\n def __missing__(self, key):\n result = self[key] = self.func(*key)\n return result\n\n return MemoDict(f)", "def cached_func(*args):\n try: # fails if cache is not instantiated or if it is None\n value = self.data['step'][func.__name__]\n assert value is not None\n except (KeyError, AssertionError):\n value = func(*args)\n self.data['step'][func.__name__] = value\n return value", "def memo(f):\n def _f(*args):\n try:\n return _f.cache[args]\n except KeyError:\n _f.cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.__name__ = f.__name__\n _f.cache = {}\n return _f", "def test_argument_change(tmpdir):\n memory = Memory(location=tmpdir.strpath, verbose=0)\n func = memory.cache(count_and_append)\n # call the function for the first time, is should cache it with\n # argument x=[]\n assert func() == 0\n # the second time the argument is x=[None], which is not cached\n # yet, so the functions should be called a second time\n assert func() == 1", "def memoize(func):\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func", "def memoize(fn):\n cache = {}\n def newfn(*args, **kw):\n key = (tuple(args), tuple(sorted(kw.items())))\n if key in cache:\n return cache[key]\n else:\n cache[key] = val = fn(*args, **kw)\n return val\n newfn.__name__ = fn.__name__ + ' (MEMOIZED)'\n newfn.__module__ = fn.__module__\n return newfn", "def cached():\n def decorator(fn): # define a decorator for a function \"fn\"\n cache_name = fn.func_name\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments\n if os.path.exists(cache_name):\n with gzip.GzipFile(cache_name, 'rb') as cachehandle:\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with gzip.GzipFile(cache_name, 'wb') as cachehandle:\n pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)\n return res\n return wrapped\n return decorator # return this \"customized\" decorator that uses \"cachefile\"", "def test_inter_process_cache():\r\n\r\n x, y = theano.tensor.dvectors('xy')\r\n f = theano.function([x, y], [MyOp()(x), MyOp()(y)])\r\n f(numpy.arange(60), numpy.arange(60))\r\n if theano.config.mode == 'FAST_COMPILE' or theano.config.cxx == \"\":\r\n assert MyOp.nb_called == 0\r\n else:\r\n assert MyOp.nb_called == 1\r\n\r\n # What if we compile a new function with new variables?\r\n x, y = theano.tensor.dvectors('xy')\r\n f = theano.function([x, y], [MyOp()(x), MyOp()(y)])\r\n f(numpy.arange(60), numpy.arange(60))\r\n if theano.config.mode == 'FAST_COMPILE' or theano.config.cxx == \"\":\r\n assert MyOp.nb_called == 0\r\n else:\r\n assert MyOp.nb_called == 1", "def collect(self):\n for func in self._caches:\n cache = {}\n for key in self._caches[func]:\n if (time.time() - 
self._caches[func][key][1]) < self._timeouts[func]:\n cache[key] = self._caches[func][key]\n self._caches[func].clear()\n self._caches[func].update(cache)", "def cacheable(cache_key_template = None):\n class Wrapper(object):\n def __init__(self, fct, cache_key_template):\n self.fct = fct\n self.fct_call = self.uncached_fct_call\n self.key = cache_key_template\n self.cache = {}\n self.__name__ = fct.__name__\n self.__doc__ = fct.__doc__\n def __call__(self, *args, **kwargs):\n #~ print self, self.fct_call\n return self.fct_call(*args, **kwargs)\n def uncached_fct_call(self, *args, **kwargs):\n try:\n del kwargs['cache_key']\n return self.fct(*args, **kwargs)\n except KeyError:\n return self.fct(*args, **kwargs)\n def cached_fct_call(self, *args, **kwargs):\n try:\n #~ print \"1\"\n kwargs_sort = kwargs.keys()\n #~ print \"2\"\n kwargs_sort.sort()\n #~ print \"3\"\n kwargs_values = tuple([kwargs[i] for i in kwargs_sort])\n #~ print \"4\"\n cache_key = self.key % (args + kwargs_values)\n except TypeError:\n # when self.key is None\n try:\n #~ print \"5\"\n cache_key = kwargs.pop('cache_key')\n except KeyError:\n print(\"Caching activated, but no cache_key given! Will not use cache for this call.\")\n return self.fct(*args, **kwargs)\n try:\n #~ print \"6\"\n return self.cache[cache_key]\n except KeyError:\n print(\"Caching result\")\n #~ print \"7\"\n self.cache[cache_key] = self.fct(*args, **kwargs)\n #~ print \"8\"\n return self.cache[cache_key]\n def cache_on(self):\n self.fct_call = self.cached_fct_call\n def cache_off(self):\n self.fct_call = self.uncached_fct_call\n self.cache.clear() # maybe not necessary\n def __repr__(self):\n '''Return the function's docstring.'''\n return self.fct.__repr__()\n #~ def __get__(self, obj, objtype):\n #~ '''Support instance methods.'''\n #~ print 'get krijgt: ', obj, objtype\n #~ print 'get geeft : ', functools.partial(self.__call__, obj)\n #~ return functools.partial(self.__call__, obj)\n def __get__(self, instance, owner):\n '''Support instance methods. 
From:\n http://metapython.blogspot.nl/2010/11/python-instance-methods-how-are-they.html'''\n #~ print 'getting'\n #~ a = time()\n #~ instance.__dict__['__call__'] = MethodType(self, instance, owner)\n #~ print 'getting', self, instance, owner\n #~ try:\n #~ raise AssertionError\n #~ except AssertionError:\n #~ traceback.print_stack()\n #~ self.fct_call = self.fct_call.__get__(instance, owner)\n thing = types.MethodType(self, instance, owner)\n #~ thing = self.__get__(instance, owner) # dit zou equivalent moeten zijn aan MethodType(self, instance, owner)\n #~ thing = self.__class__(self.fct_call.__get__(instance, owner))\n #~ print time()-a\n return thing\n #~ test = self.fct_call.__get__(instance, owner)\n #~ result = self.__class__(self.fct_call.__get__(instance, owner))\n #~ print \"get\", self, instance, owner, MethodType(self, instance, owner)\n #~ print test#, result\n #~ return result\n #~ return self.__get__(instance, owner)\n #raise AssertionError\n #return MethodType(self, instance, owner)\n #instance.__call__ = MethodType(self, instance, owner)\n #return instance.__call__\n #~ try:\n #~ return self.as_method\n #~ except AttributeError:\n #~ print \"AttributeError!\"\n #~ self.as_method = \n #~ return self.as_method\n #~ def __set__(self, value):\n #~ pass\n \n def closer(f):\n # See http://stackoverflow.com/questions/233673/lexical-closures-in-python#235764\n # on closures.\n return Wrapper(f, cache_key_template)\n \n return closer", "def memoize(f):\n\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n self.__name__ = f.__name__\n\n def __call__(self, *args):\n global memoized_return_values\n try:\n return memoized_return_values[self.__name__]\n except KeyError:\n ret = memoized_return_values[self.__name__] = self.f(*args)\n return ret\n\n return memodict(f)", "def _memoizeArgsOnly (max_cache_size=1000):\n def wrapper (f):\n def fn (*args):\n try:\n return fn.cache[args]\n except KeyError:\n if fn.count >= max_cache_size:\n fn.cache = {}\n fn.count = 0\n fn.cache[args] = result = f(*args)\n fn.count += 1\n return result\n fn.cache = {}\n fn.count = 0\n return fn\n return wrapper", "def _memoize(args_func=sorted):\n def _memoize(wrapped):\n wrapped.cache = dict()\n wrapped.cache['call_count'] = 0\n @wraps(wrapped)\n def func(*args):\n wrapped.cache['call_count'] += 1\n hashed_args = tuple(args_func(args))\n if hashed_args in wrapped.cache:\n return wrapped.cache.get(hashed_args)\n return wrapped.cache.setdefault(hashed_args, wrapped(*args))\n return func\n return _memoize", "def call_and_shelve(self, *args, **kwargs):\r\n _, argument_hash, metadata = self._cached_call(args, kwargs)\r\n\r\n return MemorizedResult(self.cachedir, self.func, argument_hash,\r\n metadata=metadata, verbose=self._verbose - 1,\r\n timestamp=self.timestamp)", "def cache_response_timewise() -> Callable:\n def _cache_response_timewise(f: Callable) -> Callable:\n @wraps(f)\n def wrapper(wrappingobj: CacheableObject, *args: Any, **kwargs: Any) -> Any:\n ignore_cache = kwargs.pop('ignore_cache', False)\n cache_key = _function_sig_key(f.__name__, *args, **kwargs)\n now = ts_now()\n if ignore_cache is False:\n # Check the cache\n if cache_key in wrappingobj.results_cache:\n cache_life_secs = now - wrappingobj.results_cache[cache_key].timestamp\n\n cache_miss = (\n ignore_cache is True or\n cache_key not in wrappingobj.results_cache or\n cache_life_secs >= wrappingobj.cache_ttl_secs\n )\n\n if cache_miss:\n # Call the function, write the result in cache and return it\n result = f(wrappingobj, *args, 
**kwargs)\n wrappingobj.results_cache[cache_key] = ResultCache(result, now)\n return result\n\n # else hit the cache and return it\n return wrappingobj.results_cache[cache_key].result\n\n return wrapper\n return _cache_response_timewise", "def cache(self):\n self.cached_mu = self.mu.eval()\n self.cached_var = self.var.eval()\n self.cached_count = self.count.eval()", "def rerun(function, *Args, **KWArgs):\n\tkey = _getMemoizeKey(function, *Args, **KWArgs)\n\n\tif key in Memory:\n\t\tdel Memory[key] # Clear existing data, thus force a recalculation.\n\n\treturn function(*Args, **KWArgs) # #Note: This value will be memoized.", "def _wrapper(self, *args, **kwargs):\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val", "def cachefor(name):\n def decorator(func):\n assert name not in cachefuncs\n cachefuncs[name] = func\n return func\n return decorator", "def memoize(func=None, maxlen=None):\r\n if func is not None:\r\n cache = BoundedOrderedDict(maxlen=maxlen)\r\n @functools.wraps(func)\r\n def memo_target(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n lookup_value = pickle.dumps(candidate, 1)\r\n if lookup_value not in cache:\r\n cache[lookup_value] = func([candidate], args)[0]\r\n fitness.append(cache[lookup_value])\r\n return fitness\r\n return memo_target\r\n else:\r\n def memoize_factory(func):\r\n return memoize(func, maxlen=maxlen)\r\n return memoize_factory", "def test_custom_cache_multiple(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n 
return ret\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def reset_cache(self, *args, **kwargs):\n value = self._fn(*self._inject_obj(args), **kwargs)\n\n if self._cache:\n key = self.get_cache_key(*args, **kwargs)\n cache_backend.set(key, value, timeout=self._timeout)\n\n if self._memoize:\n memoization_key = self._get_memoization_key(*args, **kwargs)\n self._cached_results[memoization_key] = value\n\n return value", "def simple_memoize(wrapped):\n wrapped.cache = dict()\n @wraps(wrapped)\n def func(*args):\n if args in wrapped.cache:\n return wrapped.cache[args]\n return wrapped.cache.setdefault(args, wrapped(*args))\n return func", "def _inference_tip_cached(\n func: InferFn, instance: None, args: typing.Any, kwargs: typing.Any\n) -> Iterator[InferenceResult]:\n node = args[0]\n try:\n result = _cache[func, node]\n # If through recursion we end up trying to infer the same\n # func + node we raise here.\n if result is None:\n raise UseInferenceDefault()\n except KeyError:\n _cache[func, node] = None\n result = _cache[func, node] = list(func(*args, **kwargs))\n assert result\n return iter(result)", "def collect(self):\n self.isCollecting = True\n for func in _caches:\n cache = {}\n for key in _caches[func]:\n if (time.time() - _caches[func][key][1]) < self._timeouts[func]:\n cache[key] = _caches[func][key]\n _caches[func] = cache\n self.isCollecting = False", "def cache(timeout):\n def cached(func, *args, **kwargs):\n \"\"\"\n Cache data wrapper.\n \"\"\"\n lock = threading.Lock()\n key = func.__name__\n\n with lock:\n if key in CACHE:\n age = time() - CACHE[key]['time']\n if age < timeout:\n return CACHE[key]['result']\n\n result = func(*args, **kwargs)\n CACHE[key] = {\n 'result': result,\n 'time': time()\n }\n return result\n return decorator(cached)", "def cache(func, cache, invalid_after):\n\n def cache_wrapper(*args, **kwargs):\n call_id = str(func) + str(args)\n try:\n return_value = cache[call_id]\n if ctime() - return_value[0] > invalid_after:\n raise Exception\n else:\n return return_value[1]\n except:\n return_value = func(*args, **kwargs)\n cache[call_id] = (ctime(), return_value)\n return return_value\n return cache_wrapper", "def memoize(func):\n\n @wraps(func)\n def wrapped():\n try:\n return func.result\n except AttributeError:\n pass\n\n func.result = func()\n return func.result\n\n return wrapped", "def memoize(f):\n\n class memodict(dict):\n def __getitem__(self, *key):\n return dict.__getitem__(self, key)\n\n def __missing__(self, key):\n self[key] = ret = f(*key)\n return ret\n\n return memodict().__getitem__", "def memoize(func):\n table = dict() # function specific memoize table\n def wrappingfunction(*args):\n if args not in table: # args tuple hasn't been seen yet\n table[args] = func(*args) # envoke func call and store value\n return table[args] # return stored value\n return wrappingfunction # return wrappee", "def keep(self, func):\n def cachedFunc(*args, **kwargs):\n return self.retrieve(func, args, kwargs)\n cachedFunc.__name__ = func.__name__\n return cachedFunc", "def cached(function, key_, duration=DEFAULT_TIMEOUT):\r\n key = _function_cache_key(key_)\r\n val = cache.get(key)\r\n if val is None:\r\n log.debug('cache miss for %s' % key)\r\n val = function()\r\n cache.set(key, val, duration)\r\n else:\r\n log.debug('cache hit for %s' % key)\r\n return val", 
"def memoize_with_args(f):\n\n class memodict():\n def __init__(self, f):\n self.f = f\n self.result = {}\n self.__name__ = f.__name__\n\n def __call__(self, *args):\n args_string = f.__name__\n for arg in args:\n if (isinstance(arg, ndarray)):\n args_string += hashlib.sha1(arg).hexdigest() + \",\"\n else:\n args_string += hashlib.sha1(str(arg)).hexdigest() + \",\"\n try:\n return self.result[args_string]\n except KeyError:\n self.result[args_string] = self.f(*args)\n return self.result[args_string]\n\n return memodict(f)", "def lru_cache(**keywrds):\n def inside(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n return function(*args, **kwargs)\n return wrapper\n return inside", "def memoize(prefix, time=60):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = memoize_key(prefix, *args, **kwargs)\n data = cache.get(key)\n if data is not None:\n return data\n data = func(*args, **kwargs)\n cache.set(key, data, time)\n return data\n return wrapper\n return decorator", "def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"", "def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"", "def memoize(f):\n memo = {}\n\n def helper(*args, **kwargs):\n x = args, tuple(kwargs.items())\n if x not in memo:\n memo[x] = f(*args, **kwargs)\n return memo[x]\n\n return helper", "def cache_for_fiber(func):\n @wraps(func)\n def inner(obj, *args):\n key = \":\".join([repr(val) for val in\n [func.__name__, obj.fiber.n, obj.fiber.nc, obj.fiber.rho, obj.fiber.V] +\n list(args)\n ])\n if not hasattr(obj, '_cache'):\n obj._cache={}\n if key not in obj._cache:\n obj._cache[key] = func(obj, *args)\n return obj._cache[key]\n return inner", "def test_call_and_shelve():\r\n\r\n for func, Result in zip((MemorizedFunc(f, env['dir']),\r\n NotMemorizedFunc(f),\r\n Memory(cachedir=env['dir']).cache(f),\r\n Memory(cachedir=None).cache(f),\r\n ),\r\n (MemorizedResult, NotMemorizedResult,\r\n MemorizedResult, NotMemorizedResult)):\r\n nose.tools.assert_equal(func(2), 5)\r\n result = func.call_and_shelve(2)\r\n nose.tools.assert_true(isinstance(result, Result))\r\n 
nose.tools.assert_equal(result.get(), 5)\r\n\r\n result.clear()\r\n nose.tools.assert_raises(KeyError, result.get)\r\n result.clear() # Do nothing if there is no cache.\r" ]
[ "0.7994633", "0.7447933", "0.7371433", "0.73587483", "0.73249936", "0.73011243", "0.7296001", "0.7275171", "0.72417337", "0.7185484", "0.71638167", "0.7161567", "0.71558344", "0.71247685", "0.7054305", "0.7026194", "0.6991613", "0.6981695", "0.6979137", "0.6976191", "0.69679546", "0.6964668", "0.6964465", "0.69074607", "0.6904446", "0.6879855", "0.6837961", "0.6824901", "0.6822193", "0.680647", "0.67902684", "0.6780938", "0.67803425", "0.67756987", "0.6770216", "0.67659783", "0.6714668", "0.66980755", "0.66949624", "0.6694446", "0.6689803", "0.66502726", "0.66502726", "0.66502726", "0.66231686", "0.66013277", "0.65851426", "0.65851426", "0.6568326", "0.65561736", "0.6556072", "0.65536004", "0.65499634", "0.6533185", "0.6530968", "0.65210694", "0.65065473", "0.6472875", "0.64667", "0.6456191", "0.6434316", "0.640345", "0.63778335", "0.6375026", "0.6373908", "0.636363", "0.635455", "0.635435", "0.6344731", "0.6342201", "0.6339995", "0.63260925", "0.63200617", "0.6315057", "0.6308426", "0.6296038", "0.6293499", "0.6284273", "0.6277214", "0.62740463", "0.6273763", "0.6273763", "0.6264923", "0.6263576", "0.6255714", "0.62477833", "0.6220904", "0.62023425", "0.61798596", "0.61650324", "0.6162855", "0.6158116", "0.6150476", "0.6136471", "0.6134334", "0.61331594", "0.6130295", "0.6130295", "0.612203", "0.6105003", "0.6097117" ]
0.0
-1
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
def which(cmd, mode=os.F_OK | os.X_OK, path=None): # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly # rather than referring to PATH directories. This includes checking # relative to the current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path # extensions. This will allow us to short circuit when given # "python.exe". If it does match, only test that one, otherwise we # have to try others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
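A minimal usage sketch of the `which` helper defined above (assuming the function is in scope along with `import os`/`import sys`; the command names and the custom search path below are illustrative assumptions, not part of the record):

import os

# Look up an executable on the default PATH; returns the full path or None.
git_path = which("git")
if git_path is None:
    print("git not found on PATH")
else:
    print("git resolved to", git_path)

# Restrict the lookup to a custom search path, keeping the default mode
# (os.F_OK | os.X_OK, i.e. the file must exist and be executable).
custom = os.pathsep.join(["/usr/local/bin", "/usr/bin"])
print("python3 on custom path:", which("python3", path=custom))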
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and\n not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n\n def _access_check(fn, mode):\n return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext.lower() for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n\n return None", "def Which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # Short circuit. 
If we're given a full path which matches the mode\n # and it exists, we're done here.\n if _access_check(cmd, mode):\n return cmd\n\n path = (path or os.environ.get(\"PATH\", os.defpath)).split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]\n # If it does match, only test that one, otherwise we have to try\n # others.\n files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for pathcomp in path:\n pathcomp = os.path.normcase(pathcomp)\n if not pathcomp in seen:\n seen.add(pathcomp)\n for thefile in files:\n name = os.path.join(pathcomp, thefile)\n if _access_check(name, mode):\n return name\n return None", "def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None", "def which(cmd):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n cmd_path = os.path.join(path, cmd)\n if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):\n return cmd_path\n\n return None", "def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None", "def 
which(cls, cmd):\n abs_path_cmd = None\n if sys.version_info >= (3, 3):\n abs_path_cmd = shutil.which(cmd)\n else:\n abs_path_cmd = find_executable(cmd)\n return abs_path_cmd", "def Which(binary, path=None):\n if path is None:\n path = os.environ.get('PATH', '')\n for p in path.split(':'):\n p = os.path.join(p, binary)\n if os.access(p, os.X_OK):\n return p\n return None", "def which(cmd, path=None):\n if path is None:\n path = os.environ[\"PATH\"].split(os.pathsep)\n\n for prefix in path:\n filename = os.path.join(prefix, cmd)\n executable = os.access(filename, os.X_OK)\n is_not_directory = os.path.isfile(filename)\n if executable and is_not_directory:\n return True\n\n return False", "def _FindExecutableOnPath(executable, path, pathext):\n\n if isinstance(pathext, six.string_types):\n raise ValueError(\n \"_FindExecutableOnPath(..., pathext='{0}') failed \"\n \"because pathext must be an iterable of strings, but got \"\n \"a string.\".format(pathext)\n )\n\n # Prioritize preferred extension over earlier in path.\n for ext in pathext:\n for directory in path.split(os.pathsep):\n # Windows can have paths quoted.\n directory = directory.strip('\"')\n full = os.path.normpath(os.path.join(directory, executable) + ext)\n # On Windows os.access(full, os.X_OK) is always True.\n if os.path.isfile(full) and os.access(full, os.X_OK):\n return full\n return None", "def find_on_path(command):\n\n if 'PATH' not in os.environ:\n return False\n\n path = os.environ['PATH']\n for element in path.split(os.pathsep):\n if not element:\n continue\n filename = os.path.join(element, command)\n if os.path.isfile(filename) and os.access(filename, os.X_OK):\n return True\n\n return False", "def cmdGetPath(self, cmd, die=True):\n rc, out, err = self.prefab.core.run(\"which %s\" % cmd, die=False, showout=False, profile=True)\n if rc > 0:\n if die:\n raise j.exceptions.RuntimeError(\"Did not find command: %s\" % cmd)\n else:\n return False\n return out.split(\"\\n\")[-1]", "def _find_extractor_by_cmd(extractor_cmd):\n if not extractor_cmd:\n return None\n if Path(extractor_cmd).is_file():\n return extractor_cmd\n return shutil.which(extractor_cmd)", "def which(file, env=os.environ):\n if file is None:\n return None\n for path in env.get('PATH', '').split(os.pathsep):\n if path:\n result = os.path.join(path, file)\n if os.path.exists(result):\n return os.path.realpath(result)\n return None", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def FindExecutableOnPath(executable, path=None, pathext=None, allow_extensions=False):\n\n if not allow_extensions and os.path.splitext(executable)[1]:\n raise ValueError(\n \"FindExecutableOnPath({0},...) failed because first \"\n \"argument must not have an extension.\".format(executable)\n )\n\n if os.path.dirname(executable):\n raise ValueError(\n \"FindExecutableOnPath({0},...) 
failed because first \"\n \"argument must not have a path.\".format(executable)\n )\n\n if path is None:\n effective_path = _GetSystemPath()\n else:\n effective_path = path\n effective_pathext = (\n pathext\n if pathext is not None\n else _PlatformExecutableExtensions(platforms.OperatingSystem.Current())\n )\n\n return _FindExecutableOnPath(executable, effective_path, effective_pathext)", "def which(executable):\n def is_executable(path):\n \"\"\"True if path exists and is executable.\"\"\"\n return (os.path.exists(path) and\n not os.path.isdir(path) and\n os.access(path, os.F_OK | os.X_OK))\n\n def normalize(path):\n \"\"\"Return canonical case-normalized path.\"\"\"\n return os.path.normcase(os.path.realpath(path))\n\n def path_list():\n \"\"\"Get executable path list.\"\"\"\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)\n\n def pathext_list():\n \"\"\"Get list of extensions to automatically search.\"\"\"\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)\n\n seen = set()\n\n for path in [normalize(p) for p in path_list()]:\n if path not in seen:\n for ext in [\"\"] + pathext_list():\n full_path = os.path.join(path, executable) + ext\n if is_executable(full_path):\n return full_path\n\n seen.add(path)\n\n return None", "def ensure_file(path, mode):\n assert isinstance(path, Path)\n parent = path.parent()\n assert parent != path, \"Path and parent were the same!\"\n ensure_dir(parent)\n fd = path.open(mode)\n return fd", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def which_bin(cmd):\n cmd = [\"which\", cmd]\n try:\n return stderr_output(cmd).strip().split('\\n')[0]\n except CryptoritoError:\n return None", "def _get_path(self, prompt):\n\n # When input from vim, vim escapes some special characters,\n # so we have to expand them first.\n cwd = vim.eval('expand(getcwd())')\n path = vim.eval('expand(input(\"%s\", \"\", \"file\"))' % prompt)\n if path == None or path == \"\":\n return None\n else:\n return os.path.join(cwd, os.path.expanduser(path))", "def find_executable(executable, path=None):\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n base, ext = os.path.splitext(executable)\n\n if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):\n executable = executable + '.exe'\n\n if not os.path.isfile(executable):\n for p in paths:\n f = os.path.join(p, executable)\n if os.path.isfile(f):\n # the file exists, we have a shot at spawn working\n return f\n return None\n else:\n return executable", "def find_executable(executable, path=None):\n import os, os.path, sys\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n if os.name == 'os2':\n (base, ext) = os.path.splitext(executable)\n # executable files on OS/2 can have an arbitrary extension, but\n # .exe is automatically appended if no dot is present in the name\n if not ext:\n executable = executable + \".exe\"\n elif sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n execname = executable + ext\n if os.path.isfile(execname):\n return execname\n else:\n for p in 
paths:\n f = os.path.join(p, execname)\n if os.path.isfile(f):\n return f\n else:\n return None", "def which(module, mode, exename):\n return _which(exename)", "def find_in_PATH(filename: str):\n if path.isfile(filename):\n return path.normpath(filename)\n\n os_paths = os.environ['PATH'].split(path.pathsep)\n for os_path in os_paths:\n fullpath_file = path.join(os_path, filename)\n if path.isfile(fullpath_file):\n return path.normpath(fullpath_file)\n raise FileNotFoundError(f'could not find {filename}')", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def with_path(target: pathlib.Path, cmd):\n\n def null_handler(signum, frame):\n pass # pragma: no cover\n\n signal.signal(signal.SIGINT, null_handler)\n return subprocess.Popen(cmd, env=_setup_env(target)).wait()", "def get_fh(filename, mode):\n fh = None\n try:\n if mode == 'r':\n fh = open(filename,'r')\n elif mode == 'w':\n fh = open(filename,'w')\n else:\n raise ValueError('Command should be r or w')\n except IOError as e:\n print(e)\n except ValueError as e:\n print(e)\n return fh", "def which(executable):\n if executable.startswith('/'):\n return executable\n\n path = os.environ['PATH'].split(os.pathsep)\n\n for executable_with_ext in _executable_names(executable):\n for entry in path:\n joined = os.path.join(entry, executable_with_ext)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n\n return None", "def which(cls, cmd):\n return get_exe_path(cmd + '.exe')", "def where(self, exe, path=None):\n if exe is None:\n return None\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n\n def is_executable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(exe)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n exe_name = exe + ext\n for p in paths:\n exe_path = os.path.join(p, exe_name)\n if is_executable(exe_path):\n return exe_path\n\n return None", "def _which(executable):\n\n def is_exe(fpath):\n \"\"\"Returns True if the path is an executable\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(executable)\n if fpath:\n if is_exe(executable):\n return executable\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, executable)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def executable(command):\n\n\t\tif os.path.isabs(command):\n\t\t\tif os.access(command, os.X_OK):\n\t\t\t\treturn command\n\t\tfor path in os.environ.get(\"PATH\", []).split(os.pathsep):\n\t\t\tif os.access(os.path.join(path, command), os.X_OK):\n\t\t\t\treturn os.path.join(path, command)\n\t\treturn False", "def which(self, executeable):\n for path in self.path:\n executeable_path = os.path.join(path, executeable)\n if os.path.exists(executeable_path):\n if os.access(executeable_path, os.X_OK):\n return os.path.join(path, executeable)\n\n return executeable", "def _Which(program, paths):\n if sys.platform == 'win32' and not program.lower().endswith('.exe'):\n program += '.exe'\n\n for path in paths:\n candidate = os.path.join(os.path.normpath(path), program)\n if os.path.isfile(candidate):\n return candidate\n\n return 
None", "def path_type(mode: str, docstring: Optional[str] = None, **kwargs) -> type:\n Path._check_mode(mode)\n name = \"Path_\" + mode\n key_name = \"path \" + \"\".join(sorted(mode))\n\n skip_check = get_private_kwargs(kwargs, skip_check=False)\n if skip_check:\n from ._deprecated import path_skip_check_deprecation\n\n path_skip_check_deprecation()\n name += \"_skip_check\"\n key_name += \" skip_check\"\n\n register_key = (key_name, str)\n if register_key in registered_types:\n return registered_types[register_key]\n\n class PathType(Path):\n _expression = name\n _mode = mode\n _skip_check = skip_check\n _type = str\n\n def __init__(self, v, **k):\n super().__init__(v, mode=self._mode, skip_check=self._skip_check, **k)\n\n restricted_type = type(name, (PathType,), {\"__doc__\": docstring})\n add_type(restricted_type, register_key, type_check=_is_path_type)\n\n return restricted_type", "def which(program):\n\n def is_bin(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_bin(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n bin_file = os.path.join(path, program)\n if is_bin(bin_file):\n return bin_file\n\n return None", "def binary_location(cmd, USE_PATH=False):\n if USE_PATH:\n return cmd\n else:\n return os.path.join(BIN_PREFIX, cmd)", "def chkpath(path):\n if os.path.exists(path):\n return os.path.abspath(path)\n else:\n msg = \"{0} does not exist.\".format(path)\n raise argparse.ArgumentTypeError(msg)", "def to_filehandle(fname, flag='r', return_opened=False, encoding=None):\n if is_string_like(fname):\n fh = Path(fname).open(mode=flag)\n opened = True\n elif isinstance(fname, Path):\n fh = fname.open(mode=flag)\n elif hasattr(fname, 'seek'):\n fh = fname\n opened = False\n else:\n raise ValueError('fname must be a pathlib Path, string or file handle')\n if return_opened:\n return fh, opened\n return fh", "def real_which(program):\n which_file = rsvprobe.which(program)\n if which_file:\n return os.path.realpath(which_file)\n else:\n return None", "def get_permission(path):\n return oct(stat.S_IMODE(os.stat(path).st_mode))", "def which(program, program_name):\n fpath, fname = os.path.split(program)\n if fpath:\n if __is_exe__(program):\n return program\n elif (__is_script__(program)):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if __is_exe__(exe_file):\n return exe_file\n logger.error(program_name + \" path = \" + fpath +\n \" not locatable in the path of directory specified\")\n return None", "def find_binary(binary: str, paths=None, fallback=None) -> str:\n\n if os.path.isabs(binary):\n if not (os.path.isfile(binary) and access(binary, os.X_OK)):\n raise CommandNotFound(binary)\n return binary\n\n if paths is None:\n paths = os.environ.get(\"PATH\", \"\").split(\":\")\n\n for path in paths:\n filename = os.path.join(os.path.abspath(path), binary)\n if access(filename, os.X_OK) and os.path.isfile(filename):\n return filename\n\n if fallback is not None:\n return fallback\n\n raise CommandNotFound(binary)", "def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n 
return location", "def getpaths(options, mode):\n\n input_prefix = options.input_prefix\n # Assign year\n if mode == \"year\":\n ymd_vals = [getattr(options, attr)\n for attr in [\"year\", \"month\", \"day\"]]\n year, month, day = map(lambda x: int(x) if x else None, ymd_vals)\n\n return getpaths_fromymd(input_prefix, year, month, day)\n\n elif mode == \"directory\":\n return getpaths_fromdir(input_prefix, options.directory)\n\n elif mode == \"file\":\n with open(options.file) as fh:\n return getpaths_fromfile(input_prefix, fh)\n\n elif mode == \"window\":\n return getpaths_fromwindow(input_prefix,\n int(options.window),\n datetime.datetime.now().date())\n\n elif mode == \"lookback\":\n return getpaths_fromlookback(input_prefix, int(options.lookback))\n\n else:\n raise RuntimeError(\"Incorrect input path generation mode\")", "def chmod(path, mode):\n try:\n st = os.stat(path)\n except OSError:\n return None, None\n\n origMode = fMode = stat.S_IMODE(st.st_mode)\n if isinstance(mode, str):\n parts = [s.strip() for s in mode.split(\",\")]\n for s in parts:\n m = _rModePart.match(s)\n if not m:\n return origMode, -2\n\n role, op, flags = m.groups()\n\n bits = 0\n for f in flags:\n bits |= _bitMap[role+f]\n\n if op == \"+\":\n fMode |= bits\n elif op == \"-\":\n fMode &= ~bits\n else:\n fMode = (fMode & _bitMap[role]) | bits\n else:\n fMode = mode\n\n try:\n os.chmod(path, fMode)\n except OSError:\n return origMode, -1\n\n return origMode, 0", "def which(fname):\n if \"PATH\" not in os.environ or not os.environ[\"PATH\"]:\n path = os.defpath\n else:\n path = os.environ[\"PATH\"]\n\n for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]:\n p = os.path.abspath(p)\n if os.access(p, os.X_OK) and not os.path.isdir(p):\n return p\n\n p = sp.Popen(\"locate %s\" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n (stdout, stderr) = p.communicate()\n if not stderr:\n for p in stdout.decode().split(\"\\n\"):\n if (\n (os.path.basename(p) == fname)\n and (os.access(p, os.X_OK))\n and (not os.path.isdir(p))\n ):\n return p", "def PathType(path_str):\n orig = path_str\n path_str = os.path.expanduser(path_str) # Expand user path if necessary\n path_str = os.path.abspath(path_str)\n\n if os.path.exists(path_str):\n return path_str\n else:\n raise argparse.ArgumentError(message='\"{}\" is not a valid path'.format(orig))", "def which(program):\r\n def is_exe(fpath):\r\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None", "def _cmd_path_lex(line):\n import os\n args = line.split(' ', 1)\n cmd = args.pop(0)\n if len(args) == 0:\n paths = os.environ[\"PATH\"].split(\":\")\n else:\n pathstr = args[0].strip()\n try:\n paths = eval(pathstr)\n except:\n paths = pathstr\n if isinstance(paths, unicode) or isinstance(paths, str):\n if \":\" in paths:\n paths = paths.split(\":\")\n else:\n paths = [paths]\n return cmd, paths", "def path_which(args):\n print(header(\"$PATH Lookup: {}\".format(args.look)))\n loop_fmt = \"{color}{path}\"\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n\n msg = check_exec(part, args.look, args.version)\n if msg:\n print(header(loop_fmt.format(color=color, path=part), '-'))\n print(msg)\n cnt = (cnt + 1) % len(CODES)", 
"def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n return None", "def _find_model(model_chkp_dir, mode='last'):\n\n if mode == 'last':\n file_name = sorted(os.listdir(model_chkp_dir))[-1]\n model_path = os.path.join(model_chkp_dir, file_name)\n\n elif mode == 'best':\n raise NotImplementedError\n\n return model_path", "def get_filetype(filemode):\n mode_funcs = [\n (stat.S_ISREG, \"-\"),\n (stat.S_ISBLK, \"b\"),\n (stat.S_ISCHR, \"c\"),\n (stat.S_ISDIR, \"d\"),\n (stat.S_ISFIFO, \"p\"),\n (stat.S_ISLNK, \"l\"),\n (stat.S_ISSOCK, \"s\"),\n ]\n for pred, val in mode_funcs:\n if pred(filemode):\n return val\n return \"?\"", "def get_path(path, name, ext):\n path_free = os.path.join(path, name + \"_free\" + ext)\n path = os.path.join(path, name + ext)\n if os.path.exists(path_free):\n return path_free\n elif os.path.exists(path):\n return path\n else:\n raise IOError(\"'{f:s}' not found in {p:s}\".format(f=name+ext, p=path))", "def get_command_to_set_search_path():\n \n # Check if already computed\n if _COMMAND_TO_SEARCH_PATH:\n return _COMMAND_TO_SEARCH_PATH[0]\n \n # Get name of the utility\n # In Pyzo it should be present in 'shared'.\n utilCommand = None\n if sys.platform.startswith('win'):\n return \n if sys.platform.startswith('linux'):\n utilname = 'patchelf'\n if sys.platform.startswith('darwin'):\n utilname = 'install_name_tool'\n if True:\n # Try old Pyzo\n utilCommand = os.path.join(sys.prefix, 'shared', utilname)\n if not os.path.isfile(utilCommand):\n utilCommand = utilname\n # Try new Pyzo / anaconda\n utilCommand = os.path.join(sys.prefix, 'bin', utilname)\n if not os.path.isfile(utilCommand):\n utilCommand = utilname\n # Test whether it exists\n try:\n subprocess.check_output(['which', utilCommand])\n except Exception:\n raise RuntimeError('Could not get command (%s) to set search path.' 
% utilCommand)\n \n # Store and return\n _COMMAND_TO_SEARCH_PATH.append(utilCommand)\n return utilCommand", "def which(executable):\n if 'PATH' in os.environ:\n envpath = os.environ['PATH']\n else:\n envpath = os.defpath\n PATH = envpath.split(os.pathsep)\n\n locations = PATH + [\n '/usr/local/bin',\n '/bin',\n '/usr/bin',\n '/usr/local/sbin',\n '/usr/sbin',\n '/sbin',\n ]\n\n for location in locations:\n executable_path = os.path.join(location, executable)\n if os.path.exists(executable_path):\n return executable_path", "def which(program):\n\t# requirements = os\n\tis_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))\n\tfor path in os.environ['PATH'].split(os.pathsep):\n\t\tpath = path.strip('\"')\n\t\texe_file = os.path.join(path, program)\n\t\tif is_exe(exe_file):\n\t\t\treturn exe_file\n\tif is_exe(program):\n\t\treturn os.path.abspath(program)\n\treturn None", "def parse_entity_from_bidspath(path,entity,mode='r2l'):\n entity = entity if '-' in entity else entity + '-'\n # Easier to find it from the tail of the bidspath\n if mode == 'r2l':\n entity_position = path.rfind(entity)\n elif mode == 'l2r':\n entity_position = path.find(entity)\n else:\n raise ValueError('Incorrect usage of the mode argument.')\n\n if entity_position == -1:\n return None\n\n little_path = path[entity_position:]\n\n value = re.search('%s(.*?)%s' % ('-', '_'), little_path,).group(1)\n\n return value", "def which(program):\r\n import os\r\n def is_exe(fpath):\r\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n path = path.strip('\"')\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None", "def shutil_which(pgm):\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p", "def shutil_which(pgm):\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p", "def which(program):\n\n def is_exe(fpath):\n \"\"\"\n Return True is the fpath exists and is executable. 
This is needed since\n executables are specifed in the JSON files, but not the path to them.\n The executables may be in different locations based on which PC is\n running this.\n \"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def _get_config_path(config_arg: Optional[str]) -> Path:\n if config_arg:\n config_file = Path(config_arg)\n elif os.environ.get(ENV_VAR_FOR_CONFIG_FILE_PATH):\n config_file = Path(os.environ[ENV_VAR_FOR_CONFIG_FILE_PATH])\n else:\n config_file = None\n\n if not config_file or not config_file.is_file():\n logging.fatal(f\"Config file not found: {config_file}\")\n sys.exit(1)\n return config_file", "def which(name):\n # Inspired by https://twistedmatrix.com/trac/browser/tags/releases/\n # twisted-8.2.0/twisted/python/procutils.py\n # pylint: disable=W0141\n result = []\n path = os.environ.get('PATH', None)\n if path is None:\n return []\n for pdir in os.environ.get('PATH', '').split(os.pathsep):\n fname = os.path.join(pdir, name)\n if os.path.isfile(fname) and os.access(fname, os.X_OK):\n result.append(fname)\n return result[0] if result else None", "def get_path_to_executable(executable):\n path = shutil.which(executable)\n if path is None:\n raise ValueError(\n \"'{}' executable not found in PATH.\".format(executable))\n return path", "def find_executable(binary):\n\n\tfor syspath in os.environ.get('PATH', default_path).split(':'):\n\t\tif os.path.exists(os.path.join(syspath, binary)):\n\t\t\treturn os.path.join(syspath, binary)\n\n\treturn None", "def which(program):\n import os\n def is_exe(fpath):\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n\n def is_exe(fpath):\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n if not found and sys.platform == \"win32\":\n fpath = fpath + \".exe\"\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n return found\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n logger.debug(\"Found executable: \" + str(program))\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = os.path.expandvars(os.path.expanduser(path)).strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n logger.debug(\"Found executable: \" + str(exe_file))\n return exe_file\n\n return None", "def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError", "def frompath(executable):\n # Based on distutils.spawn.find_executable.\n path = _os.environ.get('PATH', '')\n paths = [\n _os.path.expanduser(item)\n for item in path.split(_os.pathsep)\n ]\n ext = _os.path.splitext(executable)[1]\n exts = ['']\n if _sys.platform == 'win32' or _os.name == 'os2':\n eext = ['.exe', '.bat', '.py']\n if ext not in eext:\n exts.extend(eext)\n\n for ext in exts:\n if not 
_os.path.isfile(executable + ext):\n for path in paths:\n fname = _os.path.join(path, executable + ext)\n if _os.path.isfile(fname):\n # the file exists, we have a shot at spawn working\n return fname\n else:\n return executable + ext\n\n return None", "def whicha(line):\n return _whicha(*_cmd_path_lex(line))", "def find_path(name, path=None, exact=False):\n if path is None:\n path = os.environ.get('PATH', os.defpath)\n if isinstance(path, str):\n dpaths = path.split(os.pathsep)\n else:\n dpaths = path\n candidates = (join(dpath, name) for dpath in dpaths)\n if exact:\n if WIN32: # nocover\n # on WIN32 allow ``name`` to omit the extension suffix by trying\n # to match with all possible \"valid\" suffixes specified by PATHEXT\n pathext = [''] + os.environ.get('PATHEXT', '').split(os.pathsep)\n candidates = (p + ext for p in candidates for ext in pathext)\n candidates = filter(exists, candidates)\n else:\n import glob\n candidates = it.chain.from_iterable(\n glob.glob(pattern) for pattern in candidates)\n\n for candidate in candidates:\n yield candidate", "def installer_path_argument(command: Callable[..., None],\n ) -> Callable[..., None]:\n function = click.argument(\n 'installer',\n type=click_pathlib.Path(\n exists=True,\n dir_okay=False,\n file_okay=True,\n resolve_path=True,\n ),\n )(command) # type: Callable[..., None]\n return function", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None", "def which(exe):\n\n def wrapper(function):\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n if salt.utils.path.which(exe) is None:\n raise CommandNotFoundError(\n \"The '{}' binary was not found in $PATH.\".format(exe)\n )\n return function(*args, **kwargs)\n\n return wrapped\n\n return wrapper", "def _open(path, mode, encoding=None):\n # type: (str, str, Optional[str]) -> typing.IO[Any]\n if mode not in {'r', 'rb', 'rt'}:\n raise ValueError('r')\n _, ext = os.path.splitext(path)\n ext = ext.lower()\n if ext == \".gz\":\n return gzip.open(path, mode, encoding=encoding)\n elif ext == \".bz2\":\n return bz2.open(path, mode, encoding=encoding)\n elif ext == \".xz\":\n return lzma.open(path, mode, encoding=encoding)\n elif ext == \".zip\":\n arh = zipfile.ZipFile(path, 'r')\n filelist = arh.infolist()\n if len(filelist) == 1:\n filename = filelist[0]\n zinfo = arh.getinfo(filename)\n f = arh.open(zinfo.filename, 'r')\n if 't' in mode:\n f = io.TextIOWrapper(f, encoding=encoding)\n return f\n else:\n raise ValueError(\"Expected a single file in the archive.\")\n else:\n return open(path, mode, encoding=encoding)", "def get_path(self, p_path):\n path_a = False\n path_a = path.abspath(p_path) # .abs\n path_p = {pattr: False for pattr in ['isDir', 'isFile', 'isLink', 'isMount',\n 'parent', 'item']}\n path_e = True if path.exists(path_a) else False # .exists\n if path_e:\n path_p['isDir'] = True if path.isdir(p_path) else path_p['isDir']\n path_p['isFile'] = True if path.isfile(p_path) else path_p['isFile']\n path_p['isLink'] = True if path.islink(p_path) else path_p['isLink']\n path_p['isMount'] = True if path.ismount(p_path) else path_p['isMount']\n path_a = path.normpath(path.normcase(path.realpath(path_a))) # .abs\n v_parts = path.split(path_a)\n path_p['parent'] = v_parts[0]\n path_p['item'] = v_parts[1]\n\n fpath = namedtuple('fpath', 'rqst exists abs isDir isFile isLink isMount parent item')\n return 
fpath(p_path, path_e, path_a, path_p['isDir'], path_p['isFile'], path_p['isLink'],\n path_p['isMount'], path_p['parent'], path_p['item'])", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def SearchPath(name):\n search_path = os.getenv('PATH', os.defpath).split(os.pathsep)\n for directory in search_path:\n if directory:\n path = os.path.join(directory, name)\n if os.path.isfile(path) and os.access(path, os.X_OK):\n return path\n return None", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def which(program):\n def is_exe(fpath):\n \"\"\"Determine wether file at given path is executable.\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def findInPath (exe):\n\n\tfor dirname in os.environ['PATH'].split (os.path.pathsep):\n\t\tpossible = os.path.join (dirname, exe)\n\t\tif os.path.isfile (possible):\n\t\t\treturn possible\n\n\t# Not found\n\traise NotFoundInPathException (exe)", "def which(program):\n\n\tfpath, fname = os.path.split(program)\n\tif fpath:\n\t\tif is_exe(program):\n\t\t\treturn program\n\telse:\n\t\tfor path in os.environ[\"PATH\"].split(os.pathsep):\n\t\t\tpath = path.strip('\"')\n\t\t\texe_file = os.path.join(path, program)\n\t\t\tif is_exe(exe_file):\n\t\t\t\treturn exe_file\n\n\treturn None", "def stat_mode_to_index_mode(mode: int) -> int:\n if S_ISLNK(mode): # symlinks\n return S_IFLNK\n if S_ISDIR(mode) or S_IFMT(mode) == S_IFGITLINK: # submodules\n return S_IFGITLINK\n return S_IFREG | (mode & S_IXUSR and 0o755 or 0o644) # blobs with or without executable bit", "def open_and_force_mkdir(path, mode):\n force_mkdir(os.path.dirname(path))\n return open(path, mode)", "def get_output_mode(output, mode):\n if mode != 'auto':\n try:\n return switch_output_mode_auto[mode]\n except KeyError:\n raise ValueError('Mode \"{}\" is not supported.')\n\n extension = output.split('.')[-1]\n try:\n return switch_output_mode[extension]\n except KeyError:\n return intermediary_to_schema", "def _which(program):\n # Borrowed from:\n # https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python\n # XXX May need more porting to handle .exe extensions on Windows\n\n fpath, _fname = os.path.split(program)\n if fpath:\n if _is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if _is_exe(exe_file):\n return exe_file\n\n return None", "def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary )\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None", "def qualify_full_filepath(filename, path=None):\n filepath = os.path.join(path or \"\", filename)\n if not os.path.isfile(filepath):\n raise OSError(f\"No available file found at: {filename}.\")\n return filepath", "def 
open_from_opt(opt, mode='r'):\n if opt is None or opt == '-':\n return sys.stdout if 'w' in mode else sys.stdin\n return open(opt, mode)", "def _prog(shell_cmd):\n cmd = _which(shell_cmd)\n return os.path.basename(cmd) if cmd else None", "def find_program_file():\n value = sys.argv[0]\n msg = \"Failed to determine absolute pathname of program!\"\n if not os.path.isabs(value):\n candidates = which(value)\n if not candidates:\n raise Exception(msg)\n value = candidates[0]\n if not os.access(value, os.X_OK):\n raise Exception(msg)\n return value", "def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode", "def svn_fs_check_path(*args):\r\n return _fs.svn_fs_check_path(*args)", "def _get_open_func(cls, filename: str, mode: str) -> Tuple[OpenFunc, str]:\n if filename.endswith('.gz'):\n return gzip.open, cls._map_disk_type('{}t', '{}').format(mode)\n else:\n return open, cls._map_disk_type('{}', '{}b').format(mode)" ]
[ "0.77106726", "0.7453694", "0.727641", "0.6943331", "0.6540709", "0.639578", "0.60801244", "0.59047854", "0.5880955", "0.5878199", "0.57527226", "0.5732597", "0.5709009", "0.5672643", "0.5576627", "0.5575957", "0.55535084", "0.5497285", "0.5452454", "0.5445605", "0.5425212", "0.5414241", "0.5405033", "0.53908074", "0.537771", "0.53698206", "0.53634244", "0.53582907", "0.53363436", "0.53197527", "0.53192466", "0.5273144", "0.5269621", "0.52603143", "0.5252459", "0.5249297", "0.5208998", "0.5207473", "0.5203305", "0.5192619", "0.5176062", "0.51691127", "0.51550126", "0.5153186", "0.51469207", "0.51456505", "0.51349354", "0.51329345", "0.51229084", "0.51195014", "0.5117286", "0.51056826", "0.50984216", "0.50984216", "0.509319", "0.5064819", "0.5050863", "0.5039684", "0.5036522", "0.50299567", "0.5028975", "0.50231", "0.49973202", "0.49927208", "0.49927208", "0.49887022", "0.49852479", "0.49839827", "0.4982426", "0.4981117", "0.49631062", "0.49588534", "0.4942758", "0.49322993", "0.49268126", "0.49263886", "0.49240577", "0.49235287", "0.4920083", "0.49098423", "0.4898495", "0.4897432", "0.4891135", "0.48826635", "0.48822355", "0.48747537", "0.48642045", "0.48541015", "0.48487034", "0.4839627", "0.48240915", "0.48190433", "0.48146042", "0.4814054", "0.4797778", "0.4795018", "0.4764981", "0.47423097", "0.47047678", "0.4690186" ]
0.7460819
1
Parse .data files on the client and server, treating the files as JSON
def parse_datafile(file):
    data = []
    with open(file) as fh:
        for line in fh:
            line = line.rstrip("\n")

            # Turn [] strings into {} to be treated properly as JSON hashes
            if line.startswith('[') and line.endswith(']'):
                line = '{' + line[1:-1] + '}'

            if line.startswith("{"):
                data.append(json.loads(line))
            else:
                data.append(line)

    return data
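A minimal usage sketch for the document above (illustrative only, not part of the original record); the filename 'records.data' is a hypothetical example, and the import is added only because parse_datafile relies on json.loads:

import json  # parse_datafile above calls json.loads

# 'records.data' is an assumed example file containing a mix of plain lines
# and bracket- or brace-delimited JSON-ish lines.
records = parse_datafile('records.data')
for entry in records:
    if isinstance(entry, dict):
        print('JSON hash:', entry)   # lines starting with "{" or rewritten from "[...]"
    else:
        print('raw line:', entry)    # non-JSON lines are kept verbatim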
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ingest_json_file(request):\n path = save_file(request) \n try:\n with open(path, encoding='utf-8') as f:\n data = json.loads(f.read())\n except Exception as e:\n log.error(log.exc(e))\n return None\n return data", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "def parse_files(files):\r\n return json.dumps(files.split())", "def write_file (data):\n\n req_text= data.text\n json_parsed=json.loads(req_text)\n return json_parsed", "def write_file (data):\n\n req_text= data.text\n json_parsed=json.loads(req_text)\n return json_parsed", "def open_data(filepath):\n\tdata = []\n\twith open(filepath) as f:\n\t data = f.readlines()\n\t data = list(map(json.loads, data)) \n\treturn data", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def _load_json_data(filename):\n\n relative_path = join(\"data\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as data_file:\n return json.loads(data_file.read())", "def data_json(file_data):\n with open(file_data, 'r') as f:\n datastore = json.load(f)\n return datastore", "def data_from_file(filename):\n with open(data_full_filename(filename)) as f:\n return json.loads(f.read())", "def _runParser(self):\n with open(self.var('filePath')) as f:\n return json.load(f)", "def read_json_data(data_path: str):\n f = open(data_path, \"r\")\n return json.load(f)", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def parse_json_data(settings, dataset):\n for directory in dataset: # for directory in list of directories\n directory[\"data\"] = []\n for record in directory[\"rawdata\"]: # each record is the raw JSON data of a file in a directory\n jsonrootpath = get_json_root_path(record)\n globaloptions = get_json_global_options(record)\n #for item in record[\"client_stats\"]:\n # if \"job options\" in item.keys():\n # print(item[\"job options\"][\"iodepth\"])\n process_json_record(settings, directory, record, jsonrootpath, globaloptions)\n #print(\"================================\")\n #print(directory[\"data\"])\n #for directory in dataset:\n # for item in directory[\"data\"]:\n # print(item[\"iodepth\"])\n directory[\"data\"] = sort_list_of_dictionaries(directory[\"data\"])\n return dataset", "def input_data(self):\n return read_json(self.file_path)", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def parse_json_from_path(self, infile_path):\r\n with open(infile_path, 'r') as infile:\r\n return self.parse_from_json(infile)", "def read_data(self):\n if self._file.is_file():\n try:\n self._data = read_json_file(self._file)\n except (OSError, json.JSONDecodeError):\n _LOGGER.warning(\"Can't read %s\", self._file)\n self._data = {}\n\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse %s: %s\",\n self._file, humanize_error(self._data, ex))\n\n # Reset data to default\n _LOGGER.warning(\"Reset %s to default\", self._file)\n self._data = self._schema({})", "def discovery_data(request):\n file = request.param\n p = Path(file)\n if not p.is_absolute():\n p = Path(__file__).parent / \"fixtures\" / file\n\n with open(p) as f:\n return json.load(f)", "def load_jsondata_from_file(path, ftype=None):\n print(\"loading %s\" % path)\n t0 = time.time()\n data = []\n with open(path, 'r') as f:\n if ftype == None:\n for line in f:\n item = json.loads(line)\n data.append(item)\n elif ftype == 
'user':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'friends': item['friends']})\n elif ftype == 'business':\n for line in f:\n item = json.loads(line)\n data.append({'business_id': item['business_id'], 'categories': item['categories'], 'city': item['city']})\n elif ftype == 'review':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'business_id': item['business_id'], 'stars': item['stars']})\n print(\"loading %s done, time cost %.2f\" % (path, time.time()-t0))\n return data", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def main_data():\n with open(MAIN_JSON, 'r') as f:\n main_data = json.load(f)\n return main_data", "def read(self):\n retdata = []\n try:\n with open(self.filename) as json_file:\n data = json.load(json_file)\n retdata.append(data[\"server1\"])\n retdata.append(data[\"webhook1\"])\n retdata.append(data[\"server2\"])\n retdata.append(data[\"webhook2\"])\n except FileNotFoundError:\n retdata.append(\"No json file found!\")\n return retdata", "def parse_json(file_path):\n j_data = open(file_path, 'r')\n s_data = \"\"\n for line in j_data:\n s_data += line\n j_data.close()\n s_data.strip()\n p_data = json.loads(s_data)\n return p_data", "def load_data(fname):\n # load the json in gzip format\n with gzip.open(fname, 'r') as fin:\n data = json.loads(fin.read().decode('utf-8'))\n return data", "def parse_cluedata(input_path: str):\n\n\twith open(input_path, 'r') as clue_backup:\n\t\tjson_structure = json.loads(clue_backup.read())\n\n\tassert 'data' in json_structure, \".cluedata file does not have the expected structure\"\n\n\treturn json_structure", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def read_server_file():\n try:\n with open(\"servers.json\", \"r\") as server_file:\n try:\n data = json.load(server_file)\n except:\n data = []\n return data\n except IOError:\n return []", "def json_data(self):\n return_data = []\n\n # Loop over num files\n for ifile in range(len(self.data)):\n\n # Loop over steps\n file_return_data = {}\n for istep in range(len(self.data[ifile])):\n file_return_data[istep] = {}\n for key in self.data[ifile][istep]:\n file_return_data[istep][key] = self.data[ifile][istep][key].tolist()\n\n return_data.append(file_return_data)\n\n return return_data", "def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def parse_json_files(self, filter_fn=None):\n def filter_function(f):\n return f is not None and f.endswith(\".json\")\n if not filter_fn:\n filter_fn = filter_function\n files = self.filter_files(None,filter_fn)\n dicts = []\n for f in files:\n with open(f) as fh:\n dicts.append(json.load(fh))\n return dicts", "def parse_json_format(file_path=None):\n is_file_res = check_is_file(file_path)\n if is_file_res['result']:\n with open(file_path) as f:\n if f.readline().strip().startswith('['):\n return generate_response(result='jsonl')\n return generate_response(result='json')\n else:\n return 
is_file_res", "def read_file(self):\n data = []\n with open(self.file_path, \"r\") as r_f:\n data = json.loads(r_f.read())\n\n return data", "def save(self,json_data):\n data_node_dir = DATA_NODE_DIR % (self.server_id,)\n with open(json_data['file']['path'], 'r') as f_in:\n for item in json_data['server_chunks'][self.server_id]:\n chunk = item['chunk']\n count = item['count']\n offset = item['offset']\n f_in.seek(offset, 0)\n content = f_in.read(count)\n with open(data_node_dir + os.path.sep + chunk, 'w') as f_out:\n f_out.write(content)\n f_out.flush()\n return {'command':'save','finish':1}", "def get_json_data(file_path: str) -> list:\n with open(file_path, 'r') as file:\n return json.load(file)", "def _read_json_file(self):\n with open(self.subcfgfilename) as json_file:\n json_string = json_file.read()\n json_data = json.loads(json_string)\n return(json_data)", "def parse_data(fp):\n pass", "def read_json(self):\n list_filepath_bsn_test = []\n\n try:\n for json_file in tqdm(self.file_path_list, desc='Loading in json files'):\n with open(json_file) as f:\n data = json.load(f)\n\n # Get out: filepath, _bsn, text, offsets and main text\n list_filepath_bsn_test.append([data['filepath'], data['fields']['_belanghebbende_bsn'], data['text'],\n data['offsets']['main_text']])\n except:\n print(\"Faulty json file: ,\", json_file)\n\n # Make it into a data_frame\n self.data = pd.DataFrame(list_filepath_bsn_test)\n headers = ['path', 'bsn', 'text', 'offsets']\n self.data.columns = headers", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def get_data_json(filename):\n with open(filename) as f:\n data = json.load(f)\n return data", "def parse_data(self):\n data = {}\n content = self.headers.get('content-type', None)\n if content:\n ctype, pdict = parse_header(content)\n if ctype == 'application/json':\n length = int(self.headers['content-length'])\n data = json.loads(self.bytes_to_str(self.rfile.read(length)))\n return data", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def parse():\n with open('src/wator/Properties.json') as data_file:\n data = json.load(data_file)\n return data", "def _load_data_from_file(self, input_file_path):\n with FileOrBufferHandler(input_file_path, 'r', \n encoding=self.file_encoding) as input_file:\n try:\n data = json.load(input_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n input_file.seek(0)\n data = data_utils.read_json(\n data_generator=input_file,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n return data", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def read_file(path):\n\n items = list()\n with open(path, 'r') as raw_data:\n for line in raw_data:\n line = json.loads(line)\n\n items.append(line)\n return items", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from 
directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for file in files:\n all_files.append(os.path.abspath(file))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def process_data(cur, conn, filepath, func):\n\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def load_data(path):\r\n\r\n _, ftype = os.path.splitext(path) #get fname and extension\r\n\r\n if os.path.isfile(path):\r\n with open(path) as f:\r\n\r\n if ftype == \".json\" or ftype == \".geojson\": #handle json\r\n data = json.load(f)\r\n # print(data)\r\n return data\r\n\r\n elif ftype == \".csv\": #handle csv with csv reader\r\n with open(path, newline ='') as csvfile:\r\n data = csv.DictReader(csvfile)\r\n return list(data)\r\n\r\n else:\r\n print(\"neither json or csv\")\r\n return None", "def set_data_from_json(self, filename):\n with open(filename, 'r') as f:\n self.data = json.load(f, object_pairs_hook=OrderedDict)", "def process_data(*args, **kwargs):\n\n filepath = kwargs[\"filepath\"]\n func = kwargs[\"func\"]\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(datafile)\n print('{}/{} files processed.'.format(i, num_files))", "def read_data(file_name):\n file_path = pathlib.Path(__file__).parent / f'data/{file_name}'\n try:\n with open(file_path) as file:\n data = json.load(file)\n except (FileNotFoundError, json.JSONDecodeError) as e:\n # json decode error happens when file is empty\n data = []\n with open(file_path, 'w') as file:\n json.dump(data, file)\n logger.error(f'New empty file created. 
Error: {e}')\n\n return data", "def deserialize(self, data):", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = 
mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def process_file(filename, items, folders):\n filename_abs = os.path.join(ROOT_DATA, filename)\n if os.path.isfile(filename_abs):\n root = ROOT.TFile.Open(filename_abs, \"READ\")\n if not root:\n print(\"File '%s' is not a root file\" % filename_abs)\n return None\n json_items = {}\n for item in items:\n json_item = process_item(root, item)\n if json_item:\n json_items[item] = json_item\n json_folders = {}\n for folder in folders:\n if folder != \"\":\n json_folder = process_folder(root, folder)\n if json_folder:\n json_folders[folder] = json_folder\n return {\"root\": filename, \"items\": json_items, \"trees\": json_folders}\n else:\n print(\"File '%s' does not exists\" % filename_abs)\n return None", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been saved.')\n else:\n log.info('USDA data file already exists.')", "def _raw_read(self, filepath, dirpath=None):\n self.json_dict = super().read(filepath)\n return self.json_dict", "def test_data_parse_vanilla_json(self):\n lines = ['{\"a\": \"val\", \"b\": \"val2\"}']\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual({\"a\": \"val\", \"b\": \"val2\"}, dat)", "def read_json_file(self, fname):\n return {}", "def process_data(cur, conn, filepath, func):\r\n # get all files matching extension from directory\r\n all_files = []\r\n for root, dirs, files in os.walk(filepath):\r\n files = glob.glob(os.path.join(root, '*.json'))\r\n for f in files:\r\n all_files.append(os.path.abspath(f))\r\n\r\n\r\n\r\n # iterate over files and process\r\n for datafile in all_files:\r\n func(cur, datafile) ######### de function zy procces song file bta5od l filepath w currsor\r\n conn.commit()\r\n\r\n return all_files", "def process_data(cur, conn, filepath, func):\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n \n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n \n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def load_data(filepath=None):\r\n if filepath is None:\r\n filepath = LATEST_DATA_SET_PATH\r\n\r\n with open(filepath) as file:\r\n return json.load(file)", "def extract_data(file_name=default_file_name):\n fille = abs_path(file_name)\n try:\n jfile = open(fille)\n except FileNotFoundError:\n create_file(fille)\n jfile = open(fille)\n data = json.load(jfile)\n if(not 'Students' in data.keys()):\n create_file(fille)\n jfile = open(fille)\n data = json.load(jfile)\n return data", "def get_json_files_data(path, min = 
1):\n\n json_files = find_files(path, \"json\", min)\n json_data = dict()\n\n print(\"===========================================\")\n print(\"= Converting JSON data into Python object =\")\n print(\"===========================================\")\n i = 0\n for file in json_files:\n base = os.path.basename(file) # name with extension (ex. 'file.json')\n id = os.path.splitext(base)[0] # name without extension (ex. 'file') in this case, the names are the trip ids\n json_data[id] = json.load(open(file)) # get the json data as a python dict\n printrp('( ' + str(i) + ' / ' + str(len(json_files) - 1) + ' )') if found_CmdPrinter else print(i)\n i += 1\n\n print('( ' + str(i-1) + ' / ' + str(len(json_files) - 1) + ' )')\n return json_data", "def get_data(data_type):\n json_list = list()\n for source in data_type.value:\n json_list += open_json_file(source)\n return json_list", "def collect_data():\n mapping = {'nginx': Nginx,\n 'apache': Apache,\n 'server': Server,\n 'buildout': Buildout}\n with utils.cd(utils.displayer_dir()):\n for dirpath, dirnames, filenames in os.walk('.'):\n # server_id = dirpath\n for json_file in [f for f in filenames if f.endswith('.json')]:\n kind = json_file.split('___')[0]\n filepath = os.path.join(dirpath, json_file)\n logger.debug(\"Loading info from %s\",\n os.path.abspath(filepath))\n json_content = open(filepath).read()\n klass = mapping[kind]\n obj = klass(json_content)\n data[kind][obj.id.lower()] = obj\n # Link buildouts and nginx sites.\n for nginx in data['nginx'].values():\n buildout_id = nginx.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n nginx.buildout = buildout\n buildout.site = nginx\n # Link buildouts and apache sites.\n for apache in data['apache'].values():\n buildout_id = apache.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n apache.buildout = buildout\n buildout.site = apache\n # Link buildouts+sites with servers.\n for kind in ['nginx', 'apache', 'buildout']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n if hostname is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n else:\n obj.server = server\n if kind == 'nginx' or kind == 'apache':\n server.sites.append(obj)\n elif kind == 'buildout':\n server.buildouts.append(obj)\n # Link nginx gunicorn ports with servers.\n for kind in ['nginx']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n port = obj.data.get('proxy_port')\n try:\n port = int(port)\n except:\n pass\n if hostname is not None and port is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n continue\n server.ports[port] = obj", "def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()", "def _read_data(self) -> None:\n raw_data = self.__mmap[:].decode('ascii').rstrip('\\0')\n self.__data = json.loads(raw_data)", "def process_data(cur, conn, filepath, func):\r\n\r\n # get all files matching 
extension from directory\r\n all_files = []\r\n for root, dirs, files in os.walk(filepath):\r\n files = glob.glob(os.path.join(root,'*.json'))\r\n for f in files :\r\n all_files.append(os.path.abspath(f))\r\n\r\n # get total number of files found\r\n num_files = len(all_files)\r\n print('{} files found in {}'.format(num_files, filepath))\r\n\r\n # iterate over files and process\r\n try:\r\n for i, datafile in enumerate(all_files, 1):\r\n func(cur, datafile)\r\n conn.commit()\r\n print('{}/{} files processed.'.format(i, num_files))\r\n except Exception as e:\r\n print(e)", "def read(self,json_data):\n read_path = (DATA_NODE_DIR % (self.server_id,)) + os.path.sep + json_data['command']['read']['chunk']\n with open(read_path, 'r') as f_in:\n f_in.seek(json_data['command']['read']['offset'])\n content = f_in.read(json_data['command']['read']['count'])\n print(content)\n return {'command':'read','finish':1}", "def load_json(directory=None):\n \n if directory:\n chdir(directory)\n with open('memedPost_json_data.txt') as json_data:\n data = load(json_data)\n return data\n else:\n chdir(curdir)\n with open('memedPost_json_data.txt') as json_data:\n data = load(json_data)\n return data", "def load_data(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data", "def process_data(cur, conn, filepath, func):\n\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def load_jsonl_data(file_path):\n data = []\n with jsonlines.open(file_path, \"r\") as reader:\n for line in reader:\n data.append(line)\n\n return data", "def read(path):\n data = []\n with open(path, \"r\", encoding=\"utf-8\") as file:\n for instance in file:\n data.append(json.loads(instance))\n\n return data", "def load(self):\n basepath = os.path.dirname(os.path.abspath(__file__))\n filename = os.sep.join([basepath, c.FOLDER_JSON, c.FILE_GAME_VERSIONS])\n Handler.ALL_VERS_DATA = {} # reset known data; do not retain defunct information\n with open(filename, \"r\") as f:\n data = json.loads( f.read() )\n self.update(data)\n self._updated = False\n #for v,record in iteritems(Handler.ALL_VERS_DATA):\n # print(type(v), v)\n #for k,v in iteritems(record): ", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def _readin_JSON(file):\n\tdef object_decoder(obj):\n\t\t\"\"\"This function is used to properly load the JSON elements into the corresponding classes.\"\"\"\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = 
unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj\n\n\tfp = open(file,'r')\n\tlf = json.load(fp, object_hook=object_decoder)\n\tfp.close()\n\treturn lf", "def __init__(self, filename):\n self._filename = filename\n fp = open(filename)\n self._contents = json.loads(fp.read())\n for key in self._contents.keys():\n #\n # Some .json keys begin with an @ sign, which represents ???.\n # The caller should not have to know which fields have @ signs\n # and which don't. For each key that begins with an @ sign,\n # create a secondary key consisting of the same string without\n # the @ sign, and having the same value.\n if re.search(\"^@\", key):\n secondaryKey = re.sub(\"^@\", \"\", key)\n self._contents[secondaryKey] = self._contents[key]\n self._dataFileName = re.sub(\".json\", \"\", self._filename)\n self._validate()", "def read_data():\r\n\r\n if os.path.isfile(os.getcwd() + \"/www/access_list.txt\") and os.stat(os.getcwd() + \"/www/access_list.txt\").st_size != 0:\r\n data = json.load(open(os.getcwd() + \"/www/access_list.txt\"))\r\n return collections.defaultdict(dict, data)\r\n else:\r\n return collections.defaultdict(dict)", "def deserialize(self):\n with open(os.path.join(self.root_path, self._data_file), 'r') as file:\n data = json.load(file)\n for key, val in data.items():\n self.__dict__[key] = val", "def jsonCreator(raw_data):\r\n tweets_data = []\r\n tweets_file = open(raw_data, \"r\")\r\n for line in tweets_file:\r\n try:\r\n tweet = json.loads(line)\r\n tweets_data.append(tweet)\r\n except:\r\n continue\r\n return tweets_data", "def _remoteloadjson(path: str) -> JSONType:\n return json.loads(request.urlopen(path).read())", "def __load_data(data_loc):\n if not data_loc.endswith('.json'):\n raise ValueError('data_loc must be a json file location.')\n with open(data_loc, 'rb') as f:\n return json.load(f)", "def __init__(self, data):\n\t\tassert isinstance(data, str), \"Data location must be provided in type 'str'!\"\n\t\t\n\t\t# load the location provided\n\t\tdata = json.loads(open(data).read())\n\n\t\t# check for correct format\n\t\tassert isinstance(data, list), \"Data must be of type 'list'!\"\n\n\t\tfor element in data:\n\t\t\tassert isinstance(element, dict), \"Each element of data must be of type 'dict'!\"\n\n\t\tself.data = data", "def get_file_data_source(filename):\n def players_from_file():\n with open(filename, 'r') as f:\n json_str = '\\n'.join(f.readlines())\n return parse_players_json(json_str)\n return players_from_file", "def json_data_loader(path):\n res = open(path, 'r').read()\n logging.info(\"Loading file using a pyspark.read.json\")\n data_rdd = Spark.instance.sc().parallelize([res])\n return Spark.instance.spark().read.json(data_rdd)", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def process_data(cur, conn, filepath, func):\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n for i, datafile in 
enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{} files processed.'.format(num_files))", "def process_data(cur, conn, filepath: str, func: Callable) -> None:\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, \"*.json\"))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print(\"{} files found in {}\".format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print(\"{}/{} files processed.\".format(i, num_files))", "def import_file(self):\n self.inputdata = json.load(self.infile)\n self.outputdata = self.inputdata\n self.logger.info('Json file Loaded')\n self.logger.debug(u'JSON:{d}'.format(d=self.inputdata))", "def _parse_json(model, f_name):\n # get the word index dictionary corresponding to the feature model type\n if model == \"baseline\":\n word_dict = _parse_word_dict(\"baseline_dict.txt\")\n elif model == \"hashing\":\n word_dict = _parse_word_dict(\"hashing_dict.txt\")\n elif model == \"cluster\":\n word_dict = _parse_word_dict(\"cluster_dict.txt\")\n else:\n error(\"Unknown model type %s\" % model)\n\n if os.path.isfile(f_name):\n if _svm:\n model += \"svm\"\n out = open(\"datasets/%s_%s.txt\" % (f_name[f_name.rfind(\"/\") + 1:].split(\".\")[0], model), \"w\")\n with open(f_name) as f:\n for line in f:\n obj = json.loads(line)\n txt = obj[\"text\"]\n rat = obj[\"stars\"] if \"stars\" in obj else 0\n out.write(\"%d \\t\" % rat)\n features = []\n for t in _extract(txt):\n if t in word_dict:\n while len(features) <= word_dict[t]:\n features.append(0)\n features[word_dict[t]] += 1\n for i, c in enumerate(features):\n if c == 0:\n continue\n if _svm:\n i += 1\n out.write(\"%d:%d \" % (i, c))\n out.write(\"\\n\")\n out.close()\n else:\n error(\"parse json - not a file: %s\" % f_name)", "def readFile(filename):\n lines = []\n f = open(filename, mode='r', buffering=1024)\n for json_data in f:\n # json_data = json_data.replace('\"', '\\\\\"')\n lines.append(json.loads(json_data))\n f.close()\n return lines", "def get_json(filename):\n with open(filename) as f:\n file_content = f.read()\n data = json.loads(file_content)\n return data", "def load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return data" ]
[ "0.6873522", "0.660964", "0.6609381", "0.6535058", "0.6535058", "0.64858943", "0.6374787", "0.63390714", "0.63368833", "0.62513703", "0.6144247", "0.61405396", "0.61333793", "0.6131228", "0.61294943", "0.6124854", "0.61158955", "0.6082589", "0.6079711", "0.604982", "0.6040914", "0.602779", "0.5977142", "0.59738696", "0.59664994", "0.5963698", "0.5948004", "0.5907847", "0.590754", "0.589776", "0.5884599", "0.586411", "0.5859941", "0.5841533", "0.58412874", "0.58368796", "0.5820269", "0.5811912", "0.5811014", "0.57935375", "0.5787169", "0.5774459", "0.57729024", "0.5772886", "0.5756621", "0.5750848", "0.5746404", "0.5746301", "0.57448477", "0.57423806", "0.57395244", "0.5726813", "0.5723568", "0.5723474", "0.5722489", "0.5721029", "0.5721029", "0.57105005", "0.57102454", "0.5707992", "0.5705844", "0.5703994", "0.5703271", "0.56979996", "0.5697071", "0.5693556", "0.5688426", "0.56849396", "0.56833106", "0.56828976", "0.56807405", "0.567678", "0.56724524", "0.566785", "0.5650296", "0.56447107", "0.56369454", "0.56360364", "0.563116", "0.562542", "0.5624296", "0.5622053", "0.56169456", "0.561586", "0.56150216", "0.5610511", "0.55959886", "0.55959713", "0.558342", "0.5582356", "0.5573962", "0.55678505", "0.5558641", "0.5558007", "0.5544693", "0.55352265", "0.55344754", "0.55252254", "0.5521753" ]
0.63090736
9
Create a temporary file that is removed at process exit
def mkstemp(data):
    def rmtemp(name):
        try:
            os.remove(name)
        except OSError:
            pass

    f = tempfile.NamedTemporaryFile(delete=False)
    f.write(data.encode('utf-8') if not isinstance(data, bytes) else data)
    f.close()

    # Ensure removal at end of python session
    atexit.register(rmtemp, f.name)

    return f.name
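A short usage sketch for the document above (illustrative only, not part of the original record); the imports and the example payload string are assumptions:

import atexit    # mkstemp above registers its cleanup hook with atexit
import os        # rmtemp inside mkstemp calls os.remove
import tempfile  # mkstemp above uses tempfile.NamedTemporaryFile

# Write an example payload; the file is removed automatically at interpreter exit
# by the rmtemp hook that mkstemp registers.
path = mkstemp('{"example": true}\n')
with open(path) as fh:
    print('temporary file', path, 'contains:', fh.read())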
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def temporary_file(request):\n file_handle, path = tempfile.mkstemp()\n os.close(file_handle)\n\n def cleanup():\n \"\"\"Remove temporary file.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n request.addfinalizer(cleanup)\n return path", "def create_temporary_file():\n f = NamedTemporaryFile(delete=False)\n return f.name", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def a_temp_file():\n filename = None\n try:\n tmpfile = tempfile.NamedTemporaryFile(delete=False)\n filename = tmpfile.name\n yield tmpfile\n finally:\n if filename and os.path.exists(filename):\n os.remove(filename)", "def makeTempFile(self,perms=0o600,keep=False):\n\n fd=os.open(self.temp,os.O_RDWR|os.O_CREAT|os.O_EXCL|os.O_TRUNC,perms)\n f=os.fdopen(fd,'w+') \n if not keep:\n atexit.register(os.remove,self.temp)\n return f", "def get_temporary_file(prefix=\"apsharvest_test_\", suffix=\"\", directory=\"\"):\n try:\n file_fd, filepath = mkstemp(prefix=prefix,\n suffix=suffix,\n dir=directory)\n os.close(file_fd)\n except IOError, e:\n try:\n os.remove(filepath)\n except Exception:\n pass\n raise e\n return filepath", "def tempfile():\n return mkstemp()[1]", "def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n os.close(fd)\n raise", "def _tempfile(filename):\n return tempfile.NamedTemporaryFile(mode='w',\n dir=os.path.dirname(filename),\n prefix=os.path.basename(filename),\n suffix=os.fsencode('.tmp'),\n delete=False)", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def mkstemp(data):\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n\n # Ensure removal at end of python session\n atexit.register(rmtemp, f.name)\n\n return f.name", "def mktemp(self):\n try:\n fd, fn = tempfile.mkstemp(dir=self.tempdir)\n yield fn\n finally:\n try:\n os.close(fd)\n os.unlink(fn)\n except (OSError, IOError) as e:\n print(\"could not remove temporary file: %s\" % e,\n file=sys.stderr)", "def get_temp_file(self, delete: bool = False, close: bool = False):\n prefix = str(self._tmp_folder / f\"pysimt_{os.getpid()}\")\n t = tempfile.NamedTemporaryFile(\n mode='w', prefix=prefix, delete=delete)\n self.register_tmp_file(t.name)\n if close:\n t.close()\n return t", "def _testfile():\r\n import tempfile\r\n return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())", "def make_tempfile(name):\n open(name, \"w\", encoding=\"utf-8\").close()\n try:\n yield\n finally:\n os.unlink(name)", "def new_temp_file(prefix, suffix):\n f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False)\n f.close()\n return f.name", "def ensure_tmpdir():\n path = mkdtemp('aomi')\n atexit.register(clean_tmpdir, path)\n return path", "def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)", "def managed_temp_object():\n import tempfile\n _, temp_object = tempfile.mkstemp()\n try:\n yield temp_object\n finally:\n os.remove(temp_object)", "def temp_file(suffix=\"\"):\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n if suffix != \"\" and not suffix.startswith(\".\"):\n suffix = \".\" + suffix\n return tempfile.mktemp(suffix=suffix, dir=_temp_dir)", "def tmpfile(tmpdir_factory):\n\n def make(filename):\n fn = 
tmpdir_factory.mktemp(\"data\").join(filename)\n return fn\n\n # fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return make", "def tempfile(mode: str = 'w+b', suffix: str = '', prefix: str = 'tmp'):\n f = tempfile_lib.NamedTemporaryFile(mode, suffix=suffix, prefix=prefix, delete=False)\n yield f\n os.unlink(f.name)", "def _get_temp_path(self):\n handle, path = tempfile.mkstemp()\n # windows can't write to a file that is already open by another process\n # (tests use pipe redirection to a log file)\n os.close(handle)\n return path", "def make_tempfile(content=None):\n fd, tmpfile = mkstemp()\n\n if content:\n write(fd, content)\n\n close(fd)\n return tmpfile", "def tempfile(suffix='', dir=None):\n\n tf = tmp.NamedTemporaryFile(delete=False, suffix=suffix, dir=dir)\n tf.file.close()\n try:\n yield tf.name\n finally:\n try:\n os.remove(tf.name)\n except OSError as e:\n if e.errno == 2:\n pass\n else:\n raise", "def _tmp(self):\n tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',\n suffix='.out',\n delete=False)\n return tmpfn.name", "def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])", "def create_file(path):\n open(path, \"w\").close()", "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] Unexpected error while removing temporary files.\")", "def delete_temporary_files(request, tmp_path_factory):\r\n _tmp_path_factory = tmp_path_factory\r\n\r\n def cleanup():\r\n tmp_path = _tmp_path_factory.getbasetemp()\r\n if pathlib.Path(tmp_path).exists() and pathlib.Path(tmp_path).is_dir():\r\n shutil.rmtree(tmp_path)\r\n\r\n request.addfinalizer(cleanup)", "def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path", "def make_temp_file(dir, data):\n fd, path = tempfile.mkstemp(dir=dir)\n if PY3:\n with os.fdopen(fd, 'w', encoding='utf-8') as f:\n f.write(data)\n else:\n with os.fdopen(fd, 'w') as f:\n f.write(data)\n return path", "def local_file(request):\n file = tempfile.NamedTemporaryFile(delete=False)\n with open(file.name, 'wb') as f:\n f.truncate(1024)\n\n def destroy():\n os.remove(file.name)\n request.addfinalizer(destroy)\n\n return file.name", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def TemporaryFile(mode='w+b',bufsize=_1,suffix='',prefix='tmp',dir=None):\n\tpass", "def tempfile_name():\n ret = os.path.join(tempfile.gettempdir(), 'system_monitor.log')\n if os.access(ret, os.F_OK) and not os.access(ret, os.W_OK):\n print(\"WARNING: Couldn't write to log file {0}: (Permission denied)\".format(ret))\n ret = tempfile.mkstemp(prefix='system_monitor', suffix='.tmp', text=True)\n print(\"Create a new log file: {0}\".format(ret[1]))\n return ret[1]\n\n return ret", "def get_temporary_file(original_file, no_modifications=False):\n if no_modifications:\n handle = open(original_file, 'wb')\n return handle\n\n handle = open(get_temporary_file_name(original_file), 'wb')\n return handle", "def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", 
\"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn", "def mktmp(tmp_str):\n tmp_file_name = None\n\n try:\n with NamedTemporaryFile(mode='w', delete=False) as tmp_file:\n tmp_file.writelines(tmp_str)\n tmp_file.close()\n tmp_file_name = tmp_file.name\n\n yield tmp_file_name\n\n finally:\n if tmp_file_name:\n remove(tmp_file_name)", "def _temporary_resource_file(text, prefix='', suffix=''):\n import tempfile\n\n # Ensure the folder exists\n if not os.path.exists(_temp_path):\n os.mkdir(_temp_path)\n\n try:\n fd, temp_file_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=_temp_path)\n if persist.debug_mode():\n persist.printf(\"{}: created temporary file at {}\".format(p_name, temp_file_path))\n\n try:\n with open(fd, 'w', encoding='utf-8') as f:\n f.write(text)\n temp_file_resource_path = \"/\".join([\"Packages\", _temp_dir_name,\n os.path.basename(temp_file_path)])\n yield temp_file_resource_path\n finally:\n os.remove(temp_file_path)\n except FileNotFoundError:\n _remove_temp_path()\n finally:\n # And remove the folder, if it's empty.\n # Otherwise wait for a \"restart\".\n try:\n os.rmdir(_temp_path)\n except OSError as e:\n if persist.debug_mode():\n persist.printf(\"{}: unable to delete temporary folder; {}\".format(p_name, e))", "def temporary(cls):\n fh, path = tempfile.mkstemp(suffix='.hdf5')\n os.close(fh)\n self = cls(path, 'w')\n self.path = path\n return self", "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "def make_tempdir():\n return mkdtemp()", "def unix_sock_file():\n tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()\n\n yield tmp_sock_fname\n\n os.close(tmp_sock_fh)\n os.unlink(tmp_sock_fname)", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def save_to_tmp(form):\n file = request.files.get('file')\n suffix = os.path.splitext(secure_filename(file.filename))[-1]\n tf = tempfile.NamedTemporaryFile(dir='/tmp', delete=False, suffix=suffix, prefix='lpm_tmp_')\n filepath = tf.name\n tf.close()\n file.save(filepath)\n form.tmpname.data = os.path.basename(filepath)\n return filepath", "def makeTestFile(text):\n f = tempfile.NamedTemporaryFile()\n f.write(text)\n f.flush()\n return f", "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def temporary_directory(request):\n path = tempfile.mkdtemp()\n\n def cleanup():\n \"\"\"Remove temporary directory.\"\"\"\n shutil.rmtree(path)\n\n request.addfinalizer(cleanup)\n\n return path", "def tmp_dir():\n tmpdir = tempfile.mkdtemp()\n yield tmpdir\n shutil.rmtree(tmpdir)", "def _create_temp_files():\n\n fakes = {}\n for i in [\"domain\", \"problem\"]:\n q, fname = tempfile.mkstemp()\n os.close(q)\n fakes[i] = fname\n return fakes", "def create_temp_dir():\n\n try:\n temp_dir = os.getenv('TEMP_FILE_DIR')\n\n if not isinstance(temp_dir, type(None)):\n if os.path.exists(temp_dir):\n LOGGER.warning('Temp Directory Already Exists.')\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n\n LOGGER.debug(f'Temp Dir: {temp_dir}')\n except Exception as ex:\n LOGGER.exception(ex)\n raise ex", "def safe_mkdtemp(**kw):\r\n # proper lock sanitation on fork [issue 6721] would be desirable here.\r\n return _MKDTEMP_SINGLETON.register(tempfile.mkdtemp(**kw))", "def temp_dir(request):\n tmp 
= tempfile.mkdtemp()\n request.addfinalizer(lambda: shutil.rmtree(tmp))\n return tmp", "def mkdtemp_clean(suffix=\"\", prefix=\"tmp\", dir=None):\r\n the_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\r\n atexit.register(cleanup_tempdir, the_dir)\r\n return the_dir", "def get_temp_filename(suffix=None):\n file = tempfile.mkstemp(suffix=suffix or \"\", prefix=\"temp_\", dir=os.getcwd()) # or \"\" for Python 2 compatibility\n os.close(file[0])\n return file[1]", "def CleanUp(self, path):\n try:\n if os.path.exists(path):\n os.remove(path)\n except (OSError, IOError) as e:\n logging.info(\"Failed to remove temporary file %s. Err: %s\", path, e)", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def _clean_up_temporary_files(dataset_dir):\n return", "def handle2temp(handle, suffix=''):\n _fd, fname = tempfile.mkstemp(suffix=suffix)\n os.write(_fd, handle.read())\n os.close(_fd)\n return fname", "def _temporary_directory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)", "def get_temp_file(self, prefix=template, suffix=\"\"):\n ret = NamedTemporaryFile(delete=False, prefix=prefix, suffix=suffix)\n self._tempfiles.append(ret)\n if is_win():\n ret.close()\n return ret", "def delete_tempfile(path):\n try:\n unlink(path)\n except:\n pass", "def assumpfile1():\n afile = tempfile.NamedTemporaryFile(suffix='.json', mode='a', delete=False)\n afile.write(ASSUMP_CONTENTS)\n afile.close()\n # must close and then yield for Windows platform\n yield afile\n if os.path.isfile(afile.name):\n try:\n os.remove(afile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def tempfile_factory(total_length, content_type, filename, file_length):\n # We do need the \"+\" in there for the tempfile module's sake.\n return tempfile.TemporaryFile(\"w+b\")", "def mktemp(self):\n newDir = tempfile.mkdtemp(dir=tempfile.gettempdir())\n self.addCleanup(shutil.rmtree, newDir)\n return newDir", "def temporary_folder():\r\n tempdir = mkdtemp()\r\n try:\r\n yield tempdir\r\n finally:\r\n rmtree(tempdir)", "def create_temporary_file(path):\n # Save current working directory\n current_dir = os.getcwd()\n os.chdir('/')\n\n # Create list of path contents\n path_items = path.split('/')\n\n # Create directories for each string between '/' characters\n for item in path_items[1:-1]:\n if not os.path.exists(item):\n os.makedirs(item)\n os.chdir(item)\n\n # Create a new empty file with the last string in 'path_items'\n open(path_items[-1], 'a').close()\n\n # Change back to original dir\n os.chdir(current_dir)", "def tempdir():\n return mkdtemp()", "def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)", "def tempdir():\n\n # Create a directory and return the path\n return tempfile.mkdtemp()", "def _temp_path(self, uri_like):\n handle, filename = tempfile.mkstemp(suffix=uri_like.split(\"/\")[-1])\n os.close(handle)\n return filename", "def _make_tempdir(self):\n self._clean_tempdir()\n os.mkdir(self._get_tempdir())\n assert os.path.exists(self._get_tempdir())\n rospy.on_shutdown(self._clean_tempdir)\n rospy.on_shutdown(self._clear_cache)", "def test_removed(self):\n path = None\n with TemporaryDirectory() as tmp:\n path = tmp\n self.assertTrue(os.path.isdir(tmp))\n tmpfile = os.path.join(tmp, 
\"a_temp_file\")\n open(tmpfile, \"w\").write(\"data\")\n self.assertTrue(os.path.isfile(tmpfile))\n self.assertFalse(os.path.isdir(path))\n self.assertFalse(os.path.exists(path))", "def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file", "def temp_file_name(suffix):\n return 'tmp%s%s' % (uuid.uuid4(), suffix)", "def getTempFile():\n root = getDir(tempDir)\n for i in range(100):\n path = os.path.join(root, '%d-%d' % (\n os.getpid(), random.randint(100000, 999999)))\n if not os.path.isfile(path):\n return path\n raise NotImplementedError(\"getTempFile() appears to be failing\")", "def TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)", "def touch(path):\n open(path, 'wb').close()", "def test_deleting_local_file_using_file_io_output_file() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(file_location)\n\n # Instantiate the custom OutputFile\n output_file = PyArrowFileIO().new_output(location=f\"{file_location}\")\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(file_location)", "def mktemp(self):\n if self.dryrun:\n return os.path.expandvars(\"$TEMP/build\")\n\n return tempfile.mkdtemp()", "def _TempFilename(name, contents=None):\n temp_dir = tempfile.mkdtemp(prefix=name)\n try:\n path = os.path.join(temp_dir, name)\n if contents is not None:\n with open(path, 'wb') as f:\n f.write(contents)\n yield path\n finally:\n shutil.rmtree(temp_dir, True)", "def create_temp_env_directory():\n return tempfile.mkdtemp(prefix=\"spack-\")", "def copy_to_temp(object):\n temp_file = NamedTemporaryFile(delete=False)\n _copy_and_close(object, temp_file)\n return temp_file.name", "def TempFileDecorator(func):\n def f(self, *args, **kwargs):\n with tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False) as f:\n self.tempfile = f.name\n return func(self, *args, **kwargs)\n\n f.__name__ = func.__name__\n f.__doc__ = func.__doc__\n f.__module__ = func.__module__\n return TempDirDecorator(f)", "def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir", "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def get_temp_dir():\n return tempfile.mkdtemp()", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def _tmpfile(self,filename=None):\n\t\tif self._tmpdir is None:\n\t\t\tself._tmpdir = TemporaryDirectory(prefix=\"jitcxde_\")\n\t\t\n\t\tif filename is None:\n\t\t\treturn self._tmpdir.name\n\t\telse:\n\t\t\treturn path.join(self._tmpdir.name, filename)", "def __exit__(self, exc_type, exc_value, 
traceback):\n logger.debug(\"Cleaning up temporary directory: %s\", self.temporary_directory)\n shutil.rmtree(self.temporary_directory)\n del self.temporary_directory", "def create_temporary_image(image):\n\n temp = tempfile.NamedTemporaryFile()\n temp.write(image)\n temp.seek(0)\n\n return temp", "def classCleanup(cls):\n cls.RemoveTempFile(SettingsCommandTestCase.output_file_name)", "def _mkdtemp(*args, **kwargs):\n import tempfile\n dir_name = tempfile.mkdtemp(*args, **kwargs)\n import atexit, shutil\n ignore_errors = True\n atexit.register(shutil.rmtree, dir_name, ignore_errors)\n return dir_name", "def tmp_dir(data_dir):\n tmp_dir = os.path.join(data_dir, 'manorm_tmp_output')\n yield tmp_dir\n shutil.rmtree(tmp_dir)", "def write_tmp_patch(diff, filename=None):\n if not filename:\n prefix = 'cugit-'\n suffix = '-patch'\n filename = mkstemp(suffix, prefix)[1]\n with open(filename, 'w') as f:\n f.write(diff)\n return filename", "def makeTempDir(self):\n try:\n os.mkdir(self.temp_dir)\n except FileExistsError:\n pass", "def safe_write_file(self, fn, text):\n fd, tmpfn = mkstemp(dir=self.temp_dir)\n with open(fd, 'wt') as f:\n f.write(text)\n # https://stackoverflow.com/a/2333979\n f.flush()\n os.fsync(f.fileno())\n os.rename(tmpfn, fn)", "def unicon_logger():\n temp_path = tempfile_name()\n _logger = logging.getLogger()\n LOGGING_CFG['handlers']['file']['filename'] = temp_path\n dictConfig(LOGGING_CFG)\n\n return _logger", "def mktempdir(delete_on_exit=True):\n tmpdir = tempfile.mkdtemp(\"idstools\")\n if delete_on_exit:\n atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)\n return tmpdir" ]
[ "0.7820664", "0.781029", "0.74709785", "0.74351716", "0.7366692", "0.72586644", "0.724302", "0.72234035", "0.7195028", "0.71175826", "0.7109041", "0.7028888", "0.7014582", "0.6984096", "0.6944709", "0.6938682", "0.6842143", "0.683967", "0.6832244", "0.6778418", "0.67015445", "0.66710734", "0.66690516", "0.66628313", "0.66547686", "0.6583", "0.65744597", "0.65698636", "0.6543013", "0.6538334", "0.6509781", "0.6492787", "0.6462833", "0.6459533", "0.64575154", "0.6455276", "0.643682", "0.64364547", "0.6423028", "0.6422325", "0.64093834", "0.6392037", "0.63717425", "0.63584185", "0.63510346", "0.6340489", "0.63386214", "0.633827", "0.632515", "0.62637407", "0.6256638", "0.62534434", "0.6237327", "0.6232651", "0.6228704", "0.62270296", "0.62157613", "0.62145156", "0.61991554", "0.61781967", "0.6175722", "0.6170868", "0.6166908", "0.6163603", "0.61614823", "0.6146182", "0.6141522", "0.61353016", "0.6134051", "0.6133073", "0.61182153", "0.6108651", "0.60901594", "0.60857767", "0.60813254", "0.6055295", "0.60528445", "0.6049999", "0.60473233", "0.603246", "0.6030856", "0.60259914", "0.60259515", "0.60196257", "0.6017925", "0.6015438", "0.6004251", "0.5991438", "0.598791", "0.598699", "0.59845805", "0.59788", "0.5969854", "0.5962537", "0.5955121", "0.59526455", "0.59477764", "0.59418136", "0.5923064", "0.59222376" ]
0.69565785
14
Create a temporary executable file that is removed at process exit
def mkstemp_exec(data): name = mkstemp(data) os.chmod(name, 0o755) return name
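For reference, a self-contained sketch of the snippet above: it leans on a mkstemp(data) helper (presumably the atexit-registering variant that appears among this row's candidate snippets), so the standalone version below folds that helper in. The name mkstemp_exec_standalone and the shell payload are illustrative assumptions, not from the source.

import atexit
import os
import tempfile


def mkstemp_exec_standalone(data: bytes) -> str:
    """Write data to a temp file, mark it executable, and delete it at interpreter exit."""
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as f:
        f.write(data)
    os.chmod(path, 0o755)  # same permission bits as the snippet above

    def _cleanup(p: str = path) -> None:
        # Best-effort removal at process exit; ignore the file already being gone.
        try:
            os.remove(p)
        except OSError:
            pass

    atexit.register(_cleanup)
    return path


if __name__ == "__main__":
    script = mkstemp_exec_standalone(b"#!/bin/sh\necho hello\n")
    print(script, oct(os.stat(script).st_mode & 0o777))  # e.g. /tmp/tmpXXXXXX 0o755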
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_temporary_file():\n f = NamedTemporaryFile(delete=False)\n return f.name", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def tempfile():\n return mkstemp()[1]", "def temporary_file(request):\n file_handle, path = tempfile.mkstemp()\n os.close(file_handle)\n\n def cleanup():\n \"\"\"Remove temporary file.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n request.addfinalizer(cleanup)\n return path", "def _testfile():\r\n import tempfile\r\n return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def testSetExecutable(self):\n with tempfile.NamedTemporaryFile(delete=True) as temp_file:\n utils.SetExecutable(temp_file.name)\n self.assertEqual(os.stat(temp_file.name).st_mode & 0o777, 0o755)", "def makeTempFile(self,perms=0o600,keep=False):\n\n fd=os.open(self.temp,os.O_RDWR|os.O_CREAT|os.O_EXCL|os.O_TRUNC,perms)\n f=os.fdopen(fd,'w+') \n if not keep:\n atexit.register(os.remove,self.temp)\n return f", "def deleteExecutable(self):\n if not (self.exec_loc is None):\n os.remove(self.exec_loc)", "def get_temporary_file(prefix=\"apsharvest_test_\", suffix=\"\", directory=\"\"):\n try:\n file_fd, filepath = mkstemp(prefix=prefix,\n suffix=suffix,\n dir=directory)\n os.close(file_fd)\n except IOError, e:\n try:\n os.remove(filepath)\n except Exception:\n pass\n raise e\n return filepath", "def ensure_tmpdir():\n path = mkdtemp('aomi')\n atexit.register(clean_tmpdir, path)\n return path", "def mkstemp(data):\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n\n # Ensure removal at end of python session\n atexit.register(rmtemp, f.name)\n\n return f.name", "def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])", "def mkstemp(data):\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data.encode('utf-8') if not isinstance(data, bytes) else data)\n f.close()\n\n # Ensure removal at end of python session\n atexit.register(rmtemp, f.name)\n\n return f.name", "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] 
Unexpected error while removing temporary files.\")", "def _get_temp_path(self):\n handle, path = tempfile.mkstemp()\n # windows can't write to a file that is already open by another process\n # (tests use pipe redirection to a log file)\n os.close(handle)\n return path", "def create_temp_copy(user, code):\n fname = user + \"_primes.py\"\n user_file = open(fname, 'w')\n user_file.write(code)\n user_file.close()\n return fname", "def test_create_script(executable, expected):\n filename = os.path.join(tempfile.gettempdir(), \"hello_world\")\n text = \"echo 'Hello World'\"\n\n create_script(filename, text, executable)\n assert os.path.exists(filename)\n\n # Disabling because it doesn't work on Windows.\n # s = os.stat(filename)\n # assert s.st_mode == expected\n\n if os.path.exists(filename):\n os.remove(filename)", "def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n os.close(fd)\n raise", "def a_temp_file():\n filename = None\n try:\n tmpfile = tempfile.NamedTemporaryFile(delete=False)\n filename = tmpfile.name\n yield tmpfile\n finally:\n if filename and os.path.exists(filename):\n os.remove(filename)", "def _tempfile(filename):\n return tempfile.NamedTemporaryFile(mode='w',\n dir=os.path.dirname(filename),\n prefix=os.path.basename(filename),\n suffix=os.fsencode('.tmp'),\n delete=False)", "def mktemp(self):\n if self.dryrun:\n return os.path.expandvars(\"$TEMP/build\")\n\n return tempfile.mkdtemp()", "def managed_temp_object():\n import tempfile\n _, temp_object = tempfile.mkstemp()\n try:\n yield temp_object\n finally:\n os.remove(temp_object)", "def temp_file(suffix=\"\"):\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n if suffix != \"\" and not suffix.startswith(\".\"):\n suffix = \".\" + suffix\n return tempfile.mktemp(suffix=suffix, dir=_temp_dir)", "def new_temp_file(prefix, suffix):\n f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, delete=False)\n f.close()\n return f.name", "def pre_start_script_tmp_sh(tmp_path: Path) -> Path:\n tmp_file = tmp_path / \"prestart.sh\"\n with open(Path(tmp_file), \"x\") as f:\n f.write('echo \"Hello World, from a temporary pre-start shell script\"\\n')\n return Path(tmp_file)", "def tempdir():\n return mkdtemp()", "def get_temp_file(self, delete: bool = False, close: bool = False):\n prefix = str(self._tmp_folder / f\"pysimt_{os.getpid()}\")\n t = tempfile.NamedTemporaryFile(\n mode='w', prefix=prefix, delete=delete)\n self.register_tmp_file(t.name)\n if close:\n t.close()\n return t", "def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)", "def tmpfile(tmpdir_factory):\n\n def make(filename):\n fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return fn\n\n # fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return make", "def make_tempfile(name):\n open(name, \"w\", encoding=\"utf-8\").close()\n try:\n yield\n finally:\n os.unlink(name)", "def make_tempdir():\n return mkdtemp()", "def make_script(name, data, target, execpath=False):\n exec_path = join('/tmp', name + '-script')\n target_path = join(target, 'tmp', name + '-script')\n sfile = file(target_path, 'w')\n sfile.write(data.read())\n sfile.close()\n os.system('chmod 755 %s' % target_path)\n if not execpath:\n return target_path\n else:\n # for chroot target exec_path\n return exec_path", "def 
remove_outputs_zip():\n os.remove(\"outputs.zip\")", "def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path", "def make_tempfile(content=None):\n fd, tmpfile = mkstemp()\n\n if content:\n write(fd, content)\n\n close(fd)\n return tmpfile", "def create_temp_env_directory():\n return tempfile.mkdtemp(prefix=\"spack-\")", "def delete_temp_dir(app_name):\n sudo('rm -rf /tmp/.fab-deploy-{}'.format(app_name))", "def make_temp_file(dir, data):\n fd, path = tempfile.mkstemp(dir=dir)\n if PY3:\n with os.fdopen(fd, 'w', encoding='utf-8') as f:\n f.write(data)\n else:\n with os.fdopen(fd, 'w') as f:\n f.write(data)\n return path", "def mktemp(self):\n try:\n fd, fn = tempfile.mkstemp(dir=self.tempdir)\n yield fn\n finally:\n try:\n os.close(fd)\n os.unlink(fn)\n except (OSError, IOError) as e:\n print(\"could not remove temporary file: %s\" % e,\n file=sys.stderr)", "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)", "def remove_kill_script():\n try:\n os.unlink('kill_script.sh')\n except:\n pass", "def _create_initial_install_file():\n if not _is_manager_installed():\n touch(INITIAL_INSTALL_FILE)", "def _tmp(self):\n tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',\n suffix='.out',\n delete=False)\n return tmpfn.name", "def get_temporary_file(original_file, no_modifications=False):\n if no_modifications:\n handle = open(original_file, 'wb')\n return handle\n\n handle = open(get_temporary_file_name(original_file), 'wb')\n return handle", "def create_file(path):\n open(path, \"w\").close()", "def cleanup():\n if config.get('global').get('no_cleanup'):\n return\n logging.info('Cleaning up temp directories')\n try:\n tmp_path = config.get('global').get('tmp_path')\n if os.path.exists(tmp_path):\n rmtree(tmp_path)\n except Exception as e:\n logging.error(format_debug(e))\n print_message('Error removing temp directories')\n\n try:\n archive_path = os.path.join(\n config.get('global').get('output_path'),\n 'script_archive',\n time.strftime(\"%Y-%m-%d-%I-%M\"))\n if not os.path.exists(archive_path):\n os.makedirs(archive_path)\n run_script_path = config.get('global').get('run_scripts_path')\n if os.path.exists(run_script_path):\n move(run_script_path, archive_path)\n except Exception as e:\n logging.error(format_debug(e))\n logging.error('Error archiving run_scripts directory')", "def assumpfile1():\n afile = tempfile.NamedTemporaryFile(suffix='.json', mode='a', delete=False)\n afile.write(ASSUMP_CONTENTS)\n afile.close()\n # must close and then yield for Windows platform\n yield afile\n if os.path.isfile(afile.name):\n try:\n os.remove(afile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def make_executable(filename):\n st = os.stat(filename)\n os.chmod(filename, st.st_mode | stat.S_IEXEC)", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def tempfile_name():\n ret = os.path.join(tempfile.gettempdir(), 'system_monitor.log')\n if os.access(ret, os.F_OK) and not os.access(ret, os.W_OK):\n print(\"WARNING: Couldn't write to log file {0}: (Permission 
denied)\".format(ret))\n ret = tempfile.mkstemp(prefix='system_monitor', suffix='.tmp', text=True)\n print(\"Create a new log file: {0}\".format(ret[1]))\n return ret[1]\n\n return ret", "def getTempDir():\n return \"/tmp/therm-%08X\" % abs(hash(__file__.replace(\"pyc\", \"py\")))", "def createJob(self, executable ):\n thisfileFolderPath = os.path.dirname(inspect.getfile( inspect.currentframe() ))\n inp = open( os.path.join(thisfileFolderPath,\"job.oar.tpl\"), 'r')\n t = Template(inp.read())\n \n s = t.substitute(executable=executable)\n \n completePath = os.path.join(self.oarRunFolder,\"job.oar.sh\")\n outp = open(completePath, 'w')\n outp.write(s)\n outp.close()\n \n os.system('chmod +x ' + completePath)\n print \"OAR: created job file: \", completePath\n return completePath", "def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", \"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn", "def _make_temp_file(self, code):\n temp = None\n try:\n with tempfile.NamedTemporaryFile(prefix='atcc-',\n suffix='.focus',\n delete=False,\n dir=self.tmpdir()) as temp:\n\n logger.debug(\"'temp_file=%s\", temp.name)\n\n if isinstance(code, str):\n code = code.encode('utf-8')\n\n temp.write(code)\n temp.flush()\n yield temp.name\n finally:\n if temp:\n os.remove(temp.name)", "def tempdir():\n\n # Create a directory and return the path\n return tempfile.mkdtemp()", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def tmpfile(self, cmd, code, suffix=''):\n\n # Don't run for DataDefs or !DictionarySource\n if 'DataDefs' in self.filename:\n return ''\n elif '!DictionarySource' in self.filename:\n return ''\n\n with self._make_temp_file(code) as temp_path:\n cmd = list(cmd)\n\n if '@' in cmd:\n cmd[cmd.index('@')] = temp_path\n else:\n cmd.append(temp_path)\n\n return self._communicate(cmd)", "def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)", "def pre_start_script_tmp_py(tmp_path: Path) -> Path:\n tmp_file = shutil.copy(Path(pre_start_module.__file__), tmp_path)\n return Path(tmp_file)", "def get_temp_filename(suffix=None):\n file = tempfile.mkstemp(suffix=suffix or \"\", prefix=\"temp_\", dir=os.getcwd()) # or \"\" for Python 2 compatibility\n os.close(file[0])\n return file[1]", "def CreateTempFileFromTestcase(\n tempdir: pathlib.Path, tc: testcase.Testcase\n) -> pathlib.Path:\n path = tempdir / f\"{tc.id}.cl\"\n with open(path, \"w\") as f:\n f.write(tc.inputs[\"src\"])\n return path", "def _create_temp_files():\n\n fakes = {}\n for i in [\"domain\", \"problem\"]:\n q, fname = tempfile.mkstemp()\n os.close(q)\n fakes[i] = fname\n return fakes", "def _temporary_resource_file(text, prefix='', suffix=''):\n import tempfile\n\n # Ensure the folder exists\n if not os.path.exists(_temp_path):\n os.mkdir(_temp_path)\n\n try:\n fd, temp_file_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=_temp_path)\n if persist.debug_mode():\n persist.printf(\"{}: created temporary file at {}\".format(p_name, temp_file_path))\n\n try:\n with open(fd, 'w', encoding='utf-8') as f:\n f.write(text)\n temp_file_resource_path = \"/\".join([\"Packages\", 
_temp_dir_name,\n os.path.basename(temp_file_path)])\n yield temp_file_resource_path\n finally:\n os.remove(temp_file_path)\n except FileNotFoundError:\n _remove_temp_path()\n finally:\n # And remove the folder, if it's empty.\n # Otherwise wait for a \"restart\".\n try:\n os.rmdir(_temp_path)\n except OSError as e:\n if persist.debug_mode():\n persist.printf(\"{}: unable to delete temporary folder; {}\".format(p_name, e))", "def mkdtemp_clean(suffix=\"\", prefix=\"tmp\", dir=None):\r\n the_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\r\n atexit.register(cleanup_tempdir, the_dir)\r\n return the_dir", "def makeTestFile(text):\n f = tempfile.NamedTemporaryFile()\n f.write(text)\n f.flush()\n return f", "def make_executable(p):\n st = os.stat(p)\n os.chmod(p, st.st_mode | 0o111)", "def touch(path):\n open(path, 'wb').close()", "def _create_initial_install_file():\n if not _are_components_installed():\n touch(INITIAL_INSTALL_FILE)", "def warn_purge_exit(info_msg, filename, exit_msg):\n floyd_logger.info(info_msg)\n rmtree(os.path.dirname(filename))\n sys.exit(exit_msg)", "def create_temp_dir():\n\n try:\n temp_dir = os.getenv('TEMP_FILE_DIR')\n\n if not isinstance(temp_dir, type(None)):\n if os.path.exists(temp_dir):\n LOGGER.warning('Temp Directory Already Exists.')\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n\n LOGGER.debug(f'Temp Dir: {temp_dir}')\n except Exception as ex:\n LOGGER.exception(ex)\n raise ex", "def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname", "def classCleanup(cls):\n cls.RemoveTempFile(SettingsCommandTestCase.output_file_name)", "def make_executable(path):\n mode = os.stat(path).st_mode\n mode |= (mode & 0o444) >> 2 # copy R bits to X\n os.chmod(path, mode)", "def gettempfilename(suffix):\n if '_' in os.environ:\n # tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu 12.04 particularly)\n if os.environ['_'].find('wine') >= 0:\n tmpdir = '.'\n if 'TMP' in os.environ:\n tmpdir = os.environ['TMP']\n import time\n import random\n random.seed(time.time())\n random_part = 'file%d' % random.randint(0, 1000000000)\n return os.path.join(tmpdir, random_part + suffix)\n\n return tempfile.mktemp(suffix)", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def get_prog_file():\n get_file()\n ## Executa\n file = ARGS.output\n os.system(\"chmod +x \" + file)\n subprocess.call([file])", "def create_temp_output_paths() -> None:\n if not os.path.exists(TMP_PATH):\n os.makedirs(TMP_PATH)\n if not os.path.exists(TMP_MAP_PATH):\n os.makedirs(TMP_MAP_PATH)", "def getTempFile():\n root = getDir(tempDir)\n for i in range(100):\n path = os.path.join(root, '%d-%d' % (\n os.getpid(), random.randint(100000, 999999)))\n if not os.path.isfile(path):\n return path\n raise NotImplementedError(\"getTempFile() appears to be failing\")", "def cleanup_output(output_name):\n print(\"Removing {}\".format(output_name))\n os.remove(output_name)", "def clean(self):\n actual_output_file = 
path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def create_temp_files(containers):\n for name in containers:\n run_cmd(f\"rm -rf /tmp/{name}.img\", True)\n for name in containers:\n run_cmd(f\"truncate -s 1G /tmp/{name}.img\", True)", "def mk_work_dir():\n return tempfile.mkdtemp(prefix='pentaho-aws-', suffix='')", "def get_temp_dir():\n return tempfile.mkdtemp()", "def make_executable(path):\n mode = os.stat(path).st_mode\n mode |= (mode & 0o444) >> 2\n os.chmod(path, mode)", "def make_executable(path):\n mode = os.stat(path).st_mode\n mode |= (mode & 0o444) >> 2\n os.chmod(path, mode)", "def make_executable(path):\n mode = os.stat(path).st_mode\n mode |= (mode & 0o444) >> 2\n os.chmod(path, mode)", "def mktempdir(delete_on_exit=True):\n tmpdir = tempfile.mkdtemp(\"idstools\")\n if delete_on_exit:\n atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)\n return tmpdir", "def write_tmp_patch(diff, filename=None):\n if not filename:\n prefix = 'cugit-'\n suffix = '-patch'\n filename = mkstemp(suffix, prefix)[1]\n with open(filename, 'w') as f:\n f.write(diff)\n return filename", "def create_pid_file(app_config, app_logger):\n pid = str(os.getpid())\n pidfile = get_config_item(app_config, 'app_pid_file')\n\n if os.path.isfile(pidfile):\n print(\"{} already exists, exiting\".format(pidfile))\n app_logger.info(\"STARTUP: PID file exists... exiting...\")\n return False\n try:\n with (open(pidfile, 'w')) as pidfilestream:\n pidfilestream.write(pid)\n pidfilestream.close()\n return True\n # end with\n except IOError:\n app_logger.error(\"STARTUP: Could not create pid file at: {}\".format(pidfile))\n return False", "def __call__(self):\n for tmp_file in filter(lambda x: x.exists(), self.temp_files):\n tmp_file.unlink()\n\n for proc in self.processes:\n try:\n os.kill(proc, signal.SIGTERM)\n except ProcessLookupError:\n pass", "def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)", "def remove_lock_file():\n # pylint: disable=global-statement\n global LOCK_FILENAME\n\n if LOCK_FILENAME is not None and os.path.isfile(LOCK_FILENAME):\n os.unlink(LOCK_FILENAME)", "def start_prompt_remover(vm_address):\n call_exec_daemon('unpackTarball', [None, 'C:\\\\'], host = vm_address)\n script = autoit_generator.install_tools_script\n for line in script.split('\\n'):\n print 'AUTOIT:', line\n target_exe = autoit_generator.compile(vm_address,\n 'prompt_remover_generated', script)\n autorun_dir = \"C:\\\\docume~1\\\\admini~1\\\\startm~1\\\\Programs\\\\Startup\"\n run_via_exec_daemon(['mkdir', autorun_dir], host=vm_address,\n ignore_failure=True)\n autorun_promptremover_file = autorun_dir+'\\\\promptremove.bat'\n call_exec_daemon('createFile',\n [autorun_promptremover_file, target_exe+'\\r\\n'],\n host=vm_address)\n\n process = Process(target=loop_run_program, args=(target_exe, vm_address))\n process.start()\n return process, target_exe", "def setup_output(self, filename=None):\n temp_file_descriptor = None\n if filename is None:\n temp_file_descriptor, filename = mkstemp()\n\n self.filename = filename\n self._temp_file_descriptor = temp_file_descriptor", "def createFile(file):\n file_ = os.path.join(os.getcwd(),file)\n if not(os.path.isfile(file_)):\n with open(file_,\"a\") as f:\n f.close()", "def 
_make_tempdir(self):\n self._clean_tempdir()\n os.mkdir(self._get_tempdir())\n assert os.path.exists(self._get_tempdir())\n rospy.on_shutdown(self._clean_tempdir)\n rospy.on_shutdown(self._clear_cache)" ]
[ "0.66870767", "0.65108424", "0.63702273", "0.63672537", "0.63322586", "0.6330699", "0.6312687", "0.629912", "0.6277841", "0.62701666", "0.62639666", "0.6252135", "0.6187429", "0.61593467", "0.6120302", "0.6027149", "0.59966457", "0.59905773", "0.5983607", "0.5978929", "0.59576666", "0.59407455", "0.59264356", "0.59039104", "0.5844892", "0.5834019", "0.5827533", "0.5801009", "0.57845753", "0.5775881", "0.5764834", "0.57343197", "0.5722849", "0.57200533", "0.5712119", "0.5690954", "0.5685738", "0.5671706", "0.56686115", "0.5665062", "0.56578374", "0.5645882", "0.5642416", "0.5625806", "0.56212467", "0.5618252", "0.5606139", "0.56037325", "0.55955005", "0.5588669", "0.55857515", "0.55831504", "0.5567443", "0.5565516", "0.5552137", "0.55381143", "0.5536689", "0.55340815", "0.5533632", "0.55263436", "0.5524748", "0.55206347", "0.551304", "0.5509795", "0.5509383", "0.5508398", "0.55042994", "0.5502597", "0.5500286", "0.5495231", "0.54797333", "0.54741615", "0.54654014", "0.5449187", "0.5429951", "0.5428429", "0.5425502", "0.54208916", "0.5420451", "0.54195875", "0.54150623", "0.5408327", "0.54031307", "0.5398785", "0.53978443", "0.5386603", "0.5384844", "0.5384844", "0.5384844", "0.5378134", "0.5376983", "0.5372015", "0.5366728", "0.5366667", "0.5361295", "0.53526354", "0.5348491", "0.53483903", "0.534556" ]
0.6263371
12
Helper function to compute the values of the debit/credit/amount_currency fields based on an amount and the currencies given as parameters
def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False): amount_currency = False currency_id = False if src_currency and src_currency != company_currency: amount_currency = amount amount = src_currency.with_context(self._context).compute(amount, company_currency) currency_id = src_currency.id debit = amount > 0 and amount or 0.0 credit = amount < 0 and -amount or 0.0 if invoice_currency and invoice_currency != company_currency and not amount_currency: amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency) currency_id = invoice_currency.id return debit, credit, amount_currency, currency_id
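The snippet above appears to be an Odoo-style model method: src_currency.with_context(self._context).compute(...) performs the rate conversion and self._context carries the conversion date. Below is a framework-free sketch of just the sign-splitting rule it applies (positive amounts post as debit, negative amounts as credit); the convert callable, the split_debit_credit name, and the fixed rate are illustrative assumptions, and the invoice-currency branch is left out.

from typing import Callable, Optional, Tuple


def split_debit_credit(
    amount: float,
    src_currency: Optional[str],
    company_currency: str,
    convert: Callable[[float, str, str], float],
) -> Tuple[float, float, Optional[float], Optional[str]]:
    """Return (debit, credit, amount_currency, currency) for a signed amount.

    convert(amount, from_ccy, to_ccy) stands in for the rate lookup that the
    snippet above delegates to src_currency.compute(...).
    """
    amount_currency = None
    currency = None
    if src_currency and src_currency != company_currency:
        amount_currency = amount  # keep the amount expressed in the source currency
        amount = convert(amount, src_currency, company_currency)
        currency = src_currency
    debit = amount if amount > 0 else 0.0    # positive amounts post as debit
    credit = -amount if amount < 0 else 0.0  # negative amounts post as credit
    return debit, credit, amount_currency, currency


# Illustrative fixed rate; real code would resolve the rate for a given date.
_rates = {("EUR", "USD"): 1.1}
print(split_debit_credit(-100.0, "EUR", "USD", lambda a, f, t: a * _rates[(f, t)]))
# -> (0.0, 110.00000000000001, -100.0, 'EUR')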
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company) if company else self.env.user.company_id\n if src_currency and src_currency != company_currency:\n amount_currency = amount\n amount = src_currency._convert(amount, company_currency, company, date)\n currency_id = src_currency.id\n debit = amount > 0 and amount or 0.0\n credit = amount < 0 and -amount or 0.0\n return debit, credit, amount_currency, currency_id", "def getValue(currency=None):", "def getFactor(currency):", "def getUserCurrency():", "def getCurrencies():", "def getCurrencyFactor(id=None):", "def getBaseCurrency():", "def getActiveCurrency():", "def process_conversion(queries, query, src, dst, val, currencies, wf):\n ####################################################################################################\n # Make the currency case insensitive\n ####################################################################################################\n if src:\n src = src.upper()\n if dst:\n dst = dst.upper()\n\n ####################################################################################################\n # Validate the currencies to check if its a currency or not\n ####################################################################################################\n if not validate_currencies(queries, query, src, dst, currencies, wf):\n return 100\n\n rate = search_rate(src, dst, wf)\n\n if rate == -1:\n wf.add_item('No exchange rate found for the especified currencies...', icon=ICON_ERROR)\n return 1\n\n ####################################################################################################\n # Gets the currency info\n ####################################################################################################\n src_currency_info = currencies[src]\n dst_currency_info = currencies[dst]\n\n cur_src_name = get_currency_name(src_currency_info)\n cur_dst_name = get_currency_name(dst_currency_info)\n\n cur_dst_symbol = str.decode(dst_currency_info['Simbol'], encoding='utf-8')\n flag_file_icon = wf.workflowfile('flags/{}'.format(dst_currency_info['Flag']))\n\n if not val:\n val = 1\n\n converted_rate = Decimal(val) * rate\n\n decimal_places = get_decimal_places_to_use(rate)\n\n fmt_converted_rate = format_result(wf, converted_rate, decimal_places)\n\n # module 1 will result in just the decimal part, if the decimal part is 0, then i'll show only 2 decimal places\n if (rate % Decimal(1)).compare(Decimal('0')) == 0:\n fmt_rate = format_result(wf, rate, 2)\n else:\n fmt_rate = format_result(wf, rate, decimal_places)\n\n title = cur_dst_symbol + ' ' + fmt_converted_rate\n sub_title = u'({}) -> ({}) with rate {} for query: {}'.format(cur_src_name, cur_dst_name, fmt_rate,\n ' '.join(query).upper())\n\n wf.add_item(title, sub_title, valid=True, arg=str(converted_rate), icon=flag_file_icon)\n\n ############################################################################################\n # Checks if an update is available, and add it to the output\n ############################################################################################\n if wf.update_available:\n handle_check_update(wf)\n\n return 0", "def test_currency_case(self):\n form = copy.deepcopy(self.base_form)\n form[\"mc_currency\"] = \"UsD\"\n Payment.process_paypal_ipn(form)\n payments = Payment.query.all()\n 
self.assertEqual(payments[0].currency, Currency.US_Dollar.value)", "def test_get_currency_using_get(self):\n pass", "def get_currency_values_if_valid(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.home_currency_input.text == '':\n self.root.ids.home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.foreign_currency_input.text == '':\n self.root.ids.foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.foreign_currency_input.text)\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.foreign_currency_input.text = ''\n self.root.ids.foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.home_currency_input.text)\n valid_home_amount = True\n except ValueError:\n self.root.ids.home_currency_input.text = ''\n self.root.ids.home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n\n return home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True, \\\n home_amount, foreign_amount", "def currency(self, currency):\n allowed_values = [\"AED\", \"AFN\", \"ALL\", \"AMD\", \"ANG\", \"AOA\", \"ARS\", \"AUD\", \"AWG\", \"AZN\", \"BAM\", \"BBD\", \"BDT\", \"BGN\", \"BHD\", \"BIF\", \"BMD\", \"BND\", \"BOB\", \"BOV\", \"BRL\", \"BSD\", \"BTN\", \"BWP\", \"BYR\", \"BZD\", \"CAD\", \"CDF\", \"CHE\", \"CHF\", \"CHW\", \"CLF\", \"CLP\", \"CNY\", \"COP\", \"COU\", \"CRC\", \"CUC\", \"CUP\", \"CVE\", \"CZK\", \"DJF\", \"DKK\", \"DOP\", \"DZD\", \"EGP\", \"ERN\", \"ETB\", \"EUR\", \"FJD\", \"FKP\", \"GBP\", \"GEL\", \"GHS\", \"GIP\", \"GMD\", \"GNF\", \"GTQ\", \"GYD\", \"HKD\", \"HNL\", \"HRK\", \"HTG\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"IQD\", \"IRR\", \"ISK\", \"JMD\", \"JOD\", \"JPY\", \"KES\", \"KGS\", \"KHR\", \"KMF\", \"KPW\", \"KRW\", \"KWD\", \"KYD\", \"KZT\", \"LAK\", \"LBP\", \"LKR\", \"LRD\", \"LSL\", \"LTL\", \"LVL\", \"LYD\", \"MAD\", \"MDL\", \"MGA\", \"MKD\", \"MMK\", \"MNT\", \"MOP\", \"MRO\", \"MRU\", \"MUR\", \"MVR\", \"MWK\", \"MXN\", \"MXV\", \"MYR\", \"MZN\", \"NAD\", \"NGN\", \"NIO\", \"NOK\", \"NPR\", \"NZD\", \"OMR\", \"PAB\", \"PEN\", \"PGK\", \"PHP\", \"PKR\", \"PLN\", \"PYG\", \"QAR\", \"RON\", \"RSD\", \"RUB\", \"RWF\", \"SAR\", \"SBD\", \"SCR\", \"SDG\", \"SEK\", \"SGD\", \"SHP\", \"SLL\", \"SOS\", \"SRD\", \"SSP\", \"STD\", \"STN\", \"SVC\", \"SYP\", \"SZL\", \"THB\", \"TJS\", \"TMT\", \"TND\", \"TOP\", \"TRY\", \"TTD\", \"TWD\", \"TZS\", \"UAH\", \"UGX\", \"USD\", \"USN\", \"USS\", \"UYI\", \"UYU\", \"UZS\", \"VEF\", \"VES\", \"VND\", \"VUV\", \"WST\", \"XAF\", \"XCD\", \"XOF\", \"XPF\", \"YER\", \"ZAR\", \"ZMW\", \"ZWL\"] # noqa: E501\n if currency not in allowed_values:\n raise ValueError(\n \"Invalid value for `currency` ({0}), must be one of {1}\" # noqa: E501\n .format(currency, allowed_values)\n )\n\n self._currency = currency", "def get_currency():\n return _currency", "def course(self, currency, sum):\n if currency == \"USD\":\n url = \"https://finance.rambler.ru/currencies/USD/\"\n elif currency == \"EUR\":\n url = \"https://finance.rambler.ru/currencies/EUR/\"\n else:\n return sum * 
1000\n site = requests.get(url)\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\n com = float(soup.find(\"div\", attrs={\"class\": \"finance-currency-plate__currency\"}).text.split()[0])\n return com * sum * 1000", "def bitcoins_to_currency(cls, currency, amount):\n if not (rate := cache.get(currency)):\n try:\n api_rate = cls.api_call(currency)\n decimals = Decimal(\"0.01\")\n total = amount * Decimal(str(api_rate))\n rate = total.quantize(decimals, rounding=ROUND_DOWN).normalize()\n except Exception:\n # Don't retry. Just send empty flag\n rate = cls.API_NOT_AVAILABLE\n if rate and rate != cls.API_NOT_AVAILABLE:\n cache.set(currency, rate)\n return rate", "def _get_amount_value(\n self, cr, uid, ids, ifrs_line=None, period_info=None,\n fiscalyear=None, exchange_date=None, currency_wizard=None,\n number_month=None, target_move=None, pdx=None, undefined=None,\n two=None, one_per=False, bag=None, context=None):\n\n context = context and dict(context) or {}\n # TODO: Current Company's Currency shall be used: the one on wizard\n from_currency_id = ifrs_line.ifrs_id.company_id.currency_id.id\n to_currency_id = currency_wizard\n\n if number_month:\n if two:\n context = {\n 'period_from': number_month, 'period_to': number_month}\n else:\n period_id = period_info[number_month][1]\n context = {'period_from': period_id, 'period_to': period_id}\n else:\n context = {'whole_fy': True}\n\n # NOTE: This feature is not yet been implemented\n # context['partner_detail'] = pdx\n context['fiscalyear'] = fiscalyear\n context['state'] = target_move\n\n if ifrs_line.type == 'detail':\n res = self._get_sum_detail(\n cr, uid, ifrs_line.id, number_month,\n context=context)\n elif ifrs_line.type == 'total':\n res = self._get_grand_total(\n cr, uid, ifrs_line.id, number_month,\n one_per=one_per, bag=bag, context=context)\n elif ifrs_line.type == 'constant':\n res = self._get_constant(cr, uid, ifrs_line.id, number_month,\n context=context)\n else:\n res = 0.0\n\n if ifrs_line.type == 'detail':\n res = self.exchange(\n cr, uid, ids, res, to_currency_id, from_currency_id,\n exchange_date, context=context)\n return res", "def getAmount2(*args):", "def getActiveCurrencies():", "def validate_payment_amount(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n\n credit_card = tracker.get_slot(\"credit_card\")\n cc_balance = tracker.get_slot(\"credit_card_balance\")\n account_balance = float(tracker.get_slot(\"account_balance\"))\n try:\n entity = get_entity_details(\n tracker, \"amount-of-money\"\n ) or get_entity_details(tracker, \"number\")\n amount_currency = parse_duckling_currency(entity)\n if not amount_currency:\n raise (TypeError)\n if account_balance < float(amount_currency.get(\"amount_of_money\")):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return amount_currency\n except (TypeError, AttributeError):\n pass\n if value and value.lower() in cc_balance.get(credit_card.lower()):\n key = value.lower()\n amount = cc_balance.get(credit_card.lower()).get(key)\n amount_type = f\" (your {key})\"\n\n if account_balance < float(amount):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return {\n \"payment_amount\": f\"{amount:.2f}\",\n \"payment_amount_type\": amount_type,\n \"currency\": \"$\",\n }\n\n else:\n dispatcher.utter_message(template=\"utter_no_payment_amount\")\n return {\"payment_amount\": None}", "def 
getDefaultCurrency():", "def getBalance(self, currency=''):\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())\n df = model.getAccount()\n if isinstance(df, pd.DataFrame):\n if currency == '':\n # retrieve all balances\n return df\n else:\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n else:\n return 0.0\n else:\n # return dummy balances\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n if self.app.getExchange() == 'binance':\n self.balance = self.balance.replace('QUOTE', currency)\n else: \n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n else:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty:\n self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n\n else:\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n if currency == '':\n # retrieve all balances\n return model.getAccounts()[['currency', 'balance', 'hold', 'available']]\n else:\n df = model.getAccounts()\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n \n else:\n # return dummy balances\n\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n elif currency in ['BCH','BTC','ETH','LTC','XLM']:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty == True:\n self.balance.loc[len(self.balance)] = [currency,0,0,0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n 
return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))", "def convert_amount(self, init, new_currency, amount):\r\n\r\n curr = CurrencyRates()\r\n curr_conversion = curr.convert(init, new_currency, amount)\r\n\r\n return curr_conversion", "def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result", "def getCurrency(self, cucd):\n obj = MSTCUR.get( cucd )\n if obj:\n return (obj.CMCRCUCD, obj.CMCRCUNM)\n return (None, None)", "def getRoundedValue(currency=None):", "def exchange(currency_from, currency_to, amount_from):\n x = analysis(currency_from, currency_to, amount_from)\n return(cal(x))", "def _amount_residual(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n if context is None:\n context = {}\n cur_obj = self.pool.get('res.currency')\n for move_line in self.browse(cr, uid, ids, context=context):\n res[move_line.id] = {\n 'amount_residual': 0.0,\n 'amount_residual_currency': 0.0,\n }\n\n if move_line.reconcile_id:\n continue\n if not move_line.account_id.type in ('payable', 'receivable'):\n #this function does not suport to be used on move lines not related to payable or receivable accounts\n continue\n\n if move_line.currency_id:\n move_line_total = move_line.amount_currency\n sign = move_line.amount_currency < 0 and -1 or 1\n else:\n move_line_total = move_line.debit - move_line.credit\n sign = (move_line.debit - move_line.credit) < 0 and -1 or 1\n line_total_in_company_currency = move_line.debit - move_line.credit\n context_unreconciled = context.copy()\n if move_line.reconcile_partial_id:\n for payment_line in move_line.reconcile_partial_id.line_partial_ids:\n if payment_line.id == move_line.id:\n continue\n if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:\n move_line_total += payment_line.amount_currency\n else:\n if 
move_line.currency_id:\n context_unreconciled.update({'date': payment_line.date})\n amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)\n move_line_total += amount_in_foreign_currency\n else:\n move_line_total += (payment_line.debit - payment_line.credit)\n line_total_in_company_currency += (payment_line.debit - payment_line.credit)\n\n result = move_line_total\n res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency\n return res", "def getcurrency(self):\n return self.__currency", "def make_change_dp(amount, denominations):", "def test_change_currency(self):\n converter_date = datetime.strptime('2001-09-11', '%Y-%m-%d')\n self.commerce_wallet.currency = ValidCurrencies.USD\n self.commerce_wallet.save()\n add_charge(\n amount_to_charge=1000, creditor_token=str(self.commerce_wallet.token),\n debtor_token=str(self.customer_wallet.token), comment='TROLOLOLO charge',\n converter_date=converter_date\n )\n\n # This day the rate change is 1000 $ == 896.3999999999999772626324556767940521240234375 €\n # So the final balances will be 1000 for commerce and almost 104.6 € for customer\n self.commerce_wallet.refresh_from_db()\n self.customer_wallet.refresh_from_db()\n self.assertAlmostEqual(1000 + self.start_commerce_funds, self.commerce_wallet.balance, msg=\"Correct balance for commerce.\")\n self.assertAlmostEqual(\n Decimal(self.start_customer_funds) - Decimal(896.3999999999999772626324556767940521240234375),\n self.customer_wallet.balance,\n msg=\"Correct balance for commerce.\"\n )", "async def fetch_currencies(self, params={}):\n response = await self.publicGetWalletAssets(params)\n #\n # {\n # \"XBt\": {\n # \"asset\": \"XBT\",\n # \"currency\": \"XBt\",\n # \"majorCurrency\": \"XBT\",\n # \"name\": \"Bitcoin\",\n # \"currencyType\": \"Crypto\",\n # \"scale\": \"8\",\n # # \"mediumPrecision\": \"8\",\n # # \"shorterPrecision\": \"4\",\n # # \"symbol\": \"₿\",\n # # \"weight\": \"1\",\n # # \"tickLog\": \"0\",\n # \"enabled\": True,\n # \"isMarginCurrency\": True,\n # \"minDepositAmount\": \"10000\",\n # \"minWithdrawalAmount\": \"1000\",\n # \"maxWithdrawalAmount\": \"100000000000000\",\n # \"networks\": [\n # {\n # \"asset\": \"btc\",\n # \"tokenAddress\": \"\",\n # \"depositEnabled\": True,\n # \"withdrawalEnabled\": True,\n # \"withdrawalFee\": \"20000\",\n # \"minFee\": \"20000\",\n # \"maxFee\": \"10000000\"\n # }\n # ]\n # },\n # }\n #\n result = {}\n for i in range(0, len(response)):\n currency = response[i]\n asset = self.safe_string(currency, 'asset')\n code = self.safe_currency_code(asset)\n id = self.safe_string(currency, 'currency')\n name = self.safe_string(currency, 'name')\n chains = self.safe_value(currency, 'networks', [])\n depositEnabled = False\n withdrawEnabled = False\n networks = {}\n scale = self.safe_string(currency, 'scale')\n precisionString = self.parse_precision(scale)\n precision = self.parse_number(precisionString)\n for j in range(0, len(chains)):\n chain = chains[j]\n networkId = self.safe_string(chain, 'asset')\n network = self.network_id_to_code(networkId)\n withdrawalFeeRaw = self.safe_string(chain, 'withdrawalFee')\n withdrawalFee = self.parse_number(Precise.string_mul(withdrawalFeeRaw, precisionString))\n 
isDepositEnabled = self.safe_value(chain, 'depositEnabled', False)\n isWithdrawEnabled = self.safe_value(chain, 'withdrawalEnabled', False)\n active = (isDepositEnabled and isWithdrawEnabled)\n if isDepositEnabled:\n depositEnabled = True\n if isWithdrawEnabled:\n withdrawEnabled = True\n networks[network] = {\n 'info': chain,\n 'id': networkId,\n 'network': network,\n 'active': active,\n 'deposit': isDepositEnabled,\n 'withdraw': isWithdrawEnabled,\n 'fee': withdrawalFee,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n 'deposit': {\n 'min': None,\n 'max': None,\n },\n },\n }\n currencyEnabled = self.safe_value(currency, 'enabled')\n currencyActive = currencyEnabled or (depositEnabled or withdrawEnabled)\n minWithdrawalString = self.safe_string(currency, 'minWithdrawalAmount')\n minWithdrawal = self.parse_number(Precise.string_mul(minWithdrawalString, precisionString))\n maxWithdrawalString = self.safe_string(currency, 'maxWithdrawalAmount')\n maxWithdrawal = self.parse_number(Precise.string_mul(maxWithdrawalString, precisionString))\n minDepositString = self.safe_string(currency, 'minDepositAmount')\n minDeposit = self.parse_number(Precise.string_mul(minDepositString, precisionString))\n result[code] = {\n 'id': id,\n 'code': code,\n 'info': currency,\n 'name': name,\n 'active': currencyActive,\n 'deposit': depositEnabled,\n 'withdraw': withdrawEnabled,\n 'fee': None,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'withdraw': {\n 'min': minWithdrawal,\n 'max': maxWithdrawal,\n },\n 'deposit': {\n 'min': minDeposit,\n 'max': None,\n },\n },\n 'networks': networks,\n }\n return result", "def updateCurrency(self, currency: str, value: str) -> None:\n\n if currency == \"USDT\":\n self._USDT = float(value)\n elif currency == \"ETH\":\n self._ETH = float(value)\n elif currency == \"BTC\":\n self._BTC = float(value)", "def getAmount1(*args):", "def simple_convert(rates, value, from_currency, to_currency):\n currency_rate = find_currency_rate(rates, from_currency, to_currency)\n if is_same(from_currency, to_currency):\n return value\n elif is_reversed(currency_rate, from_currency, to_currency):\n return reverse_rate(currency_rate) * value\n else:\n return value * currency_rate[2]", "def linear_utility(self, amount_by_currency_id: CurrencyHoldings) -> float:\n assert self.is_initialized, \"Preferences params not set!\"\n result = linear_utility(\n self.exchange_params_by_currency_id, amount_by_currency_id\n )\n return result", "def get_balance_eq(self):\n usd_balance = self.get_balance(\"btc\")[\"usd_balance\"]\n btc_balance = self.get_balance(\"btc\")[\"btc_balance\"]\n ltc_balance = self.get_balance(\"ltc\")[\"ltc_balance\"]\n eth_balance = self.get_balance(\"eth\")[\"eth_balance\"]\n eur_balance = self.get_balance(\"eur\")[\"eur_balance\"]\n return {'usd': str(round(float(usd_balance), 2)),\n 'btc': str(round(float(btc_balance), 2)),\n 'ltc': str(round(float(ltc_balance), 2)),\n 'eth': str(round(float(eth_balance), 2)),\n 'eur': str(round(float(eur_balance), 2))}", "def compute_custom(self, from_amount, to_currency, rate, round=True):\n self, to_currency = self or to_currency, to_currency or self\n assert self, \"compute from unknown currency\"\n assert to_currency, \"compute to unknown currency\"\n # apply conversion rate\n if self == to_currency:\n to_amount = from_amount\n else:\n to_amount = from_amount * self._get_conversion_rate_custom(self, to_currency, rate)\n# to_amount = from_amount * 
self._get_conversion_rate(self, to_currency)\n # apply rounding\n return to_currency.round(to_amount) if round else to_amount", "def local_price(amount, currency):\n amt = convert(amount, currency)\n sym = symbol(currency)\n return f'{sym}{amt}'", "def get_values(self, currency):\n curr_dict = {\n \"brazilian_real\": None,\n \"american_dollar\": None,\n \"european_euro\": None,\n \"british_pound\": None,\n \"japanese_yen\": None,\n \"swiss_frank\": None,\n \"canadian_dollar\": None,\n \"australian_dollar\": None\n }\n index = 0\n for key in curr_dict:\n if key != currency:\n # list comprehension to get values from data\n curr_dict[key] = [\n element for record in select_records(currency, 1) for element in record\n if element == record[index] and isinstance(element, float)\n ]\n index += 1\n else:\n continue\n return curr_dict", "def currencies():\n return _CURRENCIES", "async def fetch_currencies(self, params={}):\n labels = [\n 'pub:list:currency',\n 'pub:map:currency:sym', # maps symbols to their API symbols, BAB > BCH\n 'pub:map:currency:label', # verbose friendly names, BNT > Bancor\n 'pub:map:currency:unit', # maps symbols to unit of measure where applicable\n 'pub:map:currency:undl', # maps derivatives symbols to their underlying currency\n 'pub:map:currency:pool', # maps symbols to underlying network/protocol they operate on\n 'pub:map:currency:explorer', # maps symbols to their recognised block explorer URLs\n 'pub:map:currency:tx:fee', # maps currencies to their withdrawal fees https://github.com/ccxt/ccxt/issues/7745,\n 'pub:map:tx:method', # maps withdrawal/deposit methods to their API symbols\n ]\n config = ','.join(labels)\n request = {\n 'config': config,\n }\n response = await self.publicGetConfConfig(self.extend(request, params))\n #\n # [\n #\n # a list of symbols\n # [\"AAA\",\"ABS\",\"ADA\"],\n #\n # # sym\n # # maps symbols to their API symbols, BAB > BCH\n # [\n # ['BAB', 'BCH'],\n # ['CNHT', 'CNHt'],\n # ['DSH', 'DASH'],\n # ['IOT', 'IOTA'],\n # ['LES', 'LEO-EOS'],\n # ['LET', 'LEO-ERC20'],\n # ['STJ', 'STORJ'],\n # ['TSD', 'TUSD'],\n # ['UDC', 'USDC'],\n # ['USK', 'USDK'],\n # ['UST', 'USDt'],\n # ['USTF0', 'USDt0'],\n # ['XCH', 'XCHF'],\n # ['YYW', 'YOYOW'],\n # # ...\n # ],\n # # label\n # # verbose friendly names, BNT > Bancor\n # [\n # ['BAB', 'Bitcoin Cash'],\n # ['BCH', 'Bitcoin Cash'],\n # ['LEO', 'Unus Sed LEO'],\n # ['LES', 'Unus Sed LEO(EOS)'],\n # ['LET', 'Unus Sed LEO(ERC20)'],\n # # ...\n # ],\n # # unit\n # # maps symbols to unit of measure where applicable\n # [\n # ['IOT', 'Mi|MegaIOTA'],\n # ],\n # # undl\n # # maps derivatives symbols to their underlying currency\n # [\n # ['USTF0', 'UST'],\n # ['BTCF0', 'BTC'],\n # ['ETHF0', 'ETH'],\n # ],\n # # pool\n # # maps symbols to underlying network/protocol they operate on\n # [\n # ['SAN', 'ETH'], ['OMG', 'ETH'], ['AVT', 'ETH'], ['EDO', 'ETH'],\n # ['ESS', 'ETH'], ['ATD', 'EOS'], ['ADD', 'EOS'], ['MTO', 'EOS'],\n # ['PNK', 'ETH'], ['BAB', 'BCH'], ['WLO', 'XLM'], ['VLD', 'ETH'],\n # ['BTT', 'TRX'], ['IMP', 'ETH'], ['SCR', 'ETH'], ['GNO', 'ETH'],\n # # ...\n # ],\n # # explorer\n # # maps symbols to their recognised block explorer URLs\n # [\n # [\n # 'AIO',\n # [\n # \"https://mainnet.aion.network\",\n # \"https://mainnet.aion.network/#/account/VAL\",\n # \"https://mainnet.aion.network/#/transaction/VAL\"\n # ]\n # ],\n # # ...\n # ],\n # # fee\n # # maps currencies to their withdrawal fees\n # [\n # [\"AAA\",[0,0]],\n # [\"ABS\",[0,131.3]],\n # [\"ADA\",[0,0.3]],\n # ],\n # ]\n #\n indexed = {\n 'sym': 
self.index_by(self.safe_value(response, 1, []), 0),\n 'label': self.index_by(self.safe_value(response, 2, []), 0),\n 'unit': self.index_by(self.safe_value(response, 3, []), 0),\n 'undl': self.index_by(self.safe_value(response, 4, []), 0),\n 'pool': self.index_by(self.safe_value(response, 5, []), 0),\n 'explorer': self.index_by(self.safe_value(response, 6, []), 0),\n 'fees': self.index_by(self.safe_value(response, 7, []), 0),\n }\n ids = self.safe_value(response, 0, [])\n result = {}\n for i in range(0, len(ids)):\n id = ids[i]\n if id.find('F0') >= 0:\n # we get a lot of F0 currencies, skip those\n continue\n code = self.safe_currency_code(id)\n label = self.safe_value(indexed['label'], id, [])\n name = self.safe_string(label, 1)\n pool = self.safe_value(indexed['pool'], id, [])\n type = self.safe_string(pool, 1)\n feeValues = self.safe_value(indexed['fees'], id, [])\n fees = self.safe_value(feeValues, 1, [])\n fee = self.safe_number(fees, 1)\n undl = self.safe_value(indexed['undl'], id, [])\n precision = '8' # default precision, todo: fix \"magic constants\"\n fid = 'f' + id\n result[code] = {\n 'id': fid,\n 'uppercaseId': id,\n 'code': code,\n 'info': [id, label, pool, feeValues, undl],\n 'type': type,\n 'name': name,\n 'active': True,\n 'deposit': None,\n 'withdraw': None,\n 'fee': fee,\n 'precision': int(precision),\n 'limits': {\n 'amount': {\n 'min': self.parse_number(self.parse_precision(precision)),\n 'max': None,\n },\n 'withdraw': {\n 'min': fee,\n 'max': None,\n },\n },\n 'networks': {},\n }\n networks = {}\n currencyNetworks = self.safe_value(response, 8, [])\n cleanId = id.replace('F0', '')\n for j in range(0, len(currencyNetworks)):\n pair = currencyNetworks[j]\n networkId = self.safe_string(pair, 0)\n currencyId = self.safe_string(self.safe_value(pair, 1, []), 0)\n if currencyId == cleanId:\n network = self.safe_network(networkId)\n networks[network] = {\n 'info': networkId,\n 'id': networkId.lower(),\n 'network': networkId,\n 'active': None,\n 'deposit': None,\n 'withdraw': None,\n 'fee': None,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n },\n }\n keysNetworks = list(networks.keys())\n networksLength = len(keysNetworks)\n if networksLength > 0:\n result[code]['networks'] = networks\n return result", "def add_currency(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.new_home_currency_input.text == '':\n self.root.ids.new_home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.new_foreign_currency_input.text == '':\n self.root.ids.new_foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.new_foreign_currency_input.text)\n self.root.ids.new_foreign_currency_input.hint_text = 'Add value comparatively to home currency'\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.new_foreign_currency_input.text = ''\n self.root.ids.new_foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.new_home_currency_input.text)\n self.root.ids.new_home_currency_input.hint_text = 'Add value comparatively to foreign currency'\n valid_home_amount = True\n except ValueError:\n self.root.ids.new_home_currency_input.text = 
''\n self.root.ids.new_home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n valid_input = home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True\n if self.root.ids.new_home_currency.text == '':\n valid_input = False\n self.root.ids.new_home_currency.hint_text = 'Must enter new currency name'\n elif self.root.ids.new_home_currency.text in self.currencies:\n valid_input = False\n self.root.ids.new_home_currency.text = ''\n self.root.ids.new_home_currency.hint_text = 'Currency already exists'\n else:\n self.root.ids.new_home_currency.hint_text = 'Enter currency name'\n if valid_input and home_amount > 0 and foreign_amount > 0:\n if self.new_foreign_currency != 'Select':\n self.currency_data.append([self.root.ids.new_home_currency.text, str(\n float(self.currency_data[find_nested_index(self.currency_data, 0, self.new_foreign_currency)][1]) *\n home_amount / foreign_amount)])\n self.currencies.append(self.root.ids.new_home_currency.text)\n self.root.ids.currency_output_label.text = 'Added currency: ' + self.root.ids.new_home_currency.text\n else:\n self.root.ids.currency_output_label.text = 'Must have a foreign currency'", "def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):\n if not context:\n context = {}\n default = super(account_voucher, self).onchange_amount(cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=context)\n if 'value' in default:\n amount = 'amount' in default['value'] and default['value']['amount'] or amount\n amount_in_word = amount_to_text(amount)\n default['value'].update({'amount_in_word':amount_in_word})\n if journal_id:\n allow_check_writing = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context).allow_check_writing\n default['value'].update({'allow_check':allow_check_writing})\n return default", "def cash_money(amount: float) -> dict:\n breakdown = {100: 0, 50: 0, 20: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0} # setup dict\n amount = round(amount, 2) # round to 2 decimal places\n\n for key in breakdown.keys(): # loop through dict\n try:\n denomination = int(amount // key)\n except (TypeError, ZeroDivisionError) as error:\n print('Cannot divide by that argument!')\n return\n else:\n breakdown[key] = denomination # set value to amount of that denomination\n amount -= denomination * key\n\n breakdown = {k: v for k, v in breakdown.items() if v != 0} # remove empty keys\n\n return breakdown", "def get_fee_pct(self, contract_type: str) -> Tuple[float, float]:\n if contract_type == 'forex':\n return (0.00002, 0.00002)\n elif contract_type == 'crypto':\n if self.CRYPTO_EXCHANGE == 'binance':\n if self.trade_volume < 50_000:\n return (.001, .001)\n elif self.trade_volume < 100_000:\n return (.0009, .0009)\n elif self.trade_volume < 5000_000:\n return (.0009, .0008)\n elif self.trade_volume < 1_000_000:\n return (.0008, .0007)\n elif self.trade_volume < 5_000_000:\n return (.0007, .0005)\n elif self.trade_volume < 10_000_000:\n return (.0006, .0004)\n elif self.trade_volume < 25_000_000:\n return (.0006, 0)\n elif self.trade_volume < 100_000_000:\n return (.0005, 0)\n elif self.trade_volume < 250_000_000:\n return (.0004, 0)\n elif self.trade_volume < 500_000_000:\n return (.0003, 0)\n else: return (.0002, 0)\n elif 
self.CRYPTO_EXCHANGE == 'kraken':\n if self.trade_volume < 50_000:\n return (.0026, .0016)\n elif self.trade_volume < 100_000:\n return (.0024, .0014)\n elif self.trade_volume < 250_000:\n return (.0022, .0012)\n elif self.trade_volume < 500_000:\n return (.002, .001)\n elif self.trade_volume < 1_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 2_500_000:\n return (.0016, .0006)\n elif self.trade_volume < 5_000_000:\n return (.0014, .0004)\n elif self.trade_volume < 10_000_000:\n return (.0012, .0002)\n else: return (.001, 0)\n elif self.CRYPTO_EXCHANGE == 'coinbase':\n if self.trade_volume < 10_000:\n return (.005, .005)\n elif self.trade_volume < 50_000:\n return (.0035, .0035)\n elif self.trade_volume < 100_000:\n return (.0025, .0015)\n elif self.trade_volume < 1_000_000:\n return (.002, .001)\n elif self.trade_volume < 10_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 50_000_000:\n return (.0015, .0005)\n elif self.trade_volume < 300_000_000:\n return (.0007, 0)\n elif self.trade_volume < 500_000_000:\n return (.0005, 0)\n else: return (.0004, 0)\n elif self.CRYPTO_EXCHANGE == 'robinhood':\n return (0.0001, 0.0001)\n return (0, 0)", "def cashmoney(amount: float)-> dict:\n denominations = {100: 0, 50: 0, 20: 0, 10: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0}\n if isinstance(amount, float) is False or amount <= 0:\n raise ValueError('money must be a positive floating point number.')\n else:\n amount *= 100\n for key in denominations.keys():\n denominations[key] = int(amount // (key * 100))\n amount %= (key * 100)\n\n return denominations", "def get_updated_currency(self, currency_array, main_currency,\n max_delta_days):\n url = 'http://rate.bot.com.tw/xrt/flcsv/0/day'\n\n # We do not want to update the main currency\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n _logger.debug(\"BOT currency rate service : connecting...\")\n try:\n url_open = urllib.request.urlopen(url)\n csvfile = csv.reader(io.StringIO(url_open.read().decode('utf-8-sig')), delimiter=',')\n url_open.close()\n except IOError:\n raise UserError(\n _('Web Service does not exist (%s)!') % url)\n\n next(csvfile)\n exchange = {}\n for row in csvfile:\n bid = float(row[3])\n ask = float(row[13])\n\n exchange[row[0]] = {\n 'bid': bid,\n 'ask': ask\n }\n\n self.check_rate_date(datetime.today(), max_delta_days)\n self.supported_currency_array = list(exchange.keys())\n\n self.supported_currency_array.append('TWD')\n _logger.debug(\"Supported currencies = %s \" %\n self.supported_currency_array)\n self.validate_cur(main_currency)\n if main_currency != 'TWD':\n main_rate = float(exchange[main_currency]['ask'])\n if main_currency in currency_array:\n currency_array.remove(main_currency)\n for curr in currency_array:\n self.validate_cur(curr)\n if curr == 'TWD':\n rate = main_rate\n else:\n if main_currency == 'TWD':\n rate = 1 / float(exchange[curr]['ask'])\n else:\n rate = main_rate / float(exchange[curr]['ask'])\n self.updated_currency[curr] = rate\n _logger.debug(\n \"Rate retrieved : 1 %s = %s %s\" % (main_currency, rate, curr)\n )\n return self.updated_currency, self.log_info", "def debitMoney(user_id, expense_group_id, cursor):\n query = \"\"\"\n SELECT a.user_id, SUM(a.amount) as amount\n FROM accured_expenses AS a, expense AS e\n WHERE a.expense_id = e.id AND e.user_id = ? AND e.expense_group_id = ? 
AND a.paid = 0\n GROUP BY a.user_id \n \"\"\"\n cursor.execute(query, (user_id, expense_group_id))\n return cursor.fetchall()", "def round(self, cr, uid, currency, amount):\n\t\tres = super(res_currency, self).round(cr,uid,currency,amount)\n\n\t\tuser_obj = self.pool.get('res.users')\n\t\tcurrency_obj = self.pool.get('res.currency')\n\t\tuser = user_obj.browse(cr, uid, uid, {})\n\n\t\tif (currency.id==user.company_id.currency_id.id):\n\t\t\tres = round(amount)\n\t\treturn res", "def currency_checker(start, new, ammount):\n\n newamount = str(round(c.convert(start, new, amount),2))\n return newamount", "def get_currency_unit(table):\n currency = \"\"\n unit = \"1\"\n if 'waehrung' in table.keys():\n if \"TEUR\" in table['waehrung']:\n currency = \"EUR\"\n unit = \"1000\"\n elif \"TDM\" in table['waehrung']:\n currency = \"DM\"\n unit = \"1000\"\n elif len(table['waehrung'].split(\" \")) > 2:\n currency = table['waehrung'].split(\" \")[-1]\n unit = table['waehrung'].split(\" \")[-2]\n if unit == \"Mio\":\n unit = \"1000000\"\n if unit == \"Mrd\":\n unit = \"1000000000\"\n elif len(table['waehrung'].split(\" \")) > 1:\n currency = table['waehrung'].split(\" \")[-1]\n elif len(table['waehrung']) > 0:\n currency = table['waehrung']\n return currency, unit", "def get_currency_exchange_rate(self, from_currency, to_currency):\n _FUNCTION_KEY = 'CURRENCY_EXCHANGE_RATE'\n return _FUNCTION_KEY, 'Realtime Currency Exchange Rate', None", "def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))", "def convert(self, obj, amount=None):\r\n if isinstance(obj, Currency):\r\n if not amount:\r\n # Convert the full amount\r\n amount_to_convert = obj.amount\r\n else:\r\n if type(amount) in (int, float):\r\n if amount > obj.amount:\r\n raise CurrencyException(\"There are not that many to convert\")\r\n if amount < 0:\r\n raise CurrencyException(\"Cannot convert negative amounts\")\r\n else:\r\n amount_to_convert = amount\r\n else:\r\n amount_to_convert = 0\r\n\r\n # Subtract converted amount\r\n obj.amount -= amount_to_convert\r\n\r\n if obj.value > self.value:\r\n modifier = obj.value / self.value\r\n self.amount += amount_to_convert * modifier\r\n print \"Converted {} {} --> {} {}\".format(\r\n amount_to_convert, obj.name, amount_to_convert * modifier, self.name)\r\n elif obj.value < self.value:\r\n modifier = self.value / obj.value\r\n self.amount += amount_to_convert / modifier\r\n print \"Converted {} {} --> {} {}\".format(\r\n amount_to_convert, obj.name, amount_to_convert / modifier, self.name)\r\n else:\r\n modifier = 1\r\n self.amount += amount_to_convert\r\n print \"Converted {} {} --> {} {}\".format(\r\n amount_to_convert, obj.name, amount_to_convert, self.name)\r\n\r\n else:\r\n return NotImplemented", "def _convert(self, from_amount, to_currency, company, date, round=True):\n self, to_currency = self or to_currency, to_currency or self\n assert self, \"convert amount from unknown currency\"\n assert to_currency, \"convert amount to unknown currency\"\n assert company, \"convert amount from unknown company\"\n assert date, \"convert amount from unknown date\"\n # apply conversion rate\n if self == to_currency:\n to_amount = from_amount\n else:\n to_amount = from_amount * self._get_conversion_rate(self, to_currency)\n # apply rounding\n return to_currency.round(to_amount) if round 
else to_amount", "def test_usd(self):\n cash_accounts = self.port_values['cash_accounts']\n cash_account = self.extract_cash_account(cash_accounts, 'USD')\n self.assertNotEqual(cash_account, {})\n\n self.assertEqual(cash_account['account_num'], '012-875-0-804911-9')\n self.assertEqual(cash_account['account_type'], 'Current Account')\n self.assertEqual(cash_account['bank'], 'Bank of China (Hong Kong) Ltd')\n self.assertEqual(cash_account['date'], datetime.datetime(2015,12,10))\n self.assertAlmostEqual(cash_account['balance'], 8298021.81)\n self.assertAlmostEqual(cash_account['fx_rate'], 7.7502)\n self.assertAlmostEqual(cash_account['local_currency_equivalent'], 64311328.63)", "def _amount(amount, asset='HBD'):\n assert asset == 'HBD', 'unhandled asset %s' % asset\n return \"%.3f HBD\" % amount", "def parse_currency_args(currency):\n if currency and len(currency) < 3:\n currency = next((element['cc'] for element in symbols if element['symbol'] == currency), currency)\n return currency", "def compute(self):\n\t\tmontant = self.spn_montant.value() #recuperation de la valeur de la spn\n\t\tdevise_from = self.cbb_devisesFrom.currentText() #recuperation de la valeur de la cbb\n\t\tdevise_to = self.cbb_devisesTo.currentText()\n\t\t\n\t\t# on effectue la conversion grace a currency_converter\n\t\t# on fait une gestion d'erreur pour eviter les conversions non trouvees\n\t\ttry :\n\t\t\t\"\"\"on essaie\"\"\"\n\t\t\tresultat = self.c.convert(montant, devise_from, devise_to)\n\t\t\n\t\texcept currency_converter.currency_converter.RateNotFoundError :\n\t\t\t\"\"\"si erreur\"\"\"\n\t\t\tprint(\"le taux de conversion n'a pas ete trouve\")\n\t\t\n\t\telse :\n\t\t\t\"\"\"si pas d'erreur\"\"\"\n\t\t\tself.spn_montantConverti.setValue(resultat) #affichage dans la cbb", "def getCurrencySymbol():", "def convert(currency_from, currency_to, amount):\n symbol = codes.get_symbol(currency_to)\n try:\n print('to, from, amt ==', currency_from, currency_to, amount)\n converted_amt = rates.convert(currency_from, currency_to, amount)\n except RatesNotAvailableError:\n return None\n return f'{symbol} {round(converted_amt, 2)}'", "def get_amount():\n conn = None\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n\n #tas\n raw={}\n raw['plan']={}\n raw['actual']={}\n\n cur.execute(\"SELECT slc.*,plc.amount as amount_plan,ppm.amount as price \\\n ,to_char((slc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_actual \\\n , to_char((plc.amount*ppm.amount)/aco.amount_actual_coal,'999.999') as cost_plan \\\n ,aco.amount_actual_coal \\\n FROM vw_tta_cps_sum_land_clearing_now AS slc \\\n LEFT JOIN vw_tta_cps_price_per_month AS ppm \\\n ON slc.int_month=ppm.month \\\n AND slc.year_id=ppm.year_id \\\n LEFT JOIN vw_tta_cps_plan_land_clearing AS plc \\\n ON slc.periode_month_id=plc.periode_month_id \\\n AND slc.land_clearing_location_id=plc.land_clearing_location_id \\\n LEFT JOIN vw_tta_cps_sr_all_per_month_plan_actual as aco \\\n ON aco.int_month=slc.int_month \\\n WHERE ppm.jenis_price=10 \\\n AND aco.plan_id=1\")\n rowcount=cur.rowcount\n print(\"The number of row: \", rowcount)\n row = cur.fetchone()\n counter=0\n item={}\n\n if rowcount>0:\n f=open('../../data_cost_per_ton.csv','w')\n f.write('AREA,PLAN,ACTUAL\\n')\n while row is not None:\n #print(row)\n raw[\"plan\"]=row[11]\n raw[\"actual\"]=row[10]\n f.write(str('RAW')+','+str(raw[\"plan\"])+','+str(raw[\"actual\"])+\"\\n\")\n\n row = cur.fetchone()\n\n if rowcount>0:\n f.close()\n cur.close()\n\n 
print(str(datetime.datetime.now())+' '+str(rowcount)+' row updated')\n except (Exception, psycopg2.DatabaseError) as error:\n print(str(datetime.datetime.now())+' '+str(error))\n finally:\n if conn is not None:\n conn.close()", "def money_from_args(args, fromobj):\n allcoins = (\"coins\", \"coin\", \"silver\", \"money\", \"pieces\", \"all\")\n currency = fromobj.item_data.currency\n currency = float(currency)\n currency = round(currency, 2)\n if args in allcoins:\n val = currency\n else:\n arglist = args.split()\n val = float(arglist[0])\n val = round(val, 2)\n vals = (val, currency)\n return vals", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def usdToBtc(dollar, bitcoin):\n global btc\n global usd\n if usd>dollar:\n usd-=dollar\n btc+=bitcoin\n return True\n return False", "def bond_price(fv, c,n,m,r):\n return sum([a*b for a,b in zip(discount_factors(r,n,m),bond_cashflows(fv, c, n, m))])", "def _get_currency_rate(currency):\n response = requests.get(f'{config(\"OPENEXCHANGERATES_URL\")}')\n if not response.ok:\n # log\n # can handle exception in better way later\n raise Exception(\n f'currency conversion api not working {response.text}')\n rates = response.json().get('rates')\n currency_rate = rates.get(currency.upper(), None)\n if not currency_rate:\n raise ValueError(f'Given currency conversion rate not found')\n return currency_rate", "def bitcoin():\r\n # get the bitcoin price, until 2 decimal number, based on US dollar\r\n bitcoin_info = requests.get(\"https://api.binance.com/api/v3/avgPrice?symbol=BTCUSDT\")\r\n btcusdt = format(float(bitcoin_info.json()[\"price\"]), '.2f')\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"bitcoin : $ \" + btcusdt\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" دلار\" + btcusdt + \"بیت کوین : \"", "def map_to_currency(self, raw_currency: HitbtcRawCurrencyModel) -> HitbtcCurrencyModel:\n\n id_ = raw_currency[\"id\"]\n full_name = raw_currency[\"fullName\"]\n crypto = raw_currency[\"crypto\"]\n payin_enabled = raw_currency[\"payinEnabled\"]\n payin_payment_id = raw_currency[\"payinPaymentId\"]\n payin_confirmations = raw_currency[\"payinConfirmations\"]\n payout_enabled = raw_currency[\"payoutEnabled\"]\n payout_is_payment_id = raw_currency[\"payoutIsPaymentId\"]\n transfer_enabled = raw_currency[\"transferEnabled\"]\n delisted = raw_currency[\"delisted\"]\n precision_payout = int(raw_currency[\"precisionPayout\"])\n precision_transfer = int(raw_currency[\"precisionTransfer\"])\n raw_payout_fee = raw_currency.get(\"payoutFee\")\n payout_fee = Decimal(\n raw_payout_fee) if raw_payout_fee is not None else raw_payout_fee\n\n currency = HitbtcCurrencyModel(\n id=id_,\n full_name=full_name,\n crypto=crypto,\n payin_enabled=payin_enabled,\n payin_payment_id=payin_payment_id,\n payin_confirmations=payin_confirmations,\n payout_enabled=payout_enabled,\n payout_is_payment_id=payout_is_payment_id,\n transfer_enabled=transfer_enabled,\n delisted=delisted,\n payout_fee=payout_fee,\n precision_payout=precision_payout,\n precision_transfer=precision_transfer)\n\n return currency", "def returnCurrencies(self):\n pass", "def __init__(self,currency = '$',amount = 0.00, currencyFullName = \"United States Dollar\"):\n self.__currency = currency\n self.__amount = amount\n self.__currencyFullName = currencyFullName", "def convert_to_currency(self, value):\n return (Decimal(value) * 
Decimal(self.kurzMnozstvi) / \\\n Decimal(self.nbStred))", "def money_digitalization(raw_data, target_columns=['First Place Prize', 'Total Prize']):\r\n output = raw_data.copy()\r\n\r\n for column in target_columns:\r\n for i in range(len(raw_data)):\r\n money = raw_data.loc[i, column].replace('(', '').replace(')', '')\r\n if money[0] == '$':\r\n output.loc[i, column] = float(money[1:].replace('.', '').replace(',', '.'))\r\n elif money[:3] == 'US$':\r\n output.loc[i, column] = float(money[3:].replace('.', '').replace(',', '.'))\r\n else:\r\n print('money data error')\r\n exit(1)\r\n\r\n return output", "def calculate_debt(acc, key, value):\n if key == 'transactions':\n for amount in value:\n acc += amount \n\n return acc", "def test_create_currency_using_post(self):\n pass", "def test_convert_amounts(self):\n pass", "def __init__(self, value=None, exchange=None):\n self.usd_values = []\n\n if isinstance(value, CurrencyValue):\n if exchange is None:\n self.local_value = value.local_value\n self.usd_values = value.usd_values\n return\n else:\n raise Exception(\"Invalid arguments to constructor, passing in CurrencyValue and exchange rate\")\n\n # if we don't have an exchange rate, our value is in local currency, just save it away\n if exchange is None:\n self.local_value = value\n\n # otherwise, this is a usd value\n else:\n self.local_value = None\n \n # so add this value / exchange pair to our list of usd values\n if not value is None:\n self.usd_values.append([value, exchange])", "def convert_currency(self, new_code, rate):\n if not isinstance(rate, six.integer_types + (float, Decimal)):\n raise ValueError(\"You can only apply an integer, long, float or Decimal factor to an Amount\")\n return Amount.from_code_and_minor(\n new_code,\n int(Decimal(self.value * rate).to_integral(ROUND_HALF_UP)),\n )", "def _get_amount_with_operands(\n self, cr, uid, ids, ifrs_l, period_info=None, fiscalyear=None,\n exchange_date=None, currency_wizard=None, number_month=None,\n target_move=None, pdx=None, undefined=None, two=None,\n one_per=False, bag=None, context=None):\n\n context = context and dict(context) or {}\n\n if not number_month:\n context = {'whole_fy': True}\n\n res = self._get_amount_value(\n cr, uid, ids, ifrs_l, period_info, fiscalyear, exchange_date,\n currency_wizard, number_month, target_move, pdx, undefined, two,\n one_per=one_per, bag=bag, context=context)\n\n res = ifrs_l.inv_sign and (-1.0 * res) or res\n bag[ifrs_l.id]['ytd'] = res\n\n return res", "def CallValue(contract : 'Contract') -> float:\n return Option.__call_value(contract.underlyingPrice, contract.strikePrice, contract.interestRate / 100, contract.daysToExpiration / 365, contract.volatility / 100)", "def get_outbound_statements_grid_amount_billing_currency(self):\n return self.get_specific_column_value_from_grid(self.outbound_statements_grid_div_id, self.outbound_statements_grid_row_count, self.amount_billing_currency_column_name)", "def dollar():\r\n price = give_price_website_2(\"https://www.tgju.org/%D9%82%DB%8C%D9%85%D8%AA-%D8%AF%D9%84%D8%A7%D8%B1\")\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"dollar : \" + format(price/10000, '.2f') + \" kTomans\"\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + \"دلار : \"", "def get_sale_price(self):\n Currency = Pool().get('currency.currency')\n Company = Pool().get('company.company')\n\n if self.carrier_cost_method != 'gls':\n return super(Carrier, self).get_sale_price() 
# pragma: no cover\n\n currency, = Currency.search([('code', '=', 'EUR')])\n company = Transaction().context.get('company')\n\n if company:\n currency = Company(company).currency\n\n return Decimal('0'), currency.id", "def parse_rate():\n try:\n response = requests.get(ecb_url)\n except Exception as e:\n return {\"error\": \"error occurred while accessing www.ecb.europa.eu: {}\".format(e)}, True\n else:\n currency_xml = response.content.decode()\n root = ET.fromstring(currency_xml)\n currencies_list = [currency.attrib.get('currency') for currency in root.iter(cube) if currency.attrib.get('currency')]\n rates_list = [float(currency.attrib.get('rate')) for currency in root.iter(cube) if currency.attrib.get('rate')]\n result = dict(zip(currencies_list, rates_list))\n result['EUR'] = float(1)\n return result, False", "def get_currencies(base=\"EUR\"):\n currency_dict = {}\n for currency in (\"EUR\", \"USD\", \"GBP\", \"CHF\", \"CAD\", \"JPY\"):\n url = 'http://finance.yahoo.com/d/quotes.csv?&s=' + currency + '=X&f=p'\n currency_dict[currency] = float(urllib.request.urlopen(url).read())\n factor = currency_dict[base]\n for currency in currency_dict:\n currency_dict[currency] /= factor\n return currency_dict", "def withdraw_money(c_id, amount):\n return ar.withdraw_money(c_id, amount)", "def balance(self, acct_id):\n acct = self.data.get(acct_id)\n if acct:\n bal = float(acct[\"due\"]) - float(acct[\"paid\"])\n return f\"${bal:.2f}\"\n return None", "def total_after_fees(amount, fees=None, is_payer=True):\n if not isinstance(fees, (tuple, list)):\n msg = 'fees must be of type list or tuple'\n raise TypeError(msg)\n if not isinstance(amount, Decimal):\n msg = 'Transaction amount must be of type decimal.Decimal.'\n raise TypeError(msg)\n\n stripe_fee = [f for f in fees if f.name == 'stripe-transaction']\n stripe_fee = stripe_fee[0] if stripe_fee else None\n other_fees = [f for f in fees if f.name != 'stripe-transaction']\n\n ledger = {'charge': {'initial': amount, },\n 'fees': list()}\n\n charge_amount = amount\n multiplier = 1 if is_payer else -1\n for other_fee in other_fees:\n fee_total = other_fee.flat + other_fee.fractional_pct * amount\n fee_total = fee_total\n charge_amount += multiplier * fee_total\n entry = {'name': other_fee.name,\n 'id': other_fee.id,\n 'fee': fee_total.quantize(QUANTIZE_DOLLARS)}\n ledger['fees'].append(entry)\n\n if stripe_fee is not None:\n # Percentages are stored as percentages in the db, convert it to a decimal\n new_charge_amount = (stripe_fee.flat + charge_amount) / (Decimal('1.0000') - stripe_fee.fractional_pct)\n stripe_fee_amount = new_charge_amount - charge_amount\n entry = {'name': stripe_fee.name,\n 'id': stripe_fee.id,\n 'fee': stripe_fee_amount.quantize(QUANTIZE_DOLLARS)}\n ledger['fees'].append(entry)\n charge_amount = new_charge_amount\n\n ledger['charge']['final'] = charge_amount.quantize(QUANTIZE_DOLLARS)\n return ledger", "def clean_currency(x):\n \n if isinstance(x, str):\n x=x.replace(\"*\",\"\")\n x=x.replace(\",\",\"\")\n if x=='':\n return(0)\n elif x[0]!='$':\n return(0)\n else:\n x=x.split(' ')[0]\n x=x.replace('$',\"\")\n return float(x)\n return(x)", "def __init__(self, currency, display_currency):\n self.currency = currency\n self.display_currency = display_currency\n self.values = None", "def convert_to_dollars(self):\n return 'Currency is', self.currency_type", "def test_get_currency_all_using_get(self):\n pass", "def format_currency(value, currency=None, show_if_zero=False):\n if not value and not show_if_zero:\n return ''\n if value == 
0.0:\n return g.ledger.quantize(Decimal(0.0), currency)\n return g.ledger.quantize(value, currency)", "def cryptocurrency_deposit_request(self, walletId, currency):\n return", "def get_currency_feature_value(self):\n curr_feature = AssetFeature.Standard.CURRENCY.get_object()\n return AssetFeatureValue.objects.get_or_create(name=self.currency, feature=curr_feature)[0]", "def get_currencyinfo(table):\n cuinfo = []\n for item in table:\n if \"Beschäft\" in item:\n continue\n currency = \"\"\n unit = \"1\"\n if \"TEUR\" in item:\n currency = \"EUR\"\n unit = \"1000\"\n elif \"TDM\" in item:\n currency = \"DM\"\n unit = \"1000\"\n elif \"%\" in item:\n unit = \"%\"\n elif len(item.split(\"(\")) > 1:\n currency = item.split(\"(\")[-1].split(\" \")[-1].replace(\")\", \"\").replace(\",\", \"\").strip()\n if len(item.split(\"(\")[-1].split(\" \")) > 1:\n unit = item.split(\"(\")[-1].split(\" \")[-2]\n if \"Mio\" in item:\n unit = \"1000000\"\n if \"Mrd\" in item:\n unit = \"1000000000\"\n else:\n currency = item\n cuinfo.append({'currency': currency, 'unit': unit,'text': item.split(\"(\")[0]})\n return cuinfo", "def getCurrency(self):\n return self.base.get(\"currency\", [])", "def currency(self):\n return self._dict.get('currency')", "def dollars(cls, amount: int) -> 'Money':\n return cls(amount, \"USD\")" ]
[ "0.76655096", "0.6802662", "0.67237234", "0.6577972", "0.65410954", "0.6217159", "0.620707", "0.61840576", "0.59770834", "0.59099656", "0.5873708", "0.5872332", "0.58294046", "0.58154744", "0.578612", "0.57646793", "0.5753153", "0.57461447", "0.57427317", "0.57381535", "0.5725122", "0.57149595", "0.56717056", "0.56458056", "0.56367564", "0.56234604", "0.55689025", "0.5567864", "0.55324084", "0.5480228", "0.54771745", "0.5458446", "0.5449248", "0.54388773", "0.54335684", "0.54200584", "0.5414058", "0.54124606", "0.5406653", "0.54057574", "0.5365175", "0.5344278", "0.53399026", "0.53272593", "0.53128964", "0.5298518", "0.52870816", "0.5279076", "0.5234669", "0.52346504", "0.52289385", "0.5228423", "0.5219321", "0.5218161", "0.52102053", "0.5203011", "0.5202275", "0.51991355", "0.5195421", "0.5186528", "0.51765996", "0.5162951", "0.51571876", "0.51481366", "0.51462907", "0.51389074", "0.5137944", "0.5133704", "0.5130975", "0.5123873", "0.5123118", "0.51227915", "0.5121445", "0.51113325", "0.50984704", "0.50920963", "0.509072", "0.506873", "0.5066575", "0.5059936", "0.50457394", "0.50384", "0.50306296", "0.5028362", "0.50245994", "0.5022215", "0.50188017", "0.50179744", "0.5015105", "0.5014552", "0.50130385", "0.50101405", "0.5004124", "0.49975422", "0.4996876", "0.49927357", "0.4991803", "0.4987694", "0.49821424", "0.4980811" ]
0.71101564
1
Aim to create a sidebar card.
def test_sidebar_card_object(self):
    target_dict = {
        "itemType": "CARD",
        "align": "top",
        "showCloseAction": True,
        "showActionButton": True,
        "actionButtonUri": "https://changelog.md",
        "actionButtonUriTarget": "_new",
        "actionButtonName": "Discover more",
        "actionButtonName_nl": "Ontdek meer",
        "displayText": "This project ...",
        "displayText_nl": "Dit project ...",
        "displayTextAlign": "left",
        "showBackground": True,
        "minimumAccessLevel": "is_member",
        "maximumAccessLevel": "is_supervisor",
    }
    sidebar_card = SideBarCard(
        side_bar_manager=self.manager,
        alignment=SidebarItemAlignment.TOP,
        show_close_action=True,
        show_action_button=True,
        action_button_name="Discover more",
        action_button_uri="https://changelog.md",
        action_button_uri_target=URITarget.NEW,
        display_text="This project ...",
        display_text_align=Alignment.LEFT,
        show_background=True,
        minimum_access_level=SidebarAccessLevelOptions.IS_MEMBER,
        maximum_access_level=SidebarAccessLevelOptions.IS_SUPERVISOR,
        displayText_nl="Dit project ...",
        actionButtonName_nl="Ontdek meer",
    )
    card_dict = sidebar_card.as_dict()
    self.maxDiff = None
    self.assertDictEqual(target_dict, card_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def card(self):\r\n return Card(self)", "def card(self):\r\n return Card(self)", "def create_uno_deck():\r\n pass", "def card_factory(rank,suit):\n pass", "def make_card_surface(self):\n\n surf = pygame.Surface((card_dimensions))\n surf.fill(CARD_OUTLINE)\n pygame.draw.rect(surf, NORMAL_MONSTER,(central_padding, central_padding,\n surf.get_width()-central_padding*2,\n surf.get_height()-central_padding*2))\n picture_outline = pygame.Surface((self.picture.get_width()+2,\n self.picture.get_height()+2))\n picture_outline.fill(CARD_OUTLINE)\n picture_outline.blit(self.picture,(1,1))\n surf.blit(picture_outline, (central_padding-1,surf.get_height()*1/7))\n util.draw_text(self.name, (central_padding*1.5, central_padding*1.5), surf)\n util.draw_text(\"ATK: \"+str(self.stats[0]), (central_padding*2, surf.get_height()*0.73), surf)\n util.draw_text(\"DEF: \"+str(self.stats[1]), (central_padding*2, surf.get_height()*0.83), surf)\n self.spr_card = surf", "def mock_card():\n return Card(Suit.SPADE, 1)", "def _prepare_card(self, card_id, player_id=None):\n if player_id is None:\n player = self.p0\n else:\n player = self.game.get_player(player_id)\n player.add_mana(10, 'T')\n card, _ = player.generate(Zone.Hand, 'last', card_id)\n return card", "def make_card(cls, suit, pip):\n return Card(suit, pip)", "def createCard(self,id,name):\n card = Card(id,name)\n self.cards[id] = card\n print('Created Card:'+id)", "def draw_card(self,card):\n self.hand.append(card)", "def create_card(keyword, buzzword_1, buzzword_2, buzzword_3, \n buzzword_4, buzzword_5, room_id=None, player_created=False):\n\n card = Card(room_id=room_id, keyword=keyword, buzzword_1=buzzword_1, \n buzzword_2=buzzword_2, buzzword_3=buzzword_3, \n buzzword_4=buzzword_4, buzzword_5=buzzword_5, \n player_created=player_created)\n\n db.session.add(card)\n db.session.commit()\n\n return card", "def define_card(card):\n try:\n value = define_card_value(card[0])\n color = define_card_color(card[1])\n return Card(value, color)\n except AttributeError:\n pass", "def deck_create_card(deck_id):\n log_request(request)\n username = request.json['username']\n sId = request.json['session_id']\n sideA = request.json['sideA']\n sideB = request.json['sideB']\n\n # verify session\n if not user.verify(username, sId):\n return jsonify({'error' : 101})\n \n dId = deck.get_id(deck_id)\n \n # check that the deck exists\n if not deck.exists(dId):\n return jsonify({'error' : 300})\n\n ret = card.new(dId, sideA, sideB)\n \n return jsonify({'error' : 0})", "def card(self, card_id):\r\n return Card(self, card_id)", "def cards_to_deal(cls, context={}):\n\t\traise NotImplementedError()", "def add_card(self, card):\n # This basically means \"the previous card was the 2nd so you can't double/split anymore\"\n if self.can_double and self.get_card_value(self.first_card) != self.count:\n self.can_double = False\n self.can_split = False\n # This is the second card and it's the same as the first, you can now split!\n if self.can_double and self.first_card == card:\n self.can_split = True\n if self.first_card == 0:\n self.first_card = card\n if card == 1:\n self.soft = True\n self.count += self.get_card_value(card)\n # Unsoften if you have an Ace worth 11 and it would make you bust\n if self.count > 21 and self.soft:\n self.soft = False\n self.count -= 10", "def create_hand(self):\n print(\"card len\")\n print(len(self.cards))\n\n my_hand = Hand()\n for index in range(5):\n my_hand.add_card(self.cards.pop())\n\n print(\"card len\")\n print(len(self.cards))\n print(\"hand len\")\n 
print(len(my_hand.cards))\n return my_hand", "def __init__(self, cards):\n self.cards = cards", "def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)", "def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)", "def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)", "def _create_card(self, model: str) -> Any:\n return self.anki.notes.Note(self.collection, self._get_card_model(model))", "def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)", "def hand():\n return PokerHand()", "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n if response.card:\n return\n response.card = ui.SimpleCard(\n title='Daily Dungeon',\n content=convert_speech_to_text(response.output_speech.ssml)\n )", "def draw_a_card(cards):\n import random\n card_drawn = random.choices(card_deck)\n cards.append(card_drawn[0])\n return", "def __init__(self):\n # start with the wild cards\n self.pile = [Card.wild_card(), Card.wild_card()]\n for i in range(Card.num_values):\n for j in range(Card.num_colors):\n for k in range(Card.num_shapes):\n self.pile.append(Card(i + 1, j + 1, k + 1, False))\n assert len(self.pile) == Pile.num_cards\n shuffle(self.pile)", "def create_card(self, trello, name):\n\n trello_card = trello.create_card(self._list_data, name)\n new_card = Card(trello, self, trello_card)\n self._cards.append(new_card)\n\n return new_card", "def __init__(self):\n self.deck = []\n for n in range(1, 14):\n card1 = Card(n, \"diamond\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"spade\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"heart\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"club\")\n self.deck.append(card1)", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n p1 = double_or_blank(card, 3, 'p1', 0.0)\n pressures = [\n p1,\n double_or_blank(card, 4, 'p2'),\n double_or_blank(card, 5, 'p3'),\n double_or_blank(card, 6, 'p4')]\n\n eids = [eid]\n g1_thru = integer_string_or_blank(card, 7, 'g1/THRU')\n if g1_thru == 'THRU' and integer_or_blank(card, 8, 'eid2'):\n # alternate form\n eid2 = integer(card, 8, 'eid2')\n if eid2:\n eids = list(unique(\n expand_thru([eid, 'THRU', eid2], set_fields=False, sort_fields=False)\n ))\n g1 = None\n g34 = None\n else:\n # standard form\n eids = [eid]\n g1 = integer_or_blank(card, 7, 'g1')\n g34 = integer_or_blank(card, 8, 'g34')\n\n # If both (CID, N1, n2, N3) and LDIR are blank, then the default is\n # LDIR=NORM.\n cid = integer_or_blank(card, 9, 'cid')\n n1 = double_or_blank(card, 10, 'N1', 0.)\n n2 = double_or_blank(card, 11, 'N2', 0.)\n n3 = double_or_blank(card, 12, 'N3', 0.)\n nvector = array([n1, n2, n3])\n\n surf_or_line = string_or_blank(card, 13, 'sorl', 'SURF')\n 
line_load_dir = string_or_blank(card, 14, 'ldir', 'NORM')\n assert len(card) <= 15, f'len(PLOAD4 card) = {len(card):d}\\ncard={card}'\n return PLOAD4(sid, eids, pressures, g1, g34, cid, nvector,\n surf_or_line, line_load_dir, comment=comment)", "def hide_card(self):\n try:\n self.hidden_card_value = self.hand[1]\n self.hand[1] = Card()\n except IndexError:\n print('The dealer does not have enough cards!')", "def __init__(self, card_one, from_split=False, player=None, **kwargs):\n if isinstance(card_one, Card):\n self.card_one = card_one\n else:\n raise TypeError(\"'card_one' must be a Card object.\")\n\n if player:\n if isinstance(player, Player):\n self.player = player\n else:\n raise TypeError(\"'player' must be a Player object.\")\n self.wager = self.player.wager(**kwargs)\n self.cards = [card_one]\n self.split = False\n self.soft = card_one.rank == 14\n self.stand = False\n self.bust = False\n self.blackjack = False\n self.from_split = from_split\n self.insurance = False\n self.total = card_one.value\n self.surrender = False\n self.double_down = False\n # this is used to determine whether to add 11 or 1 when delt an ace\n self.non_ace_total = 0\n self.num_aces = 1 * self.soft\n self.num_hard_aces = self.num_aces", "def test_create_recipe_card(self):\n pass", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n scale = string(card, 2, 'scale')\n x1_npoints = integer_or_double(card, 3, 'x1/npoints')\n if isinstance(x1_npoints, integer_types):\n npoints = x1_npoints\n assert 0 < npoints < 7, 'CBARAO npoints=%r must be 1-6' % npoints\n x1 = double(card, 4, 'x1')\n delta_x = double(card, 5, 'delta_x')\n x = np.linspace(x1, x1 + delta_x * (npoints-1), num=npoints)\n assert len(x) == npoints, x\n else:\n x = [\n x1_npoints,\n double_or_blank(card, 4, 'x2'),\n double_or_blank(card, 5, 'x3'),\n double_or_blank(card, 6, 'x4'),\n double_or_blank(card, 7, 'x5'),\n double_or_blank(card, 8, 'x6'),\n ]\n x = [xi for xi in x if xi is not None]\n assert len(card) <= 9, f'len(CBARAO card) = {len(card):d}\\ncard={card}'\n return CBARAO(eid, scale, x, comment=comment)", "def create_deck():\r\n deck = []\r\n faces = [2,3,4,5,6,7,8,9,10,\r\n 'Jack','Queen','King','Ace']\r\n suits = ['Spades', 'Diamonds', 'Clubs', 'Hearts']\r\n for face in faces:\r\n for suit in suits:\r\n # Creates a card-tuple and adds it to the deck.\r\n deck.append((face, suit))\r\n \r\n return deck", "def newDeal(self):\n self.card1 = Card(1, 'c')\n self.card2 = Card(4, 'd')\n self.stateLabel[\"text\"] = \"\"\n self.refreshImages()", "def reveal_card(self):\n self.hand[1] = self.hidden_card_value\n self.hidden_card_value = Card()", "def generate_quote():\n headers = {\n 'accept': 'text/plain'\n }\n response = requests.get(QUOTE_URL, headers=headers)\n quote = response.json()['starWarsQuote']\n card = make_card(title=quote, colour=0xF5C518, thumbnail=THUMBNAIL)\n return card", "def draw_card(self, card):\n self.current_hand.append(card)", "def generateCards(filename):\n infile = open(filename)\n\n header = pf.Header()\n\n # Loop through each line, converting to a pyfits card\n for line in infile.readlines():\n line = line.rstrip('\\n')\n line = line.strip()\n if(line == 'END'):\n break\n else:\n c = pf.Card().fromstring(line)\n c.verify() # This will attempt to fix issuesx[1]\n header.append(c)\n \n return header.cards", "def generate(v, vendors):\n return vendors[v].new_card()", "def draw_card(dealer,player):\n # hidden_img = Image(img_path+\"back.png\")\n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n ix = 30\n\n 
bj_board.clear()\n for card in dealer:\n if card.state:\n card.image.moveTo(x0, y0)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"Back.png\")\n img.moveTo(x0, y0)\n img.setDepth(depth)\n bj_board.add(img)\n x0 += ix\n \n for card in player:\n if card.state:\n card.image.moveTo(x1, y1)\n card.image.setDepth(depth)\n bj_board.add(card.image)\n else:\n img = Image(img_path+\"back.png\")\n img.moveTo(x1, y1)\n img.setDepth(depth)\n bj_board.add(img)\n x1 += ix", "def draw(deck, hand): \r\n # Remakes deck if it becomes empty.\r\n if len(deck) == 0:\r\n deck = create_deck()\r\n \r\n i = random.randint(0,len(deck)-1)\r\n card = deck.pop(i)\r\n hand.append(card)\r\n \r\n return deck, hand", "async def cards_per_hand(ctx):\n message = NNB.cards_per_hand()\n await ctx.send(message)", "def new_card(self, card_id):\n\n self.last_action_ts = pygame.time.get_ticks()\n # self.background.hidden=True\n self.background.image_view.image = ui.get_image(card_id, '/home/pi/music/images/')\n self.showing_splash = False # we no longer show the splash screen image\n # self.background.hidden=False\n self.progress_view.hidden = False # we play a song, so show progress bar\n self.show_buttons() # show play controll buttons", "def test_for_dealing_card():\n deck1 = Shoe()\n deck1.deal_card()\n assert len(deck1.deck) == 51", "def __init__(self):\n self.deckcards = []\n for suit_by_number in range(4):\n for rank_by_number in range(1, 14):\n card = card_create.Createcard(suit_by_number, rank_by_number)\n self.deckcards.append(card)", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n gc = integer_or_blank(card, 5, 'gc')\n\n # card, eid, x1_default, x2_default, x3_default\n x, g0 = init_x_g0_cbeam3(card, eid, 0., 0., 0.)\n wa = np.array([double_or_blank(card, 9, 'w1a', 0.0),\n double_or_blank(card, 10, 'w2a', 0.0),\n double_or_blank(card, 11, 'w3a', 0.0)], dtype='float64')\n\n wb = np.array([double_or_blank(card, 12, 'w1b', 0.0),\n double_or_blank(card, 13, 'w2b', 0.0),\n double_or_blank(card, 14, 'w3b', 0.0)], dtype='float64')\n\n wc = np.array([double_or_blank(card, 15, 'w1c', 0.0),\n double_or_blank(card, 16, 'w2c', 0.0),\n double_or_blank(card, 17, 'w3c', 0.0)], dtype='float64')\n\n tw = np.array([double_or_blank(card, 18, 'twa', 0.),\n double_or_blank(card, 19, 'twb', 0.),\n double_or_blank(card, 20, 'twc', 0.)], dtype='float64')\n\n # TODO: what are the defaults?\n s = np.array([integer_or_blank(card, 21, 'sa', -1),\n integer_or_blank(card, 22, 'sb', -1),\n integer_or_blank(card, 23, 'sc', -1)], dtype='int32')\n assert len(card) <= 24, f'len(CBEAM3 card) = {len(card):d}\\ncard={card}'\n return CBEAM3(eid, pid, [ga, gb, gc], x, g0,\n wa=wa, wb=wb, wc=wc, tw=tw, s=s, comment=comment)", "def cards(self):\r\n return Cards(self)", "def test_create_card(self):\n data = {\n 'first_name': 'Ty',\n 'last_name': 'Cobb',\n 'variety': 'green portrait'\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 200\n\n assert data['first_name'] == resp.json['first_name']\n assert data['last_name'] == resp.json['last_name']\n assert data['variety'] == resp.json['variety']", "def make_param_card(self, param_card):\n ## \\todo explain what param card is\n logger.debug(\"Making param card '%s'\" % param_card)\n\n with open(param_card, 'r') as paramin:\n data = paramin.readlines()\n\n for i in range(0, len(data)):\n if \"APMASS\" in data[i] and 
self.apmass is not None:\n data[i] = \" 622 %.7fe-03 # APMASS\" % (self.apmass) + '\\n'\n logger.debug(\"APMASS in param card set to %d\" % self.apmass)\n if \"map\" in data[i] and self.map is not None:\n data[i] = \" 622 %.7fe-03 # map\" % (self.map) + '\\n'\n if \"mpid\" in data[i] and self.mpid is not None:\n data[i] = \" 624 %.7fe-03 # mpid\" % (self.mpid) + '\\n'\n if \"mrhod\" in data[i] and self.mrhod is not None:\n data[i] = \" 625 %.7fe-03 # mrhod\" % (self.mrhod) + '\\n'\n\n with open(param_card, 'w') as paramout:\n paramout.writelines(data)", "def draw_card(dealer,player): \n depth = 100\n x0,y0 = 100,100\n x1,y1 = 100,300\n\n bj_board.clear()\n for i in range(len(dealer)):\n if dealer[i].state==True:\n bj_board.add(dealer[i].image)\n dealer[i].image.moveTo(x0+i*20,y0)\n dealer[i].image.setDepth(depth-10*i)\n elif dealer[i].state==False:\n img=Image(img_path+\"Back.png\")\n bj_board.add(img)\n img.moveTo(x0+i*20,y0)\n img.setDepth(depth-10*i)\n for i in range(len(player)):\n bj_board.add(player[i].image)\n player[i].image.moveTo(x1+i*20,y1)\n player[i].image.setDepth(depth-10*i) \n \n text=Text(\"Your Total: \" + str(hand_value(player)))\n text.moveTo(300,300)\n bj_board.add(text)\n \n if dealer[0].state==True:\n text=Text(\"Dealer Total: \" + str(hand_value(dealer)))\n text.moveTo(300,100)\n bj_board.add(text)", "def embed(self):\n\n # aliases\n CARDS = self.game.emojis\n U200B = self.game.u200b_ZWSP\n U3000 = self.game.u3000_IS\n U2022 = self.game.u2022_bullet\n\n # combinations\n LF = f'\\n{U200B}'\n LFLF = f'{U200B}\\n{U200B}'\n BULLET_SEP = f'{U3000}{U2022}{U3000}'\n\n # helper functions\n spacer = lambda n: f'{U200B}{U3000 * n}{U200B}'\n pad_right = lambda n: f'{U3000 * n}{U200B}'\n pad_left = lambda n: f'{U200B}{U3000 * n}'\n\n hand: BlackjackDealerHand = self.game.dealer_hand\n\n if self.game.dealer_status == 'Busted':\n title_status = ' Busted'\n else:\n title_status = \"'s Turn\"\n title = f\"{U200B}\\n**__Kaa (Dealer){title_status}__**{LF}\"\n embed = discord.Embed(\n # title=f\"**{player['name']}**{LF}\",\n title=title,\n color=self.game.embed_color,\n )\n\n # blackjack title and icon\n embed.set_author(\n name='Blackjack' + pad_right(30),\n icon_url=self.game.thumbnail_url,\n )\n\n # footer showing current player pic, and the position in queue\n text = (\n f'Phase 5: Dealer Turn{BULLET_SEP}'\n f'Game will continue momentarily'\n )\n embed.set_footer(\n icon_url=self.game.bot.user.avatar_url,\n text=text,\n )\n\n\n # dealer cards field\n name = 'Cards'\n value = ''\n card: Card\n for card in hand.iter_all():\n value += CARDS[card.format_short()]\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(\n name=U200B,\n value=U200B,\n inline=True,\n )\n\n name = 'Hard[/Best]'\n # value = f'{pad_left(1)}{hand.value_hard}'\n value = f'{hand.value_hard}'\n if hand.value_hard != hand.value:\n value += f'/{hand.value}'\n if self.game.dealer_status == 'Busted':\n value += ' (Busted)'\n value += LF # added for bottom padding\n\n embed.add_field(name=name, value=value, inline=True)\n\n\n\n\n # players\n name = 'Players'\n value = self.player_hands\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(name=U200B, value=U200B, inline=True)\n\n name = U200B\n value = self.player_values\n embed.add_field(name=name, value=value, inline=True)\n\n return embed", "def create_card(cls, card_title, card_content, content_id, css_class=\"card-text\"):\n card = dbc.Card(\n dbc.CardBody(\n 
[\n html.H5(card_title, className=\"card-title\"),\n html.P(card_content, className=css_class, id=content_id),\n ],\n className=css_class,\n )\n )\n return card", "def create_deck(table):\n # Make a standard poker deck (14 represents an ace)\n\n for value in range(2, 15):\n\n if value > 10:\n if value == 11:\n name = 'Jack'\n elif value == 12:\n name = 'Queen'\n elif value == 13:\n name = 'King'\n elif value == 14:\n name = 'Ace'\n else:\n name = str(value)\n\n table.deck.append(Card(name + \"_Diamonds\", value, \"d\"))\n table.deck.append(Card(name + \"_Hearts\", value, \"h\"))\n table.deck.append(Card(name + \"_Spades\", value, \"s\"))\n table.deck.append(Card(name + \"_Clubs\", value, \"c\"))\n\n random.shuffle(table.deck)", "def make_card_icon(self):\n card_icon = pygame.Surface((card_dimensions[0]/2, card_dimensions[1]/2))\n card_icon.fill(self.color)\n pygame.draw.rect(card_icon, CARD_OUTLINE, (0,0,card_icon.get_width(), card_icon.get_height()), 1)\n #the picture\n pic = pygame.transform.scale(self.picture, (card_icon.get_width()-2, card_icon.get_height()*1/2))\n pic = util.outline_surface(pic)\n card_icon.blit(pic,(1, card_icon.get_height()/7))\n util.draw_text(self.name, (2,2), card_icon, text_size = 8)\n self.icon = card_icon", "def show_card(self):\n return self.hands.show(0)", "def startGame(d_hand, p_hand, deck1):\n NUM_CARDS = 2\n\n for i in range(NUM_CARDS):\n d_hand.getCard(deck1.drawCard())\n p_hand.getCard(deck1.drawCard())", "def main():\n\n # call to OS for positioning window\n os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (0, 25)\n\n # Initialization block\n pygame.init() # Initialize pygame module\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # initialize screen\n\n # Testing\n # model_card = m_card.Card(m_card.CardType.TEMPURA)\n # view_card = v_card.CardView(screen, model_card)\n\n deck = Deck()\n player = Player()\n b_pack = deck.generate_booster(10)\n player.booster_pack = b_pack\n\n hand_view = HandView(screen, (0, SCREEN_HEIGHT - SCREEN_HEIGHT / 5), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player)\n pick_crds = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 0)\n pick_crds2 = PickedCardsView(screen, (0, 0), (SCREEN_WIDTH, SCREEN_HEIGHT / 5), player, 180)\n # Game loop\n while True:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.MOUSEBUTTONUP:\n is_clicked([hand_view, pick_crds, pick_crds2], pygame.mouse.get_pos())\n screen.fill((0, 0, 0))\n hand_view.draw()\n pick_crds.draw()\n pick_crds2.draw()\n pygame.display.flip()", "def add_card(self, card):\r\n self.hand.append(card)", "def card(self, card_id_or_shortlink):\r\n return Card(self, card_id_or_shortlink)", "def create_deck():\n suit_list = [\"\\u2665\", #\n \"\\u2666\", #\n \"\\u2663\", #\n \"\\u2660\"] #\n name_points_dict = {\"A\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7,\n \"8\": 8, \"9\": 9, \"10\": 10, \"J\": 10, \"Q\": 10, \"K\": 10}\n\n # Use a double ended queue structured list for the deck\n deck_list = deque([])\n\n # For each suit, create a card with each of the name and point entries\n for each_suit in suit_list:\n for each_entry in name_points_dict.keys():\n new_card = Card(each_entry,\n name_points_dict[each_entry],\n each_suit)\n deck_list.append(new_card)\n\n return deck_list", "def create_deck(self):\n\n deck = []\n\n # Suits and face values\n suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\n face_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 
'Q', 'K']\n\n # Creating deck\n for suit in suits:\n for value in face_values:\n deck.append(Card(suit[0], value))\n\n # Adding jokers\n if self.jokers:\n deck.append(Card('Jk', 0))\n deck.append(Card('Jk', 0))\n\n return deck", "def deal(deck): \r\n hand = []\r\n for n in range(2): \r\n deck, hand = draw(deck, hand)\r\n \r\n return deck, hand", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await ctx.send(embed=emb)", "def add_card(self, card_widget: WidgetT):", "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n response.card = SimpleCard(\n title=skill_name,\n content=convert_speech_to_text(response.output_speech.ssml))", "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n response.card = SimpleCard(\n title=skill_name,\n content=convert_speech_to_text(response.output_speech.ssml))", "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n response.card = SimpleCard(\n title=skill_name,\n content=convert_speech_to_text(response.output_speech.ssml))", "def initial_draw(self):\n self.player.take_card(self.deck)\n self.dealer.take_card(self.deck)\n self.player.take_card(self.deck)\n self.dealer.put_face_down(self.deck)", "def add_card(cls, card, baror=None, comment=''):\n eid = integer(card, 1, 'eid')\n pid_default = eid\n x1_default, x2_default, x3_default = 0., 0., 0.\n offt_default = 'GGG'\n if baror is not None:\n if baror.pid is not None:\n pid_default = baror.pid\n if baror.x is None:\n x1_default = baror.g0\n x2_default = None\n x3_default = None\n else:\n x1_default, x2_default, x3_default = baror.x\n offt_default = baror.offt\n\n pid = integer_or_blank(card, 2, 'pid', pid_default)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n x, g0 = init_x_g0(card, eid, x1_default, x2_default, x3_default)\n\n # doesn't exist in NX nastran\n offt = integer_string_or_blank(card, 8, 'offt', offt_default)\n #print('cls.offt = %r' % (cls.offt))\n\n pa = integer_or_blank(card, 9, 'pa', 0)\n pb = integer_or_blank(card, 10, 'pb', 0)\n\n wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),\n double_or_blank(card, 12, 'w2a', 0.0),\n double_or_blank(card, 13, 'w3a', 0.0)], dtype='float64')\n\n wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),\n double_or_blank(card, 15, 'w2b', 0.0),\n double_or_blank(card, 16, 'w3b', 0.0)], dtype='float64')\n assert len(card) <= 17, f'len(CBAR card) = {len(card):d}\\ncard={card}'\n return CBAR(eid, pid, [ga, gb], x, g0,\n offt, pa, pb, wa, wb, comment=comment)", "def createDeckDialog(self):\n\n\t\tself.mCreatorname = self.getInput_CreatorName(\"Enter your name: \")\n\t\tself.chooseDeckName()\n\t\tself.setMaxAttributePoints()\n\t\tself.createMinions(self.mMaxAttributePoints)\n\t\tprint(str(self))\n\t\tself.mDeckDict = self.createDictionary()", "def deal_cards(self):\r\n\t\tself.player.double = False\r\n\t\tif self.cardstack.reshuffle:\r\n\t\t\tself.cardstack.shuffle(self.decks)\r\n\t\t\tself.cardstack.reshuffle = False\r\n\t\tself.hands.append(Hand())\r\n\t\tfor i in range(2):\r\n\t\t\tself.hands[0].add_card(self.cardstack.draw())\r\n\t\t\tself.dealer.add_card(self.cardstack.draw())", "def cards_to_deal(cls, context={}):\n\t\treturn cls.CARDS_TO_DEAL", "def 
test_constructor(self):\n hand = Hand([Card(\"A\", \"D\")])\n assert isinstance(hand, Hand)", "def create_deck(self):\n\n id_already_use, deck, hand = [], [], []\n\n for _ in range(self.number_domino - self.hand_size):\n\n # We generate a domino and keep its id in id_alread_use\n # then we make sure to ony keep new id\n\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n deck.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n for _ in range(self.hand_size):\n id = (randint(0, 6), randint(0, 6))\n while id in id_already_use:\n id = (randint(0, 6), randint(0, 6))\n hand.append(Domino(id[0], id[1]))\n id_already_use.append(id)\n\n return deck, hand", "def deliver_card(data, access=None):\n\n schema = get_card_schema(data)\n if not schema:\n schema = card_schema\n data = deepcopy(data)\n\n if access is 'learn' and data['kind'] is 'choice':\n if data['order'] == 'random':\n shuffle(data['options'])\n\n if data['max_options_to_show']:\n data['options'] = data['options'][:data['max_options_to_show']]\n\n return deliver_fields(schema, data, access)", "def create_deck(number = 1):\n deck = []\n for suit, face in itertools.product(suit_names, face_names):\n if face == \"Ace\":\n value = 11\n elif face in ['Jack', 'Queen', 'King']:\n value = 10\n else:\n value = int(face)\n img = Image(img_path+suit+\"_\"+face + \".png\")\n state = True\n card = Card(suit, face, value, img, state)\n deck.append(card)\n random.shuffle(deck)\n return deck", "def card_factory(value: str, base: str, is_hidden: bool = False) -> str:\n if 1 <= len(value) <= 2:\n card = list(base)\n card[13:15] = f\"{value} \" if len(value) == 1 else f\"{value}\"\n card[74:76] = f\" {value}\" if len(value) == 1 else f\"{value}\"\n else:\n raise Exception(\"Invalid value lenght. 
Must be 1 or 2 charaters\")\n\n return hidden_face if is_hidden else \"\".join(card)", "def __init__(self):\n self.deck = init_deck()\n self.shuffle()", "def create_card(self, repo, card_name, query):\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'write')\n\n # reject card names that have non alphanumeric characters\n if not re.match(r'^[A-Za-z0-9_]+$', card_name):\n raise ValueError(\n 'Only numbers, letters, and '\n 'underscores are allowed in card names')\n\n # to create a card, the user must be able to successfully execute\n # the query from their own database user.\n try:\n self.execute_sql(query)\n except Exception:\n raise PermissionDenied(\n 'Either missing required privileges or bad query')\n\n card, created = Card.objects.get_or_create(\n repo_base=self.repo_base, repo_name=repo,\n card_name=card_name, query=query)\n\n return card", "def setup_newgame(self):\n global chips\n self.bet = 100\n if chips < self.bet: \n self.game_over = True\n chips -= self.bet\n \n\n self.cards_list = arcade.SpriteList()\n\n #resets on newgame\n self.top_card_int = 0 ## this had to be moved here to make it so that you are not drawing over the 52 card limit\n self.player_hand = []\n self.dealer_hand = []\n self.player_value = 0\n self.dealer_value = 0\n self.player_ace_count = 0\n self.dealer_ace_count = 0\n self.player_almost_bust = 0\n self.dealer_almost_bust = 0\n self.blackjack = False\n self.victory = False\n self.defeat = False\n \n #creates deck\n for card_suit in CARD_SUITS:\n for card_value in CARD_VALUES:\n card = Card(card_suit, card_value, CARD_SCALE)\n self.cards_list.append(card)\n #shuffles deck\n for pos1 in range(len(self.cards_list)):\n pos2 = random.randrange(len(self.cards_list))\n self.cards_list.swap(pos1, pos2)\n \n #Current way to add cards to player and dealer hands since using .pop() on self.cards_list deletes the card itself even in the other hands\n \n #self.dealer_hand.append(self.top_card_int)\n self.hit(\"dealer\")\n self.dealer_hand[0].face_down()\n #first_card = self.dealer_hand[0]\n #first_card.face_down()\n #self.dealer_hand[0].face_down()\n self.hit(\"player\")\n self.player_hand[0].face_down()\n self.hit(\"dealer\")\n self.dealer_hand[1].face_down()\n self.hit(\"player\")\n self.player_hand[1].face_down()\n self.update_card_positions()", "def __init__(self, sides):\n self.sides = sides", "def __init__(self, cards = []):\n self.cards=cards", "def __init__ ( self ):\n \n self.__deck = []\n \n for i in range(0,7):\n self.__deck.append('1')\n \n for i in range(0,10):\n self.__deck.append('2')\n \n for i in range(0,3):\n self.__deck.append('3')\n \n #appends the event cards using the first 3 letters of the card in all caps\n self.__deck.append('SEA')\n self.__deck.append('HER')\n self.__deck.append('VIC')\n self.__deck.append('PIL')\n self.__deck.append('TRU')", "def __init__(self, side, ply, is_random):\n \n self.side = side\n self.ply = ply\n self.is_random = is_random", "def add_card(self, card):\n self.unpack_cards()\n card.dealt(self)\n self.card_list.append(card)\n self.num_cards.set(self.num_cards.get()+1)\n # pretty inefficient to unpack and pack on every card addition...\n self.pack_cards() \n if self.empty.get() is True:\n self.empty.set(False)\n self.toggle_empty_hand()", "def _random_card(self) -> Cards.Card:\n\t\tif len(self.drawstack) < 1:\n\t\t\tfor col in list(Cards.CardColour):\n\t\t\t\t# Numbers 1-9 + Skip, Reverse, Draw2\n\t\t\t\tfor typ in list(Cards.CardType)[1:13]:\n\t\t\t\t\tself.drawstack.append(Cards.Card(typ, 
col))\n\t\t\t\t\tself.drawstack.append(Cards.Card(typ, col))\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.NUMBER0, col))\n\t\t\tfor _ in range(4):\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.WILD))\n\t\t\t\tself.drawstack.append(Cards.Card(Cards.CardType.WILD_DRAW))\n\t\t\trandom.shuffle(self.drawstack)\n\n\t\treturn self.drawstack.pop()", "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n x1_g0 = integer_double_or_blank(card, 5, 'x1_g0', 0.0)\n if isinstance(x1_g0, integer_types):\n g0 = x1_g0\n x = None\n elif isinstance(x1_g0, float):\n g0 = None\n x = np.array([double_or_blank(card, 5, 'x1', 0.0),\n double_or_blank(card, 6, 'x2', 0.0),\n double_or_blank(card, 7, 'x3', 0.0)], dtype='float64')\n if norm(x) == 0.0:\n msg = 'G0 vector defining plane 1 is not defined.\\n'\n msg += 'G0 = %s\\n' % g0\n msg += 'X = %s\\n' % x\n raise RuntimeError(msg)\n else:\n raise ValueError('invalid x1Go=%r on CBEND' % x1_g0)\n geom = integer(card, 8, 'geom')\n\n assert len(card) == 9, f'len(CBEND card) = {len(card):d}\\ncard={card}'\n return CBEND(eid, pid, [ga, gb], g0, x, geom, comment=comment)", "def __init__(self, this_card_name, this_card_points, this_card_suit):\n self.card_name = this_card_name\n self.card_points = this_card_points\n self.card_suit = this_card_suit", "def deal_demo():\n deck = get_deck()\n print(hand_to_string(deck))\n print(hand_to_string(get_hand(deck)))\n print(hand_to_string(get_hand(deck)))", "def __init__(self):\n self._cards = []", "def add_card(self, card):\n if not isinstance(card, Card):\n raise TypeError(\"'card' must be a card object.\")\n # append new card to list of cards in the hand\n self.cards.append(card)\n self.total = card + self.total\n # aces require a little more work\n if card.rank == 14:\n self.soft = True\n self.num_aces += 1\n self.num_hard_aces += 1\n # account for soft hands\n if self.total > 21 and self.soft:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n # catch the edge case where you're delt 12+ aces\n if self.total > 21:\n self.total -= 10\n self.num_hard_aces -= 1\n self.soft = False\n if self.num_hard_aces > 0:\n self.soft = True\n if self.total > 21:\n self.bust = True", "def new_from_cards():\n log_request(request)\n data = request.json\n \n if not valid_params(['username', 'deck_name', 'description', 'cards', 'session_id'], data):\n logging.debug(\"Missing parameters\")\n return jsonify({'error' : 500})\n \n username = data['username']\n deckname = data['deck_name']\n desc = data['description']\n cards = data['cards']\n sId = data['session_id']\n \n # authenticate the user\n uId = user.get_uId(username)\n if not user.verify(username, sId):\n return jsonify({'error' : 101})\n\n # create the deck in the database\n dId, deck_id = deck.new(deckname, uId, desc)\n\n # create the cards\n for c in cards:\n card.new(dId, c['sideA'], c['sideB'])\n\n ret = deck.get_deck(dId)\n ret['error'] = 0 # set error code\n return jsonify(ret)", "def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, 
suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn cards", "def add_card(self, card):\n self.hand.append(card)", "def test_for_non_splittable_hand(self):\n hand = self._hand\n cards = [BjCard('clubs', '7'), BjCard('diamonds', '4')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, False)", "def __init__(self, cards, stack_style=SQUARED):\n self.cards = cards\n self.stack_style = stack_style", "def make_deck():\r\n deck = []\r\n for i in range(1,5):\r\n for j in range(1,14):\r\n card = (i,j)\r\n deck.append(card)\r\n return deck", "def create_account(self):\n account_identifier = \"\".join([str(num) for num in random.sample(range(10), 9)])\n first_fifteen_digit = self.BIN + account_identifier\n checksum = self.create_checksum(first_fifteen_digit)\n card_number = first_fifteen_digit + str(checksum)\n pin = \"\".join([str(num) for num in random.sample(range(10), 4)])\n balance = 0\n print(\"\\nYour card has been created\")\n print(f\"Your card number:\\n{card_number}\\nYour card PIN:\\n{pin}\")\n # fetching max id from database\n database_cursor.execute(\"SELECT id FROM card;\")\n ids = [x[0] for x in database_cursor.fetchall()]\n if ids:\n max_id = max(ids) + 1\n else:\n max_id = 1\n # insert new account into database\n database_cursor.execute(f\"INSERT INTO card VALUES ({max_id}, {card_number}, {pin}, {balance});\")\n database_connection.commit()", "def create_card_product(self):\n\n product_details = {\n \"name\": \"Simple Date Card Product\",\n \"start_date\": \"2019-02-01\",\n \"config\": {\n \"fulfillment\": {\n \"payment_instrument\": \"VIRTUAL_PAN\"\n }\n }\n }\n\n return self.client.card_products.create(product_details)" ]
[ "0.65023726", "0.65023726", "0.63833416", "0.6296066", "0.6287493", "0.6243154", "0.6175611", "0.6166363", "0.61019343", "0.6016748", "0.6007979", "0.59866345", "0.59609824", "0.5881401", "0.587644", "0.5873574", "0.5859107", "0.58429474", "0.58415014", "0.58415014", "0.58415014", "0.5834091", "0.5806693", "0.5727469", "0.571807", "0.5686402", "0.5685131", "0.56698334", "0.5619077", "0.5610841", "0.5600211", "0.55921054", "0.5589277", "0.5577124", "0.5565548", "0.5551773", "0.55418515", "0.5538016", "0.5527431", "0.551798", "0.551674", "0.55141133", "0.5511759", "0.5510079", "0.55067754", "0.55041826", "0.5503489", "0.549436", "0.5490605", "0.5472665", "0.546771", "0.5466818", "0.54620403", "0.54524493", "0.54432166", "0.5442057", "0.54418457", "0.5441762", "0.54333574", "0.542947", "0.5422266", "0.5421144", "0.5419448", "0.54137224", "0.54114956", "0.5407549", "0.5404807", "0.5404807", "0.5404807", "0.5403745", "0.5400584", "0.5399106", "0.5383415", "0.5377821", "0.5374776", "0.5372632", "0.53690904", "0.536717", "0.5364366", "0.53615206", "0.53594345", "0.53520536", "0.53515464", "0.5347104", "0.5341598", "0.5335733", "0.5333447", "0.5328984", "0.5327976", "0.5324507", "0.53230476", "0.53204316", "0.531918", "0.53072846", "0.53047657", "0.52927727", "0.5287912", "0.5284314", "0.5265933", "0.52609736", "0.5252862" ]
0.0
-1
A simple test to create a bucket with maxTTL and check whether new creates with greater exp are deleted when maxTTL has lapsed
def test_maxttl_lesser_doc_expiry(self): for bucket in self.buckets: self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500) self.sleep(int(self.maxttl), "waiting for all docs to expire per maxTTL rule...") self.expire_pager(self.servers) self.sleep(20, "waiting for item count to come down...") for bucket in self.buckets: items = RestConnection(self.master).get_active_key_count(bucket) RestConnection(self.master).get_active_key_count(bucket) self.log.info("Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format( int(self.maxttl) + 500, self.maxttl, self.maxttl, items)) if items > 0: self.fail("Bucket maxTTL of {0} is not honored".format(self.maxttl)) else: self.log.info("SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format( int(self.maxttl) + 500, self.maxttl, self.maxttl, items))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")", "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()", "def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n 
self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")", "def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")", "def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! \")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")", "def 
test_maxttl_possible_values(self):\n # default\n rest = RestConnection(self.master)\n default_maxttl = rest.get_bucket_maxTTL()\n if default_maxttl != 0:\n self.fail(\"FAIL: default maxTTL if left unset must be 0 but is {0}\".format(default_maxttl))\n self.log.info(\"Verified: default maxTTL if left unset is {0}\".format(default_maxttl))\n\n # max value\n try:\n self._update_bucket_maxTTL(maxttl=2147483648)\n except Exception as e:\n self.log.info(\"Expected exception : {0}\".format(e))\n try:\n self._update_bucket_maxTTL(maxttl=2147483647)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=2147483647, the max permitted value\")\n else:\n self.log.info(\"Verified: Max value permitted is 2147483647\")\n else:\n self.fail(\"Able to set maxTTL greater than 2147483647\")\n\n # min value\n try:\n self._update_bucket_maxTTL(maxttl=0)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=0, the min permitted value\")\n else:\n self.log.info(\"Verified: Min value permitted is 0\")\n\n # negative value\n try:\n self._update_bucket_maxTTL(maxttl=-60)\n except Exception as e:\n self.log.info(\"Verified: negative values not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a negative maxTTL\")\n\n # date/string\n try:\n self._update_bucket_maxTTL(maxttl=\"12/23/2016\")\n except Exception as e:\n self.log.info(\"Verified: string not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a date string maxTTL\")", "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def test_delete_buckets(self):\n pass", "def test_create_bucket(self):\n pass", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n 
self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def test_purge(h3):\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'o1', data)\n h3.create_object('b1', 'o2', data)\n h3.create_object('b1', 'o3', data)\n\n assert set(h3.list_objects('b1')) == set(['o1', 'o2', 'o3'])\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test_cbbackupmgr_restore_with_ttl(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This restore with ttl test is only for cb version 5.5 and later. \")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n if self.replace_ttl == \"expired\":\n if self.bk_with_ttl:\n self._load_all_buckets(self.master, gen, \"create\", int(self.bk_with_ttl))\n else:\n self._load_all_buckets(self.master, gen, \"create\", 0)\n else:\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n if self.bk_with_ttl:\n self.sleep(int(self.bk_with_ttl) + 10, \"wait items to be expired in backup\")\n compare_function = \"==\"\n if self.replace_ttl_with:\n compare_function = \"<=\"\n if self.should_fail:\n self.backup_restore()\n else:\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=compare_function)", "def test_delete_bucket(self):\n pass", "def test_create_bucket(self):\n bucket = pmp.utils.create_bucket(3, 5.0)\n self.assertIsInstance(bucket, pmp.Bucket)\n\n POS_INF = float(\"inf\")\n bucket = pmp.utils.create_bucket(0, POS_INF)\n self.assertIsInstance(bucket, pmp.Bucket)", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def test_ttl(self):\n session = self.prepare()\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(100):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10\".format(v=i))\n\n for i in range(100):\n assert_one(session, \"SELECT * FROM t_by_v2 WHERE v2 = {}\".format(i), [i, i, i, i])\n\n time.sleep(20)\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2\"))\n assert len(rows) == 0, \"Expected 0 rows but got {}\".format(len(rows))", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def test_create(self):\n responses.add(\n responses.Response(\n method='POST',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=create_response\n )\n )\n name = 'my-new-bucket'\n description = 'Some Description'\n backend = 'snowflake'\n created_detail = self.buckets.create(name=name,\n description=description,\n backend=backend)\n assert created_detail['id'] == 'in.c-{}'.format(name)", "def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())", "def test_create_cluster_resource_quota(self):\n pass", "def 
test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def test_backup_restore_with_recreate(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n rest = RestConnection(self.backupset.cluster_host)\n rest.delete_bucket()\n bucket_name = \"default\"\n rest_helper = RestHelper(rest)\n rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)\n bucket_ready = rest_helper.vbucket_map_ready(bucket_name)\n if not bucket_ready:\n self.fail(\"Bucket {0} is not created after 120 seconds.\".format(bucket_name))\n self.log.info(\"Deleted {0} bucket and recreated it - restoring it now..\"\\\n .format(bucket_name))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def test_delete_hyperflex_cluster_storage_policy(self):\n pass", "def test_setup_db_for_use_retention_creation(self):\n\n expected_retention = {\n 'name': 'testRetention',\n 'duration': '1h0m0s',\n 'shardGroupDuration': '1h0m0s',\n 'replicaN': 1,\n 'default': True\n }\n assert expected_retention in self.test_client.get_list_retention_policies(\n )", "def test_create_hyperflex_ext_fc_storage_policy(self):\n pass", "def test_list_bucket(self):\n\n if self.bos.does_bucket_exist(\"aaaaaaxzr1\"):\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n if self.bos.does_bucket_exist(\"aaaaaaxzr2\"):\n self.bos.delete_bucket(\"aaaaaaxzr2\")\n\n time1 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr1\")\n\n time2 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr2\")\n\n response = self.bos.list_buckets()\n self.check_headers(response)\n\n self.assertEqual(response.owner.id, bos_test_config.OWNER_ID)\n self.assertEqual(response.owner.display_name, bos_test_config.DISPLAY_NAME)\n for bucket in response.buckets:\n if bucket.name == \"aaaaaaxzr1\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time1)[0:19])\n elif bucket.name == \"aaaaaaxzr2\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time2)[0:19])\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n 
self.bos.delete_bucket(\"aaaaaaxzr2\")", "def test_ttl_included_on_create(self):\r\n with mock.patch.object(ConnectionPool, 'execute') as m:\r\n TestTTLModel.ttl(60).create(text=\"hello blake\")\r\n\r\n query = m.call_args[0][0]\r\n self.assertIn(\"USING TTL\", query)", "def test_delete_cluster_resource_quota(self):\n pass", "def test_create_resource_with_invalid_target_bucket_rpc(\n self, mcg_obj, mcg_connection_factory\n ):\n connection_name = mcg_connection_factory()\n for target_bucket in (\"\", \" \", \"/*-#$%@^\"):\n response = mcg_obj.send_rpc_query(\n \"pool_api\",\n \"create_namespace_resource\",\n {\n \"name\": \"invalid_resource\",\n \"connection\": connection_name,\n \"target_bucket\": target_bucket,\n },\n )\n assert \"error\" in response.json()", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)", "def test_buckets(self):\n objectstore.bucket.Bucket.create('new_bucket', self.context)\n bucket = objectstore.bucket.Bucket('new_bucket')\n\n # creator is authorized to use bucket\n self.assert_(bucket.is_authorized(self.context))\n\n # another user is not authorized\n context2 = context.RequestContext('user2', 'proj2')\n self.assertFalse(bucket.is_authorized(context2))\n\n # admin is authorized to use bucket\n admin_context = context.RequestContext('admin_user', None)\n self.assertTrue(bucket.is_authorized(admin_context))\n\n # new buckets are empty\n self.assertTrue(bucket.list_keys()['Contents'] == [])\n\n # storing keys works\n bucket['foo'] = \"bar\"\n\n self.assertEquals(len(bucket.list_keys()['Contents']), 1)\n\n self.assertEquals(bucket['foo'].read(), 'bar')\n\n # md5 of key works\n self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())\n\n # deleting non-empty bucket should throw a NotEmpty exception\n self.assertRaises(NotEmpty, bucket.delete)\n\n # deleting key\n del bucket['foo']\n\n # deleting empty bucket\n bucket.delete()\n\n # accessing deleted bucket throws exception\n self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')", "def test_bucketEmpty(self):\n b = SomeBucket()\n b.add(20)\n self.clock.set(9)\n empty = b.drip()\n self.assertFalse(empty)\n self.clock.set(10)\n empty = b.drip()\n self.assertTrue(empty)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def test_get_buckets(self):\n pass", "def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n 
endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()", "def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'", "def test_update_hyperflex_cluster_storage_policy(self):\n pass", "def test_001_create_and_delete_bucket(self):\n bucket_name = 'testbucket'\n\n deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)\n deferred.addCallback(lambda _:\n threads.deferToThread(self.conn.get_all_buckets))\n\n deferred.addCallback(self._ensure_one_bucket, bucket_name)\n\n deferred.addCallback(lambda _:\n threads.deferToThread(self.conn.delete_bucket,\n bucket_name))\n deferred.addCallback(lambda _:\n threads.deferToThread(self.conn.get_all_buckets))\n deferred.addCallback(self._ensure_no_buckets)\n return deferred", "def test_model_can_create_a_bucketlist(self):\n old_count = Job.objects.count()\n self.job.save()\n new_count = Job.objects.count()\n self.assertNotEqual(old_count, new_count)", "def test_select_ttl_failure(self):", "def test_delete_hyperflex_ext_fc_storage_policy(self):\n pass", "def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n self.assertEqual(['2','3'], list(storage._keys_dict.keys()))", "def test_instance_is_returned(self):\r\n o = TestTTLModel.create(text=\"whatever\")\r\n o.text = \"new stuff\"\r\n o = o.ttl(60)\r\n self.assertEqual(60, o._ttl)", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def test_update_bucket(self):\n pass", "def test_get_bucket(self):\n pass", "def test_non_existent_key(self):\n ttl = self.cache.ttl('does_not_exist')\n self.assertEqual(ttl, 0)", "def test_create_file_with_buckets(db, location):\n record = CernSearchRecord.create({\"title\": \"test\"}) # type: CernSearchRecord\n record = CernSearchRecord.get_record(record.id)\n\n assert record.bucket_id is not None\n assert record.bucket_content_id is not None\n\n assert record[\"_bucket\"] == record.bucket_id\n assert record[\"_bucket_content\"] == record.bucket_content_id", "def test_bucket_is_deleted(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Delete the created Bucket\n res = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + 
token)\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Bucket Deleted successfully')\n self.assertTrue(res.content_type == 'application/json')", "def test_sized_no_reuse(self):\n cache = LRUCache(max_size=5)\n for i in range(5):\n cache[i] = i\n for i in range(5):\n assert i in cache\n assert cache[i] == i\n for i in range(5, 10):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n assert i - 5 not in cache\n with pytest.raises(KeyError):\n assert cache[i - 5]", "def test_too_many_gigabytes(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_ids1 = []\n volume_ids2 = []\n for index in xrange(FLAGS.max_gigabytes):\n volume_id = self._create_volume()\n volume1.create_volume(self.context, volume_id)\n volume_ids1.append(volume_id)\n volume_id = self._create_volume()\n volume2.create_volume(self.context, volume_id)\n volume_ids2.append(volume_id)\n volume_id = self._create_volume()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_create_volume,\n self.context,\n volume_id)\n for volume_id in volume_ids1:\n volume1.delete_volume(self.context, volume_id)\n for volume_id in volume_ids2:\n volume2.delete_volume(self.context, volume_id)\n volume1.kill()\n volume2.kill()", "def test_patch_bucket(self):\n pass", "def test_namespace_bucket_creation_with_many_resources_rpc(\n self, ns_resource_factory, bucket_factory\n ):\n logger.info(\"Create namespace resources and verify health\")\n ns_resources = [ns_resource_factory()[1] for _ in range(0, 100)]\n\n logger.info(\"Create the namespace bucket with many namespace resources\")\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resources[0],\n read_ns_resources=ns_resources,\n )", "def test_merge_backup_with_purge_deleted_keys(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n self.log.info(\"Delete half docs of 1st batch\")\n delete_gen = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items // 2)\n self._load_all_buckets(self.master, delete_gen, \"delete\", 0)\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items // 2)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.log.info(\"Start backup\")\n self.backup_create()\n self.backup_cluster()\n nodes = []\n upto_seq = 100000\n self.log.info(\"Start compact each vbucket in bucket\")\n\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in RestConnection(self.master).get_buckets():\n found = self.get_info_in_database(self.backupset.cluster_host, bucket, \"deleted\")\n if found:\n shell = RemoteMachineShellConnection(self.backupset.cluster_host)\n shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)\n shell.disconnect()\n found = self.get_info_in_database(self.backupset.cluster_host, bucket, \"deleted\")\n if not found:\n self.log.info(\"Load another docs to bucket %s \" % bucket.name)\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items // 4)\n self._load_bucket(bucket, self.master, create_gen3, \"create\",\n self.expire_time)\n 
self.backup_cluster()\n create_gen4 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items // 4)\n self._load_bucket(bucket, self.master, create_gen4, \"create\",\n self.expire_time)\n self.backup_cluster()\n self.backupset.end = 3\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n else:\n self.fail(\"cbcompact failed to purge deleted key\")", "def test_delete_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.delete(\"key 1\")", "def test_restore_with_non_exist_bucket(self):\n gen = BlobGenerator(\"ent-backup1_\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"Start doing backup\")\n self.backup_create()\n self.backup_cluster()\n self.log.info(\"Start to delete bucket\")\n BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)\n output, _ = self.backup_restore()\n if output and \"Error restoring cluster\" not in output[0]:\n self.fail(\"Restore to non exist bucket should fail\")", "def test_magma_couchstore_compatibility(self):\n restore_backend = \"couchstore\" if self.input.param(\"bucket_storage\", \"\") == \"magma\" else \"magma\"\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"*** done loading items to all buckets\")\n\n self.backup_create_validate()\n self.backup_cluster_validate()\n\n # Tear down and replace bucket with opposite storage backend\n rest_client = RestConnection(self.master)\n rest_client.delete_bucket()\n rest_client.create_bucket(bucket=\"default\", ramQuotaMB=256,\n storageBackend=restore_backend, replicaNumber=0)\n\n self.backup_restore_validate()", "def test_transform_and_load_storage_buckets(neo4j_session):\n bucket_res = tests.data.gcp.storage.STORAGE_RESPONSE\n bucket_list = cartography.intel.gcp.storage.transform_gcp_buckets(bucket_res)\n cartography.intel.gcp.storage.load_gcp_buckets(neo4j_session, bucket_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(bucket:GCPBucket{id:{BucketId}})\n RETURN bucket.id, bucket.project_number, bucket.kind\n \"\"\"\n expected_id = 'bucket_name'\n expected_project_num = 9999\n expected_kind = 'storage#bucket'\n nodes = neo4j_session.run(\n query,\n BucketId=expected_id,\n )\n actual_nodes = {(n['bucket.id'], n['bucket.project_number'], n['bucket.kind']) for n in nodes}\n expected_nodes = {\n (expected_id, expected_project_num, expected_kind),\n }\n assert actual_nodes == expected_nodes", "def fixture_make_bucket(request):\n def _make_bucket(resource, bucket_name, region_name=None):\n if not region_name:\n region_name = resource.meta.client.meta.region_name\n\n bucket = resource.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\n 'LocationConstraint': region_name\n }\n )\n\n def fin():\n bucket.objects.delete()\n bucket.delete()\n request.addfinalizer(fin)\n\n return bucket\n\n return _make_bucket", "def set_ttl(self, ttl):", "def test_timed(self):\n time = 0.001\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n sleep(time)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]\n\n for i in range(50):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n sleep(time)\n for i in range(50):\n assert i not in cache\n with pytest.raises(KeyError):\n assert cache[i]", 
"def test_rotate_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 80, 20, 19)\n assert key.audit_state == 'old'", "def bucket_exists(gs_client, test_bucket):\n bucket = gs_client.conn.bucket(test_bucket)\n if not bucket.exists():\n gs_client.conn.create_bucket(test_bucket, predefined_acl=\"project-private\")\n yield gs_client", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None", "def test_delete_single(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.delete(\"key 1\")\n\n assert single_bucket.is_empty() is True", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def test_expiry_in_future(self):\n link = DownloadLink()\n link.save()\n self.assertEqual(link.getExpiry(), link.createdAt + timedelta(seconds=60))", "def test_create_delete_space(self):\n\n # Make clients\n kibana = Kibana(host=self.kibana_host, username=self.username, password=self.password)\n\n # Parameters\n space_id = random_id()\n\n try:\n # Create spaces\n result = kibana.create_space(space_id, \"Test Space\")\n self.assertTrue(result)\n\n # Delete space\n result = kibana.delete_space(space_id)\n self.assertFalse(result)\n finally:\n # Delete space\n kibana.delete_space(space_id)", "def test_namespace_bucket_creation_rpc(\n self, ns_resource_factory, bucket_factory, platform\n ):\n # Create the namespace resource and verify health\n ns_resource_name = ns_resource_factory(platform=platform)[1]\n\n # Create the namespace bucket on top of the namespace resource\n bucket_factory(\n amount=1,\n interface=\"mcg-namespace\",\n write_ns_resource=ns_resource_name,\n read_ns_resources=[ns_resource_name],\n )", "def test_s3_table_functions(started_cluster):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )\n\n assert (\n node.query(\n \"\"\"\n SELECT count(*) FROM s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n );\n \"\"\"\n )\n == \"1000000\\n\"\n )", "def test_replace_cluster_resource_quota(self):\n pass", "def test_expire_ban(self):\n pass", "def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)", "def test_copy(h3):\n\n count = 100 # More than 10\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 
'object', data)\n\n for i in range(count):\n h3.copy_object('b1', 'object', 'copy%d' % i)\n\n # Get the list of objects\n objects = []\n while True:\n result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n for i in range(count):\n object_info = h3.info_object('b1', 'copy%d' % i)\n assert not object_info.is_bad\n assert object_info.size == (3 * MEGABYTE)\n assert type(object_info.creation) == float\n assert type(object_info.last_access) == float\n assert type(object_info.last_modification) == float\n assert type(object_info.last_change) == float\n\n object_data = h3.read_object('b1', 'copy%d' % i)\n assert object_data == data\n\n objects = []\n while True:\n result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test_backup_restore_with_deletes(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self._load_all_buckets(self.master, gen, \"delete\", 0)\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():", "async def test_age_limit_expiry(hass: HomeAssistant) -> None:\n now = dt_util.utcnow()\n current_time = datetime(now.year + 1, 8, 2, 12, 23, tzinfo=dt_util.UTC)\n\n with freeze_time(current_time) as freezer:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 20,\n \"max_age\": {\"minutes\": 4},\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n # After adding all values, we should only see 5 values in memory\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-5:]) / len(VALUES_NUMERIC[-5:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(5 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1.0\n\n # Values expire over time. Only two are left\n\n current_time += timedelta(minutes=3)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = round(sum(VALUES_NUMERIC[-2:]) / len(VALUES_NUMERIC[-2:]), 2)\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(2 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 1 / 4\n\n # Values expire over time. 
Only one is left\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = float(VALUES_NUMERIC[-1])\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(1 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") == 0\n\n # Values expire over time. Buffer is empty\n\n current_time += timedelta(minutes=1)\n freezer.move_to(current_time)\n async_fire_time_changed(hass, current_time)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n assert state is not None\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"buffer_usage_ratio\") == round(0 / 20, 2)\n assert state.attributes.get(\"age_coverage_ratio\") is None", "def test_create_storage_no_tiers_rate(self):\n storage_rates = (\n metric_constants.OCP_METRIC_STORAGE_GB_REQUEST_MONTH,\n metric_constants.OCP_METRIC_STORAGE_GB_USAGE_MONTH,\n )\n for storage_rate in storage_rates:\n ocp_data = {\n \"name\": \"Test Cost Model\",\n \"description\": \"Test\",\n \"source_type\": Provider.PROVIDER_OCP,\n \"providers\": [{\"uuid\": self.provider.uuid, \"name\": self.provider.name}],\n \"rates\": [{\"metric\": {\"name\": storage_rate}, \"tiered_rates\": [{\"unit\": \"USD\", \"value\": 0.22}]}],\n \"currency\": \"USD\",\n }\n\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)", "def test_create_cluster_policy(self):\n pass", "def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if 
self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def test_sized_no_reuse(self):\n cache = FIFOCache(max_size=5)\n for i in range(5):\n cache[i] = i\n for i in range(5):\n assert i in cache\n assert cache[i] == i\n for i in range(5, 10):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n assert i - 5 not in cache\n with pytest.raises(KeyError):\n assert cache[i - 5]", "def test_s3_table_functions_timeouts(started_cluster):\n with PartitionManager() as pm:\n pm.add_network_delay(node, 1200)\n\n with pytest.raises(QueryRuntimeException):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )", "def test_persisted_values_with_ttl(self):\n blk = MergeStreams()\n self.configure_block(blk, {\"expiration\": {\"seconds\": 1}})\n self.assertEqual(blk.persisted_values(), [])\n blk.start()\n blk.stop()", "def test_create_task_exceeded_amount_tags(self):\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\"tags\": [\"tag1\", \"tag2\", \"tag3\", \"tag4\", \"tag5\", \"tag6\"]}\n )\n result = rv.json()\n expected = {\n \"code\": \"ExceededTagAmount\",\n 
\"message\": \"Tag quantity exceeded maximum allowed\",\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 400)", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def test04KillObject(self):\n results = []\n\n class TestStore(utils.FastStore):\n\n def KillObject(self, obj):\n results.append(obj)\n\n s = TestStore(max_size=5)\n for i in range(0, 10):\n s.Put(i, i)\n\n # Only the first 5 messages have been expired (and hence called)\n self.assertEqual(results, list(range(0, 5)))", "def test_cbbackupmgr_restore_with_vbuckets_filter(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This test is only for cb version 5.5 and later. \")\n self.num_items = 1000\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n if self.should_fail:\n self.backup_cluster()\n else:\n self.backup_cluster_validate()\n if self.restore_should_fail:\n self.backup_restore()\n else:\n self.backup_restore_validate()", "def test_put_object_exceptions(self):\n # key is None\n err = None\n try:\n self.bos.put_object(self.BUCKET, None, None, 100, None)\n except ValueError as e:\n err = e\n finally:\n self.assertIsNotNone(err)\n # too long\n err = None\n try:\n self.bos.put_object(self.BUCKET, self.KEY, None, 6 * 1024 * 1024 * 1024, None)\n except ValueError as e:\n err = e\n finally:\n self.assertIsNotNone(err)", "def test_expiry():\n \n driver = MemoryDriver(expires=1)\n \n driver.state[\"hello\"] = driver.default()\n \n time.sleep(1)\n \n driver.expire(\"hello\", driver.state[\"hello\"][\"checkin\"])\n \n assert \"hello\" not in driver.state", "def test_patch_hyperflex_cluster_storage_policy(self):\n pass", "def test_create_bucket_list_return(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"Name\", \"Completed\")\n self.assertIsInstance(bucket, BucketList)", "def test_delete_collection_cluster_resource_quota(self):\n pass", "def test_get_freeform_tagged_bucket(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"get-freeform-tagged-instance\",\n \"resource\": \"oci.bucket\",\n \"query\": [\n {\"namespace_name\": namespace_name},\n ],\n \"filters\": [\n {\"type\": \"value\", \"key\": \"freeform_tags.Project\", \"value\": \"CNCF\"},\n ],\n },\n session_factory=session_factory,\n )\n resources = policy.run()\n test.assertEqual(len(resources), 1)\n test.assertEqual(resources[0][\"name\"], bucket_name)\n test.assertEqual(resources[0][\"freeform_tags\"][\"Project\"], 
\"CNCF\")", "def test_last_used(self, monkeypatch):\n monkeypatch.setenv('ENABLE_AUTO_EXPIRE', 'true')\n monkeypatch.setenv('INACTIVITY_AGE', '10')\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user3', 'kljin', 'Active', created, last_used)\n key.audit(10, 11, 2, 1)\n assert key.audit_state == 'expire'\n key.audit(60, 80, 2, 1)\n assert key.audit_state == 'stagnant_expire'", "def test_backup_purge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n old_backup_name = \"\"\n new_backup_name = \"\"\n backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n resume=self.backupset.resume, purge=self.backupset.purge,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.cluster_host)\n conn.kill_erlang()\n output = backup_result.result(timeout=200)\n self.log.info(str(output))\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n old_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name before purge: \" + old_backup_name)\n conn.start_couchbase()\n conn.disconnect()\n self.sleep(30)\n output, error = self.backup_cluster()\n if error or not self._check_output(\"Backup completed successfully\", output):\n self.fail(output)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n new_backup_name = bk_info[\"backups\"][i][\"date\"]\n self.log.info(\"Backup name after purge: \" + new_backup_name)\n\n # Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads\n if self.objstore_provider:\n self.assertEqual(\n self.objstore_provider.num_multipart_uploads(), 0,\n \"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)\"\n )\n\n self.assertNotEqual(old_backup_name, new_backup_name,\n \"Old backup name and new backup name are same when purge is used\")\n self.log.info(\"Old backup name and new backup name are not same when purge is used\")", "def test_backup_restore_with_warmup(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n NodeHelper.do_a_warm_up(self.backupset.cluster_host)\n self.sleep(30)\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n \"\"\" only membase bucket has warmup state \"\"\"\n if self.bucket_type == \"membase\":\n NodeHelper.wait_warmup_completed([self.backupset.cluster_host])" ]
[ "0.8141814", "0.7350605", "0.7336613", "0.7202318", "0.7191125", "0.6874046", "0.6224646", "0.6203937", "0.62027216", "0.605416", "0.59724385", "0.5928285", "0.58961767", "0.5894235", "0.58194166", "0.5818683", "0.5788125", "0.5779113", "0.57711524", "0.5734831", "0.5686498", "0.566598", "0.5661623", "0.5624034", "0.5606753", "0.5598206", "0.5589281", "0.5588242", "0.5569433", "0.54929066", "0.54909235", "0.54882765", "0.5476748", "0.54750025", "0.54691094", "0.544921", "0.54486215", "0.5400865", "0.5400265", "0.5382819", "0.537004", "0.5365938", "0.5361898", "0.53472745", "0.5340963", "0.5333534", "0.5327987", "0.5327543", "0.5322435", "0.5319358", "0.53155404", "0.52962345", "0.52747095", "0.5273123", "0.5252475", "0.5248222", "0.5242992", "0.52345276", "0.52289766", "0.5224831", "0.52229476", "0.5214337", "0.52118653", "0.52097124", "0.51723", "0.5169628", "0.51612663", "0.5159244", "0.51495403", "0.51464283", "0.5132105", "0.5123581", "0.5120167", "0.51148605", "0.5112885", "0.5108215", "0.5101204", "0.50970685", "0.50964344", "0.5093321", "0.509292", "0.50858855", "0.5084349", "0.5077702", "0.50733894", "0.50712526", "0.5064562", "0.5061604", "0.5058579", "0.5045329", "0.5039108", "0.50365037", "0.50293005", "0.5028069", "0.5024529", "0.5022256", "0.5021506", "0.50213975", "0.501022", "0.5008922" ]
0.72839487
3
maxTTL is set to 200s in this test; docs have a shorter TTL.
def test_maxttl_greater_doc_expiry(self):
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)
    self.sleep(int(self.maxttl-100), "waiting for all docs to expire per maxTTL rule...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format(
            int(self.maxttl) - 100,
            self.maxttl-100,
            self.maxttl-100,
            items))
        if items == 0:
            self.log.info("SUCCESS: Docs with lesser expiry deleted")
        else:
            self.fail("FAIL: Doc with lesser expiry still present past ttl")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()", "def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))", "def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")", "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry 
deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")", "def set_ttl(self, ttl):", "def test_maxttl_possible_values(self):\n # default\n rest = RestConnection(self.master)\n default_maxttl = rest.get_bucket_maxTTL()\n if default_maxttl != 0:\n self.fail(\"FAIL: default maxTTL if left unset must be 0 but is {0}\".format(default_maxttl))\n self.log.info(\"Verified: default maxTTL if left unset is {0}\".format(default_maxttl))\n\n # max value\n try:\n self._update_bucket_maxTTL(maxttl=2147483648)\n except Exception as e:\n self.log.info(\"Expected exception : {0}\".format(e))\n try:\n self._update_bucket_maxTTL(maxttl=2147483647)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=2147483647, the max permitted value\")\n else:\n self.log.info(\"Verified: Max value permitted is 2147483647\")\n else:\n self.fail(\"Able to set maxTTL greater than 2147483647\")\n\n # min value\n try:\n self._update_bucket_maxTTL(maxttl=0)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=0, the min permitted value\")\n else:\n self.log.info(\"Verified: Min value permitted is 0\")\n\n # negative value\n try:\n self._update_bucket_maxTTL(maxttl=-60)\n except Exception as e:\n self.log.info(\"Verified: negative values not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a negative maxTTL\")\n\n # date/string\n try:\n self._update_bucket_maxTTL(maxttl=\"12/23/2016\")\n except Exception as e:\n 
self.log.info(\"Verified: string not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a date string maxTTL\")", "def test_select_ttl_failure(self):", "def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! \")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")", "def get_ttl(self, keyword, key):", "def test_instance_is_returned(self):\r\n o = TestTTLModel.create(text=\"whatever\")\r\n o.text = \"new stuff\"\r\n o = o.ttl(60)\r\n self.assertEqual(60, o._ttl)", "def reduceTTL(self):\n self.TTL -= 1\n return self.TTL <= 0", "def max_ttl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_ttl\")", "def max_ttl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_ttl\")", "def ttl(self, ttl):\n\n self._ttl = ttl", "def ttl(self, ttl):\n\n self._ttl = ttl", "def token_explicit_max_ttl(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_explicit_max_ttl\")", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def test_ttl(self):\n session = self.prepare()\n 
session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(100):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10\".format(v=i))\n\n for i in range(100):\n assert_one(session, \"SELECT * FROM t_by_v2 WHERE v2 = {}\".format(i), [i, i, i, i])\n\n time.sleep(20)\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2\"))\n assert len(rows) == 0, \"Expected 0 rows but got {}\".format(len(rows))", "def getTTL(self):\n return self.requests_ttl", "def _get_cache_ttl(self, request, response):\n return None # use default ttl", "def token_explicit_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_explicit_max_ttl\")", "def token_explicit_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_explicit_max_ttl\")", "def max_ttl(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"max_ttl\")", "def getTTL(self):\n return self.TTL", "def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")", "def token_max_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_max_ttl\")", "def test_update_queryset_ttl_success_case(self):", "def token_max_ttl(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_max_ttl\")", "def ttl(self):\n return self._ttl", "def get_ttl(self, default_ttl: Optional[int] = None) -> Optional[int]:\n return default_ttl if self.ttl is None else self.ttl", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"ttl\")", "def ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"ttl\")", "def __init__(self, ttl=604800):\n self.data = OrderedDict()\n self.ttl = ttl", "def __init__(self, ttl=604800):\n self.data = OrderedDict()\n self.ttl = ttl", "def _get_recordTtl(self):\n return self.__recordTtl", "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a 
new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache", "def test_ttl_included_on_create(self):\r\n with mock.patch.object(ConnectionPool, 'execute') as m:\r\n TestTTLModel.ttl(60).create(text=\"hello blake\")\r\n\r\n query = m.call_args[0][0]\r\n self.assertIn(\"USING TTL\", query)", "def ttl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ttl\")", "def ttl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ttl\")", "def ttl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ttl\")", "def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())", "def ttl(self, key, version=None, client=None):\r\n if client is None:\r\n client = self.get_client(write=False)\r\n\r\n key = self.make_key(key, version=version)\r\n if not client.exists(key):\r\n return 0\r\n return client.ttl(key)", "def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')", "def test_max_age(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_max_age\": 1, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"max-age=1\"})", "def test_s_maxage(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_s_maxage\": 1, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"s-maxage=1\"})", "def _cost_based_distribution_ttl(self, budget):\n return int(max(self.DELAYS) + self._DELAY_GRACE + 60 + budget)", "def ttl(self):\n return self.app.config.WORKER_TTL", "def ttl(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"ttl\")", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def ttl(self, key):\n return self._command(b'PTTL', key, handler=lambda ms: .001 * ms)", "def __init__(self, nw_ttl=None):\n super().__init__()\n self.nw_ttl = nw_ttl", "def __settle_ttl(self):\n self.ttl = {}\n for v in self.g.nodes():\n indices = [self.universe.index((v, w))\n for w in self.g[v] if (v, w) in self.universe]\n self.ttl[v] = max(indices) if len(indices) else 0", "def ttl_from_expiration(expires):\n now = int(calendar.timegm(datetime.utcnow().utctimetuple()))\n expires = expires_to_timestamp(expires)\n return expires - now", "def test_non_existent_key(self):\n ttl = self.cache.ttl('does_not_exist')\n self.assertEqual(ttl, 0)", "def get_ttl(self, key, now=None):\n if now is None:\n now = time.time()\n with self._lock:\n # pylint: disable=unused-variable\n expire, _value = self._values[key]\n return expire - now", "def test_document_retrieval(self):", "def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] 
Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()", "def _print_key_ttl_check(\n self,\n area_to_invalid_keys: Dict[str, Publication],\n ttl: int,\n threshold: float = 0.75,\n ) -> None:\n click.echo(\n self.validation_result_str(\n \"kvStore\", \"Key TTL Check\", len(area_to_invalid_keys) == 0\n )\n )\n\n click.echo(\n f\"Configured Time-To-Live(TTL): {ttl}ms, Threshold to alarm: {int(threshold * ttl)}ms\"\n )\n\n if len(area_to_invalid_keys) > 0:\n click.echo(\"Key-Value pairs with unexpected low TTL:\")\n self.print_kvstore_keys(\n area_to_invalid_keys,\n True, # Print TTL option\n False, # Print with JSON format option\n )", "def token_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_ttl\")", "def token_ttl(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_ttl\")", "def ttl(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ttl\")", "def ttl(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ttl\")", "def token_ttl(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_ttl\")", "def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n self.assertEqual(['2','3'], list(storage._keys_dict.keys()))", "def set(self, key, value, ttl=None):\n if ttl and (type(ttl) is int) and (ttl > 0):\n ttl += int(dt.now().strftime('%s'))\n self.dadd('ttl', (key, ttl))\n return super(MyCache, self).set(key, value)", "def block_override_ttl(self) -> int:\n return pulumi.get(self, \"block_override_ttl\")", "def get_result_ttl(self, default_ttl: int) -> int:\n return default_ttl if self.result_ttl is None else self.result_ttl", "def open(self):\n super(MemoryCache, self).open()\n\n def _timer():\n # Use a custom timer to try to spread expirations. 
Within one instance it\n # won't change anything but it will be better if you run multiple instances.\n return time.time() + self.__ttl * random.uniform(-0.25, 0.25)\n\n self.__cache = cachetools.TTLCache(\n maxsize=self.__size, ttl=self.__ttl, timer=_timer\n )", "def test_scroll_returns_over_2x_size_docs(self):\n scroll_size = 3 # fetch N docs per \"scroll\"\n total_docs = (scroll_size * 2) + 1\n docs = self._index_many_new_docs(total_docs)\n self.assertEqual(len(docs), total_docs)\n self.assertEqual(docs_to_dict(docs),\n self._scroll_hits_dict({}, size=scroll_size))", "def ttl(self) -> int:\n now = int(datetime.datetime.utcnow().timestamp())\n return now + int(settings.WALLET_TRANSACTION_TTL)", "def default_expiration_delta():\n return timezone.now() + const.EXPIRY_TOKEN_DELTA", "def test_expiry_in_future(self):\n link = DownloadLink()\n link.save()\n self.assertEqual(link.getExpiry(), link.createdAt + timedelta(seconds=60))", "def _validate_key_ttl(\n self,\n publications: Dict[str, Publication],\n ttl: int,\n threshold: float = 0.75,\n ) -> Dict[str, Publication]:\n\n area_to_invalid_keyvals = {}\n for area, publication in publications.items():\n invalid_keyvals = {\n k: v\n for (k, v) in publication.keyVals.items()\n # - the left ttl should be over ttl * 3/4 given refreshing\n # interval is every ttl/4;\n # - ttl can be INFINITY(represented by -1). Skip alerting;\n if (v.ttl >= 0 and v.ttl < threshold * ttl)\n or (v.ttl < 0 and v.ttl != Consts.CONST_TTL_INF)\n }\n\n # Map the invalid k-v pairs to a publication for displaying purpose\n if len(invalid_keyvals) > 0:\n area_to_invalid_keyvals[area] = Publication(keyVals=invalid_keyvals)\n return area_to_invalid_keyvals", "def _cost_based_ttl(self, value, budget):\n return BaseHabits._value_ttl(\n value=value, budget=budget, num_delays=len(\n self.DELAYS), max_ttl=self._MAX_TTL)", "def test_change_default_throttling_settings_http_with_overwrite_throttled():", "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20", "def update(self) -> None:\n\n \n #If time to live is 0\n if self.ttl == 0:\n\n #Kill itself\n self.kill()\n return\n\n #Otherwise\n else:\n\n #Reduce time to live\n self.ttl -= 1\n\n #Call superclass update\n return super().update()", "def set_ttl(self, key, ttl, now=None):\n if now is None:\n now = time.time()\n with self._lock:\n # pylint: disable=unused-variable\n _expire, value = self._values[key]\n self._values[key] = (now + ttl, value)", "def test_all_documents(self):", "def __expired_timestamp(self, timestamp):\n return int(time.time()) > timestamp + self.__ttl", "def ttl_seconds(self) -> \"int\":\n return self._attrs.get(\"ttlSeconds\")", "def test_smoker_latest_get(self):\n pass", "def test_client_document_retrieve(self):\n pass", "def renew_ttl(self, renew_backend: bool) -> None:\n if not self.modified:\n self.modified = True\n if renew_backend:\n self._session.renew_ttl()", "def test_setup_db_for_use_retention_creation(self):\n\n expected_retention = {\n 'name': 'testRetention',\n 'duration': '1h0m0s',\n 'shardGroupDuration': '1h0m0s',\n 'replicaN': 1,\n 'default': True\n }\n assert expected_retention in self.test_client.get_list_retention_policies(\n )", "def sc_ttl(self):\n return self._sc_ttl", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def test_record_eq_record_different_ttl(self):\n zone = Zone('test.example.com')\n 
record_current = Record(zone, 'test-record', {'type': 'A', 'ttl': 30})\n record_desired = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n self.assertTrue(record_current != record_desired)", "def test_init_max_time_mins():\n\n tpot_obj = TPOTClassifier(max_time_mins=30, generations=1000)\n\n assert tpot_obj.generations == 1000000\n assert tpot_obj.max_time_mins == 30", "def test_long_timeout(self):\n self.cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second\n self.assertEqual(self.cache.get('key1'), 'eggs')\n\n self.cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key2'), 'ham')\n\n self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key3'), 'sausage')\n self.assertEqual(self.cache.get('key4'), 'lobster bisque')", "def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)", "def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)", "def test_get_documents_offset_optional_params(index_with_documents):\n index = index_with_documents()\n response = index.get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20\n response_offset_limit = index.get_documents({\"limit\": 3, \"offset\": 1, \"fields\": \"title\"})\n assert len(response_offset_limit.results) == 3\n assert hasattr(response_offset_limit.results[0], \"title\")\n assert response_offset_limit.results[0].title == response.results[1].title", "def testSpecificTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:9\"\n\n # Check we can specify a timestamp\n data_store.DB.Set(subject, predicate, \"2\", timestamp=1000, token=self.token)\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n self.assertEqual(ts, 1000)\n self.assertEqual(stored, \"2\")", "def add_new_doc(self, document, end_of_corpus):\n max_tf = 0\n unique_terms_counter = 0\n document_dictionary = document.term_doc_dictionary\n # Go over each term in the doc\n for term in document_dictionary:\n try:\n # Update inverted index and posting\n if term not in self.inverted_idx:\n self.inverted_idx[term] = 1\n unique_terms_counter += 1\n else:\n self.inverted_idx[term] += 1\n if term not in self.posting_dict:\n self.posting_dict[term] = []\n\n self.posting_dict[term].append(\n (document.tweet_id, document_dictionary[term])) # key: str , value: array of tuples\n\n max_tf = max(document_dictionary[term], max_tf)\n\n except:\n\n print('problem with the following key {}'.format(term[0]))\n\n document.max_tf = max_tf\n document.unique_terms = unique_terms_counter\n self.docs_count += 1\n\n modulo = int(document.tweet_id) % 10\n self.documents[modulo][document.tweet_id] = [document.term_doc_dictionary, document.max_tf]\n\n if self.docs_count == self.DOCS_SIZE or end_of_corpus: # if we reach chunk size or 
end of corpus\n self.add_to_file(end_of_corpus)\n self.docs_count = 0\n self.posting_dict = {}\n\n for i in self.documents: # 0 - 9\n if self.documents[i].__len__() > 15000:\n doc = utils.load_obj(self.out + \"document\" + str(i))\n doc.update(self.documents[i])\n utils.save_obj(doc, self.out + \"document\" + str(i))\n self.documents[i] = {}", "def test_client_twrr_performance(self):\n pass", "def test_truncate(doctest):", "def test_change_default_throttling_settings_http_with_overwrite_not_throttled():" ]
[ "0.7520884", "0.7474054", "0.717667", "0.7172085", "0.67970985", "0.6264787", "0.62576485", "0.60355556", "0.59813035", "0.56774914", "0.5664216", "0.5654567", "0.56533533", "0.56533533", "0.56383187", "0.56383187", "0.5615611", "0.5612152", "0.56103104", "0.55894196", "0.551761", "0.55029446", "0.55029446", "0.54857665", "0.5377239", "0.5374815", "0.5374815", "0.53712595", "0.53654486", "0.5328611", "0.52737415", "0.52238804", "0.52054113", "0.52054113", "0.513754", "0.513754", "0.51097596", "0.5076771", "0.50571483", "0.5055012", "0.5055012", "0.5055012", "0.50533396", "0.5043776", "0.5043731", "0.5042068", "0.5027358", "0.501351", "0.49768853", "0.49547073", "0.49484822", "0.4926414", "0.49199492", "0.49119896", "0.48921868", "0.4888305", "0.48790783", "0.48370972", "0.48228544", "0.47965604", "0.47707298", "0.47707298", "0.4760678", "0.4760678", "0.4759996", "0.4729097", "0.47185686", "0.47131947", "0.46700275", "0.466308", "0.46627206", "0.46543846", "0.46512598", "0.4623993", "0.46133527", "0.46057487", "0.46029547", "0.45911616", "0.45886603", "0.45831206", "0.45631713", "0.45571905", "0.4547541", "0.454644", "0.4543599", "0.45397514", "0.4538373", "0.4523418", "0.45170426", "0.44961357", "0.44862092", "0.44841835", "0.44829786", "0.44823408", "0.44816616", "0.4477405", "0.44703272", "0.4466329", "0.44661924", "0.4465725" ]
0.7362958
2
1. Create a bucket with no max_ttl
2. Upload 1000 docs with exp = 100s
3. Set maxTTL on bucket as 60s
4. After 60s, run expiry pager, get item count, must be 1000
5. After 40s, run expiry pager again and get item count, must be 0
6. Now load another set of docs with exp = 100s
7. Run expiry pager after 60s and get item count, must be 0
def test_set_maxttl_on_existing_bucket(self):
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=100)
    self._update_bucket_maxTTL(maxttl=60)

    self.sleep(60, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
                      "(set after doc creation), after 60s, item count = {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Items with larger expiry before maxTTL updation deleted!")

    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
                      "(set after doc creation), after 100s,"
                      " item count = {0}".format(items))
        if items != 0:
            self.fail("FAIL: Items with not greater expiry set before maxTTL "
                      "updation not deleted after elapsed TTL!")
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=100)

    self.sleep(60, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s,"
                      " item count = {0}".format(items))
        if items != 0:
            self.fail("FAIL: Items with not greater expiry not "
                      "deleted after elapsed maxTTL!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))", "def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")", "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n 
self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")", "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()", "def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! 
\")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")", "def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)", "def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass", "def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def test_maxttl_possible_values(self):\n # default\n rest = RestConnection(self.master)\n default_maxttl = rest.get_bucket_maxTTL()\n if default_maxttl != 0:\n self.fail(\"FAIL: default maxTTL if left unset must be 0 but is {0}\".format(default_maxttl))\n self.log.info(\"Verified: default maxTTL if left unset is {0}\".format(default_maxttl))\n\n # max 
value\n try:\n self._update_bucket_maxTTL(maxttl=2147483648)\n except Exception as e:\n self.log.info(\"Expected exception : {0}\".format(e))\n try:\n self._update_bucket_maxTTL(maxttl=2147483647)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=2147483647, the max permitted value\")\n else:\n self.log.info(\"Verified: Max value permitted is 2147483647\")\n else:\n self.fail(\"Able to set maxTTL greater than 2147483647\")\n\n # min value\n try:\n self._update_bucket_maxTTL(maxttl=0)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=0, the min permitted value\")\n else:\n self.log.info(\"Verified: Min value permitted is 0\")\n\n # negative value\n try:\n self._update_bucket_maxTTL(maxttl=-60)\n except Exception as e:\n self.log.info(\"Verified: negative values not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a negative maxTTL\")\n\n # date/string\n try:\n self._update_bucket_maxTTL(maxttl=\"12/23/2016\")\n except Exception as e:\n self.log.info(\"Verified: string not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a date string maxTTL\")", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())", "def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - 
get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} 
that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] 
Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()", "def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)", "def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass", "def limit_for(self, expiration=10, **kwargs):\n key = self._get_key(**kwargs)\n self.redis_conn.set(key, 1)\n self.redis_conn.expire(key, expiration)", "async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})", "def __init__(__self__, *,\n bucket: str,\n kind: str,\n retention_interval: str,\n upload_interval: str):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"retention_interval\", retention_interval)\n pulumi.set(__self__, \"upload_interval\", upload_interval)", "def _put_retry(self, s3_bucket, s3_filename, local_filename, max_retries=3, policy=None):\n b = self.conn.get_bucket(s3_bucket)\n retries = 0\n while retries < max_retries:\n try:\n s3_key = b.new_key(s3_filename)\n s3_key.set_contents_from_filename(local_filename, policy=policy)\n except:\n logger.info('File transfer error: ' + s3_filename, exc_info=True)\n retries = retries + 1\n if retries == max_retries:\n raise\n time.sleep(retries)\n else:\n logger.info('Archived %s to %s/%s', local_filename, s3_bucket, s3_filename)\n return os.path.getsize(local_filename)", "def large_upload_collection(upload_items: List[JSONDict]) -> UploadCollection:\n items = []\n\n item = upload_items[0]\n for i in range(3050):\n copy = item.copy()\n copy[\"guid\"] = copy[\"guid\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = UploadCollection(items=items)\n return collection", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def __init__(self, bucket):\n self.bucket = bucket", "def set_ttl(self, ttl):", "def do_rate_limited_ops(\n handle, num_seconds, do_writes, limit, max_rows, min_size, max_size):\n put_request = PutRequest().set_table_name(table_name)\n get_request = GetRequest().set_table_name(table_name)\n #\n # Generate a string of max_size with all \"x\"s in it\n #\n user_data = ''\n if do_writes:\n for x in range(max_size):\n user_data += 'x'\n\n start_time = int(round(time() * 1000))\n end_time = start_time + num_seconds * 1000\n\n print('Running continuous 
' + ('writes' if do_writes else 'reads') +\n ' for ' + str(num_seconds) + ' seconds.')\n #\n # Keep track of how many units we used\n #\n units_used = 0\n #\n # With rate limiting enabled, we can find the amount of time our operation\n # was delayed due to rate limiting by getting the value from the result\n # using Result.get_rate_limit_delayed_ms().\n #\n delay_ms = 0\n\n key = dict()\n value = dict()\n while True:\n fld_id = int(random() * max_rows)\n try:\n if do_writes:\n value['id'] = fld_id\n value['sid'] = fld_id\n rec_size = int(random() * (max_size - min_size))\n rec_size += min_size\n value['name'] = user_data[:rec_size]\n put_request.set_value(value)\n put_result = handle.put(put_request)\n units_used += put_result.get_write_units()\n delay_ms += put_result.get_rate_limit_delayed_ms()\n else:\n key['id'] = fld_id\n key['sid'] = fld_id\n get_request.set_key(key)\n get_result = handle.get(get_request)\n units_used += get_result.get_read_units()\n delay_ms += get_result.get_rate_limit_delayed_ms()\n except WriteThrottlingException as wte:\n # We should not get WriteThrottlingException exception\n print('Got unexpected write throttling exception')\n raise wte\n except ReadThrottlingException as rte:\n # We should not get ReadThrottlingException exception\n print('Got unexpected read throttling exception')\n raise rte\n if int(round(time() * 1000)) >= end_time:\n break\n num_seconds = (int(round(time() * 1000)) - start_time) // 1000\n units_used /= num_seconds\n\n if units_used < int(limit * 0.8) or units_used > int(limit * 1.2):\n if do_writes:\n msg = ('Writes: expected around ' + str(limit) + ' WUs, got ' +\n str(units_used))\n else:\n msg = ('Reads: expected around ' + str(limit) + ' RUs, got ' +\n str(units_used))\n raise RuntimeError(msg)\n\n print(('Writes' if do_writes else 'Reads') + ': average usage = ' +\n str(units_used) + ('WUs' if do_writes else 'RUs') +\n ' (expected around ' + str(limit))\n\n print('Total rate limiter delay time = ' + str(delay_ms) + 'ms')", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def load_to_dgm(self, active=75, ttl=0):\n doc_size = 1024\n curr_active = self.stat('vb_active_perc_mem_resident')\n\n # go into heavy dgm\n while curr_active > active:\n curr_items = self.stat('curr_items')\n gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 50000)\n try:\n self._load_all_buckets(self.master, gen_create, \"create\", ttl)\n except:\n pass\n curr_active = 
self.stat('vb_active_perc_mem_resident')", "def test_long_timeout(self):\n self.cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second\n self.assertEqual(self.cache.get('key1'), 'eggs')\n\n self.cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key2'), 'ham')\n\n self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key3'), 'sausage')\n self.assertEqual(self.cache.get('key4'), 'lobster bisque')", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def test_list_objects():\n x = 0\n for obj in qmk_storage.list_objects():\n assert 'Key' in obj\n assert type(obj.get('LastModified')) == datetime.datetime\n\n if x > 5:\n break\n x += 1", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def get_ttl(self, keyword, key):", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()", "def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n self.assertEqual(['2','3'], list(storage._keys_dict.keys()))", "def post_bucketlist():\n pass", "def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket", "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has 
expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache", "def main(transcribe_bucket_name, mp3_bucket_name):\n\n s3 = boto3.resource('s3')\n for bucket in s3.buckets.all():\n if bucket.name == transcribe_bucket_name:\n for key in bucket.objects.all():\n if key.key.endswith('.json'):\n r = {}\n # Get reference number\n reference = basename(key.key).replace('.json', '')\n r['ref'] = reference\n # Get URL\n location = boto3.client('s3') \\\n .get_bucket_location(\n Bucket=mp3_bucket_name)['LocationConstraint']\n base_url = join('https://s3-%s.amazonaws.com' % location,\n mp3_bucket_name)\n url = join(base_url, key.key.replace('.json', '.mp3'))\n r['url'] = url\n # Download json file\n try:\n s3.Bucket(transcribe_bucket_name) \\\n .download_file(key.key, key.key)\n except Exception as exception:\n return 1\n # Get text\n with open(key.key, 'r') as f:\n data = json.load(f)\n text = data['results']['transcripts'][0]['transcript']\n r['text'] = text\n # Get sentiment\n sentiment = get_sentiment(text)\n r['sentiment'] = sentiment\n # Check promotion\n promo = check_promo(text)\n r['promo'] = promo\n # Save to Gooogle Sheets\n values = [r['ref'], r['text'], r['promo'], r['sentiment'],\n r['url']]\n append_row(values)\n # Remove tmp json file from local machine\n remove(key.key)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def test_ttl(self):\n session = self.prepare()\n session.execute(\"CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)\")\n session.execute((\"CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t \"\n \"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)\"))\n\n for i in range(100):\n session.execute(\"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10\".format(v=i))\n\n for i in range(100):\n assert_one(session, \"SELECT * FROM t_by_v2 WHERE v2 = {}\".format(i), [i, i, i, i])\n\n time.sleep(20)\n\n rows = list(session.execute(\"SELECT * FROM t_by_v2\"))\n assert len(rows) == 0, \"Expected 0 rows but got {}\".format(len(rows))", "def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. 
service=s3 method=delete_object id={}'.format(file_id))", "def test_setup_db_for_use_retention_creation(self):\n\n expected_retention = {\n 'name': 'testRetention',\n 'duration': '1h0m0s',\n 'shardGroupDuration': '1h0m0s',\n 'replicaN': 1,\n 'default': True\n }\n assert expected_retention in self.test_client.get_list_retention_policies(\n )", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def quota():\n try:\n fname = os.path.join(os.path.expanduser(\"~\"), \".planet.json\")\n contents = {}\n if os.path.exists(fname):\n with open(fname, \"r\") as fp:\n contents = json.loads(fp.read())\n else:\n raise IOError(\"Escape to End and Initialize\")\n if not len(contents) != 0:\n raise IOError(\"Escape to End and Initialize\")\n else:\n k = contents[\"key\"]\n main = requests.get(\n \"https://api.planet.com/auth/v1/\" + \"experimental/public/my/subscriptions\",\n auth=HTTPBasicAuth(k, \"\"),\n )\n if main.status_code == 200:\n content = main.json()\n for item_id in content:\n print(\" \")\n print(\"Allocation Name: %s\" % item_id[\"organization\"][\"name\"])\n print(\n \"Allocation active from: %s\" % item_id[\"active_from\"].split(\"T\")[0]\n )\n print(\"Quota Enabled: %s\" % item_id[\"quota_enabled\"])\n print(\"Total Quota in SqKm: %s\" % item_id[\"quota_sqkm\"])\n print(\"Total Quota used: %s\" % item_id[\"quota_used\"])\n if (item_id[\"quota_sqkm\"]) is not None:\n leftquota = float(\n item_id[\"quota_sqkm\"] - float(item_id[\"quota_used\"])\n )\n print(\"Remaining Quota in SqKm: %s\" % leftquota)\n else:\n print(\"No Quota Allocated\")\n print(\"\")\n else:\n print(\"Failed with exception code: \" + str(main.status_code))\n\n except IOError:\n print(\"Initialize client or provide API Key\")", "def handler(event, context):\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');", "def test_request_throttling_expires(self):\n self.set_throttle_timer(MockView, 0)\n\n request = self.factory.get('/')\n for dummy in range(4):\n response = MockView.as_view()(request)\n assert response.status_code == 429\n\n # Advance the timer by one second\n 
self.set_throttle_timer(MockView, 1)\n\n response = MockView.as_view()(request)\n assert response.status_code == 200", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def rest_rate_limit(r):\n\n try:\n #limit = int(r.headers[\"X-Rate-Limit-Limit\"])\n remain = int(r.headers[\"X-Rate-Limit-Remaining\"])\n reset = int(r.headers[\"X-Rate-Limit-Reset\"])\n curtime = times.to_unix(times.parse(r.headers[\"date\"]))\n except KeyError as e:\n # We dont have the proper headers\n log.error(\"Header not found - {}\", e)\n sleep(RETRY_AFTER)\n return\n\n if remain <= RATE_LIMIT_BUFFER:\n log.debug(\"Hit rate limit - {}\", remain)\n log.debug(\"Rate limit reset in {} seconds\", reset - curtime)\n sleep(reset - curtime + RESET_BUFFER)", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "def test_purge(h3):\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'o1', data)\n h3.create_object('b1', 'o2', data)\n h3.create_object('b1', 'o3', data)\n\n assert set(h3.list_objects('b1')) == set(['o1', 'o2', 'o3'])\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def checkSold(auto=False):\n\n #Create connection \n db = pymysql.connect(host=\"localhost\", user=\"testUser\", passwd=\"BorrisBulletDodger\", db=\"scraperdb\", charset='utf8')\n cursor = db.cursor()\n\n #SQL Query\n sql = \"SELECT url FROM motorcycles WHERE adExpiry IS NULL\"\n\n #Find data\n try: \n cursor.execute(sql)\n sqlResult = cursor.fetchall()\n urls = [i[0] for i in sqlResult]\n db.commit()\n except Exception as e:\n db.rollback()\n print(f\"Exception occured: {e}\")\n\n #User input to proceed if not auto\n while not auto:\n cont = input(f\"{len(urls)} stored listings found - Do you wish to check if sold?: \")\n if cont.lower() == 'y' or cont.lower() == 'yes':\n break\n elif cont.lower() == 'n' or cont.lower() == 'no':\n return\n else:\n print(\"Please enter y/n\")\n continue\n \n #Use threading to check if urls have expired\n maxThreads = 5\n urlsQ = Queue(maxsize=0)\n #Set number of threads\n numThreads = min(maxThreads, len(urls))\n #Create lock\n lock = Lock()\n #Create progress bar\n pbar = tqdm(total=len(urls))\n \n #Expired test\n def checkExpiredThread(q, results, db, cursor):\n \"\"\"\n Checks whether input url has expired\n Input: [\"url\"], {} - Keys=urls, vals=False\n \"\"\"\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n 
cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")\n\n\n #Load queue with urls, results dict keys = urls, vals = False - Ad default not expired\n results = {}\n for url in urls:\n urlsQ.put(url)\n results[url] = False\n\n #Create threads that execute checkExpiredThread function, updates data\n for _ in range(numThreads):\n worker = Thread(target=checkExpiredThread, args=(urlsQ, results, db, cursor))\n worker.setDaemon(True)\n worker.start()\n #Wait until the queue has been processed - All URLs checked\n urlsQ.join()\n pbar.close()\n\n #Remember to close database at the end \n db.close()\n \n #Count number of expired urls\n count = sum(1 for value in results.values() if value)\n logger.info(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")\n print(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():", "def manipulate_bucketlist():\n pass", "def time_limit(self, limit=None):\n if limit is None:\n done, data = self._request('G3')\n if done:\n return int(data[0])*15\n else:\n limit = int(round(limit/15.0))\n if self._request('S3', str(limit))[0]:\n return limit\n\n raise EvseError", "def test_update_bucket(self):\n pass", "def create(base_url, keys, throttle, mdrate, mderrors, cterrors, max_file_size, tmpdir, retrieve_rate, routable, repo_configs):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise Create; base_url:{a}, throttle:{b}, mdrate:{c}, mderrors:{d}, cterrors:{e}, max_file_size:{f}, tmpdir:{g}, retrieve_rate:{h}, routable:{i}\".format(x=tname, a=base_url, b=throttle, c=mdrate, d=mderrors, e=cterrors, f=max_file_size, g=tmpdir, h=retrieve_rate, i=routable))\n\n mdopts = [\"mdonly\", \"md+ct\"]\n mdprobs = [mdrate, 1 - mdrate]\n\n mderroropts = [\"error\", \"ok\"]\n mderrorprobs = [mderrors, 1 - mderrors]\n\n cterroropts = [\"error\", \"ok\"]\n cterrorprobs = [cterrors, 1 - cterrors]\n\n retrieveopts = [\"get\", \"not\"]\n retrieveprobs = [retrieve_rate, 1 - retrieve_rate]\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n mdtype = _select_from(mderroropts, mderrorprobs)\n #print \"MD: \" + mdtype\n\n # generate a notification which may or may not have an error\n note = _make_notification(error=mdtype==\"error\", routable=routable, repo_configs=repo_configs)\n #print note\n\n # determine whether we're going to send some content\n hasct = _select_from(mdopts, mdprobs)\n #print \"CT: \" + hasct\n file_handle = None\n filepath = None\n cterr = \"ok\"\n if hasct == \"md+ct\":\n # determine if the content should have an error\n cterr = _select_from(cterroropts, cterrorprobs)\n #print \"CTERR:\" + cterr\n filepath = _get_file_path(tmpdir, max_file_size, error=cterr==\"error\")\n #print \"File\" + filepath\n file_handle = open(filepath)\n\n app.logger.debug(\"Thread:{x} - Create request for Account:{y} Type:{z} MD:{a} CT:{b}\".format(x=tname, y=api_key, z=hasct, a=mdtype, b=cterr))\n\n # make the create request, which may occasionally throw errors\n id = None\n try:\n id, loc = j.create_notification(note, file_handle)\n app.logger.debug(\"Thread:{x} - Create request for 
Account:{z} resulted in success, Notification:{y}\".format(x=tname, y=id, z=api_key))\n except:\n app.logger.error(\"Thread:{x} - Create request for Account:{y} resulted in expected exception\".format(x=tname, y=api_key))\n\n # cleanup after ourselves\n if filepath is not None:\n file_handle.close()\n os.remove(filepath)\n\n # now there's a chance that we might want to check our notification has been created correctly, so we might\n # retrieve it\n if id is not None:\n ret = _select_from(retrieveopts, retrieveprobs)\n if ret == \"get\":\n # time.sleep(2) # this gives JPER a chance to catch up\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, requesting copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n try:\n n = j.get_notification(id)\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, successfully retrieved copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n for link in n.links:\n if link.get(\"packaging\") is not None:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=id, a=url))\n try:\n stream, headers = j.get_content(url)\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=id, b=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def __cleanup(self, ttl_in_sec):\n ttl_in_ms = ttl_in_sec * 1000\n while True:\n logging.debug(\"cleanup action...\")\n current_ts = self.__current_timestamp_in_ms()\n self.lock.acquire()\n for key, value in self.orderedDict.items():\n if value[1] > current_ts - ttl_in_ms:\n break\n else:\n self.orderedDict.pop(key, None)\n self.lock.release()\n time.sleep(ttl_in_sec)", "def __init__(self, ttl=604800):\n self.data = OrderedDict()\n self.ttl = ttl", "def __init__(self, ttl=604800):\n self.data = OrderedDict()\n self.ttl = ttl", "def cache_limit(self):\n return 20", "async def throttle_pubs(self):\n while True:\n ch, msg = await self._pub_throttle.get() # Blocks until we get an item\n resp = await self._publish_to_channel(ch, msg)\n print(resp)\n print(\"[ *] Published to channel {ch} message: \\n{msg}\\n\".format(ch=ch, msg=msg))\n if resp:\n await asyncio.sleep(self.pub_rate)", "def update_bucketlist():\n pass", "def __init__(self, data, expires_in):\n self.data = data\n self.expires_in = expires_in\n self.expires_after = time.time() + expires_in", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def test_len_during_iteration(self):\n\n class Data(Document):\n pass\n\n for i in range(300):\n Data().save()\n\n records = Data.objects.limit(250)\n\n # This should pull all 250 
docs from mongo and populate the result\n # cache\n len(records)\n\n # Assert that iterating over documents in the qs touches every\n # document even if we call len(qs) midway through the iteration.\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249\n\n # Assert the same behavior is true even if we didn't pre-populate the\n # result cache.\n records = Data.objects.limit(250)\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249", "def test_put(self):\n cache = LRUCache(5)\n assert 0 == cache.size\n cache.put(1, 'aaa')\n assert 1 == cache.size", "def test_transform_and_load_storage_buckets(neo4j_session):\n bucket_res = tests.data.gcp.storage.STORAGE_RESPONSE\n bucket_list = cartography.intel.gcp.storage.transform_gcp_buckets(bucket_res)\n cartography.intel.gcp.storage.load_gcp_buckets(neo4j_session, bucket_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(bucket:GCPBucket{id:{BucketId}})\n RETURN bucket.id, bucket.project_number, bucket.kind\n \"\"\"\n expected_id = 'bucket_name'\n expected_project_num = 9999\n expected_kind = 'storage#bucket'\n nodes = neo4j_session.run(\n query,\n BucketId=expected_id,\n )\n actual_nodes = {(n['bucket.id'], n['bucket.project_number'], n['bucket.kind']) for n in nodes}\n expected_nodes = {\n (expected_id, expected_project_num, expected_kind),\n }\n assert actual_nodes == expected_nodes", "def ttl_from_expiration(expires):\n now = int(calendar.timegm(datetime.utcnow().utctimetuple()))\n expires = expires_to_timestamp(expires)\n return expires - now", "def setup_buckets():\n s3 = boto.connect_s3()\n s3.create_bucket('mls_data.mls.angerilli.ca')", "def test_cbbackupmgr_restore_with_ttl(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This restore with ttl test is only for cb version 5.5 and later. 
\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n if self.replace_ttl == \"expired\":\n if self.bk_with_ttl:\n self._load_all_buckets(self.master, gen, \"create\", int(self.bk_with_ttl))\n else:\n self._load_all_buckets(self.master, gen, \"create\", 0)\n else:\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n if self.bk_with_ttl:\n self.sleep(int(self.bk_with_ttl) + 10, \"wait items to be expired in backup\")\n compare_function = \"==\"\n if self.replace_ttl_with:\n compare_function = \"<=\"\n if self.should_fail:\n self.backup_restore()\n else:\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=compare_function)", "def test_write_multi_files_to_bucket(\n self, mcg_obj, awscli_pod, bucket_factory, amount, file_type\n ):\n data_dir = \"/data\"\n if file_type == \"large\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY\n elif file_type == \"small\":\n public_bucket = constants.TEST_FILES_BUCKET\n obj_key = \"random1.txt\"\n elif file_type == \"large_small\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY.rsplit(\"/\", 1)[0]\n\n # Download the file to pod\n awscli_pod.exec_cmd_on_pod(command=f\"mkdir {data_dir}\")\n public_s3_client = retrieve_anon_s3_resource().meta.client\n download_files = []\n # Use obj_key as prefix to download multiple files for large_small\n # case, it also works with single file\n for obj in public_s3_client.list_objects(\n Bucket=public_bucket, Prefix=obj_key\n ).get(\"Contents\"):\n # Skip the extra file in large file type\n if file_type == \"large\" and obj[\"Key\"] != obj_key:\n continue\n logger.info(f'Downloading {obj[\"Key\"]} from AWS bucket {public_bucket}')\n download_obj_cmd = f'cp s3://{public_bucket}/{obj[\"Key\"]} {data_dir}'\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(download_obj_cmd), out_yaml_format=False\n )\n download_files.append(obj[\"Key\"])\n # Write all downloaded objects to the new bucket\n bucketname = bucket_factory(1)[0].name\n base_path = f\"s3://{bucketname}\"\n for i in range(amount):\n full_object_path = base_path + f\"/{i}/\"\n sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)\n\n obj_list = list(\n obj.key.split(\"/\")[-1]\n for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname)\n )\n\n # Check total copy files amount match\n if file_type == \"large_small\":\n assert len(obj_list) == 2 * amount, \"Total file amount does not match\"\n else:\n assert len(obj_list) == amount, \"Total file amount does not match\"\n\n # Check deduplicate set is same\n test_set = set([i.split(\"/\")[-1] for i in download_files])\n assert test_set == set(obj_list), \"File name set does not match\"", "def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')", "def download_privacy_score(bucket_name, bucket_prefix):\n s3_client = boto3.client('s3')\n s3_client.get_paginator('list_objects_v2')\n input_matcher = re.compile(f'^{bucket_prefix}2[0-9][0-9][0-9]-[0-9][0-9][.]json$')\n\n def iterate_bucket(s3_client, bucket_name, bucket_prefix, input_matcher):\n pageinator = s3_client.get_paginator('list_objects_v2')\n\n for page in pageinator.paginate(Bucket=bucket_name, Prefix=bucket_prefix):\n if page['KeyCount'] == 0:\n continue\n\n for item in page['Contents']:\n if input_matcher.match(item['Key']):\n yield item['Key']\n\n latest_key = max(iterate_bucket(s3_client, bucket_name, bucket_prefix, input_matcher))\n 
print(f'Downloading latest_key file s3://{bucket_name}/{latest_key} ...')\n return json.loads(s3_client.get_object(Bucket=bucket_name, Key=latest_key)['Body'].read())", "def __init__(self, buckets = 200):\n self.data = [None] * buckets\n self.slot = [None] * buckets\n self.size = buckets", "def test_non_existent_key(self):\n ttl = self.cache.ttl('does_not_exist')\n self.assertEqual(ttl, 0)", "def _mock_backend(self):\n for crawl_id in self.crawlQueue:\n # Retrieve page count from engine and set in central redis\n page_count = self.engine_redis.get(crawl_id + \"_count\")\n self.central_redis.set(crawl_id + \"_count\", page_count)\n self.central_redis.expire(crawl_id + \"_count\", 60*60)\n if page_count == \"-2\": # if complete\n self.crawlQueue.remove(crawl_id)", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def get( key ):\n if ACTIVE is False:\n return None\n \n global CACHE, STATS_MISSES, STATS_HITS\n \n \"\"\" Return a key stored in the python instance cache or a None if it has expired or it doesn't exist \"\"\"\n if key not in CACHE:\n STATS_MISSES += 1\n return None\n \n value, expiry = CACHE[key]\n current_timestamp = time.time()\n if expiry == None or current_timestamp < expiry:\n STATS_HITS += 1\n return value\n else:\n STATS_MISSES += 1\n delete( key )\n return None", "def test_s3_table_functions(started_cluster):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )\n\n assert (\n node.query(\n \"\"\"\n SELECT count(*) FROM s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n );\n \"\"\"\n )\n == \"1000000\\n\"\n )", "def test_get_buckets(self):\n pass", "def test_expiry_in_future(self):\n link = DownloadLink()\n link.save()\n self.assertEqual(link.getExpiry(), link.createdAt + timedelta(seconds=60))", "def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'", "def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. 
Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def rotate(key_prefix, key_ext, bucket_name, daily_backups=7, weekly_backups=4, aws_key=None, aws_secret=None):\n\n session = boto3.Session(\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n s3 = session.resource('s3')\n bucket = s3.Bucket(bucket_name)\n keys = bucket.objects.filter(Prefix=key_prefix)\n\n regex = '{0}-(?P<year>[\\d]+?)-(?P<month>[\\d]+?)-(?P<day>[\\d]+?){1}'.format(key_prefix, key_ext)\n backups = []\n\n for key in keys:\n match = re.match(regex, str(key.key))\n if not match:\n continue\n year = int(match.group('year'))\n month = int(match.group('month'))\n day = int(match.group('day'))\n key_date = datetime(year, month, day)\n backups[:0] = [key_date]\n backups = sorted(backups, reverse=True)\n\n if len(backups) > daily_backups+1 and backups[daily_backups] - backups[daily_backups+1] < timedelta(days=7):\n key = bucket.Object(\"{0}{1}{2}\".format(key_prefix,backups[daily_backups].strftime(\"-%Y-%m-%d\"), key_ext))\n logger.debug(\"deleting {0}\".format(key))\n key.delete()\n del backups[daily_backups]\n\n month_offset = daily_backups + weekly_backups\n if len(backups) > month_offset+1 and backups[month_offset] - backups[month_offset+1] < timedelta(days=30):\n key = bucket.Object(\"{0}{1}{2}\".format(key_prefix,backups[month_offset].strftime(\"-%Y-%m-%d\"), key_ext))\n logger.debug(\"deleting {0}\".format(key))\n key.delete()\n del backups[month_offset]", "def test_timed(self):\n time = 0.001\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n sleep(time)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]\n\n for i in range(50):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n sleep(time)\n for i in range(50):\n assert i not in cache\n with pytest.raises(KeyError):\n assert cache[i]", "def test_transform_cart_item_pagination(self):\n size = 700\n service = ElasticsearchService()\n hits, search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size)\n self.assertEqual(size, len(hits))\n hits, search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size,\n search_after=search_after)\n self.assertEqual(size, len(hits))\n hits, 
search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size,\n search_after=search_after)\n self.assertEqual(100, len(hits))", "def test_list_object_with_max_keys(self):\n for i in range(0, 9):\n self.bos.put_object_from_string(\n self.BUCKET, \n \"test_object_%s\" % compat.convert_to_bytes(random.random()),\n \"This is a string.\")\n\n response = self.bos.list_objects(self.BUCKET)\n\n all_list = list()\n tmp_list = list()\n\n for item in response.contents:\n all_list.append(item.key)\n\n response = self.bos.list_objects(self.BUCKET, max_keys=4)\n for item in response.contents:\n tmp_list.append(item.key)\n\n response = self.bos.list_objects(self.BUCKET, max_keys=5, marker=tmp_list[-1])\n for item in response.contents:\n tmp_list.append(item.key)\n\n self.assertListEqual(all_list, tmp_list)", "def open(self):\n super(MemoryCache, self).open()\n\n def _timer():\n # Use a custom timer to try to spread expirations. Within one instance it\n # won't change anything but it will be better if you run multiple instances.\n return time.time() + self.__ttl * random.uniform(-0.25, 0.25)\n\n self.__cache = cachetools.TTLCache(\n maxsize=self.__size, ttl=self.__ttl, timer=_timer\n )", "def test_persisted_values_with_ttl(self):\n blk = MergeStreams()\n self.configure_block(blk, {\"expiration\": {\"seconds\": 1}})\n self.assertEqual(blk.persisted_values(), [])\n blk.start()\n blk.stop()", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def test_put_get(self):\n key = 1\n item = 'aaa'\n cache = LRUCache(5)\n cache.put(key, item)\n assert item == cache.get(key)\n assert 1 == cache.size" ]
[ "0.74554807", "0.73381984", "0.73129797", "0.7263034", "0.69875336", "0.6090414", "0.56455797", "0.5475571", "0.5398008", "0.53416365", "0.53413856", "0.5322587", "0.53129023", "0.5301066", "0.5245236", "0.522405", "0.52154046", "0.5207431", "0.5185581", "0.5156831", "0.51409173", "0.5093467", "0.5086045", "0.5074731", "0.5055", "0.50472575", "0.5002737", "0.49924052", "0.49907944", "0.4980763", "0.4978899", "0.4961192", "0.49509802", "0.49378926", "0.49376935", "0.49310896", "0.49304405", "0.4917052", "0.49120516", "0.49098146", "0.49066755", "0.49025446", "0.48961398", "0.48948157", "0.48869798", "0.48761743", "0.48704052", "0.48668402", "0.48626998", "0.48521507", "0.48482013", "0.4831019", "0.48263523", "0.4824126", "0.4823487", "0.48031116", "0.48020956", "0.47952285", "0.47841886", "0.47686318", "0.47672367", "0.47647065", "0.4763225", "0.4759916", "0.47584823", "0.47571325", "0.47529325", "0.47529325", "0.47502455", "0.47380522", "0.47349095", "0.4731137", "0.4729835", "0.47165266", "0.47141474", "0.47083062", "0.47021356", "0.46822345", "0.46728516", "0.46703815", "0.46686384", "0.46661937", "0.466329", "0.46620217", "0.4652024", "0.46498647", "0.4646438", "0.46431318", "0.46313417", "0.46290767", "0.4620766", "0.4604562", "0.45969495", "0.45966393", "0.45945492", "0.45888945", "0.45881915", "0.45841146", "0.45839867", "0.45822665" ]
0.7790331
0
Test 1. min 0 2. max 2147483647 3. default 0 4. negative values, date, string
def test_maxttl_possible_values(self): # default rest = RestConnection(self.master) default_maxttl = rest.get_bucket_maxTTL() if default_maxttl != 0: self.fail("FAIL: default maxTTL if left unset must be 0 but is {0}".format(default_maxttl)) self.log.info("Verified: default maxTTL if left unset is {0}".format(default_maxttl)) # max value try: self._update_bucket_maxTTL(maxttl=2147483648) except Exception as e: self.log.info("Expected exception : {0}".format(e)) try: self._update_bucket_maxTTL(maxttl=2147483647) except Exception as e: self.fail("Unable to set maxTTL=2147483647, the max permitted value") else: self.log.info("Verified: Max value permitted is 2147483647") else: self.fail("Able to set maxTTL greater than 2147483647") # min value try: self._update_bucket_maxTTL(maxttl=0) except Exception as e: self.fail("Unable to set maxTTL=0, the min permitted value") else: self.log.info("Verified: Min value permitted is 0") # negative value try: self._update_bucket_maxTTL(maxttl=-60) except Exception as e: self.log.info("Verified: negative values not permitted, exception : {0}".format(e)) else: self.fail("FAIL: Able to set a negative maxTTL") # date/string try: self._update_bucket_maxTTL(maxttl="12/23/2016") except Exception as e: self.log.info("Verified: string not permitted, exception : {0}".format(e)) else: self.fail("FAIL: Able to set a date string maxTTL")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def intrange(value, name=\"\", value_min=None, value_max=None, zero=False):\n value = __integer(value, \"%s value\" % name, False)\n if value_min is not None:\n value_min = __integer(value_min, \"minimal %s value\" % name, True)\n intvalue(value_min, name, True, True, True)\n if value_max is not None:\n value_max = __integer(value_max, \"maximal %s value\" % name, True)\n intvalue(value_max, name, True, True, True)\n if not zero:\n if value == 0:\n __ex(\"The %s value must not be zero.\" % name, False)\n if (value_min is not None) and (value_max is not None):\n if value_min > value_max:\n __ex(\"The maximal %s value must be greater than the minimal \"\n \"value.\" % name, False)\n if (value_min == value_max) and (value != value_min):\n __ex(\"The %s value can only be %s (depending on further range \"\n \"further range arguments).\" % (name, value_min), False)\n if (value < value_min) or (value > value_max):\n __ex(\"The %s value must be between %s and %s (depending on \"\n \"further range arguments).\" % (name, value_min, value_max),\n False)\n elif value_min is not None:\n if value < value_min:\n __ex(\"The %s value must not be less than %s.\" % (name, value_min),\n False)\n elif value_max is not None:\n if value > value_max:\n __ex(\"The %s value must not be greater than %s.\" %\n (name, value_max), False)", "def test_creation_bounds_not_inclusive():\n with pytest.raises(ValueError) as __:\n value = -42\n __ = param.Integer(value=value, hardbounds=[-42, 100], inclusive_bounds=[False, False])", "def test_negatives(self):\n self.assertEqual(max_integer([-1, -2, -3, -4]), -1)\n self.assertEqual(max_integer([-4, -3, -2, 0]), 0)", "def range_validator(value_str, args):\n \n assert len(args) == 5, \"Error: range_validator requires 5 arguments.\"\n a_type, lb, ub, allow_none, error_msg = args\n try:\n if allow_none and value_str == 'None':\n value = None\n else:\n value = a_type(value_str)\n except ValueError:\n raise InputException(error_msg + value_str)\n if (lb != None and value < lb) or (ub != None and value > ub):\n raise InputException(error_msg + value_str)\n return value", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Integer(\"yolo\", \"uniform\", -3, 2, default_value=4)", "def test_validate_min_value(self):\n\n test_values = [\n -5,\n 2,\n ]\n\n testrow = TestSchema()\n\n for value in test_values:\n testrow.int_min_field = value\n self.assertRaises(Exception, testrow.save)", "def test_negative(self):\n lst = [-1, -5, -98]\n self.assertEqual(max_integer(lst), -1)", "def test_base_cases(self):\n self.assertEqual(max_integer([1, 50, 2, 10]), 50)\n self.assertEqual(max_integer([-1, -20, -2, -50]), -1)\n self.assertEqual(max_integer([1, 2, 3, 4, 5]), 5)\n self.assertEqual(max_integer([40, 0, 2, 5]), 40)\n self.assertEqual(max_integer([10, 0, -2, 5]), 10)\n self.assertEqual(max_integer([40]), 40)\n self.assertEqual(max_integer([]), None)", "def __check_args_val(self):\n if self.__min_range < 0:\n error_msg = \"min_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < 0:\n error_msg = \"max_range must be greater than or equal to zero\"\n raise ValueError(error_msg)\n elif self.__max_range < self.__min_range:\n error_msg = \"max_range must be greater than or equal to min_range\"\n raise ValueError(error_msg)", "def _limit(value, min_value, max_value):\n\n if value < 
min_value:\n return min_value\n if value > max_value:\n return max_value\n return value", "def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax", "def test_positives(self):\n self.assertEqual(max_integer([1, 2, 3, 4]), 4)\n self.assertEqual(max_integer([4, 3, 2, 1]), 4)\n self.assertEqual(max_integer(['hello', 'hello']), 'hello')\n self.assertEqual(max_integer(['a', 'b']), 'b')", "def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()", "def test_get_range(self):\n pass", "def _clean_int(value, default, min_value=None, max_value=None):\n if not isinstance(value, (int, long)):\n try:\n value = int(value)\n except (TypeError, ValueError):\n value = default\n if min_value is not None:\n value = max(min_value, value)\n if max_value is not None:\n value = min(value, max_value)\n return value", "def _clean_int(value, default, min_value=None, max_value=None):\n if not isinstance(value, (int, long)):\n try:\n value = int(value)\n except (TypeError, ValueError):\n value = default\n if min_value is not None:\n value = max(min_value, value)\n if max_value is not None:\n value = min(value, max_value)\n return value", "def __init__(\n self,\n *,\n type: str = \"number\",\n default: float = None,\n min: float = None,\n max: float = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def __init__(\n self,\n *,\n type: str = \"integer\",\n default: int = None,\n min: int = None,\n max: int = None,\n optional: bool = None,\n description: str = None,\n **kwargs,\n ):\n pass", "def _validate_db_int(**kwargs):\n max_int = (2 ** 31) - 1\n\n for param_key, param_value in kwargs.items():\n if param_value and param_value > max_int:\n msg = _(\"'%(param)s' value out of range, \"\n \"must not exceed %(max)d.\") % {\"param\": param_key,\n \"max\": max_int}\n raise exception.Invalid(msg)", "def _validate(self, value, **options):\n\n # this is a workaround to get the correct values of accepted min and max in\n # case they are callable and producing different results on each call.\n current_values = dict()\n current_values[self.CURRENT_MAX_KEY] = None\n current_values[self.CURRENT_MIN_KEY] = None\n options[self.CURRENT_VALUE_KEY] = current_values\n try:\n super()._validate(value, **options)\n except (self.maximum_value_error, self.minimum_value_error):\n equality_min = ''\n equality_max = ''\n\n inclusive_maximum = options.get('inclusive_maximum')\n if inclusive_maximum is None:\n inclusive_maximum = self.inclusive_maximum\n\n inclusive_minimum = options.get('inclusive_minimum')\n if inclusive_minimum is None:\n inclusive_minimum = self.inclusive_minimum\n\n if inclusive_minimum is not False:\n equality_min = self.inclusive_minimum_value_message\n\n if inclusive_maximum is not False:\n equality_max = self.inclusive_maximum_value_message\n\n current_min = current_values.get(self.CURRENT_MIN_KEY)\n if current_min is None:\n current_min = self.accepted_minimum\n\n current_max = current_values.get(self.CURRENT_MAX_KEY)\n if current_max is None:\n current_max = self.accepted_maximum\n\n raise self.range_value_error(self.range_value_message.format(\n param_name=self._get_field_name(**options),\n lower=self._get_representation(current_min),\n 
upper=self._get_representation(current_max),\n or_equal_min=equality_min, or_equal_max=equality_max))", "def one_positive(self):\n lst = [-1, -5, 98]\n self.assertEqual(max_integer(lst), 98)", "def test_real_range_constraint_validation():\n\n # Test valid values OK\n minimum = 1\n maximum = 2\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n\n # Test minimum must be less than maximum\n minimum = 3\n maximum = 2\n try:\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n assert False, \"RealRangeConstraint should require that minimum be less than maximum\"\n except CitrinationClientError:\n pass\n\n # Test values must be castable to float\n minimum = {}\n maximum = 2\n try:\n c = RealRangeConstraint(name=\"Property Band gap\",minimum=minimum,maximum=maximum)\n assert False, \"RealRangeConstraint should require that minimum and maximum be castable to floats\"\n except CitrinationClientError:\n pass", "def test_list(self):\n self.assertEqual(max_integer([1, 5, -7, 6, -4 , 10]), 10)\n self.assertEqual(max_integer([0]), 0)\n self.assertEqual(max_integer([0, 3 + 2, 7, 9 * 8, 35, 12]), 72)", "def test_default_zero_fields_validate(self):\r\n it = self.BigIntTest()\r\n it.validate()", "def one_negative(self):\n lst = [1, 5, -98]\n self.assertEqual(max_integer())", "def test_min_max_limiting() -> None:\n d = {\n \"one\": [-1, 0, 1],\n \"two\": [2, 3, -1],\n }\n # Update a single column\n df = pd.DataFrame(d)\n #\n # .loc accepts a boolean mask and set of columns to return.\n #\n df.loc[df[\"one\"] < 0, [\"one\"]] = 0\n #\n # one two\n # 0 2\n # 0 3\n # 1 -1\n #\n assert df.iloc[0, 0] == 0\n assert df.iloc[2, 1] == -1\n\n # You can use `clip` to enforce minimum and maximum values for an entire df.\n df = df.clip(lower=0.0)\n assert df.iloc[0, 0] == 0.0\n assert df.iloc[2, 1] == 0.0", "def test_integer_params(self):\n test_date = get_by_values(4, 5, 6, 2016)\n self.assertEquals(test_date, date(2016, 6, 25))", "def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number", "def test_types(self):\n self.assertRaises(TypeError, max_integer, None)\n self.assertRaises(TypeError, max_integer, 1234)", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def test_data_type(self):\n with self.assertRaises(TypeError):\n max_integer(None)\n\n with self.assertRaises(TypeError):\n max_integer([\"Hey\", 3, 456, \"ALX\", 65])", "def test_lists_negatives(self):\n list = [-3, -8, 10, 9]\n self.assertEqual(max_integer(list), 10)\n\n list = [-1]\n self.assertEqual(max_integer(list), -1)", "def test_neg_ordered(self):\n no_list = [-1, -2, -3, -4, -5]\n self.assertEqual(max_integer(no_list), -1)", "def intvalue(value, name=\"\", positive=True, zero=False, negative=False):\n value = __integer(value, \"%s value\" % name, False)\n if not positive:\n if value > 0:\n __ex(\"The %s value must not be positive.\" % name, False)\n if not zero:\n if value == 0:\n __ex(\"The %s value must not be zero.\" % name, False)\n if not negative:\n if 
value < 0:\n __ex(\"The %s value must not be negative.\" % name, False)", "def __init__(__self__, *,\n max: Optional[pulumi.Input[int]] = None,\n min: Optional[pulumi.Input[int]] = None):\n if max is not None:\n pulumi.set(__self__, \"max\", max)\n if min is not None:\n pulumi.set(__self__, \"min\", min)", "def __init__(__self__, *,\n max: Optional[pulumi.Input[int]] = None,\n min: Optional[pulumi.Input[int]] = None):\n if max is not None:\n pulumi.set(__self__, \"max\", max)\n if min is not None:\n pulumi.set(__self__, \"min\", min)", "def is_valid_range(parser, arg, minimum=0, maximum=100):\n if arg < minimum:\n parser.error(\"%s < %s\", arg, minimum)\n else:\n if arg > maximum:\n parser.error(\"%s > %s\", arg, maximum)\n\n return arg", "def text_max_negative(self):\n self.assertEqual(max_integer([-5, -3, -4, -8]), -3)", "def test_creation_hardbounds_inclusive():\n value = -42\n hardbounds = [-42, 100]\n\n num_a = param.Integer(value=value, hardbounds=hardbounds, inclusive_bounds=[True, True])\n assert num_a.value == value\n assert num_a.hardbounds == hardbounds", "def test_out_of_range(self):\n term, rmd = util.parse_date(\"0699\")\n self.assertIsNone(util.parse_date_partial(term))", "def test_creation_incorrect_hardbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, hardbounds=[0, 10, 20])", "def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)", "def test_int_range_constraint_validation():\n\n # Test valid values OK\n minimum = 1\n maximum = 2\n IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n\n # Test minimum must be less than maximum\n minimum = 3\n maximum = 2\n try:\n RealRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum be less than maximum\"\n except CitrinationClientError:\n pass\n\n # Test values must be castable to float\n minimum = {}\n maximum = 2\n try:\n c = IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum and maximum be castable to integers\"\n except CitrinationClientError:\n pass", "def validate_points(self, data):\n if data> 1:\n data = 1\n elif data < 0:\n data=0\n return data", "def test_min_max_resources(self):\n with pytest.raises(AttributeError) as exc:\n Fidelity(\"epoch\", 3, 2)\n assert \"Minimum resources must be smaller than maximum resources.\" == str(\n exc.value\n )", "def __init__(self, allowable_min, allowable_max):\n self.allowable_min = allowable_min\n self.allowable_max = allowable_max\n self.value = None\n # Do it this way because we'll override in reset\n self.min_found = None\n self.max_found = None\n self.avg_found = None\n self.count = 0\n\n # override reset for the different data types\n self.reset()", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. 
Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n # Type checking\n if not isinstance(description, str):\n raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n\n if not isinstance(inputvalue, (int, long, float)):\n raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))\n\n if not isinstance(minvalue, (int, float, long, type(None))):\n raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))\n\n if not isinstance(maxvalue, (int, float, long, type(None))):\n raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))\n\n # Consistency checking\n if (not minvalue is None) and (not maxvalue is None):\n if maxvalue < minvalue:\n raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format( \\\n maxvalue, minvalue))\n\n # Value checking\n if not minvalue is None:\n if inputvalue < minvalue:\n raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format( \\\n description, inputvalue, minvalue))\n\n if not maxvalue is None:\n if inputvalue > maxvalue:\n raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format( \\\n description, inputvalue, maxvalue))", "def test_large_values(self):\n eq_(0, smart_int('1' * 1000))", "def range_limit(val, minv, maxv):\n\tif (val < minv):\n\t\tval = minv\n\telif (val > maxv):\n\t\tval = maxv\n\treturn val", "def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):\n if not isinstance(description, str):\n raise TypeError('The description should be a string. Given: {0!r}'.format(description))\n\n if not isinstance(inputvalue, (int, long)):\n raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))\n\n if not isinstance(minvalue, (int, long, type(None))):\n raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))\n\n if not isinstance(maxvalue, (int, long, type(None))):\n raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))\n\n _checkNumerical(inputvalue, minvalue, maxvalue, description)", "def test_limit(self):\n\t\tfor lim in [1, '234', -100, '-200']:\n\t\t\tself.filter.set_limit(lim)\n\t\t\tself.assertEqual(int(lim), self.filter.get_limit(), \"Limit mismatch: %s!=%s\" % (lim, self.filter.get_limit()))\n\t\tself.filter.set_limit('test')\n\t\tself.assertEqual('test', self.filter.get_limit(), \"String set failed for Filter limit.\")", "def validate(val, num1=0, num2=float('inf')):\n val = int(val)\n if not num1 <= val < num2:\n raise ArgumentTypeError(\"Value out of range: {}. 
\"\n \"Should be between {} and {}.\".format(val, num1, num2 - 1))\n return val", "def test_get_meta_range(self):\n pass", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def test_equals(self):\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([2, 2]), 2)\n self.assertEqual(max_integer([-2, -2]), -2)\n self.assertEqual(max_integer([1]), 1)", "def test_max(self):\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n max_true = ['99', '99.0', '89.9', '88', '-99']\n for value in max_true:\n document = {'age_3': value}\n self.assertTrue(val.validate(document))\n document = {'age_4': value}\n self.assertTrue(val.validate(document))\n\n max_false = ['99.1', '100']\n for value in max_false:\n document = {'age_3': value}\n self.assertFalse(val.validate(document))\n document = {'age_4': value}\n self.assertFalse(val.validate(document))", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def rangeLimit(val, minv, maxv):\n\treturn range_limit(val, minv, maxv)", "def test_creation_hardbounds():\n value = -42\n hardbounds = [-100, 100]\n\n num_a = param.Integer(value=value, hardbounds=hardbounds)\n assert num_a.value == value\n assert num_a.hardbounds == hardbounds", "def _validateInt(dErrors, sName, sValue, iMin = 0, iMax = 0x7ffffffe, aoNilValues = tuple([-1, None, ''])):\n (sValue, sError) = ModelDataBase.validateInt(sValue, iMin, iMax, aoNilValues, fAllowNull = True);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def test_validate_goals():\n assert (validate_goals([\"max\", \"min\"], 2) == np.array([1, -1])).all()\n with pytest.raises(ValueError):\n validate_goals([2, 3], 2)\n with pytest.raises(ValueError):\n validate_goals([1], 2)\n with pytest.raises(ValueError):\n validate_goals(1, 1)\n\n assert (validate_goals(None, 3) == np.array([1, 1, 1])).all()", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def test_assert_min_exceeded(self):\n with self.assertRaises(ConversionError):\n DPTSignedRelativeValue.to_knx(-129)", "def test_creation_incorrect_softbounds_count():\n with pytest.raises(ValueError) as __:\n value = 1\n __ = param.Integer(value=value, softbounds=[0, 10, 20])", "def _validate_value(self, value):\n return (self.maximum_value is None) or (value <= self.maximum_value)", "def __init__(self, max: int, interval: Union[int, List[int]] = 0):\n super().__init__()\n if max < 1:\n raise ValueError('max: please enter a value greater than 0')\n\n if isinstance(interval, int):\n if interval < 0:\n raise ValueError('interval: negative numbers are not allowed')\n intervals = [interval]\n elif isinstance(interval, Iterable):\n for i in interval:\n if i < 0:\n raise ValueError('interval: negative numbers are not allowed')\n intervals = interval\n\n self.max = max\n self.intervals = intervals", "def 
test_check_max(self):\n\t\tself.filter.set_operator(\".max\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=0)))\n\t\tself.assertFalse(self.filter.check(Object(field=13)))", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"uniform\", -3, 2, default_value=5)", "def test_result(self):\n self.assertIsNone(max_integer([]))\n self.assertEqual(max_integer([1, 2, 3, 4]), 4)\n self.assertEqual(max_integer([4, 1, 2, 3]), 4)\n self.assertEqual(max_integer([1, 4, 3, 2]), 4)\n self.assertEqual(max_integer([-34, -2, -3, -37]), -2)\n self.assertEqual(max_integer([-231, 2, -33, -24]), 2)\n self.assertEqual(max_integer([23.4, 34.6, 56.5, 60.2]), 60.2)\n self.assertEqual(max_integer([1]), 1)\n self.assertEqual(max_integer([56.3]), 56.3)\n self.assertEqual(max_integer([-34]), -34)\n self.assertEqual(max_integer([\"holberton\", \"school\",\"student\"]), \"student\")", "def test_creation_outside_bounds():\n with pytest.raises(ValueError) as __:\n value = 42\n __ = param.Integer(value=value, hardbounds=[0, 41])", "def test_assign_period():\n\n \"\"\" Period 1 is allowed for all allowable years yyyy \"\"\"\n args.period = 1\n args.yyyy = 1999\n assert assign_period(args) == None\n args.yyyy = 2000\n assert assign_period(args) == 1\n args.yyyy = 2010\n assert assign_period(args) == 1\n args.yyyy = 2017\n assert assign_period(args) == 1\n args.yyyy = 2099\n assert assign_period(args) == 1\n args.yyyy = 3000\n assert assign_period(args) == None\n\n \"\"\" Period 3 is only allowed for 2007-2013 \"\"\"\n args.period = 3\n args.yyyy = 2006\n assert assign_period(args) == None\n args.yyyy = 2007\n assert assign_period(args) == 3\n args.yyyy = 2010\n assert assign_period(args) == 3\n args.yyyy = 2013\n assert assign_period(args) == 3\n args.yyyy = 2014\n assert assign_period(args) == None\n args.yyyy = 2017\n assert assign_period(args) == None\n\n \"\"\" Period 5 is only allowed for 2009 forward \"\"\"\n args.period = 5\n args.yyyy = 2008\n assert assign_period(args) == None\n args.yyyy = 2009\n assert assign_period(args) == 5\n args.yyyy = 2010\n assert assign_period(args) == 5\n args.yyyy = 2017\n assert assign_period(args) == 5\n args.yyyy = 2099\n assert assign_period(args) == 5\n args.yyyy = 3000\n assert assign_period(args) == None", "def test_non_cast_input():\n assert _currency_column_to_numeric(\"-1,000,000 yen\") == \"-1000000\"", "def test_int(self):\n self.assertTrue(validate_measure_input('0', self.measures))\n self.assertTrue(validate_measure_input('1', self.measures))\n self.assertTrue(validate_measure_input(str(len(self.measures)), self.measures))\n self.assertFalse(validate_measure_input(str(len(self.measures) + 1), self.measures))", "def test_lt():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n assert num_a.value < 100", "def quantize(input=None, min_range=None, max_range=None, out_type=_Null, out=None, name=None, **kwargs):\n return (0,)", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def __init__( # pylint: disable=too-many-arguments\n self,\n min: Optional[float] = None,\n max: Optional[float] = None,\n step: Optional[int] = 
None, # pylint: disable=redefined-outer-name\n include_min: bool = True,\n include_max: bool = True,\n ) -> None:\n #: The optional minimal allowed value.\n self.min = min\n\n #: The optional maximal allowed value.\n self.max = max\n\n #: The optional step between values.\n self.step = step\n\n #: Whether the minimal value is allowed.\n self.include_min = include_min\n\n #: Whether the maximal value is allowd.\n self.include_max = include_max", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def test_float_int(self):\n self.assertEqual(max_integer([-5, 4.5, 4, 9.66]), 9.66)", "def test_data_at_range_limit(parallel, fast_reader, guess):\n # Python reader and strtod() are expected to return precise results\n rtol = 1.0e-30\n\n # Update fast_reader dict; adapt relative precision for fast_converter\n if fast_reader:\n fast_reader[\"parallel\"] = parallel\n if fast_reader.get(\"use_fast_converter\"):\n rtol = 1.0e-15\n elif np.iinfo(np.int_).dtype == np.dtype(np.int32):\n # On 32bit the standard C parser (strtod) returns strings for these\n pytest.xfail(\"C parser cannot handle float64 on 32bit systems\")\n\n if parallel:\n if not fast_reader:\n pytest.skip(\"Multiprocessing only available in fast reader\")\n elif CI:\n pytest.xfail(\"Multiprocessing can sometimes fail on CI\")\n\n # Test very long fixed-format strings (to strtod range limit w/o Overflow)\n for D in 99, 202, 305:\n t = ascii.read(\n StringIO(99 * \"0\" + \".\" + D * \"0\" + \"1\"),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n assert_almost_equal(t[\"col1\"][0], 10.0 ** -(D + 1), rtol=rtol, atol=1.0e-324)\n for D in 99, 202, 308:\n t = ascii.read(\n StringIO(\"1\" + D * \"0\" + \".0\"),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n assert_almost_equal(t[\"col1\"][0], 10.0**D, rtol=rtol, atol=1.0e-324)\n\n # 0.0 is always exact (no Overflow warning)!\n for s in \"0.0\", \"0.0e+0\", 399 * \"0\" + \".\" + 365 * \"0\":\n t = ascii.read(\n StringIO(s), format=\"no_header\", guess=guess, fast_reader=fast_reader\n )\n assert t[\"col1\"][0] == 0.0\n\n # Test OverflowError at precision limit with laxer rtol\n if parallel:\n pytest.skip(\"Catching warnings broken in parallel mode\")\n elif not fast_reader:\n pytest.skip(\"Python/numpy reader does not raise on Overflow\")\n with pytest.warns() as warning_lines:\n t = ascii.read(\n StringIO(\"0.\" + 314 * \"0\" + \"1\"),\n format=\"no_header\",\n guess=guess,\n fast_reader=fast_reader,\n )\n\n n_warns = len(warning_lines)\n assert n_warns in (0, 1), f\"Expected 0 or 1 warning, found {n_warns}\"\n if n_warns == 1:\n assert (\n \"OverflowError converting to FloatType in column col1, possibly \"\n \"resulting in degraded precision\" in str(warning_lines[0].message)\n )\n\n assert_almost_equal(t[\"col1\"][0], 1.0e-315, rtol=1.0e-10, atol=1.0e-324)", "def test_min(self):\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n min_true = ['9', '9.0', '9.1', '10']\n for value in min_true:\n document = {'age_1': value}\n self.assertTrue(val.validate(document))\n document = {'age_2': value}\n self.assertTrue(val.validate(document))\n\n min_false = ['8.99999', '-9']\n for value in min_false:\n document = {'age_1': value}\n 
self.assertFalse(val.validate(document))\n document = {'age_2': value}\n self.assertFalse(val.validate(document))", "def column_range_validation_factory(minim=None, maxim=None, ignore_missing_vals=False):\n if minim is None:\n if isinstance(maxim, datetime):\n minim = datetime.min\n else:\n minim = -1 * (sys.maxsize - 1)\n if maxim is None:\n if isinstance(minim, datetime):\n maxim = datetime.max\n else:\n maxim = sys.maxsize\n\n def in_range_validation_fn(x):\n if ignore_missing_vals and pd.isnull(x):\n return True, {}\n return (isinstance(x, (type(minim), type(maxim)))) and (x <= maxim) and (x >= minim), {}\n\n in_range_validation_fn.__doc__ = \"checks whether values are between {} and {}\".format(\n minim, maxim\n )\n if ignore_missing_vals:\n in_range_validation_fn.__doc__ += \", ignoring nulls\"\n\n return in_range_validation_fn", "def test_empty(self):\n self.assertEqual(max_integer([]), None)", "def test_creation_hardbounds_autobound():\n value = -150\n hardbounds = [-100, 100]\n\n num_a = param.Integer(value=value, hardbounds=hardbounds, auto_bound=True)\n assert num_a.value == -100", "def test_check_min(self):\n\t\tself.filter.set_operator(\".min\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=15)))\n\t\tself.assertFalse(self.filter.check(Object(field=9)))", "def integer_validator(self, name, value):\n if type(value) is not int:\n raise TypeError(name + \" must be an integer\")\n elif value <= 0 and name not in (\"x\", \"y\"):\n raise ValueError(name + \" must be > 0\")\n elif value < 0 and name in (\"x\", \"y\"):\n raise ValueError(name + \" must be >= 0\")", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def test_if_input_is_negative(self):\n self.assertEquals(prime_numbers(-5), \"Numbers less than or equal to zero are not allowed!\")", "def integer_validator(self, name, value):\n if type(value) != int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be greater than 0\".format(name))", "def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)", "def is_valid_input(value):\n if value is None:\n return None\n\n try:\n value = int(value)\n except ValueError:\n return None\n\n if 1 <= value <= 5:\n return value\n else:\n return None" ]
[ "0.6304185", "0.62069917", "0.6080596", "0.6072301", "0.6067451", "0.6045868", "0.6025268", "0.6013756", "0.6009615", "0.5928551", "0.5872796", "0.5859247", "0.5829964", "0.5817812", "0.58132696", "0.5796257", "0.5796257", "0.5784377", "0.57772344", "0.57770634", "0.577266", "0.574151", "0.5733266", "0.57324123", "0.56936353", "0.5692622", "0.56872475", "0.56828207", "0.56724083", "0.56555074", "0.56517404", "0.5647913", "0.5647913", "0.56456995", "0.562485", "0.56096363", "0.5602948", "0.5595335", "0.5595335", "0.5584631", "0.5582228", "0.5580756", "0.55707175", "0.55696356", "0.5568467", "0.5567646", "0.55625", "0.5560824", "0.5560077", "0.5553085", "0.5546825", "0.5546825", "0.5545743", "0.554324", "0.55429846", "0.5514797", "0.55141336", "0.55055064", "0.5500055", "0.5499874", "0.54985833", "0.549416", "0.54925144", "0.54923826", "0.5492138", "0.54871505", "0.546915", "0.54675704", "0.5462345", "0.5455259", "0.54551196", "0.54445535", "0.544263", "0.5440321", "0.5432048", "0.5427978", "0.5427723", "0.54224694", "0.54197466", "0.54169095", "0.54130054", "0.54082614", "0.54011184", "0.53929913", "0.53929913", "0.53929913", "0.53929913", "0.53859174", "0.53853285", "0.5384313", "0.5373696", "0.537231", "0.53721136", "0.5359923", "0.5358441", "0.53493154", "0.53491515", "0.53457624", "0.5343723", "0.53398454" ]
0.5408003
82
1. Create a bucket with ttl = 200s
2. Upload 1000 docs with exp = 100s
3. Update ttl = 40s
4. After 40s, run expiry pager again and get item count, must be 1000
5. After 60s, run expiry pager again and get item count, must be 0
6. Now load another set of docs with exp = 100s
7. Run expiry pager after 40s and get item count, must be 0
def test_update_maxttl(self):
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=100)
    self._update_bucket_maxTTL(maxttl=40)

    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s"
                      " updated maxttl = 40s, after 40s item count = {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Updated ttl affects docs with larger expiry before updation!")

    self.sleep(60, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = RestConnection(self.master).get_active_key_count(bucket)
        self.log.info("Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s"
                      " updated maxttl = 40s, after 100s item count = {0}".format(items))
        if items != 0:
            self.fail("FAIL: Docs with 100s as expiry before maxTTL updation still alive!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")", "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = 
{0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))", "def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")", "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()", "def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)", "def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time", "def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! 
\")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def test_update_bucket(self):\n pass", "def create_thumbnails():\n bucket = 
BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def handler(event, context):\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] 
should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass", "def post_bucketlist():\n pass", "def update_bucketlist():\n pass", "def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be 
unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. 
service=s3 method=delete_object id={}'.format(file_id))", "def __init__(__self__, *,\n bucket: str,\n kind: str,\n retention_interval: str,\n upload_interval: str):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"retention_interval\", retention_interval)\n pulumi.set(__self__, \"upload_interval\", upload_interval)", "def do_rate_limited_ops(\n handle, num_seconds, do_writes, limit, max_rows, min_size, max_size):\n put_request = PutRequest().set_table_name(table_name)\n get_request = GetRequest().set_table_name(table_name)\n #\n # Generate a string of max_size with all \"x\"s in it\n #\n user_data = ''\n if do_writes:\n for x in range(max_size):\n user_data += 'x'\n\n start_time = int(round(time() * 1000))\n end_time = start_time + num_seconds * 1000\n\n print('Running continuous ' + ('writes' if do_writes else 'reads') +\n ' for ' + str(num_seconds) + ' seconds.')\n #\n # Keep track of how many units we used\n #\n units_used = 0\n #\n # With rate limiting enabled, we can find the amount of time our operation\n # was delayed due to rate limiting by getting the value from the result\n # using Result.get_rate_limit_delayed_ms().\n #\n delay_ms = 0\n\n key = dict()\n value = dict()\n while True:\n fld_id = int(random() * max_rows)\n try:\n if do_writes:\n value['id'] = fld_id\n value['sid'] = fld_id\n rec_size = int(random() * (max_size - min_size))\n rec_size += min_size\n value['name'] = user_data[:rec_size]\n put_request.set_value(value)\n put_result = handle.put(put_request)\n units_used += put_result.get_write_units()\n delay_ms += put_result.get_rate_limit_delayed_ms()\n else:\n key['id'] = fld_id\n key['sid'] = fld_id\n get_request.set_key(key)\n get_result = handle.get(get_request)\n units_used += get_result.get_read_units()\n delay_ms += get_result.get_rate_limit_delayed_ms()\n except WriteThrottlingException as wte:\n # We should not get WriteThrottlingException exception\n print('Got unexpected write throttling exception')\n raise wte\n except ReadThrottlingException as rte:\n # We should not get ReadThrottlingException exception\n print('Got unexpected read throttling exception')\n raise rte\n if int(round(time() * 1000)) >= end_time:\n break\n num_seconds = (int(round(time() * 1000)) - start_time) // 1000\n units_used /= num_seconds\n\n if units_used < int(limit * 0.8) or units_used > int(limit * 1.2):\n if do_writes:\n msg = ('Writes: expected around ' + str(limit) + ' WUs, got ' +\n str(units_used))\n else:\n msg = ('Reads: expected around ' + str(limit) + ' RUs, got ' +\n str(units_used))\n raise RuntimeError(msg)\n\n print(('Writes' if do_writes else 'Reads') + ': average usage = ' +\n str(units_used) + ('WUs' if do_writes else 'RUs') +\n ' (expected around ' + str(limit))\n\n print('Total rate limiter delay time = ' + str(delay_ms) + 'ms')", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def large_upload_collection(upload_items: List[JSONDict]) -> UploadCollection:\n items = []\n\n item = upload_items[0]\n for i in range(3050):\n copy = item.copy()\n copy[\"guid\"] = copy[\"guid\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = UploadCollection(items=items)\n return collection", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in 
keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def test_long_timeout(self):\n self.cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second\n self.assertEqual(self.cache.get('key1'), 'eggs')\n\n self.cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key2'), 'ham')\n\n self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key3'), 'sausage')\n self.assertEqual(self.cache.get('key4'), 'lobster bisque')", "def test_list_objects():\n x = 0\n for obj in qmk_storage.list_objects():\n assert 'Key' in obj\n assert type(obj.get('LastModified')) == datetime.datetime\n\n if x > 5:\n break\n x += 1", "def manipulate_bucketlist():\n pass", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())", "def _put_retry(self, s3_bucket, s3_filename, local_filename, max_retries=3, policy=None):\n b = self.conn.get_bucket(s3_bucket)\n retries = 0\n while retries < max_retries:\n try:\n s3_key = b.new_key(s3_filename)\n s3_key.set_contents_from_filename(local_filename, policy=policy)\n except:\n logger.info('File transfer error: ' + s3_filename, exc_info=True)\n retries = retries + 1\n if retries == max_retries:\n raise\n time.sleep(retries)\n else:\n logger.info('Archived %s to %s/%s', local_filename, s3_bucket, s3_filename)\n return os.path.getsize(local_filename)", "def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, 
ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)", "def progress_update(sent, total):\n l.debug(\"%d of %d Mb uploaded to Amazon S3.\", sent / 1000000, total / 1000000)", "def limit_for(self, expiration=10, **kwargs):\n key = self._get_key(**kwargs)\n self.redis_conn.set(key, 1)\n self.redis_conn.expire(key, expiration)", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def __init__(self, bucket):\n self.bucket = bucket", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def checkSold(auto=False):\n\n #Create connection \n db = pymysql.connect(host=\"localhost\", user=\"testUser\", passwd=\"BorrisBulletDodger\", db=\"scraperdb\", charset='utf8')\n cursor = db.cursor()\n\n #SQL Query\n sql = \"SELECT url FROM motorcycles WHERE adExpiry IS NULL\"\n\n #Find data\n try: \n cursor.execute(sql)\n sqlResult = cursor.fetchall()\n urls = [i[0] for i in sqlResult]\n db.commit()\n except Exception as e:\n db.rollback()\n print(f\"Exception occured: {e}\")\n\n #User input to proceed if not auto\n while not auto:\n cont = input(f\"{len(urls)} stored listings found - Do you wish to check if sold?: \")\n if cont.lower() == 'y' or cont.lower() == 'yes':\n break\n elif cont.lower() == 'n' or cont.lower() == 'no':\n return\n else:\n print(\"Please enter y/n\")\n continue\n \n #Use threading to check if urls have expired\n maxThreads = 5\n urlsQ = Queue(maxsize=0)\n #Set number of threads\n numThreads = min(maxThreads, len(urls))\n #Create lock\n lock = Lock()\n #Create progress bar\n pbar = tqdm(total=len(urls))\n \n #Expired test\n def checkExpiredThread(q, results, db, cursor):\n \"\"\"\n Checks whether input url has expired\n Input: [\"url\"], {} - Keys=urls, vals=False\n \"\"\"\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")\n\n\n #Load queue with urls, results dict keys = urls, vals = False - Ad default not expired\n results = {}\n for url in urls:\n urlsQ.put(url)\n results[url] = False\n\n #Create threads that execute checkExpiredThread function, updates data\n for _ in range(numThreads):\n worker = Thread(target=checkExpiredThread, args=(urlsQ, results, db, cursor))\n worker.setDaemon(True)\n worker.start()\n #Wait until the 
queue has been processed - All URLs checked\n urlsQ.join()\n pbar.close()\n\n #Remember to close database at the end \n db.close()\n \n #Count number of expired urls\n count = sum(1 for value in results.values() if value)\n logger.info(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")\n print(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")", "def _mock_backend(self):\n for crawl_id in self.crawlQueue:\n # Retrieve page count from engine and set in central redis\n page_count = self.engine_redis.get(crawl_id + \"_count\")\n self.central_redis.set(crawl_id + \"_count\", page_count)\n self.central_redis.expire(crawl_id + \"_count\", 60*60)\n if page_count == \"-2\": # if complete\n self.crawlQueue.remove(crawl_id)", "def test_purge(h3):\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'o1', data)\n h3.create_object('b1', 'o2', data)\n h3.create_object('b1', 'o3', data)\n\n assert set(h3.list_objects('b1')) == set(['o1', 'o2', 'o3'])\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test_request_throttling_expires(self):\n self.set_throttle_timer(MockView, 0)\n\n request = self.factory.get('/')\n for dummy in range(4):\n response = MockView.as_view()(request)\n assert response.status_code == 429\n\n # Advance the timer by one second\n self.set_throttle_timer(MockView, 1)\n\n response = MockView.as_view()(request)\n assert response.status_code == 200", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def main(transcribe_bucket_name, mp3_bucket_name):\n\n s3 = boto3.resource('s3')\n for bucket in s3.buckets.all():\n if bucket.name == transcribe_bucket_name:\n for key in bucket.objects.all():\n if key.key.endswith('.json'):\n r = {}\n # Get reference number\n reference = basename(key.key).replace('.json', 
'')\n r['ref'] = reference\n # Get URL\n location = boto3.client('s3') \\\n .get_bucket_location(\n Bucket=mp3_bucket_name)['LocationConstraint']\n base_url = join('https://s3-%s.amazonaws.com' % location,\n mp3_bucket_name)\n url = join(base_url, key.key.replace('.json', '.mp3'))\n r['url'] = url\n # Download json file\n try:\n s3.Bucket(transcribe_bucket_name) \\\n .download_file(key.key, key.key)\n except Exception as exception:\n return 1\n # Get text\n with open(key.key, 'r') as f:\n data = json.load(f)\n text = data['results']['transcripts'][0]['transcript']\n r['text'] = text\n # Get sentiment\n sentiment = get_sentiment(text)\n r['sentiment'] = sentiment\n # Check promotion\n promo = check_promo(text)\n r['promo'] = promo\n # Save to Gooogle Sheets\n values = [r['ref'], r['text'], r['promo'], r['sentiment'],\n r['url']]\n append_row(values)\n # Remove tmp json file from local machine\n remove(key.key)", "def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket", "def persistToStore(self, items, requestInstance):\n self._dbConnection = self.mongoConnection()\n imgStored = 0\n\n if (self.mongoConnection() and self.cumulusConnection()):\n\n try:\n contain = self._cumulusConnection.get_bucket(self._containerName)\n except boto.exception.S3ResponseError as detail:\n if(detail.reason.strip() == \"Not Found\"):\n self._log.warning(\"Creating bucket\")\n self._cumulusConnection.create_bucket(self._containerName)\n contain = self._cumulusConnection.get_bucket(self._containerName)\n else:\n self._log.error(\"Code and reason \" + detail.code + \" \" + detail.reason)\n self._log.error(\"Error in ImgStorecumulusMongo - queryToStore. full error \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStorecumulusMongo - persistToStore. \" + str(sys.exc_info()))\n\n try:\n dbLink = self._dbConnection[self._dbName]\n collection = dbLink[self._datacollection]\n collectionMeta = dbLink[self._metacollection]\n\n k = Key(contain)\n\n for item in items:\n\n k.key = item._imgId\n if requestInstance == None:\n k.set_contents_from_filename(item._imgURI)\n else:\n requestInstance.file.seek(0)\n k.set_contents_from_file(requestInstance.file)\n\n tags = item._imgMeta._tag.split(\",\")\n tags_list = [x.strip() for x in tags]\n meta = {\"_id\": item._imgId,\n \"os\" : item._imgMeta._os,\n \"arch\" : item._imgMeta._arch,\n \"owner\" : item._imgMeta._owner,\n \"description\" : item._imgMeta._description,\n \"tag\" : tags_list,\n \"vmType\" : item._imgMeta._vmType,\n \"imgType\" : item._imgMeta._imgType,\n \"permission\" : item._imgMeta._permission,\n \"imgStatus\" : item._imgMeta._imgStatus,\n }\n data = {\"_id\": item._imgId,\n \"createdDate\" : datetime.utcnow(),\n \"lastAccess\" : datetime.utcnow(),\n \"accessCount\" : 0,\n \"size\" : item._size,\n \"extension\" : item._extension,\n }\n\n collectionMeta.insert(meta, safe=True)\n collection.insert(data, safe=True)\n\n imgStored += 1\n\n except pymongo.errors.AutoReconnect:\n self._log.warning(\"Autoreconnected.\")\n except pymongo.errors.ConnectionFailure:\n self._log.error(\"Connection failure. The file has not been stored. Image details: \" + item.__str__() + \"\\n\")\n except IOError:\n self._log.error(\"Error in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n self._log.error(\"No such file or directory. 
Image details: \" + item.__str__())\n except TypeError:\n self._log.error(\"TypeError in ImgStorecumulusMongo - persistenToStore \" + str(sys.exc_info()))\n except pymongo.errors.OperationFailure:\n self._log.error(\"Operation Failure in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStoreCumulusMongo - persistToStore. \" + str(sys.exc_info()))\n finally:\n self._dbConnection.disconnect()\n else:\n self._log.error(\"Could not get access to the database. The file has not been stored\")\n\n for item in items: \n cmd = \"rm -f \" + item._imgURI\n os.system(cmd)\n\n if (imgStored == len(items)):\n return True\n else:\n return False", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)", "def _refresh_buckets_cache_file(cache_file):\n\n log.debug(\"Refreshing buckets cache file\")\n\n (\n key,\n keyid,\n service_url,\n verify_ssl,\n kms_keyid,\n location,\n path_style,\n https_enable,\n ) = _get_s3_key()\n metadata = {}\n\n # helper s3 query function\n def __get_s3_meta(bucket, key=key, keyid=keyid):\n ret, marker = [], \"\"\n while True:\n tmp = __utils__[\"s3.query\"](\n key=key,\n keyid=keyid,\n kms_keyid=keyid,\n bucket=bucket,\n service_url=service_url,\n verify_ssl=verify_ssl,\n location=location,\n return_bin=False,\n path_style=path_style,\n https_enable=https_enable,\n params={\"marker\": marker},\n )\n headers = []\n for header in tmp:\n if \"Key\" in header:\n break\n headers.append(header)\n ret.extend(tmp)\n if all(\n [header.get(\"IsTruncated\", \"false\") == \"false\" for header in headers]\n ):\n break\n marker = tmp[-1][\"Key\"]\n return ret\n\n if _is_env_per_bucket():\n # Single environment per bucket\n for saltenv, buckets in _get_buckets().items():\n bucket_files_list = []\n for bucket_name in buckets:\n bucket_files = {}\n s3_meta = __get_s3_meta(bucket_name)\n\n # s3 query returned nothing\n if not s3_meta:\n continue\n\n # grab only the files/dirs\n bucket_files[bucket_name] = [k for k in s3_meta if \"Key\" in k]\n bucket_files_list.append(bucket_files)\n\n # check to see if we added any keys, otherwise investigate possible error conditions\n if not bucket_files[bucket_name]:\n meta_response = {}\n for k in s3_meta:\n if \"Code\" in k or \"Message\" in k:\n # assumes no duplicate keys, consisdent with current erro response.\n meta_response.update(k)\n # attempt use of human readable output first.\n try:\n log.warning(\n \"'%s' response for bucket '%s'\",\n meta_response[\"Message\"],\n bucket_name,\n )\n continue\n except KeyError:\n # no human readable error message provided\n if \"Code\" in meta_response:\n log.warning(\n \"'%s' response for bucket '%s'\",\n meta_response[\"Code\"],\n bucket_name,\n )\n continue\n else:\n log.warning(\n \"S3 Error! Do you have any files in your S3 bucket?\"\n )\n return {}\n\n metadata[saltenv] = bucket_files_list\n\n else:\n # Multiple environments per buckets\n for bucket_name in _get_buckets():\n s3_meta = __get_s3_meta(bucket_name)\n\n # s3 query returned nothing\n if not s3_meta:\n continue\n\n # pull out the environment dirs (e.g. 
the root dirs)\n files = [k for k in s3_meta if \"Key\" in k]\n\n # check to see if we added any keys, otherwise investigate possible error conditions\n if not files:\n meta_response = {}\n for k in s3_meta:\n if \"Code\" in k or \"Message\" in k:\n # assumes no duplicate keys, consisdent with current erro response.\n meta_response.update(k)\n # attempt use of human readable output first.\n try:\n log.warning(\n \"'%s' response for bucket '%s'\",\n meta_response[\"Message\"],\n bucket_name,\n )\n continue\n except KeyError:\n # no human readable error message provided\n if \"Code\" in meta_response:\n log.warning(\n \"'%s' response for bucket '%s'\",\n meta_response[\"Code\"],\n bucket_name,\n )\n continue\n else:\n log.warning(\n \"S3 Error! Do you have any files in your S3 bucket?\"\n )\n return {}\n\n environments = [(os.path.dirname(k[\"Key\"]).split(\"/\", 1))[0] for k in files]\n environments = set(environments)\n\n # pull out the files for the environment\n for saltenv in environments:\n # grab only files/dirs that match this saltenv\n env_files = [k for k in files if k[\"Key\"].startswith(saltenv)]\n\n if saltenv not in metadata:\n metadata[saltenv] = []\n\n found = False\n for bucket_files in metadata[saltenv]:\n if bucket_name in bucket_files:\n bucket_files[bucket_name] += env_files\n found = True\n break\n if not found:\n metadata[saltenv].append({bucket_name: env_files})\n\n # write the metadata to disk\n _write_buckets_cache_file(metadata, cache_file)\n\n return metadata", "def quota():\n try:\n fname = os.path.join(os.path.expanduser(\"~\"), \".planet.json\")\n contents = {}\n if os.path.exists(fname):\n with open(fname, \"r\") as fp:\n contents = json.loads(fp.read())\n else:\n raise IOError(\"Escape to End and Initialize\")\n if not len(contents) != 0:\n raise IOError(\"Escape to End and Initialize\")\n else:\n k = contents[\"key\"]\n main = requests.get(\n \"https://api.planet.com/auth/v1/\" + \"experimental/public/my/subscriptions\",\n auth=HTTPBasicAuth(k, \"\"),\n )\n if main.status_code == 200:\n content = main.json()\n for item_id in content:\n print(\" \")\n print(\"Allocation Name: %s\" % item_id[\"organization\"][\"name\"])\n print(\n \"Allocation active from: %s\" % item_id[\"active_from\"].split(\"T\")[0]\n )\n print(\"Quota Enabled: %s\" % item_id[\"quota_enabled\"])\n print(\"Total Quota in SqKm: %s\" % item_id[\"quota_sqkm\"])\n print(\"Total Quota used: %s\" % item_id[\"quota_used\"])\n if (item_id[\"quota_sqkm\"]) is not None:\n leftquota = float(\n item_id[\"quota_sqkm\"] - float(item_id[\"quota_used\"])\n )\n print(\"Remaining Quota in SqKm: %s\" % leftquota)\n else:\n print(\"No Quota Allocated\")\n print(\"\")\n else:\n print(\"Failed with exception code: \" + str(main.status_code))\n\n except IOError:\n print(\"Initialize client or provide API Key\")", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. 
Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def test_put(self):\n cache = LRUCache(5)\n assert 0 == cache.size\n cache.put(1, 'aaa')\n assert 1 == cache.size", "def test_transform_and_load_storage_buckets(neo4j_session):\n bucket_res = tests.data.gcp.storage.STORAGE_RESPONSE\n bucket_list = cartography.intel.gcp.storage.transform_gcp_buckets(bucket_res)\n cartography.intel.gcp.storage.load_gcp_buckets(neo4j_session, bucket_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(bucket:GCPBucket{id:{BucketId}})\n RETURN bucket.id, bucket.project_number, bucket.kind\n \"\"\"\n expected_id = 'bucket_name'\n expected_project_num = 9999\n expected_kind = 'storage#bucket'\n nodes = neo4j_session.run(\n query,\n BucketId=expected_id,\n )\n actual_nodes = {(n['bucket.id'], n['bucket.project_number'], n['bucket.kind']) for n in nodes}\n expected_nodes = {\n (expected_id, expected_project_num, expected_kind),\n }\n assert actual_nodes == expected_nodes", "def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'", "def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()", "def get( key ):\n if ACTIVE is False:\n return None\n \n global CACHE, STATS_MISSES, STATS_HITS\n \n \"\"\" Return a key stored in the python instance cache or a None if it has expired or it doesn't exist \"\"\"\n if key not in CACHE:\n STATS_MISSES += 1\n return None\n \n value, expiry = CACHE[key]\n current_timestamp = time.time()\n if expiry == None or current_timestamp < expiry:\n STATS_HITS += 1\n return value\n else:\n STATS_MISSES += 1\n delete( key )\n return None", "def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n 
self.assertEqual(['2','3'], list(storage._keys_dict.keys()))", "def test_finalized_data_in_gs(self):\n # create content\n content = pad_string('huge, important data')\n namespace = 'default-gzip'\n request = self.store_request(namespace, content)\n\n # this should succeed\n self.mock(gcs, 'get_file_info', get_file_info_factory(content))\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)\n\n # this should fail\n self.mock(gcs, 'get_file_info', get_file_info_factory())\n with self.call_should_fail('400'):\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)\n self.assertEqual(1, self.execute_tasks())", "def cron_refresh_spacetrack_cache():\n s = SpaceTrackApi()\n updated_tles_str = s.get_all_tles()\n storage.save_tle_cache(updated_tles_str)\n last_updated[0] = int(time.time())\n metadata = {\n 'last_updated': last_updated[0],\n }\n storage.save_metadata(metadata)", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def test_expiry_in_future(self):\n link = DownloadLink()\n link.save()\n self.assertEqual(link.getExpiry(), link.createdAt + timedelta(seconds=60))", "async def throttle_pubs(self):\n while True:\n ch, msg = await self._pub_throttle.get() # Blocks until we get an item\n resp = await self._publish_to_channel(ch, msg)\n print(resp)\n print(\"[ *] Published to channel {ch} message: \\n{msg}\\n\".format(ch=ch, msg=msg))\n if resp:\n await asyncio.sleep(self.pub_rate)", "def test_buckets_returned_when_searched_2(self):\n with self.client:\n token = self.get_user_token()\n self.create_buckets(token)\n response = self.client.get(\n '/bucketlists/?q=T&page=2',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list, 'Items must be a list')\n self.assertEqual(len(data['buckets']), 3)\n self.assertEqual(data['buckets'][0]['id'], 4)\n self.assertEqual(data['count'], 6)\n self.assertEqual(data['next'], None)\n self.assertEqual(data['previous'], 'http://localhost/bucketlists/?page=1')\n self.assertEqual(response.status_code, 200)", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def test_copy(h3):\n\n count = 100 # More than 10\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'object', data)\n\n for i in range(count):\n h3.copy_object('b1', 'object', 'copy%d' % i)\n\n # Get the list of objects\n objects = []\n while True:\n result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n for i in range(count):\n object_info = h3.info_object('b1', 'copy%d' % i)\n assert not object_info.is_bad\n assert object_info.size == (3 * MEGABYTE)\n assert type(object_info.creation) == float\n assert type(object_info.last_access) == float\n assert type(object_info.last_modification) == float\n assert type(object_info.last_change) == float\n\n object_data = h3.read_object('b1', 'copy%d' % i)\n assert object_data == data\n\n objects = []\n while True:\n 
result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test_model_can_create_a_bucketlist(self):\n old_count = Job.objects.count()\n self.job.save()\n new_count = Job.objects.count()\n self.assertNotEqual(old_count, new_count)", "def test_list_bucket(self):\n\n if self.bos.does_bucket_exist(\"aaaaaaxzr1\"):\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n if self.bos.does_bucket_exist(\"aaaaaaxzr2\"):\n self.bos.delete_bucket(\"aaaaaaxzr2\")\n\n time1 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr1\")\n\n time2 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr2\")\n\n response = self.bos.list_buckets()\n self.check_headers(response)\n\n self.assertEqual(response.owner.id, bos_test_config.OWNER_ID)\n self.assertEqual(response.owner.display_name, bos_test_config.DISPLAY_NAME)\n for bucket in response.buckets:\n if bucket.name == \"aaaaaaxzr1\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time1)[0:19])\n elif bucket.name == \"aaaaaaxzr2\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time2)[0:19])\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n self.bos.delete_bucket(\"aaaaaaxzr2\")", "def create(base_url, keys, throttle, mdrate, mderrors, cterrors, max_file_size, tmpdir, retrieve_rate, routable, repo_configs):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise Create; base_url:{a}, throttle:{b}, mdrate:{c}, mderrors:{d}, cterrors:{e}, max_file_size:{f}, tmpdir:{g}, retrieve_rate:{h}, routable:{i}\".format(x=tname, a=base_url, b=throttle, c=mdrate, d=mderrors, e=cterrors, f=max_file_size, g=tmpdir, h=retrieve_rate, i=routable))\n\n mdopts = [\"mdonly\", \"md+ct\"]\n mdprobs = [mdrate, 1 - mdrate]\n\n mderroropts = [\"error\", \"ok\"]\n mderrorprobs = [mderrors, 1 - mderrors]\n\n cterroropts = [\"error\", \"ok\"]\n cterrorprobs = [cterrors, 1 - cterrors]\n\n retrieveopts = [\"get\", \"not\"]\n retrieveprobs = [retrieve_rate, 1 - retrieve_rate]\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n mdtype = _select_from(mderroropts, mderrorprobs)\n #print \"MD: \" + mdtype\n\n # generate a notification which may or may not have an error\n note = _make_notification(error=mdtype==\"error\", routable=routable, repo_configs=repo_configs)\n #print note\n\n # determine whether we're going to send some content\n hasct = _select_from(mdopts, mdprobs)\n #print \"CT: \" + hasct\n file_handle = None\n filepath = None\n cterr = \"ok\"\n if hasct == \"md+ct\":\n # determine if the content should have an error\n cterr = _select_from(cterroropts, cterrorprobs)\n #print \"CTERR:\" + cterr\n filepath = _get_file_path(tmpdir, max_file_size, error=cterr==\"error\")\n #print \"File\" + filepath\n file_handle = open(filepath)\n\n app.logger.debug(\"Thread:{x} - Create request for Account:{y} Type:{z} MD:{a} CT:{b}\".format(x=tname, y=api_key, z=hasct, a=mdtype, b=cterr))\n\n # make the create request, which may occasionally throw errors\n id = None\n try:\n id, loc = j.create_notification(note, file_handle)\n app.logger.debug(\"Thread:{x} - Create request for Account:{z} resulted in success, 
Notification:{y}\".format(x=tname, y=id, z=api_key))\n except:\n app.logger.error(\"Thread:{x} - Create request for Account:{y} resulted in expected exception\".format(x=tname, y=api_key))\n\n # cleanup after ourselves\n if filepath is not None:\n file_handle.close()\n os.remove(filepath)\n\n # now there's a chance that we might want to check our notification has been created correctly, so we might\n # retrieve it\n if id is not None:\n ret = _select_from(retrieveopts, retrieveprobs)\n if ret == \"get\":\n # time.sleep(2) # this gives JPER a chance to catch up\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, requesting copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n try:\n n = j.get_notification(id)\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, successfully retrieved copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n for link in n.links:\n if link.get(\"packaging\") is not None:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=id, a=url))\n try:\n stream, headers = j.get_content(url)\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=id, b=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def test_len_during_iteration(self):\n\n class Data(Document):\n pass\n\n for i in range(300):\n Data().save()\n\n records = Data.objects.limit(250)\n\n # This should pull all 250 docs from mongo and populate the result\n # cache\n len(records)\n\n # Assert that iterating over documents in the qs touches every\n # document even if we call len(qs) midway through the iteration.\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249\n\n # Assert the same behavior is true even if we didn't pre-populate the\n # result cache.\n records = Data.objects.limit(250)\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249", "async def getStorObjStats(app, key, bucket=None):\n # TBD - will need to be refactored to handle azure responses\n\n client = _getStorageClient(app)\n if not bucket:\n bucket = app['bucket_name']\n stats = {}\n\n if key[0] == '/':\n #key = key[1:] # no leading slash\n msg = f\"key with leading slash: {key}\"\n log.error(msg)\n raise KeyError(msg)\n\n log.info(f\"getStorObjStats({key})\")\n\n resp = await client.list_keys(bucket=bucket, limit=1, prefix=key)\n\n if 'Contents' not in resp:\n msg = f\"key: {key} not found\"\n log.info(msg)\n raise HTTPInternalServerError()\n contents = resp['Contents']\n log.debug(f\"storage_contents: {contents}\")\n\n found = False\n if len(contents) > 0:\n item = contents[0]\n if item[\"Key\"] == key:\n # if the key is a S3 folder, the key will be the first object in the folder,\n # not the requested object\n found = True\n if item[\"ETag\"]:\n etag = item[\"ETag\"]\n if len(etag) > 2 and etag[0] == '\"' and etag[-1] == '\"':\n # S3 returning extra quotes around etag?\n etag = etag[1:-1]\n stats[\"ETag\"] = etag\n else:\n if \"Owner\" in item and \"ID\" 
in item[\"Owner\"] and item[\"Owner\"][\"ID\"] == \"minio\":\n pass # minio is not creating ETags...\n else:\n log.warn(f\"No ETag for key: {key}\")\n # If no ETAG put in a fake one\n stats[\"ETag\"] = \"9999\"\n stats[\"Size\"] = item[\"Size\"]\n stats[\"LastModified\"] = int(item[\"LastModified\"].timestamp())\n if not found:\n msg = f\"key: {key} not found\"\n log.info(msg)\n raise HTTPNotFound()\n\n return stats", "def update(name=\"\", amount=0, execute=False):\n if name:\n bucket_metadata = get_bucket(name)\n if bucket_metadata:\n bucket = bucket_metadata[\"bucket\"]\n versioning = bucket_metadata[\"versioning\"] == \"Enabled\"\n lifecycle = bucket_metadata[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)\n else:\n buckets = get_buckets(amount)\n for k, v in buckets.items():\n name = k\n bucket = v[\"bucket\"]\n versioning = v[\"versioning\"] == \"Enabled\"\n lifecycle = v[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)", "def rest_rate_limit(r):\n\n try:\n #limit = int(r.headers[\"X-Rate-Limit-Limit\"])\n remain = int(r.headers[\"X-Rate-Limit-Remaining\"])\n reset = int(r.headers[\"X-Rate-Limit-Reset\"])\n curtime = times.to_unix(times.parse(r.headers[\"date\"]))\n except KeyError as e:\n # We dont have the proper headers\n log.error(\"Header not found - {}\", e)\n sleep(RETRY_AFTER)\n return\n\n if remain <= RATE_LIMIT_BUFFER:\n log.debug(\"Hit rate limit - {}\", remain)\n log.debug(\"Rate limit reset in {} seconds\", reset - curtime)\n sleep(reset - curtime + RESET_BUFFER)", "def test_blobstore_concat(self):\n memcache.Client().set('curl_test_upload','')\n upload_as = 'my_blobstore_async.txt'\n item_key = str(self.item.key())\n request = { \n 'jsonrpc': '2.0',\n 'id': 10,\n 'method': 'sequence',\n 'params' : [\n {\n 'jsonrpc': '2.0',\n 'id': 11,\n 'method': 'curl',\n 'params': [\n '-o',\n 'my_silly_blob',\n 'http://' + os.environ['SERVER_NAME'] + '/curl_test/data?serve=mytext&key=' + item_key,\n '-o',\n 'my_other_blob',\n 'http://' + os.environ['SERVER_NAME'] + '/curl_test/data?serve=mytext2&key=' + item_key,\n ]\n },\n {\n 'jsonrpc': '2.0',\n 'id': 13,\n 'method': 'cat',\n 'params': ['my_silly_blob', 'my_other_blob', '>', 'to_upload']\n },\n {\n 'jsonrpc': '2.0',\n 'id': 12,\n 'method': 'curl',\n 'params': ['-F',\n ('file=@to_upload;type=text/plain;filename='+upload_as),\n blobstore.create_upload_url('/curl_test/finish_upload?key=' + item_key)\n ]\n }\n ]\n }\n response = self.send_request('&log=1&async=1',request)\n ## The response and result aren't really needed in this but they may be\n ## useful for debugging.\n result = json.loads(response.content)\n \n try:\n ## So this is the really stupid part where we wait for a response\n ## from an asynchronous request. \n mckey = memcache.Client().get('curl_test_upload')\n i = 0\n while i < 30:\n i += 1\n mckey = memcache.Client().get('curl_test_upload')\n if mckey:\n break\n time.sleep(1)\n \n self_item_refreshed = db.get(item_key) # Apparently self.item caches myblob as None.\n \n blob_reader = blobstore.BlobReader(self_item_refreshed.myblob.key())\n blob_text = blob_reader.read()\n self.assertEqual(self.item.mytext+self.item.mytext2, blob_text)\n \n except DeadlineExceededError:\n raise AsyncTestWaitError(\"Timed out while waiting for asynchronous request to respond. You would never do this. 
Try the test again.\")", "def test_bucketEmpty(self):\n b = SomeBucket()\n b.add(20)\n self.clock.set(9)\n empty = b.drip()\n self.assertFalse(empty)\n self.clock.set(10)\n empty = b.drip()\n self.assertTrue(empty)", "def test_rotate_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 80, 20, 19)\n assert key.audit_state == 'old'", "def put(self):\n global hits\n hits += 1\n return {\"hits\": hits}, 200", "def __init__(self, data, expires_in):\n self.data = data\n self.expires_in = expires_in\n self.expires_after = time.time() + expires_in", "def test_s3_table_functions(started_cluster):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )\n\n assert (\n node.query(\n \"\"\"\n SELECT count(*) FROM s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n );\n \"\"\"\n )\n == \"1000000\\n\"\n )", "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache", "def test_transform_cart_item_pagination(self):\n size = 700\n service = ElasticsearchService()\n hits, search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size)\n self.assertEqual(size, len(hits))\n hits, search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size,\n search_after=search_after)\n self.assertEqual(size, len(hits))\n hits, search_after = service.transform_cart_item_request(catalog=self.catalog,\n entity_type='files',\n size=size,\n search_after=search_after)\n self.assertEqual(100, len(hits))", "def test_put_get(self):\n key = 1\n item = 'aaa'\n cache = LRUCache(5)\n cache.put(key, item)\n assert item == cache.get(key)\n assert 1 == cache.size", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. 
\\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def test_merge_backup_with_purge_deleted_keys(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n self.log.info(\"Delete half docs of 1st batch\")\n delete_gen = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items // 2)\n self._load_all_buckets(self.master, delete_gen, \"delete\", 0)\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items // 2)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.log.info(\"Start backup\")\n self.backup_create()\n self.backup_cluster()\n nodes = []\n upto_seq = 100000\n self.log.info(\"Start compact each vbucket in bucket\")\n\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in RestConnection(self.master).get_buckets():\n found = self.get_info_in_database(self.backupset.cluster_host, bucket, \"deleted\")\n if found:\n shell = RemoteMachineShellConnection(self.backupset.cluster_host)\n shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)\n shell.disconnect()\n found = self.get_info_in_database(self.backupset.cluster_host, bucket, \"deleted\")\n if not found:\n self.log.info(\"Load another docs to bucket %s \" % bucket.name)\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items // 4)\n self._load_bucket(bucket, self.master, create_gen3, \"create\",\n self.expire_time)\n self.backup_cluster()\n create_gen4 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items // 4)\n self._load_bucket(bucket, self.master, create_gen4, \"create\",\n self.expire_time)\n self.backup_cluster()\n self.backupset.end = 3\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n else:\n self.fail(\"cbcompact failed to purge deleted key\")", "def test_write_multi_files_to_bucket(\n self, mcg_obj, awscli_pod, bucket_factory, amount, file_type\n ):\n data_dir = \"/data\"\n if file_type == \"large\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY\n elif file_type == \"small\":\n public_bucket = constants.TEST_FILES_BUCKET\n obj_key = \"random1.txt\"\n elif file_type == \"large_small\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY.rsplit(\"/\", 1)[0]\n\n # Download the file to pod\n awscli_pod.exec_cmd_on_pod(command=f\"mkdir {data_dir}\")\n public_s3_client = retrieve_anon_s3_resource().meta.client\n download_files = []\n # Use obj_key as prefix to download multiple files for large_small\n # case, it also works with single file\n for obj in public_s3_client.list_objects(\n Bucket=public_bucket, Prefix=obj_key\n ).get(\"Contents\"):\n # Skip the extra file in large file type\n if file_type == \"large\" and 
obj[\"Key\"] != obj_key:\n continue\n logger.info(f'Downloading {obj[\"Key\"]} from AWS bucket {public_bucket}')\n download_obj_cmd = f'cp s3://{public_bucket}/{obj[\"Key\"]} {data_dir}'\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(download_obj_cmd), out_yaml_format=False\n )\n download_files.append(obj[\"Key\"])\n # Write all downloaded objects to the new bucket\n bucketname = bucket_factory(1)[0].name\n base_path = f\"s3://{bucketname}\"\n for i in range(amount):\n full_object_path = base_path + f\"/{i}/\"\n sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)\n\n obj_list = list(\n obj.key.split(\"/\")[-1]\n for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname)\n )\n\n # Check total copy files amount match\n if file_type == \"large_small\":\n assert len(obj_list) == 2 * amount, \"Total file amount does not match\"\n else:\n assert len(obj_list) == amount, \"Total file amount does not match\"\n\n # Check deduplicate set is same\n test_set = set([i.split(\"/\")[-1] for i in download_files])\n assert test_set == set(obj_list), \"File name set does not match\"", "def test_add_bucketlist_items(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def main():\r\n # Create client for interfacing with S3\r\n s3 = create_client(untappd_access_key_id, untappd_secret_access_key)\r\n\r\n # Get id of the latest post that was handled in the previous function call\r\n last_update_id = get_last_update_id(s3)\r\n # Set inital value for lastest post handled by this function call\r\n most_recent_id = last_update_id\r\n\r\n # Get list of all posts from all breweries specified by env variable\r\n posts = get_all_posts(untappd_breweries.split(','))\r\n\r\n # Iterate over all posts and write new posts to the database\r\n for post in posts:\r\n # Get unique post id number from post\r\n # Example: 'https://untappd.com/user/Mckman007/checkin/756802330'\r\n # has post id number 756802330\r\n post_id = int(post['id'].rsplit('/', 1)[-1])\r\n # if post_id is greater than last_update_id, post is not yet in database\r\n if(post_id > last_update_id):\r\n most_recent_id = max(most_recent_id, post_id)\r\n write_post_to_s3(s3, post, post_id)\r\n\r\n # After all new posts have been written to database, set last_update_id\r\n # for next function call\r\n set_last_update_id(s3, most_recent_id)", "def ensure_space(self,\n context: context.RequestContext,\n volume: objects.Volume) -> bool:\n\n # Check to see if the cache is actually limited.\n if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:\n return True\n\n # Make sure that we can potentially fit the image in the cache\n # and bail out before evicting everything else to try and make\n # room for it.\n if (self.max_cache_size_gb != 0 and\n volume.size > self.max_cache_size_gb):\n return False\n\n # Assume the entries are ordered by most recently used to least used.\n entries = 
self.db.image_volume_cache_get_all(\n context,\n **self._get_query_filters(volume))\n\n current_count = len(entries)\n\n current_size = 0\n for entry in entries:\n current_size += entry['size']\n\n # Add values for the entry we intend to create.\n current_size += volume.size\n current_count += 1\n\n LOG.debug('Image-volume cache for %(service)s current_size (GB) = '\n '%(size_gb)s (max = %(max_gb)s), current count = %(count)s '\n '(max = %(max_count)s).',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'max_gb': self.max_cache_size_gb,\n 'count': current_count,\n 'max_count': self.max_cache_size_count})\n\n while (((current_size > self.max_cache_size_gb and\n self.max_cache_size_gb > 0)\n or (current_count > self.max_cache_size_count and\n self.max_cache_size_count > 0))\n and len(entries)):\n entry = entries.pop()\n LOG.debug('Reclaiming image-volume cache space; removing cache '\n 'entry %(entry)s.', {'entry': self._entry_to_str(entry)})\n self._delete_image_volume(context, entry)\n current_size -= entry['size']\n current_count -= 1\n LOG.debug('Image-volume cache for %(service)s new size (GB) = '\n '%(size_gb)s, new count = %(count)s.',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'count': current_count})\n\n # It is only possible to not free up enough gb, we will always be able\n # to free enough count. This is because 0 means unlimited which means\n # it is guaranteed to be >0 if limited, and we can always delete down\n # to 0.\n if self.max_cache_size_gb > 0:\n if current_size > self.max_cache_size_gb > 0:\n LOG.warning('Image-volume cache for %(service)s does '\n 'not have enough space (GB).',\n {'service': volume.service_topic_queue})\n return False\n\n return True", "def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))", "def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent 
in should be having signature\n func(None,None, XmlDoc) and should yield JSON document one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner", "def checkExpiredThread(q, results, db, cursor):\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")", "def lambda_handler(*_):\n Log.info(\"Checking bucket %s\", S3_BUCKET)\n Log.info(\"Output Key: %s\", S3_OUTPUT_KEY)\n bucket_location, objects = s3index.get_objects(S3_BUCKET)\n filtered = s3index.filter_objects(objects)\n by_date = s3index.order_objects(filtered)\n template = s3index.template_from_string(TEMPLATE)\n index = s3index.build_index(\n template, SITE_NAME, by_date, S3_BUCKET, bucket_location)\n s3_client = boto3.client(\"s3\")\n s3_client.put_object(\n ACL=\"public-read\",\n Body=index.encode(),\n Bucket=S3_BUCKET,\n Key=S3_OUTPUT_KEY,\n ContentType=\"text/html\"\n )", "def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]", "async def _expire(self, key, ttl):\n return await self.client.touch(key, ttl)" ]
[ "0.73347205", "0.72373694", "0.6889847", "0.6790015", "0.6164027", "0.56535953", "0.54997987", "0.5486921", "0.54866165", "0.54771954", "0.5461768", "0.53524595", "0.53395146", "0.53197896", "0.5305307", "0.5276118", "0.5267235", "0.52440965", "0.5219622", "0.52176183", "0.52138746", "0.5206336", "0.5199197", "0.51463145", "0.51260495", "0.51191366", "0.51190865", "0.5097199", "0.50952214", "0.5087055", "0.5076764", "0.50636387", "0.5045685", "0.5042873", "0.50267994", "0.5013056", "0.50126004", "0.50125676", "0.50094885", "0.4985546", "0.49672967", "0.4955209", "0.49501294", "0.4939543", "0.49130186", "0.4905966", "0.48986214", "0.48969764", "0.48805097", "0.48559198", "0.48542705", "0.48513585", "0.48456055", "0.48419443", "0.48337504", "0.48306724", "0.48300686", "0.48289034", "0.4827319", "0.48263422", "0.4822914", "0.4821719", "0.48163798", "0.48141217", "0.4805237", "0.4799801", "0.4799193", "0.47911388", "0.47877803", "0.47847015", "0.4783693", "0.47777152", "0.47769544", "0.4773675", "0.47699103", "0.476621", "0.4756738", "0.47534412", "0.47473338", "0.4741402", "0.47401994", "0.4739351", "0.47353497", "0.47313145", "0.47269174", "0.47263741", "0.47255647", "0.47217605", "0.47139332", "0.47133312", "0.47090775", "0.4708233", "0.47036585", "0.47020268", "0.4701254", "0.47002655", "0.4699733", "0.46975395", "0.46951702", "0.46916726" ]
0.69277155
2
1. Create a bucket with ttl = 60s
2. Upload 1000 docs with exp = 40s
3. After 20s, Update docs with exp = 60s
4. After 40s, run expiry pager again and get item count, must be 1000
5. After 20s, run expiry pager again and get item count, must be 0
def test_maxttl_with_doc_updates(self):
    rest = RestConnection(self.master)
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=40)
    self.sleep(20, "waiting to update docs with exp=60s...")
    for bucket in self.buckets:
        self._load_json(bucket, self.num_items, exp=60)
    self.sleep(40, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != self.num_items:
            self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")
    self.sleep(20, "waiting before running expiry pager...")
    self.expire_pager(self.servers)
    self.sleep(20, "waiting for item count to come down...")
    for bucket in self.buckets:
        items = rest.get_active_key_count(bucket)
        self.log.info("Items: {0}".format(items))
        if items != 0:
            self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
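The expected behaviour can be reasoned about without a cluster: each update resets a document's absolute expiry, so docs written at t=0 with exp=40s and rewritten at t=20s with exp=60s only become eligible for the pager at t=80s. Below is a minimal, self-contained sketch of that timeline in plain Python; FakeBucket, upsert and run_expiry_pager are hypothetical stand-ins for the bucket and expiry pager, and the bucket-level maxTTL cap is not modelled since every doc expiry here stays at or below 60s.

import time


class FakeBucket:
    """In-memory stand-in for a TTL-enabled bucket: key -> absolute expiry time."""

    def __init__(self):
        self._docs = {}

    def upsert(self, key, exp):
        # Writing a doc (or re-writing it) resets its expiry relative to "now".
        self._docs[key] = time.time() + exp

    def run_expiry_pager(self):
        # The pager sweep drops every doc whose expiry has already passed.
        now = time.time()
        for key in [k for k, t in self._docs.items() if t <= now]:
            del self._docs[key]

    def item_count(self):
        return len(self._docs)


def simulate(num_items=1000):
    bucket = FakeBucket()
    for i in range(num_items):
        bucket.upsert("doc_%d" % i, exp=40)    # step 2: docs created with exp = 40s
    time.sleep(20)
    for i in range(num_items):
        bucket.upsert("doc_%d" % i, exp=60)    # step 3: update pushes expiry out to now + 60s
    time.sleep(40)
    bucket.run_expiry_pager()
    assert bucket.item_count() == num_items    # step 4: updated docs are still alive
    time.sleep(20)
    bucket.run_expiry_pager()
    assert bucket.item_count() == 0            # step 5: the new expiry has now elapsed

Running simulate() takes roughly 80 seconds of wall-clock time, mirroring the sleep calls in the test above.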
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")", "def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")", "def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 
500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))", "def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")", "def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()", "def test_update_bucket(self):\n pass", "def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)", "async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})", "def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time", "def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass", "def post_bucketlist():\n pass", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # 
find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def progress_update(sent, total):\n l.debug(\"%d of %d Mb uploaded to Amazon S3.\", sent / 1000000, total / 1000000)", "def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! \")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")", "def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)", "def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n 
Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)", "def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)", "def handler(event, context):\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');", "def update_bucketlist():\n pass", "def __init__(__self__, *,\n bucket: str,\n kind: str,\n retention_interval: str,\n upload_interval: str):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"retention_interval\", retention_interval)\n pulumi.set(__self__, \"upload_interval\", upload_interval)", "def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass", "def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = 
openBucket(dest)", "def do_rate_limited_ops(\n handle, num_seconds, do_writes, limit, max_rows, min_size, max_size):\n put_request = PutRequest().set_table_name(table_name)\n get_request = GetRequest().set_table_name(table_name)\n #\n # Generate a string of max_size with all \"x\"s in it\n #\n user_data = ''\n if do_writes:\n for x in range(max_size):\n user_data += 'x'\n\n start_time = int(round(time() * 1000))\n end_time = start_time + num_seconds * 1000\n\n print('Running continuous ' + ('writes' if do_writes else 'reads') +\n ' for ' + str(num_seconds) + ' seconds.')\n #\n # Keep track of how many units we used\n #\n units_used = 0\n #\n # With rate limiting enabled, we can find the amount of time our operation\n # was delayed due to rate limiting by getting the value from the result\n # using Result.get_rate_limit_delayed_ms().\n #\n delay_ms = 0\n\n key = dict()\n value = dict()\n while True:\n fld_id = int(random() * max_rows)\n try:\n if do_writes:\n value['id'] = fld_id\n value['sid'] = fld_id\n rec_size = int(random() * (max_size - min_size))\n rec_size += min_size\n value['name'] = user_data[:rec_size]\n put_request.set_value(value)\n put_result = handle.put(put_request)\n units_used += put_result.get_write_units()\n delay_ms += put_result.get_rate_limit_delayed_ms()\n else:\n key['id'] = fld_id\n key['sid'] = fld_id\n get_request.set_key(key)\n get_result = handle.get(get_request)\n units_used += get_result.get_read_units()\n delay_ms += get_result.get_rate_limit_delayed_ms()\n except WriteThrottlingException as wte:\n # We should not get WriteThrottlingException exception\n print('Got unexpected write throttling exception')\n raise wte\n except ReadThrottlingException as rte:\n # We should not get ReadThrottlingException exception\n print('Got unexpected read throttling exception')\n raise rte\n if int(round(time() * 1000)) >= end_time:\n break\n num_seconds = (int(round(time() * 1000)) - start_time) // 1000\n units_used /= num_seconds\n\n if units_used < int(limit * 0.8) or units_used > int(limit * 1.2):\n if do_writes:\n msg = ('Writes: expected around ' + str(limit) + ' WUs, got ' +\n str(units_used))\n else:\n msg = ('Reads: expected around ' + str(limit) + ' RUs, got ' +\n str(units_used))\n raise RuntimeError(msg)\n\n print(('Writes' if do_writes else 'Reads') + ': average usage = ' +\n str(units_used) + ('WUs' if do_writes else 'RUs') +\n ' (expected around ' + str(limit))\n\n print('Total rate limiter delay time = ' + str(delay_ms) + 'ms')", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n 
self.assertEqual(100, fit)", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size", "def large_upload_collection(upload_items: List[JSONDict]) -> UploadCollection:\n items = []\n\n item = upload_items[0]\n for i in range(3050):\n copy = item.copy()\n copy[\"guid\"] = copy[\"guid\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = UploadCollection(items=items)\n return collection", "def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. \\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False", "def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. 
service=s3 method=delete_object id={}'.format(file_id))", "def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)", "def test_finalized_data_in_gs(self):\n # create content\n content = pad_string('huge, important data')\n namespace = 'default-gzip'\n request = self.store_request(namespace, content)\n\n # this should succeed\n self.mock(gcs, 'get_file_info', get_file_info_factory(content))\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)\n\n # this should fail\n self.mock(gcs, 'get_file_info', get_file_info_factory())\n with self.call_should_fail('400'):\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)\n self.assertEqual(1, self.execute_tasks())", "def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def _put_retry(self, s3_bucket, s3_filename, local_filename, max_retries=3, policy=None):\n b = self.conn.get_bucket(s3_bucket)\n retries = 0\n while retries < max_retries:\n try:\n s3_key = b.new_key(s3_filename)\n s3_key.set_contents_from_filename(local_filename, policy=policy)\n except:\n logger.info('File transfer error: ' + s3_filename, exc_info=True)\n retries = retries + 1\n if retries == max_retries:\n raise\n time.sleep(retries)\n else:\n logger.info('Archived %s to %s/%s', local_filename, s3_bucket, s3_filename)\n return os.path.getsize(local_filename)", "def manipulate_bucketlist():\n pass", "def __init__(self, bucket):\n self.bucket = bucket", "def listget(base_url, keys, throttle, generic_rate, max_lookback, 
tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if we're going to get the notification by itself (which is technically 
unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def upload_obj(bucketname, dateiname, zielname=None):\n pass", "def upload_progress(self, cloud_file, size, uploaded):", "def test_create_bucket(self):\n pass", "def persistToStore(self, items, requestInstance):\n self._dbConnection = self.mongoConnection()\n imgStored = 0\n\n if (self.mongoConnection() and self.cumulusConnection()):\n\n try:\n contain = self._cumulusConnection.get_bucket(self._containerName)\n except boto.exception.S3ResponseError as detail:\n if(detail.reason.strip() == \"Not Found\"):\n self._log.warning(\"Creating bucket\")\n self._cumulusConnection.create_bucket(self._containerName)\n contain = self._cumulusConnection.get_bucket(self._containerName)\n else:\n self._log.error(\"Code and reason \" + detail.code + \" \" + detail.reason)\n self._log.error(\"Error in ImgStorecumulusMongo - queryToStore. full error \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStorecumulusMongo - persistToStore. 
\" + str(sys.exc_info()))\n\n try:\n dbLink = self._dbConnection[self._dbName]\n collection = dbLink[self._datacollection]\n collectionMeta = dbLink[self._metacollection]\n\n k = Key(contain)\n\n for item in items:\n\n k.key = item._imgId\n if requestInstance == None:\n k.set_contents_from_filename(item._imgURI)\n else:\n requestInstance.file.seek(0)\n k.set_contents_from_file(requestInstance.file)\n\n tags = item._imgMeta._tag.split(\",\")\n tags_list = [x.strip() for x in tags]\n meta = {\"_id\": item._imgId,\n \"os\" : item._imgMeta._os,\n \"arch\" : item._imgMeta._arch,\n \"owner\" : item._imgMeta._owner,\n \"description\" : item._imgMeta._description,\n \"tag\" : tags_list,\n \"vmType\" : item._imgMeta._vmType,\n \"imgType\" : item._imgMeta._imgType,\n \"permission\" : item._imgMeta._permission,\n \"imgStatus\" : item._imgMeta._imgStatus,\n }\n data = {\"_id\": item._imgId,\n \"createdDate\" : datetime.utcnow(),\n \"lastAccess\" : datetime.utcnow(),\n \"accessCount\" : 0,\n \"size\" : item._size,\n \"extension\" : item._extension,\n }\n\n collectionMeta.insert(meta, safe=True)\n collection.insert(data, safe=True)\n\n imgStored += 1\n\n except pymongo.errors.AutoReconnect:\n self._log.warning(\"Autoreconnected.\")\n except pymongo.errors.ConnectionFailure:\n self._log.error(\"Connection failure. The file has not been stored. Image details: \" + item.__str__() + \"\\n\")\n except IOError:\n self._log.error(\"Error in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n self._log.error(\"No such file or directory. Image details: \" + item.__str__())\n except TypeError:\n self._log.error(\"TypeError in ImgStorecumulusMongo - persistenToStore \" + str(sys.exc_info()))\n except pymongo.errors.OperationFailure:\n self._log.error(\"Operation Failure in ImgStorecumulusMongo - persistenToStore. \" + str(sys.exc_info()))\n except:\n self._log.error(\"Error in ImgStoreCumulusMongo - persistToStore. \" + str(sys.exc_info()))\n finally:\n self._dbConnection.disconnect()\n else:\n self._log.error(\"Could not get access to the database. 
The file has not been stored\")\n\n for item in items: \n cmd = \"rm -f \" + item._imgURI\n os.system(cmd)\n\n if (imgStored == len(items)):\n return True\n else:\n return False", "def test_creating_a_bucket(self):\n with self.client:\n self.create_bucket(self.get_user_token())", "def create(base_url, keys, throttle, mdrate, mderrors, cterrors, max_file_size, tmpdir, retrieve_rate, routable, repo_configs):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise Create; base_url:{a}, throttle:{b}, mdrate:{c}, mderrors:{d}, cterrors:{e}, max_file_size:{f}, tmpdir:{g}, retrieve_rate:{h}, routable:{i}\".format(x=tname, a=base_url, b=throttle, c=mdrate, d=mderrors, e=cterrors, f=max_file_size, g=tmpdir, h=retrieve_rate, i=routable))\n\n mdopts = [\"mdonly\", \"md+ct\"]\n mdprobs = [mdrate, 1 - mdrate]\n\n mderroropts = [\"error\", \"ok\"]\n mderrorprobs = [mderrors, 1 - mderrors]\n\n cterroropts = [\"error\", \"ok\"]\n cterrorprobs = [cterrors, 1 - cterrors]\n\n retrieveopts = [\"get\", \"not\"]\n retrieveprobs = [retrieve_rate, 1 - retrieve_rate]\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n mdtype = _select_from(mderroropts, mderrorprobs)\n #print \"MD: \" + mdtype\n\n # generate a notification which may or may not have an error\n note = _make_notification(error=mdtype==\"error\", routable=routable, repo_configs=repo_configs)\n #print note\n\n # determine whether we're going to send some content\n hasct = _select_from(mdopts, mdprobs)\n #print \"CT: \" + hasct\n file_handle = None\n filepath = None\n cterr = \"ok\"\n if hasct == \"md+ct\":\n # determine if the content should have an error\n cterr = _select_from(cterroropts, cterrorprobs)\n #print \"CTERR:\" + cterr\n filepath = _get_file_path(tmpdir, max_file_size, error=cterr==\"error\")\n #print \"File\" + filepath\n file_handle = open(filepath)\n\n app.logger.debug(\"Thread:{x} - Create request for Account:{y} Type:{z} MD:{a} CT:{b}\".format(x=tname, y=api_key, z=hasct, a=mdtype, b=cterr))\n\n # make the create request, which may occasionally throw errors\n id = None\n try:\n id, loc = j.create_notification(note, file_handle)\n app.logger.debug(\"Thread:{x} - Create request for Account:{z} resulted in success, Notification:{y}\".format(x=tname, y=id, z=api_key))\n except:\n app.logger.error(\"Thread:{x} - Create request for Account:{y} resulted in expected exception\".format(x=tname, y=api_key))\n\n # cleanup after ourselves\n if filepath is not None:\n file_handle.close()\n os.remove(filepath)\n\n # now there's a chance that we might want to check our notification has been created correctly, so we might\n # retrieve it\n if id is not None:\n ret = _select_from(retrieveopts, retrieveprobs)\n if ret == \"get\":\n # time.sleep(2) # this gives JPER a chance to catch up\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, requesting copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n try:\n n = j.get_notification(id)\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, successfully retrieved copy of Notification:{z}\".format(x=tname, y=api_key, z=id))\n for link in n.links:\n if link.get(\"packaging\") is not None:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following Create for Account:{y}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=id, a=url))\n try:\n stream, headers 
= j.get_content(url)\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=id, b=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def test_model_can_create_a_bucketlist(self):\n old_count = Job.objects.count()\n self.job.save()\n new_count = Job.objects.count()\n self.assertNotEqual(old_count, new_count)", "def test_put_object_exceptions(self):\n # key is None\n err = None\n try:\n self.bos.put_object(self.BUCKET, None, None, 100, None)\n except ValueError as e:\n err = e\n finally:\n self.assertIsNotNone(err)\n # too long\n err = None\n try:\n self.bos.put_object(self.BUCKET, self.KEY, None, 6 * 1024 * 1024 * 1024, None)\n except ValueError as e:\n err = e\n finally:\n self.assertIsNotNone(err)", "def lambda_handler(*_):\n Log.info(\"Checking bucket %s\", S3_BUCKET)\n Log.info(\"Output Key: %s\", S3_OUTPUT_KEY)\n bucket_location, objects = s3index.get_objects(S3_BUCKET)\n filtered = s3index.filter_objects(objects)\n by_date = s3index.order_objects(filtered)\n template = s3index.template_from_string(TEMPLATE)\n index = s3index.build_index(\n template, SITE_NAME, by_date, S3_BUCKET, bucket_location)\n s3_client = boto3.client(\"s3\")\n s3_client.put_object(\n ACL=\"public-read\",\n Body=index.encode(),\n Bucket=S3_BUCKET,\n Key=S3_OUTPUT_KEY,\n ContentType=\"text/html\"\n )", "def save_to_s3(self, bucket):\r\n n = 0\r\n m = self.read()\r\n while m:\r\n n += 1\r\n key = bucket.new_key('%s/%s' % (self.id, m.id))\r\n key.set_contents_from_string(m.get_body())\r\n self.delete_message(m)\r\n m = self.read()\r\n return n", "def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000", "def test_add_bucketlist_items(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_write_multi_files_to_bucket(\n self, mcg_obj, awscli_pod, bucket_factory, amount, file_type\n ):\n data_dir = \"/data\"\n if file_type == \"large\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY\n elif file_type == \"small\":\n public_bucket = constants.TEST_FILES_BUCKET\n obj_key = \"random1.txt\"\n elif file_type == \"large_small\":\n public_bucket = PUBLIC_BUCKET\n obj_key = LARGE_FILE_KEY.rsplit(\"/\", 1)[0]\n\n # Download the file to pod\n awscli_pod.exec_cmd_on_pod(command=f\"mkdir {data_dir}\")\n public_s3_client = retrieve_anon_s3_resource().meta.client\n download_files = []\n # Use obj_key as prefix to download multiple files for large_small\n # case, it also works with single file\n for obj in 
public_s3_client.list_objects(\n Bucket=public_bucket, Prefix=obj_key\n ).get(\"Contents\"):\n # Skip the extra file in large file type\n if file_type == \"large\" and obj[\"Key\"] != obj_key:\n continue\n logger.info(f'Downloading {obj[\"Key\"]} from AWS bucket {public_bucket}')\n download_obj_cmd = f'cp s3://{public_bucket}/{obj[\"Key\"]} {data_dir}'\n awscli_pod.exec_cmd_on_pod(\n command=craft_s3_command(download_obj_cmd), out_yaml_format=False\n )\n download_files.append(obj[\"Key\"])\n # Write all downloaded objects to the new bucket\n bucketname = bucket_factory(1)[0].name\n base_path = f\"s3://{bucketname}\"\n for i in range(amount):\n full_object_path = base_path + f\"/{i}/\"\n sync_object_directory(awscli_pod, data_dir, full_object_path, mcg_obj)\n\n obj_list = list(\n obj.key.split(\"/\")[-1]\n for obj in mcg_obj.s3_list_all_objects_in_bucket(bucketname)\n )\n\n # Check total copy files amount match\n if file_type == \"large_small\":\n assert len(obj_list) == 2 * amount, \"Total file amount does not match\"\n else:\n assert len(obj_list) == amount, \"Total file amount does not match\"\n\n # Check deduplicate set is same\n test_set = set([i.split(\"/\")[-1] for i in download_files])\n assert test_set == set(obj_list), \"File name set does not match\"", "def main(transcribe_bucket_name, mp3_bucket_name):\n\n s3 = boto3.resource('s3')\n for bucket in s3.buckets.all():\n if bucket.name == transcribe_bucket_name:\n for key in bucket.objects.all():\n if key.key.endswith('.json'):\n r = {}\n # Get reference number\n reference = basename(key.key).replace('.json', '')\n r['ref'] = reference\n # Get URL\n location = boto3.client('s3') \\\n .get_bucket_location(\n Bucket=mp3_bucket_name)['LocationConstraint']\n base_url = join('https://s3-%s.amazonaws.com' % location,\n mp3_bucket_name)\n url = join(base_url, key.key.replace('.json', '.mp3'))\n r['url'] = url\n # Download json file\n try:\n s3.Bucket(transcribe_bucket_name) \\\n .download_file(key.key, key.key)\n except Exception as exception:\n return 1\n # Get text\n with open(key.key, 'r') as f:\n data = json.load(f)\n text = data['results']['transcripts'][0]['transcript']\n r['text'] = text\n # Get sentiment\n sentiment = get_sentiment(text)\n r['sentiment'] = sentiment\n # Check promotion\n promo = check_promo(text)\n r['promo'] = promo\n # Save to Gooogle Sheets\n values = [r['ref'], r['text'], r['promo'], r['sentiment'],\n r['url']]\n append_row(values)\n # Remove tmp json file from local machine\n remove(key.key)", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def test_purge(h3):\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'o1', data)\n h3.create_object('b1', 'o2', data)\n h3.create_object('b1', 'o3', data)\n\n assert set(h3.list_objects('b1')) == set(['o1', 'o2', 'o3'])\n\n assert h3.purge_bucket('b1') == 
True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test02StoreRefresh(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 5):\n keys.append(s.Put(i, i))\n\n # This should not raise because keys[0] should be refreshed each time its\n # gotten\n for i in range(0, 1000):\n s.Get(keys[0])\n s.Put(i, i)", "def limit_for(self, expiration=10, **kwargs):\n key = self._get_key(**kwargs)\n self.redis_conn.set(key, 1)\n self.redis_conn.expire(key, expiration)", "def main():\r\n # Create client for interfacing with S3\r\n s3 = create_client(untappd_access_key_id, untappd_secret_access_key)\r\n\r\n # Get id of the latest post that was handled in the previous function call\r\n last_update_id = get_last_update_id(s3)\r\n # Set inital value for lastest post handled by this function call\r\n most_recent_id = last_update_id\r\n\r\n # Get list of all posts from all breweries specified by env variable\r\n posts = get_all_posts(untappd_breweries.split(','))\r\n\r\n # Iterate over all posts and write new posts to the database\r\n for post in posts:\r\n # Get unique post id number from post\r\n # Example: 'https://untappd.com/user/Mckman007/checkin/756802330'\r\n # has post id number 756802330\r\n post_id = int(post['id'].rsplit('/', 1)[-1])\r\n # if post_id is greater than last_update_id, post is not yet in database\r\n if(post_id > last_update_id):\r\n most_recent_id = max(most_recent_id, post_id)\r\n write_post_to_s3(s3, post, post_id)\r\n\r\n # After all new posts have been written to database, set last_update_id\r\n # for next function call\r\n set_last_update_id(s3, most_recent_id)", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def test_list_bucket(self):\n\n if self.bos.does_bucket_exist(\"aaaaaaxzr1\"):\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n if self.bos.does_bucket_exist(\"aaaaaaxzr2\"):\n self.bos.delete_bucket(\"aaaaaaxzr2\")\n\n time1 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr1\")\n\n time2 = utils.get_canonical_time()\n self.bos.create_bucket(\"aaaaaaxzr2\")\n\n response = self.bos.list_buckets()\n self.check_headers(response)\n\n self.assertEqual(response.owner.id, bos_test_config.OWNER_ID)\n self.assertEqual(response.owner.display_name, bos_test_config.DISPLAY_NAME)\n for bucket in response.buckets:\n if bucket.name == \"aaaaaaxzr1\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time1)[0:19])\n elif bucket.name == \"aaaaaaxzr2\":\n self.assertEqual(\n compat.convert_to_bytes(bucket.creation_date)[0:19], \n compat.convert_to_bytes(time2)[0:19])\n self.bos.delete_bucket(\"aaaaaaxzr1\")\n self.bos.delete_bucket(\"aaaaaaxzr2\")", "def 
checkSold(auto=False):\n\n #Create connection \n db = pymysql.connect(host=\"localhost\", user=\"testUser\", passwd=\"BorrisBulletDodger\", db=\"scraperdb\", charset='utf8')\n cursor = db.cursor()\n\n #SQL Query\n sql = \"SELECT url FROM motorcycles WHERE adExpiry IS NULL\"\n\n #Find data\n try: \n cursor.execute(sql)\n sqlResult = cursor.fetchall()\n urls = [i[0] for i in sqlResult]\n db.commit()\n except Exception as e:\n db.rollback()\n print(f\"Exception occured: {e}\")\n\n #User input to proceed if not auto\n while not auto:\n cont = input(f\"{len(urls)} stored listings found - Do you wish to check if sold?: \")\n if cont.lower() == 'y' or cont.lower() == 'yes':\n break\n elif cont.lower() == 'n' or cont.lower() == 'no':\n return\n else:\n print(\"Please enter y/n\")\n continue\n \n #Use threading to check if urls have expired\n maxThreads = 5\n urlsQ = Queue(maxsize=0)\n #Set number of threads\n numThreads = min(maxThreads, len(urls))\n #Create lock\n lock = Lock()\n #Create progress bar\n pbar = tqdm(total=len(urls))\n \n #Expired test\n def checkExpiredThread(q, results, db, cursor):\n \"\"\"\n Checks whether input url has expired\n Input: [\"url\"], {} - Keys=urls, vals=False\n \"\"\"\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")\n\n\n #Load queue with urls, results dict keys = urls, vals = False - Ad default not expired\n results = {}\n for url in urls:\n urlsQ.put(url)\n results[url] = False\n\n #Create threads that execute checkExpiredThread function, updates data\n for _ in range(numThreads):\n worker = Thread(target=checkExpiredThread, args=(urlsQ, results, db, cursor))\n worker.setDaemon(True)\n worker.start()\n #Wait until the queue has been processed - All URLs checked\n urlsQ.join()\n pbar.close()\n\n #Remember to close database at the end \n db.close()\n \n #Count number of expired urls\n count = sum(1 for value in results.values() if value)\n logger.info(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")\n print(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")", "def test_list_objects():\n x = 0\n for obj in qmk_storage.list_objects():\n assert 'Key' in obj\n assert type(obj.get('LastModified')) == datetime.datetime\n\n if x > 5:\n break\n x += 1", "def update(name=\"\", amount=0, execute=False):\n if name:\n bucket_metadata = get_bucket(name)\n if bucket_metadata:\n bucket = bucket_metadata[\"bucket\"]\n versioning = bucket_metadata[\"versioning\"] == \"Enabled\"\n lifecycle = bucket_metadata[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)\n else:\n buckets = get_buckets(amount)\n for k, v in buckets.items():\n name = k\n bucket = v[\"bucket\"]\n versioning = 
v[\"versioning\"] == \"Enabled\"\n lifecycle = v[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def test_upload_count(self):\n conn = initialize_connection()\n db = conn.picdb\n coll = db.images\n\n num = coll.count_documents({})\n\n self.assertEqual(num, 72389)", "def test_expiry_in_future(self):\n link = DownloadLink()\n link.save()\n self.assertEqual(link.getExpiry(), link.createdAt + timedelta(seconds=60))", "def test_s3_table_functions(started_cluster):\n node.query(\n \"\"\"\n INSERT INTO FUNCTION s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n )\n SELECT * FROM numbers(1000000)\n \"\"\",\n settings=settings,\n )\n\n assert (\n node.query(\n \"\"\"\n SELECT count(*) FROM s3\n (\n nc_s3,\n filename = 'test_file.tsv.gz',\n format = 'TSV',\n structure = 'number UInt64',\n compression_method = 'gz'\n );\n \"\"\"\n )\n == \"1000000\\n\"\n )", "def sync_to_bucket(s3_url,\n region='eu-west-1',\n profile_name=None):\n\n parsed_s3_url = urlparse.urlparse(s3_url);\n\n bucket_name = parsed_s3_url.hostname;\n key_prefix = parsed_s3_url.path;\n if key_prefix[0] == '/':\n key_prefix = key_prefix[1:]\n if key_prefix[-1] != '/':\n key_prefix = key_prefix + '/'\n\n def inner(fn_inner):\n \"\"\"\n Decorator function function sent in should be having signature\n func(None,None, XmlDoc) and should yield JSON document one for\n each file that should be persisted to S3\n \"\"\"\n\n def handler(event, context):\n \"\"\"\n The AWS Lambda Entry Point\n \"\"\"\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');\n\n return handler\n\n return inner", "def test_001_create_and_delete_bucket(self):\n bucket_name = 'testbucket'\n\n deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)\n deferred.addCallback(lambda _:\n threads.deferToThread(self.conn.get_all_buckets))\n\n deferred.addCallback(self._ensure_one_bucket, bucket_name)\n\n deferred.addCallback(lambda _:\n 
threads.deferToThread(self.conn.delete_bucket,\n bucket_name))\n deferred.addCallback(lambda _:\n threads.deferToThread(self.conn.get_all_buckets))\n deferred.addCallback(self._ensure_no_buckets)\n return deferred", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def test_bucketEmpty(self):\n b = SomeBucket()\n b.add(20)\n self.clock.set(9)\n empty = b.drip()\n self.assertFalse(empty)\n self.clock.set(10)\n empty = b.drip()\n self.assertTrue(empty)", "def _mock_backend(self):\n for crawl_id in self.crawlQueue:\n # Retrieve page count from engine and set in central redis\n page_count = self.engine_redis.get(crawl_id + \"_count\")\n self.central_redis.set(crawl_id + \"_count\", page_count)\n self.central_redis.expire(crawl_id + \"_count\", 60*60)\n if page_count == \"-2\": # if complete\n self.crawlQueue.remove(crawl_id)", "def test_exists(keys, key, expected_result):\n test_bucket = bucket.Bucket()\n for insert_key in keys:\n test_bucket.insert(insert_key, \"value\")\n\n result = test_bucket.exists(key)\n\n assert result == expected_result", "def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata", "def testExpirationTime(self):\n\n bye = \"Good bye!\"\n memcache.add('bye', bye, 1)\n assert memcache.get('bye') == bye\n time.sleep(2)\n assert memcache.get('bye') == None", "def test_batch_upload(\n large_upload_collection: UploadCollection,\n fake_session: HexpySession,\n caplog: CaptureFixture,\n) -> None:\n responses.add(\n responses.POST, HexpySession.ROOT + \"content/upload\", json={}, status=200\n )\n\n client = ContentUploadAPI(fake_session)\n\n with caplog.at_level(logging.INFO):\n response = client.upload(\n document_type=123456789, items=large_upload_collection, request_usage=True\n )\n\n assert (\n caplog.records[0].msg\n == \"More than 1000 items found. 
Uploading in batches of 1000.\"\n )\n\n assert response == {\"Batch 0\": {}, \"Batch 1\": {}, \"Batch 2\": {}, \"Batch 3\": {}}", "def put(handler, *args, **kwargs):\n bucket_id = args[0]\n user_id = args[1]\n content = tornado.escape.json_decode(handler.request.body)\n BucketClasss = type(\"Bucket_%s\" % (bucket_id), (Bucket,), {})\n bucket = yield BucketClasss.get(user_id)\n if not bucket:\n bucket = BucketClasss()\n bucket.pkey = user_id \n if bucket_id == \"userData\":\n save_ver = int(content.get(\"data\", {}).get(\"saveVer\",0))\n current_ver = int(bucket.data.get(\"data\", {}).get(\"saveVer\",0))\n if save_ver < current_ver:\n raise Return((405, None))\n bucket.data = content \n yield bucket.put()\n raise Return((204, None))", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n self.assertEqual(key.get_contents_as_string(), datastr)\n self.assertEqual(key.get_metadata('name'), package.name)\n self.assertEqual(key.get_metadata('version'), package.version)", "def publish_for_user(self, gid, user, items):\n self.log.info('[{0}] Publishing updates [{1}]-->[{2}]'.format(self.name, gid, user))\n\n # access token\n token = self.get_token(user)\n if not token:\n self.log.error('[{0}] No access token for [{1}], post failed'.format(self.name, user))\n # increment error counter\n self.on_publish_error(gid, user)\n return\n\n # get the message map from db\n message_id_map = self.data.filter.get_message_id_map(self.name, user)\n\n # prepare items for publish\n prepared = self.get_next_prepared(gid, user, items, message_id_map)\n\n # publishing one item at a time\n # rest of the items will be scheduled in 1 minute\n if not prepared:\n self.log.info('All items filtered out, publish complete')\n return\n\n # check for time-space\n last_publish = self.data.get_publisher_value(':'.join((self.name, user)), 'last_publish')\n # minimum time-space is required to stop spamming activity\n time_space_min_ = prepared['params']['time_space_min'] or config.DEFAULT_MIN_TIME_SPACE\n if last_publish and time_space_min_:\n time_space_s = 60.0 * int(time_space_min_)\n wait_s = int(time_space_s - (time.time() - float(last_publish)))\n if wait_s > 0:\n self.log.info('T-space of {0}s for {1}'.format(wait_s, user))\n self.data.add_log(gid, 'Next publish to {0}:{1} in {2:.1f}min.'.format(self.name, user, wait_s / 60.0))\n self.data.buffer.buffer_in_s(gid, self.name, wait_s)\n return\n\n if prepared and 'item_id' in prepared and prepared['item_id'] != items[-1]['id']:\n self.log.info('Buffering remaining items')\n self.data.buffer.buffer_in_s(gid, self.name, 60.0)\n\n self.log.info('Publishing 1 out of {0} items'.format(len(items)))\n\n # publish prepared item\n self.publish_prepared(gid, user, prepared, message_id_map, token)\n\n # update map in the database\n self.data.filter.set_message_id_map(self.name, user, message_id_map)\n\n self.log.info('Publish complete')", "def test_long_timeout(self):\n self.cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second\n self.assertEqual(self.cache.get('key1'), 'eggs')\n\n self.cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key2'), 'ham')\n\n self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)\n self.assertEqual(self.cache.get('key3'), 'sausage')\n self.assertEqual(self.cache.get('key4'), 'lobster bisque')", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key 
= \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def quota():\n try:\n fname = os.path.join(os.path.expanduser(\"~\"), \".planet.json\")\n contents = {}\n if os.path.exists(fname):\n with open(fname, \"r\") as fp:\n contents = json.loads(fp.read())\n else:\n raise IOError(\"Escape to End and Initialize\")\n if not len(contents) != 0:\n raise IOError(\"Escape to End and Initialize\")\n else:\n k = contents[\"key\"]\n main = requests.get(\n \"https://api.planet.com/auth/v1/\" + \"experimental/public/my/subscriptions\",\n auth=HTTPBasicAuth(k, \"\"),\n )\n if main.status_code == 200:\n content = main.json()\n for item_id in content:\n print(\" \")\n print(\"Allocation Name: %s\" % item_id[\"organization\"][\"name\"])\n print(\n \"Allocation active from: %s\" % item_id[\"active_from\"].split(\"T\")[0]\n )\n print(\"Quota Enabled: %s\" % item_id[\"quota_enabled\"])\n print(\"Total Quota in SqKm: %s\" % item_id[\"quota_sqkm\"])\n print(\"Total Quota used: %s\" % item_id[\"quota_used\"])\n if (item_id[\"quota_sqkm\"]) is not None:\n leftquota = float(\n item_id[\"quota_sqkm\"] - float(item_id[\"quota_used\"])\n )\n print(\"Remaining Quota in SqKm: %s\" % leftquota)\n else:\n print(\"No Quota Allocated\")\n print(\"\")\n else:\n print(\"Failed with exception code: \" + str(main.status_code))\n\n except IOError:\n print(\"Initialize client or provide API Key\")", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def push_to_db(dynamoMethods, pic_name, valdic):\n # global pic_flag\n # global val\n # global pic_name\n # global time_now\n try:\n # while True:\n #print '--------push run--------'\n #if pic_flag == 1:\n print '-------start push-----'\n # print val\n # fruit = 'apple'\n # s3.Bucket('fruit').upload_file(filename, filename)\n s3.meta.client.upload_file(pic_name, 'iotfruit', pic_name)\n my_db.upload_img(valdic)\n # lock.acquire()\n # pic_flag = 0\n # lock.release()\n print '-------push success-------'\n # time.sleep(1)\n except (KeyboardInterrupt):\n exit", "def __init__(self):\n self.bucket = 1000\n self.bucketItem = 1000\n \n self.hashset = [None] * self.bucket", "def test_put(self):\n cache = LRUCache(5)\n assert 0 == cache.size\n cache.put(1, 'aaa')\n assert 1 == cache.size", "def 
test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def test_create_bucket(self):\n bucket = pmp.utils.create_bucket(3, 5.0)\n self.assertIsInstance(bucket, pmp.Bucket)\n\n POS_INF = float(\"inf\")\n bucket = pmp.utils.create_bucket(0, POS_INF)\n self.assertIsInstance(bucket, pmp.Bucket)", "def cleanup_incomplete_uploads_from_blob_store() -> bool:\n\n DAYS_TO_RETAIN = 1\n\n # Get current time in UTC timezone\n now = datetime.datetime.now(pytz.timezone(\"UTC\"))\n\n client = get_s3_client(settings=node.settings)\n incomplete_upload_objs = client.list_multipart_uploads(Bucket=node.id.no_dash).get(\n \"Uploads\", []\n )\n\n for obj in incomplete_upload_objs:\n # Get the upload id and object name\n upload_id: str = obj[\"UploadId\"]\n obj_name: str = obj[\"Key\"]\n\n # Get the list of all parts of the object uploaded\n # This step is required to get the upload time of the object\n object_parts: list = client.list_parts(\n Bucket=node.id.no_dash, UploadId=upload_id, Key=obj_name\n ).get(\"Parts\", [])\n\n obj_part_expired = False\n for part in object_parts:\n # Normalize upload time to UTC timezone\n part_upload_time = pytz.timezone(\"UTC\").normalize(part[\"LastModified\"])\n\n # If upload time of any part of the object\n # crosses DAYS_TO_RETAIN, then expire the whole object\n if (now - part_upload_time).days > DAYS_TO_RETAIN:\n obj_part_expired = True\n break\n\n if obj_part_expired:\n # Abort multipart upload\n client.abort_multipart_upload(\n UploadId=upload_id,\n Key=obj_name,\n Bucket=node.id.no_dash,\n )\n\n return True", "def test_finalize_gs_creates_content_entry(self):\n content = pad_string('empathy')\n namespace = 'default'\n request = self.store_request(namespace, content)\n embedded = validate(\n request.upload_ticket, handlers_endpoints_v1.UPLOAD_MESSAGES[1])\n key = model.get_entry_key(embedded['n'], embedded['d'])\n\n # finalize_gs_upload should put a new ContentEntry into the database\n self.mock(gcs, 'get_file_info', get_file_info_factory(content))\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)\n stored = key.get()\n self.assertEqual(key, stored.key)\n\n # assert that expected attributes are present\n self.assertEqual(None, stored.content)\n self.assertEqual(int(embedded['s']), stored.expanded_size)\n\n # ensure that verification occurs\n self.mock(gcs, 'read_file', lambda _bucket, _key: content)\n\n # add a side effect in execute_tasks()\n # TODO(cmassaro): there must be a better way than this\n def set_verified():\n stored_entry = stored.key.get()\n self.assertTrue(stored_entry)\n if not stored_entry.is_verified:\n stored_entry.is_verified = True\n self.mock_side_effect(self._taskqueue_stub, 'DeleteTask', set_verified)\n\n # assert that verification occurs in the taskqueue\n self.assertFalse(stored.key.get().is_verified)\n self.assertEqual(1, self.execute_tasks())\n self.assertTrue(stored.key.get().is_verified)", "def test_upsert_metadata_for_rate_plan(self):\n pass", "def test_buckets(self):\n objectstore.bucket.Bucket.create('new_bucket', self.context)\n bucket = objectstore.bucket.Bucket('new_bucket')\n\n # creator is authorized to use bucket\n self.assert_(bucket.is_authorized(self.context))\n\n # another user is not authorized\n context2 = context.RequestContext('user2', 'proj2')\n self.assertFalse(bucket.is_authorized(context2))\n\n # admin is authorized to use bucket\n admin_context = context.RequestContext('admin_user', None)\n self.assertTrue(bucket.is_authorized(admin_context))\n\n # new 
buckets are empty\n self.assertTrue(bucket.list_keys()['Contents'] == [])\n\n # storing keys works\n bucket['foo'] = \"bar\"\n\n self.assertEquals(len(bucket.list_keys()['Contents']), 1)\n\n self.assertEquals(bucket['foo'].read(), 'bar')\n\n # md5 of key works\n self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())\n\n # deleting non-empty bucket should throw a NotEmpty exception\n self.assertRaises(NotEmpty, bucket.delete)\n\n # deleting key\n del bucket['foo']\n\n # deleting empty bucket\n bucket.delete()\n\n # accessing deleted bucket throws exception\n self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')", "def test_request_throttling_expires(self):\n self.set_throttle_timer(MockView, 0)\n\n request = self.factory.get('/')\n for dummy in range(4):\n response = MockView.as_view()(request)\n assert response.status_code == 429\n\n # Advance the timer by one second\n self.set_throttle_timer(MockView, 1)\n\n response = MockView.as_view()(request)\n assert response.status_code == 200", "def test_copy(h3):\n\n count = 100 # More than 10\n\n assert h3.list_buckets() == []\n\n assert h3.create_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n with open('/dev/urandom', 'rb') as f:\n data = f.read(3 * MEGABYTE)\n\n h3.create_object('b1', 'object', data)\n\n for i in range(count):\n h3.copy_object('b1', 'object', 'copy%d' % i)\n\n # Get the list of objects\n objects = []\n while True:\n result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n for i in range(count):\n object_info = h3.info_object('b1', 'copy%d' % i)\n assert not object_info.is_bad\n assert object_info.size == (3 * MEGABYTE)\n assert type(object_info.creation) == float\n assert type(object_info.last_access) == float\n assert type(object_info.last_modification) == float\n assert type(object_info.last_change) == float\n\n object_data = h3.read_object('b1', 'copy%d' % i)\n assert object_data == data\n\n objects = []\n while True:\n result = h3.list_objects('b1', offset=len(objects))\n objects += result\n if result.done:\n break\n\n assert len(objects) == count + 1\n\n assert h3.purge_bucket('b1') == True\n\n assert h3.list_objects('b1') == []\n\n assert h3.delete_bucket('b1') == True", "def test_patch_bucket(self):\n pass", "def node(*,bucket, endpoint, key, crn, auth=IBM_CLOUD_OAUTH_URL):\n # Get bearer token to access COS S3 API\n # payload to generate auth token\n token_req_data = {\n 'grant_type' :'urn:ibm:params:oauth:grant-type:apikey',\n 'response_type':'cloud_iam',\n 'apikey' :key\n }\n response = requests.post(auth, data=token_req_data, headers={'Content-type': 'application/x-www-form-urlencoded'})\n if response.status_code != 200:\n raise Exception(\"error\")\n bearer_token_info = response.json()\n logging.debug(bearer_token_info)\n \n # Get Aspera connection information for the bucket\n header_auth = {\n 'ibm-service-instance-id':crn,\n 'Authorization':bearer_token_info['token_type'] + \" \" + bearer_token_info['access_token'],\n 'Accept':'application/xml'\n }\n response = requests.get(endpoint + \"/\" + bucket, headers=header_auth, params={'faspConnectionInfo':True})\n if response.status_code != 200:\n raise Exception(\"error accessing endpoint\")\n logging.debug(response.content)\n ats_info_root = xml.dom.minidom.parseString(response.content.decode('utf-8'));\n ats_ak = ats_info_root.getElementsByTagName('AccessKey')[0]\n ats_url = 
ats_info_root.getElementsByTagName('ATSEndpoint')[0].firstChild.nodeValue\n ats_ak_id = ats_ak.getElementsByTagName('Id')[0].firstChild.nodeValue\n ats_ak_secret = ats_ak.getElementsByTagName('Secret')[0].firstChild.nodeValue\n \n # Get delegated token to access the node api\n token_req_data['response_type'] = 'delegated_refresh_token'\n token_req_data['receiver_client_ids'] = 'aspera_ats'\n response = requests.post(auth, data=token_req_data, headers={'Content-type': 'application/x-www-form-urlencoded'})\n if response.status_code != 200:\n raise Exception(\"error when generating token\")\n delegated_token_info = response.json()\n aspera_storage_credentials = {\n 'type': 'token',\n 'token': delegated_token_info\n }\n logging.debug(aspera_storage_credentials)\n return {\n 'url': ats_url,\n 'auth': requests.auth.HTTPBasicAuth(ats_ak_id, ats_ak_secret),\n 'headers': {\n 'X-Aspera-Storage-Credentials':json.dumps(aspera_storage_credentials),\n },\n 'tspec': {'tags':{'aspera':{'node':{'storage_credentials':aspera_storage_credentials}}}}\n }", "def put(self):\n global hits\n hits += 1\n return {\"hits\": hits}, 200", "def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n print('************************************')\n print('Upload complete')\n print('************************************')", "def test_old_expiration(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('username', 'keyid', 'Active', created, last_used)\n key.audit(10, 11, 10, 8)\n assert key.audit_state == 'expire'", "def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())" ]
[ "0.70659494", "0.6699331", "0.6606719", "0.65539116", "0.58606344", "0.5722448", "0.5579716", "0.5559856", "0.5538106", "0.5445731", "0.5444631", "0.54434264", "0.54030365", "0.5382592", "0.53746575", "0.53511035", "0.53469026", "0.53101665", "0.5309392", "0.5298697", "0.5277794", "0.52463275", "0.52454925", "0.5243784", "0.52329767", "0.5169961", "0.51501995", "0.51417065", "0.51326084", "0.5132004", "0.51300377", "0.5117234", "0.50890386", "0.50602466", "0.5047266", "0.504477", "0.50293213", "0.5025823", "0.50243187", "0.5008717", "0.5002763", "0.49992606", "0.49947345", "0.49943987", "0.49935368", "0.49922255", "0.49855652", "0.4979921", "0.4977923", "0.49751213", "0.49719912", "0.4952498", "0.4941649", "0.49346083", "0.49167225", "0.49139863", "0.49120173", "0.49064875", "0.4902187", "0.48968253", "0.48934466", "0.4880985", "0.48779798", "0.48765963", "0.48646754", "0.48507515", "0.48392504", "0.48371515", "0.4836855", "0.48353156", "0.4834704", "0.48174986", "0.48166677", "0.48112056", "0.48105973", "0.48103142", "0.4800744", "0.48002654", "0.47996604", "0.47969747", "0.47947797", "0.47926125", "0.47904825", "0.47883013", "0.47877073", "0.4787422", "0.47814512", "0.47799486", "0.47783202", "0.47762766", "0.47722486", "0.47662437", "0.47646868", "0.47642818", "0.47586474", "0.47518018", "0.4750901", "0.47475398", "0.47474465", "0.47447735" ]
0.71103334
0
Use active_ids from the context to fetch the leads
def default_get(self, cr, uid, fields, context=None):
    if context is None:
        context = {}
    record_ids = context.get('active_ids', False)
    res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)
    if record_ids:
        opp_ids = []
        opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
        for opp in opps:
            opp_ids.append(opp.id)
        if 'lead_ids' in fields:
            res.update({'lead_ids': opp_ids})
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def queryset(self, request):\n qs = super(AdRepLeadAdmin, self).queryset(request)\n qs = AdRepLead.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs", "def get_locations_by_ids(self, id_list):", "def prepare_related_bulletins(self, object):\n roles = ActorRole.objects.filter(\n actor=object.id).filter(bulletin__isnull=False)\n\n related_bulletins = [\n '/api/v1/bulletin/{0}/'.format(b.id)\n for ar in roles\n for b in ar.bulletin_set.all()\n ]\n\n return related_bulletins", "def _get_ads(self, params):\n return self._api.account.get_ads(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def leads(self):\n from hubspot3.leads import LeadsClient\n\n return LeadsClient(**self.auth, **self.options)", "def get_activities():\n pass", "def get_all(self, *ids):", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def queryset(self, request):\n qs = super(AdRepAdmin, self).queryset(request)\n qs = AdRep.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs", "def queryset(self, request):\n qs = super(AdRepConsumerAdmin, self).queryset(request)\n qs = AdRepConsumer.objects.select_related().filter(id__in=qs\n ).defer('consumer__site__envelope',\n 'consumer__site__geom',\n 'consumer__site__point')\n return qs", "def queryset(self, request):\n qs = super(AdRepAdvertiserAdmin, self).queryset(request)\n qs = AdRepAdvertiser.objects.select_related().filter(id__in=qs\n ).defer('advertiser__site__envelope',\n 'advertiser__site__geom',\n 'advertiser__site__point')\n return qs", "def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts", "def lead_list(request):\n if request.method == 'GET':\n snippets = Lead.objects.all()\n serializer = LeadSerializer(snippets, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = LeadSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def queryset(self, request):\n qs = super(TwitterAccountAdmin, self).queryset(request)\n qs = TwitterAccount.objects.select_related().filter(id__in=qs\n ).defer('site__envelope', 'site__geom', 'site__point')\n return qs", "def queryset(self, request):\n qs = super(AdRepSiteAdmin, self).queryset(request)\n qs = AdRepSite.objects.select_related().filter(\n id__in=qs).defer('site__envelope', 'site__geom', 'site__point')\n return qs", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def get_deals_list(self, session) -> List:\n\n deals = session.query(\n Deals.id,\n Deals.linkedin,\n Deals.leadgen_id\n ).all()\n\n return deals", "def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n\n exchang_obj = 
self.pool.get('exchange.order')\n res ={}\n exchang_ids = context.get('active_ids', [])\n if not exchang_ids:\n return res\n\n result = []\n for req in exchang_obj.browse(cr, uid, exchang_ids, context=context):\n for product in req.order_line:\n result.append(self.__create_products(product))\n res.update({'products_ids': result})\n if 'current_date' in fields:\n res.update({'current_date': time.strftime('%Y-%m-%d %H:%M:%S')})\n return res", "def get_queryset(self):\n qs = super(JobActiveMixin, self).get_queryset()\n return qs.actives()", "def list(self,request,*args,**kwargs):\n response=super(ListAPIView,self).list(request,*args,**kwargs)\n #add applied_filters to the response which is set when filter_queryset method is called\n response=self.addAppliedFilters(response)\n #fetch data from the related views\n return self.fetch_related(request,response,*args,**kwargs)", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def get_activities(cls):\n objs = cls.objects\n return objs", "def list(self, request):\n\n records = filter_against_records(request)\n \n if 'faculty_id' in request.query_params:\n faculty = Faculties.objects.filter(id=request.query_params.get('faculty_id'))[0]\n departments = Departments.objects.filter(faculty_id=model_to_dict(faculty)['id'])\n for department in departments:\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'department_id' in request.query_params:\n department = Departments.objects.filter(id=request.query_params.get('department_id'))[0]\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'start_year_id' in request.query_params:\n start_year = StartYears.objects.filter(id=request.query_params.get('start_year_id'))[0]\n education_programs = EducationPrograms.objects.filter(start_year_id=model_to_dict(start_year)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'specialization_id' in request.query_params:\n specialization = Specializations.objects.filter(id=request.query_params.get('specialization_id'))[0]\n education_programs = 
EducationPrograms.objects.filter(specialization_id=model_to_dict(specialization)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_level_id' in request.query_params:\n education_level = EducationLevels.objects.filter(id=request.query_params.get('education_level_id'))[0]\n education_programs = EducationPrograms.objects.filter(education_level_id=model_to_dict(education_level)['id'])\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_program_id' in request.query_params:\n education_program = EducationPrograms.objects.filter(id=request.query_params.get('education_program_id'))[0]\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'student_id' in request.query_params:\n records = records.filter(student_id=request.query_params.get('student_id'))\n\n \n\n \n \n \n students = Students.objects.all()\n res = []\n for student in students:\n student_records = records.filter(student_id=model_to_dict(student)['id'])\n if len(student_records) > 0:\n res.append(student)\n\n return Response(normalize_students(res))", "def get_queryset(self):\r\n if self.request.user.is_authenticated:\r\n activitystreams = ActivityStream.objects.all().order_by('created').reverse()\r\n follows = Follow.objects.filter(source=self.request.user).all()\r\n from subscription.models import Subscription\r\n subscriptions = Subscription.objects.filter(user=self.request.user).all()\r\n subscribed_communities = []\r\n to_be_filtered = []\r\n follow_targets = []\r\n for subscription in subscriptions:\r\n subscribed_communities.append(str(subscription.community.id))\r\n for follow in follows:\r\n follow_targets.append(str(follow.target.id))\r\n for activitystream in activitystreams:\r\n json_data = json.loads(activitystream.data)\r\n if \"actor\" in json_data:\r\n json_actor = json_data['actor']\r\n if \"http://\" + SERVER_ADDRESS + \"/users/view/\" in json_actor:\r\n json_actor = json_actor.replace(\"http://\" + SERVER_ADDRESS + \"/users/view/\", \"\")\r\n if str(json_actor) in follow_targets:\r\n to_be_filtered.append(activitystream.id)\r\n if \"target\" in json_data:\r\n json_target = json_data['target']\r\n if \"http://\" + SERVER_ADDRESS + \"/users/view/\" in json_target:\r\n json_target = json_target.replace(\"http://\" + SERVER_ADDRESS + \"/users/view/\", \"\")\r\n if str(json_target) == str(self.request.user.id):\r\n to_be_filtered.append(activitystream.id)\r\n if \"http://\" + SERVER_ADDRESS + \"/communities/\" in json_target:\r\n json_target = json_target.replace(\"http://\" + SERVER_ADDRESS + \"/communities/\", \"\")\r\n 
if json_target in subscribed_communities:\r\n to_be_filtered.append(activitystream.id)\r\n activitystreams = activitystreams.filter(id__in=to_be_filtered)\r\n return activitystreams\r\n else:\r\n return None", "def get_queryset(self):\n return Participant.active.all()", "def fetch_activities(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch list of athlete's activities\n activities = []\n page = 1\n while True:\n params = {\"per_page\": MAX_ACTIVITIES_PER_PAGE, \"page\": page}\n r = requests.get(API_URL + \"/athlete/activities\", headers=headers, params=params)\n new_activities = r.json()\n\n if \"errors\" in new_activities:\n raise AuthError(new_activities[\"message\"])\n activities.extend(new_activities)\n\n # Continue fetching activities if necessary\n if len(new_activities) == MAX_ACTIVITIES_PER_PAGE:\n page += 1\n else:\n break\n\n return activities", "async def get_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from active_monitor_contacts, contacts\n where active_monitor_contacts.active_monitor_id = %s\n and active_monitor_contacts.contact_id = contacts.id\"\"\"\n contacts = [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))]\n return contacts", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def browse_w_order(self, cr, uid, ids, context=None, list_class=None, fields_process={}):\n res = self.browse(cr, uid, ids, context, list_class, fields_process)\n resultat = []\n for id in ids:\n resultat += [x for x in res if x.id == id]\n return resultat", "def _get_listings(self):\n listing_ids = self.context.listing_ids\n if len(listing_ids) == 0:\n return\n listing_ids = [lid.lower() for lid in listing_ids]\n params = {\n 'limit': 0,\n 'offset': 0,\n 'lang': self.portal_state.language(),\n }\n params.update({\n 'listing_ids': listing_ids,\n })\n params = prepare_search_params(params)\n results = search(params, batching=False, context=self.context)\n if results is None or len(results) == 0:\n return\n\n # sort the results based on the listing_ids\n results = [(item['id']['value'], item) for item in results]\n results = dict(results)\n return [results.get(id) for id in listing_ids if id in results]", "def get_prefetched_queryset(self, *args, **kwargs):\n\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .prefetch_related(\n \"assignment_related_users\",\n \"agenda_items\",\n \"lists_of_speakers\",\n \"tags\",\n \"attachments\",\n \"polls\",\n \"polls__options\",\n )\n )", "def 
get_invited_polls(self):\n\n invited_polls = []\n for poll_user in PollUser.objects.filter(user=self):\n invited_polls.append(poll_user.poll)\n\n return invited_polls", "def get_queryset(self):\n return self.request.user.contacts.all()", "def get_ads():\n return coll_ad.distinct(KEY_AD_ID)", "def get_translated_ids(id):", "def prepare_related_incidents(self, object):\n roles = ActorRole.objects.filter(\n actor=object.id).filter(incident__isnull=False)\n\n related_incidents = [\n '/api/v1/incident/{0}/'.format(b.id)\n for ar in roles\n for b in ar.incident_set.all()\n ]\n\n return related_incidents", "def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)", "def get_ids(self) -> List[str]:", "def queryset(self, request):\n qs = super(AdRepOrderAdmin, self).queryset(request)\n qs = AdRepOrder.objects.select_related().filter(id__in=qs\n ).defer('ad_rep__site__envelope',\n 'ad_rep__site__geom',\n 'ad_rep__site__point')\n return qs", "def get_incidents(self) -> tuple[list[Any], Any, Any | None]:\n timestamp = None\n fetch_limit = arg_to_number(self.fetch_limit)\n fetch_time = self.fetch_time\n if not fetch_limit or not fetch_time:\n raise DemistoException('Missing parameter - fetch limit or fetch time')\n last_run = demisto.getLastRun()\n if last_run and last_run.get('timestamp'):\n timestamp = last_run.get('timestamp', '')\n last_fetched_ids = last_run.get('last_fetched_ids', [])\n else:\n if last_fetch := arg_to_datetime(fetch_time, required=True):\n # convert to ISO 8601 format and add Z suffix\n timestamp = last_fetch.strftime(DATE_FORMAT)\n last_fetched_ids = []\n\n page_size = '100'\n # set the until argument to prevent duplicates\n until = get_now_time()\n response = self.list_incidents_request(page_size, '0', until, timestamp)\n if not response.get('items'):\n return [], last_fetched_ids, timestamp\n\n page_number = response.get('totalPages', 1) - 1\n total = 0\n total_items: list[dict] = []\n while total < fetch_limit and page_number >= 0:\n try:\n response = self.list_incidents_request(page_size, page_number, until, timestamp)\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n items = response.get('items', [])\n new_items = remove_duplicates_for_fetch(items, last_fetched_ids)\n # items order is from old to new , add new items at the start of list to maintain order\n total_items = new_items + total_items\n total += len(new_items)\n page_number -= 1\n\n # bring the last 'fetch_limit' items, as order is reversed\n total_items = total_items[len(total_items) - fetch_limit:]\n return total_items, last_fetched_ids, timestamp", "def banner_detail(request, banner_id):\n banner = Banner.objects.get(id=banner_id)\n is_current_banner = True if banner in get_current_banners() else False\n\n tag_strs = request.GET.get('tags', '').split(',')\n tag_strs = [t for t in tag_strs if t != u'']\n tag_ids = [tag.id for tag in Tag.objects.filter(slug__in=tag_strs)]\n page_num = request.GET.get('page_num')\n\n ideas = Idea.objects.related_with_counts().filter(\n banner=banner,\n state=State.objects.get(name='Active')\n ).order_by('-time')\n\n # Tag Filter\n for tag_id in 
tag_ids:\n ideas = ideas.filter(tags__pk=tag_id).distinct()\n\n IDEAS_PER_PAGE = getattr(settings, 'IDEAS_PER_PAGE', 10)\n pager = Paginator(ideas, IDEAS_PER_PAGE)\n # Boiler plate paging -- @todo abstract this\n try:\n page = pager.page(page_num)\n except PageNotAnInteger:\n page = pager.page(1)\n except EmptyPage:\n page = pager.page(pager.num_pages)\n\n # List of tags that are associated with an idea in the banner list\n tags = Tag.objects.filter(\n taggit_taggeditem_items__content_type__name='idea',\n taggit_taggeditem_items__object_id__in=ideas\n ).annotate(count=Count('taggit_taggeditem_items')\n ).order_by('-count', 'name')[:25]\n\n for tag in tags:\n if tag.slug in tag_strs:\n tag_slugs = \",\".join([s for s in tag_strs if s != tag.slug])\n tag.active = True\n else:\n tag_slugs = \",\".join(tag_strs + [tag.slug])\n tag.active = False\n if tag_strs == [tag.slug]:\n tag.tag_url = \"%s\" % (reverse('idea:banner_detail',\n args=(banner_id,)))\n else:\n tag.tag_url = \"%s?tags=%s\" % (reverse('idea:banner_detail',\n args=(banner_id,)),\n tag_slugs)\n\n return _render(request, 'idea/banner_detail.html', {\n 'ideas': page,\n 'tags': tags, # list of tags associated with banner ideas\n 'banner': banner,\n 'is_current_banner': is_current_banner,\n })", "def toggle_active(self):\n res = super().toggle_active()\n Product = self.env['lunch.product'].with_context(active_test=False)\n all_products = Product.search([('supplier_id', 'in', self.ids)])\n all_products._sync_active_from_related()\n return res", "def _query_get(self, cr, uid, obj='l', context=None):\n \n fiscalyear_obj = self.pool.get('account.fiscalyear')\n fiscalperiod_obj = self.pool.get('account.period')\n account_obj = self.pool.get('account.account')\n journal_obj = self.pool.get('account.journal')\n initial_bal = context.get('initial_bal', False)\n fiscalyear_ids = []\n if context is None:\n context = {}\n #Only Valid Move Lines (BALANCE MOVES)\n query = obj+\".state <> 'draft' \"\n #Filter by Company\n if context.get('company_id', False):\n query += \" AND \" +obj+\".company_id = %s\" % context['company_id']\n #Filter by Move State\n if context.get('state', False):\n if type(context['state']) in (list,tuple) :\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state !='reversed') \" \n # query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state IN (\"+st+\")) \"\n elif context['state'].lower() != 'all':\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE account_move.state != '\"+context['state']+\"') \"\n #Get Selected FiscalYear\n if not context.get('fiscalyear', False):\n if context.get('all_fiscalyear', False):\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n else:\n if context.get('date_from', False):\n #fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n date_from=context.get('date_from', False)\n date_from2 = datetime.strptime( date_from, '%Y-%m-%d')\n f_code=date_from2.year \n fiscalyear_ids = fiscalyear_obj.search(cr,uid, [ ('code', '=', f_code)])\n else:\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n \n else:\n #make the context['fiscalyear'] in one dimention list or ids\n fiscalyear_ids = type(context['fiscalyear']) is list and context['fiscalyear'] or [context['fiscalyear']]\n fiscalyear_clause = (','.join(map(str, fiscalyear_ids)))\n #Duration Filters\n\n if context.get('date_from', False) and context.get('date_to', False):\n \n if initial_bal:\n \n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), 
('fiscalyear_id', 'in', fiscalyear_ids)])\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n \n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) ) \" % (fiscalyear_clause,)\n\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <='%s') \" %(context['date_from'],)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <'%s') \" %(date_from,)\n\n else:\n if context['type']=='statement':\n \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') \"%(context['date_from'],context['date_to']) \n elif context['type']=='balance':\n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])\n\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date > '%s' AND date <= '%s') \"%(date_from,context['date_to']) \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') \"%(context['date_from'],context['date_to']) \n if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False) and context.get('type', False)!='statement':\n if initial_bal:\n period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id\n first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id), ('fiscalyear_id', 'in', fiscalyear_ids)], order='date_start')\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period[0], first_period[first_period.index(context['period_from'])-1])\n else:\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])\n\n if context.get('periods', False) and context.get('type', False)!='statement':\n period_ids = ','.join(map(str, context['periods']))\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) \" % (fiscalyear_clause, period_ids)\n else:\n sub_query = \"\"\n if not context.get('date_from', False) or context.get('period_from', False):\n special = initial_bal and (not context.get('date_from', False))\n sub_query = \"AND special = %s\"%(special,)\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) %s) \" % (fiscalyear_clause, sub_query)\n\n #Filter by Journal\n #situation_journal = set(journal_obj.search(cr, uid, [('type', '=', 'situation')], context=context))\n #selected_journals = set(context.get('journal_ids', False) or journal_obj.search(cr, uid, [], context=context))\n #TEST: situation journal when opening balance & not\n #journal_ids = context.get('selected_journals', False) and selected_journals or \\\n # (initial_bal and list(selected_journals | situation_journal) or list(selected_journals-situation_journal))\n # if journal_ids:\n # query += ' AND '+obj+'.journal_id IN (%s) ' % ','.join(map(str, journal_ids))\n #if not 
context.get('selected_journals', False) and not initial_bal and situation_journal:\n #query += ' AND '+obj+'.journal_id NOT IN (%s) ' % ','.join(map(str, situation_journal))\n #Filter by chart of Account\n if context.get('chart_account_id', False):\n child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)\n query += ' AND '+obj+'.account_id IN (%s) ' % ','.join(map(str, child_ids))\n #Filter by Move Line Statement\n if 'statement_id' in context:\n if context.get('statement_id', False):\n query += ' AND '+obj+'.statement_id IN (%s) ' % ','.join(map(str, context['statement_id']))\n else:\n query += ' AND '+obj+'.statement_id IS NULL '\n #Filter by Move Line\n if context.get('move_line_ids', False):\n query += ' AND '+obj+'.id IN (%s) ' % ','.join(map(str, context['move_line_ids']))\n #Filter by Analytic Account Type\n if context.get('analytic_display', False):\n query += ' AND '+obj+\".analytic_account_id IN (SELECT id FROM account_analytic_account WHERE analytic_type=%s) \" % (context.get('analytic_display', False).id,)\n\n return query", "def _get_opportunities_data(self, cr, uid, ids, field_name, arg, context=None):\r\n obj = self.pool.get('crm.lead')\r\n res = dict.fromkeys(ids, False)\r\n month_begin = date.today().replace(day=1)\r\n date_begin = month_begin - relativedelta.relativedelta(months=self._period_number - 1)\r\n date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1])\r\n lead_pre_domain = [('create_date', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),\r\n ('create_date', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),\r\n ('type', '=', 'lead')]\r\n opp_pre_domain = [('date_deadline', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),\r\n ('date_deadline', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),\r\n ('type', '=', 'opportunity')]\r\n for id in ids:\r\n res[id] = dict()\r\n lead_domain = lead_pre_domain + [('section_id', '=', id)]\r\n opp_domain = opp_pre_domain + [('section_id', '=', id)]\r\n res[id]['monthly_open_leads'] = json.dumps(self.__get_bar_values(cr, uid, obj, lead_domain, ['create_date'], 'create_date_count', 'create_date', context=context))\r\n res[id]['monthly_planned_revenue'] = json.dumps(self.__get_bar_values(cr, uid, obj, opp_domain, ['planned_revenue', 'date_deadline'], 'planned_revenue', 'date_deadline', context=context))\r\n return res", "def get_context_data(self, **kwargs):\n\n context = super(EntitiesView, self).get_context_data(**kwargs)\n\n # Get entities\n context['entities'] = get_entities_list(\n self.request.session.get('token', False),\n self.kwargs.get('aiid')\n ).get('entities')\n\n context['allow_regex'] = get_experiments_list(\n self.request.session.get('token', False),\n self.kwargs.get('aiid', False),\n 'regex-entity'\n ).get('state')\n\n return context", "def get_queryset(self):\n return super(ActiveUsersManager, self).get_queryset().filter(user__is_active=True)", "def get_objects(self,ids):\n return model.objects.filter(pk__in=ids).order_by(search_field)", "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def join_ids(self) -> List[int]:\n return self._join_ids", "def list(self, request):\n # Get all Post records from the database\n posts = Post.objects.all()\n \n \n\n # Support filtering Posts by type\n # 
http://localhost:8000/Posts?type=1\n #\n # That URL will retrieve all tabletop Posts\n \n\n category = self.request.query_params.get('category', None)\n if category is not None:\n posts = posts.filter(category__id=category)\n \n user = RareUser.objects.get(user=request.auth.user)\n active = self.request.query_params.get('active', None)\n my_subscriptions=Subscription.objects.filter(follower_id=user.id)\n # print(my_subscriptions)\n \n if active is not None:\n print(\"my post navbar is being clicked\")\n # 1)get the posts where the user on the post equals the id on the user\n\n # 2)get the subscriptions where the follower on the subscription equals the id on the user\n # 3)get the posts where the user on the post equals the author in the subscription\n\n home_page_posts=[]\n\n followed_users=RareUser.objects.filter(rareusers_author__follower=user)\n for author in followed_users:\n subscribed_post=list(posts.filter(user=author))\n home_page_posts=home_page_posts+subscribed_post\n\n only_my_posts = list(posts.filter(user__id=user.id))\n home_page_posts=home_page_posts+only_my_posts\n\n # for subscription in my_subscriptions:\n \n # subscribed_post=posts.filter(user__id=subscription.author_id)\n # # my_list.append(subscribed_post)\n # # print(subscribed_post)\n # # my_list.append(only_my_posts)\n \n posts=home_page_posts\n \n \n users = self.request.query_params.get('user', None)\n if users is not None:\n \n posts = posts.filter(user__id=user)\n \n\n title = self.request.query_params.get('title', None)\n if title is not None:\n posts = posts.filter(title__contains=title)\n\n # subscribers=Subscription.objects.filter(follower=user.id)\n # for subscriber in subscribers:\n # subscriptionPosts=posts.filter(user=subscriber.author)\n # posts.append(subscriptionPosts)\n\n for post in posts:\n if post.user == user:\n post.my_post =True\n else:\n post.my_post =False\n\n \n\n serializer = PostSerializer(\n posts, many=True, context={'request': request})\n\n return Response(serializer.data)", "def get_queryset(self):\n\n userteammates = TeamMate.objects.filter(user=self.request.user)\n teams = []\n for teammateobject in userteammates:\n teams.append(teammateobject.team)\n\n\n #return Vote.objects.filter(team__in=teams)\n return Vote.objects.filter(choice__poll=1)", "def test_get_activities(self):\n pass", "def get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"option\", \"option__poll\")\n )", "def get_context_data(self):\n calls = Call.objects.all()\n return {\"calls\": reversed(calls)}", "def get(self):\r\n context = DataContext()\r\n build = linkBuilder(request.base_url)\r\n return [build(x) for x in context]", "def send_email_to_assigned_user(recipients, lead_id, domain='demo.django-crm.io', protocol='http', source=''):\n lead = Lead.objects.get(id=lead_id)\n created_by = lead.created_by\n blocked_domains = BlockedDomain.objects.values_list('domain', flat=True)\n blocked_emails = BlockedEmail.objects.values_list('email', flat=True)\n for user in recipients:\n recipients_list = []\n user = User.objects.filter(id=user, is_active=True).first()\n if user:\n if (user.email not in blocked_emails) and (user.email.split('@')[-1] not in blocked_domains):\n recipients_list.append(user.email)\n context = {}\n context[\"url\"] = protocol + '://' + domain + \\\n reverse('leads:view_lead', args=(lead.id,))\n context[\"user\"] = user\n context[\"lead\"] = lead\n context[\"created_by\"] = created_by\n context[\"source\"] = 
source\n subject = 'Assigned a lead for you. '\n html_content = render_to_string(\n 'assigned_to/leads_assigned.html', context=context)\n msg = EmailMessage(\n subject,\n html_content,\n to=recipients_list\n )\n msg.content_subtype = \"html\"\n msg.send()", "def candidates_retrieve_for_api(office_id, office_we_vote_id):\n # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers\n # a ballot data lookup from Google Civic, like voterBallotItems does\n\n if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):\n status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'\n json_data = {\n 'status': status,\n 'success': False,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': 0,\n 'candidate_list': [],\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n candidate_list = []\n candidates_to_display = []\n google_civic_election_id = 0\n try:\n candidate_list_object = CandidateCampaignListManager()\n results = candidate_list_object.retrieve_all_candidates_for_office(office_id, office_we_vote_id)\n success = results['success']\n status = results['status']\n candidate_list = results['candidate_list']\n except Exception as e:\n status = 'FAILED candidates_retrieve. ' \\\n '{error} [type: {error_type}]'.format(error=e, error_type=type(e))\n handle_exception(e, logger=logger, exception_message=status)\n success = False\n\n if success:\n # Reset office_we_vote_id and office_id so we are sure that it matches what we pull from the database\n office_id = 0\n office_we_vote_id = ''\n for candidate in candidate_list:\n one_candidate = {\n 'id': candidate.id,\n 'we_vote_id': candidate.we_vote_id,\n 'ballot_item_display_name': candidate.display_candidate_name(),\n 'candidate_photo_url_large': candidate.we_vote_hosted_profile_image_url_large\n if positive_value_exists(candidate.we_vote_hosted_profile_image_url_large)\n else candidate.candidate_photo_url(),\n 'candidate_photo_url_medium': candidate.we_vote_hosted_profile_image_url_medium,\n 'candidate_photo_url_tiny': candidate.we_vote_hosted_profile_image_url_tiny,\n 'party': candidate.political_party_display(),\n 'order_on_ballot': candidate.order_on_ballot,\n 'kind_of_ballot_item': CANDIDATE,\n }\n candidates_to_display.append(one_candidate.copy())\n # Capture the office_we_vote_id and google_civic_election_id so we can return\n if not positive_value_exists(office_id) and candidate.contest_office_id:\n office_id = candidate.contest_office_id\n if not positive_value_exists(office_we_vote_id) and candidate.contest_office_we_vote_id:\n office_we_vote_id = candidate.contest_office_we_vote_id\n if not positive_value_exists(google_civic_election_id) and candidate.google_civic_election_id:\n google_civic_election_id = candidate.google_civic_election_id\n\n if len(candidates_to_display):\n status = 'CANDIDATES_RETRIEVED'\n else:\n status = 'NO_CANDIDATES_RETRIEVED'\n\n json_data = {\n 'status': status,\n 'success': True,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': google_civic_election_id,\n 'candidate_list': candidates_to_display,\n }\n else:\n json_data = {\n 'status': status,\n 'success': False,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': google_civic_election_id,\n 'candidate_list': [],\n }\n\n return HttpResponse(json.dumps(json_data), content_type='application/json')", "def 
get_prefetched_queryset(self, *args, **kwargs):\n return (\n super()\n .get_prefetched_queryset(*args, **kwargs)\n .select_related(\"user\", \"poll\")\n .prefetch_related(\"votes\")\n )", "def get_adcreatives(self, account_id, fields, batch=False):\n path = 'act_%s/adcreatives' % account_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)", "def show_activities(bucketlist_id):\n form = ActivityForm(request.form)\n if logged_in:\n\n # Check if buck has activities\n all_activities = Activity.activities\n buck_activities = {k:v for k, v in all_activities.items() if bucketlist_id==v['bucketlist_id']}\n if buck_activities:\n return render_template(\"show_activities.html\", form=form, bucketlist_id=bucketlist_id, data=buck_activities)\n\n # If buck ids do not match\n return render_template('show_activities.html', form=form, bucketlist_id=bucketlist_id)\n\n # If user is not logged in:\n return login_required()", "def test_intents(\n self, mock_get_ai_details, mock_get_ai,\n mock_get_entities_list, mock_get_intent_list, mock_get_categories\n ):\n\n # We mock API calls\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n mock_get_entities_list.return_value.json.return_value = []\n\n mock_get_intent_list.return_value = {'intents': [\n {'intent_name': 'intent_1'},\n {'intent_name': 'intent_2'},\n {'intent_name': 'intent_3'},\n {'intent_name': 'intent_4'},\n {'intent_name': 'intent_5'},\n {'intent_name': 'intent_6'}\n ]}\n\n response = self.client.get(reverse(\n 'studio:intents',\n kwargs={\n 'aiid': self.ai['aiid']\n }\n ))\n\n self.assertContains(response, 'intent_1')\n self.assertContains(response, 'intent_2')\n self.assertContains(response, 'intent_3')\n self.assertContains(response, 'intent_4')\n self.assertContains(response, 'intent_5')\n self.assertContains(response, 'intent_6')", "def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n context['list_result'] = self.queryset\n get_student_ranking(context)\n get_college_ranking(context)\n return context", "def prefetchObjects(self, ids, alsoFetchLinkedObjects=True):\n _ = self.getObjectsByID(ids, alsoFetchLinkedObjects=alsoFetchLinkedObjects)", "def list(self, ids, merge_dict=None):\r\n objs = sql(self.db, self.table.refs.belongs(ids) & (self.table.auth_user == self.auth.user_id),\r\n *self.select_columns)\r\n ret = {self.resource_name: dict((x.pop('refs'), x) for x in objs)}\r\n if merge_dict:\r\n merge_dict.get('PA', {}).update(ret)\r\n return merge_dict\r\n return dict(PA=ret)", "def _fetch_placeholders_from_ids(self, obj):\n return Role.nested_object_traversal(\n obj, lambda x: self.placeholders[x.value], PlaceholderId\n )", "def external_ids(self, **kwargs):\n path = self._get_movie_id_path('external_ids')\n resp = self._get_method(path, kwargs)\n return resp", "def get_queryset(self):\n user = self.request.user\n collabLists = ListObject.objects.filter(collaborators__id=user.id)\n return collabLists", "def _get_listings(self):\n listing_ids = self.config.get('listing_ids', [])\n if len(listing_ids) == 0:\n return\n listing_ids = [lid.lower() for lid in listing_ids]\n params = {\n 'limit': 0,\n 'offset': 0,\n 'lang': self.portal_state.language(),\n }\n params.update(self.config)\n params['listing_ids'] = listing_ids\n params = prepare_search_params(params)\n results = search(params, batching=False, context=self.context)\n if results is None or len(results) == 0:\n return\n\n # sort the results based on the listing_ids\n results 
= [(item['id']['value'], item) for item in results]\n results = dict(results)\n self._listings = [\n results.get(id) for id in listing_ids if id in results\n ]", "async def get_in_active_users_async(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch_async().get_result()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n language_code = get_language()\n # Changed since T278337: add filter to queryset before we build the partners\n # dictionary\n partner_filtered_list = MainPartnerFilter(\n self.request.GET, queryset=self.get_queryset(), language_code=language_code\n )\n context[\"filter\"] = partner_filtered_list\n\n user = self.request.user\n if user.is_authenticated:\n user = User.objects.select_related(\"editor\").get(pk=self.request.user.pk)\n context[\"user\"] = user\n context[\"editor\"] = user.editor\n partners_list = []\n partner_search_list = []\n for partner in partner_filtered_list.qs:\n partner_dict = {}\n partner_dict[\"pk\"] = partner.pk\n partner_dict[\"company_name\"] = partner.company_name\n try:\n partner_dict[\"partner_logo\"] = partner.logos.logo.url\n except ObjectDoesNotExist:\n partner_dict[\"partner_logo\"] = None\n partner_dict[\"is_not_available\"] = partner.is_not_available\n partner_dict[\"is_waitlisted\"] = partner.is_waitlisted\n new_tags = partner.new_tags\n # Getting tags from locale files\n translated_tags = get_tag_names(language_code, new_tags)\n partner_dict[\"tags\"] = translated_tags\n partner_dict[\"languages\"] = partner.get_languages\n # Obtaining translated partner description\n partner_short_description_key = \"{pk}_short_description\".format(\n pk=partner.pk\n )\n partner_description_key = \"{pk}_description\".format(pk=partner.pk)\n partner_descriptions = get_partner_description(\n language_code, partner_short_description_key, partner_description_key\n )\n\n partner_dict[\"short_description\"] = partner_descriptions[\n \"short_description\"\n ]\n partner_dict[\"description\"] = partner_descriptions[\"description\"]\n partners_list.append(partner_dict)\n if partner_descriptions[\"description\"]:\n partner_desc = bleach.clean(\n partner_descriptions[\"description\"],\n tags=[],\n strip=True,\n )\n else:\n partner_desc = \"\"\n\n if partner_descriptions[\"short_description\"]:\n partner_short_desc = bleach.clean(\n partner_descriptions[\"short_description\"],\n tags=[],\n strip=True,\n )\n else:\n partner_short_desc = \"\"\n\n partner_search_list.append(\n {\n \"partner_pk\": partner.pk,\n \"partner_name\": partner.company_name,\n \"partner_short_description\": partner_short_desc,\n \"partner_description\": partner_desc,\n }\n )\n context[\"partners_list\"] = partners_list\n context[\"partner_search_list\"] = partner_search_list\n\n return context", "def serve_recos(ids, ref_catalog):\r\n desc_list = []\r\n for desc_id in ids:\r\n desc_list.append(ref_catalog[ref_catalog['id'] == desc_id].iloc[0]['description'])\r\n return desc_list", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def active_comics():\n # FUTURE: Should not include ended comics?\n return 
Comic.objects.exclude(active=False)", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = 
comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def getActiveLoans(self,\n begin = None,\n end = None):\n\n raw = handleBeginEndCall(begin, end, self.api.returnActiveLoans)\n\n typeActiveLoanList = partial(ensure.listOf,\n ensurer=partial(ensure.dictOf, ensureRest=ensure.fail,\n ensurers={\n \"id\": ensure.typeInt,\n \"currency\": ensure.typeString,\n \"duration\": ensure.typeFloat,\n \"rate\": ensure.typeFloat,\n \"amount\": ensure.typeFloat,\n \"autoRenew\": ensure.typeInt,\n \"date\": typeDateTime,\n \"fees\": ensure.typeFloat}))\n\n return ensure.dictOf(raw=raw, path=\"activeLoans\",\n ensureRest=ensure.fail, ensurers={\n \"provided\": typeActiveLoanList,\n \"used\": typeActiveLoanList})", "def view_batch(request, id):\n\n bh_rec = BatchHeaders.objects.get(id=id)\n\n context = {\n 'count': bh_rec.total_count,\n 'amount': bh_rec.total_amount,\n 'status': bh_rec.status.text_code,\n 'due_date': bh_rec.due_date,\n 'created': bh_rec.created,\n 'batch_id': bh_rec.id,\n 'batch_ref': bh_rec.reference,\n 'success': request.GET.get('success'),\n 'sent': bh_rec.sent,\n 'uk_due_date': bh_rec.due_date.strftime('%d/%m/%Y'),\n 'uk_call_date': bh_rec.call_date.strftime('%d/%m/%Y'),\n 'funder_code': bh_rec.funder.funder_code\n }\n\n if bh_rec.status == OPEN:\n\n try:\n batch_lock = BatchLock.objects.filter(batch_header=bh_rec, released__isnull=True).order_by('-id')[0]\n except:\n batch_lock = False\n\n if batch_lock:\n context['batch_lock'] = batch_lock\n context['batch_lock_check'] = True\n else:\n batch_lock_session_id = str(uuid.uuid1())\n batch_lock = BatchLock(batch_header=bh_rec, user=request.user, session_id=batch_lock_session_id)\n batch_lock.save()\n\n context['batch_lock'] = batch_lock\n\n 
resync = False\n url = reverse('core_dd_drawdowns:view_batch', args=[id])\n\n if request.META.get('HTTP_REFERER'):\n if not re.search(url, request.META['HTTP_REFERER']):\n resync = True\n else:\n resync = True\n\n if resync:\n resync_drawdowns_with_dd_history(bh_rec.reference)\n\n else:\n context['history'] = True\n\n query = {\n 'batch_header': bh_rec\n }\n for k in ('agreement_id__contains', 'amount', 'ddi_status', 'status'):\n if request.GET.get(k):\n query[k] = request.GET[k]\n context['filter'] = query\n\n if query.get('ddi_status'):\n if query['ddi_status'] == 'No Setup':\n del(query['ddi_status'])\n query['dd_reference__isnull'] = True\n else:\n query['ddi_status'] = ncf_dd_status_text.objects.get(dd_text_description=query.get('ddi_status'))\n\n recs = DrawDown.objects.filter(**query)\n\n context['filtered_count'] = recs.count()\n\n paginator = Paginator(recs, 10)\n page = request.GET.get('page')\n try:\n pub = paginator.page(page)\n except PageNotAnInteger:\n pub = paginator.page(1)\n except EmptyPage:\n pub = paginator.page(paginator.num_pages)\n\n context['records'] = pub\n\n # TODO: GO PAF - Start\n context['go_funder1'] = go_funder.objects.get(funder_code='1')\n context['go_funder2'] = go_funder.objects.get(funder_code='2')\n context['go_funder3'] = go_funder.objects.get(funder_code='3')\n context['go_funder4'] = go_funder.objects.get(funder_code='4')\n context['go_funder5'] = go_funder.objects.get(funder_code='5')\n # TODO: GO PAF - End\n\n context.update({'forecast': forecast_prediction(bh_rec.reference)})\n\n if query.get('ddi_status'):\n query['ddi_status'] = '{}'.format(query['ddi_status'])\n\n context['query'] = query\n\n return render(request, 'batch_screen.html', context)", "def get_in_active_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query(UserModel.is_active == False).fetch()]\n return jsonify({'status': True, 'payload': users_list, 'message': 'successfully retrieved active users'}), 200", "def get_queryset(self):\n return self.queryset.filter(user=self.request.user, integration=integration_id)", "def get_lists(self):\n return [{\"id\": lst[\"list_id\"], \"name\": lst[\"name\"]}\n for lst in List.objects(user_id=self.user_id, active=True)]", "def resolve_entities(root, info, ids: list[int], **kwargs):\n return Entity.objects.filter(id__in=ids)", "def get_all_activities_list(self):\n self.__load_activities_from_file_into_memory()\n return super().get_all_activities_list()", "def get_user_active_list(self, user_id):\n return self.api.get_active_version_manager_by_user_id(user_id)", "def visible(self):\n return self.get_queryset().filter(\n record_status=self.model.ACTIVE, merged_with=None)", "def get(self, request, *args, **kwargs):\n my_normal_post_lists = NormalPosts.objects.filter(uploded_by=request.user.normalprofile).order_by(\"-id\")\n return render(request, self.template_name, {\n 'my_normal_post_lists': my_normal_post_lists,\n })", "def systray_get_activities(self):\n activities = super(Users, self).systray_get_activities()\n for activity in activities:\n if activity.get('model') == 'mailing.mailing':\n activities.remove(activity)\n query = \"\"\"SELECT m.mailing_type, count(*), act.res_model as model, act.res_id,\n CASE\n WHEN %(today)s::date - act.date_deadline::date = 0 Then 'today'\n WHEN %(today)s::date - act.date_deadline::date > 0 Then 'overdue'\n WHEN %(today)s::date - act.date_deadline::date < 0 Then 'planned'\n END AS states\n FROM mail_activity AS act\n JOIN mailing_mailing AS m ON act.res_id = m.id\n 
WHERE act.res_model = 'mailing.mailing' AND act.user_id = %(user_id)s \n GROUP BY m.mailing_type, states, act.res_model, act.res_id;\n \"\"\"\n self.env.cr.execute(query, {\n 'today': fields.Date.context_today(self),\n 'user_id': self.env.uid,\n })\n activity_data = self.env.cr.dictfetchall()\n \n user_activities = {}\n for act in activity_data:\n if not user_activities.get(act['mailing_type']):\n if act['mailing_type'] == 'sms':\n module = 'mass_mailing_sms'\n name = _('SMS Marketing')\n else:\n module = 'mass_mailing'\n name = _('Email Marketing')\n icon = module and modules.module.get_module_icon(module)\n res_ids = set()\n user_activities[act['mailing_type']] = {\n 'name': name,\n 'model': 'mailing.mailing',\n 'type': 'activity',\n 'icon': icon,\n 'total_count': 0, 'today_count': 0, 'overdue_count': 0, 'planned_count': 0,\n 'res_ids': res_ids,\n }\n user_activities[act['mailing_type']]['res_ids'].add(act['res_id'])\n user_activities[act['mailing_type']]['%s_count' % act['states']] += act['count']\n if act['states'] in ('today', 'overdue'):\n user_activities[act['mailing_type']]['total_count'] += act['count']\n\n for mailing_type in user_activities.keys():\n user_activities[mailing_type].update({\n 'actions': [{'icon': 'fa-clock-o', 'name': 'Summary',}],\n 'domain': json.dumps([['activity_ids.res_id', 'in', list(user_activities[mailing_type]['res_ids'])]])\n })\n activities.extend(list(user_activities.values()))\n break\n\n return activities", "async def autorole_list(self, ctx):\n roles = await self.bot.db.execute(\n \"SELECT role_id FROM autorole WHERE guild_id = %s\",\n ctx.guild.id,\n as_list=True,\n )\n content = discord.Embed(\n title=f\":scroll: Autoroles in {ctx.guild.name}\", color=int(\"ffd983\", 16)\n )\n rows = []\n for role_id in roles:\n rows.append(f\"<@&{role_id}> [`{role_id}`]\")\n\n if not rows:\n rows = [\"No roles have been set up yet!\"]\n\n await util.send_as_pages(ctx, content, rows)", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()", "def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()", "def get_queryset(self):\n video_id = self.get_related_video_id()\n\n if video_id is None: # backward behavior for stand-alone site context\n return super().get_queryset().none()\n\n queryset = super().get_queryset().filter(video_id=video_id)\n\n if self.request.resource is not None: # Then we are in an LTI context\n queryset = self._get_lti_queryset(queryset)\n\n if (\n not self.request.resource and self.request.user and self.action == \"list\"\n ): # Then we are in stand-alone site context\n queryset = self._get_standalone_queryset(queryset)\n\n if self.action == \"list_attendances\":\n # we only want live sessions that are registered or with live_attendance not empty\n queryset = queryset.filter(\n Q(is_registered=True)\n | ~(Q(live_attendance__isnull=True) | Q(live_attendance__exact={}))\n )\n\n return queryset", "def active_users(self, *args, **kwargs):\r\n return self._get('ActiveUsers', *args, **kwargs)", "def 
list_percelen_adapter(obj, request):\n return {\n 'id': obj.id\n }", "def get_activities(self, activity_ids=None, max_records=50):\r\n return self.connection.get_all_activities(self, activity_ids, max_records)", "def fetch_video_list_ids_via_preflight (self, list_from=0, list_to=50):\n payload = {\n 'fromRow': list_from,\n 'toRow': list_to,\n 'opaqueImageExtension': 'jpg',\n 'transparentImageExtension': 'png',\n '_': int(time()),\n 'authURL': self.user_data['authURL']\n }\n\n response = self._session_get(component='video_list_ids', params=payload, type='api')\n return self._process_response(response=response, component=self._get_api_url_for(component='video_list_ids'))", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def on_connect(self):\n if current_user.is_authenticated:\n polls = Poll.query \\\n .filter(User.rooms.any(User.id == current_user.id)) \\\n .filter(or_(Poll.visible.is_(True), Room.owner_id == current_user.id)).all()\n else:\n if session.get(\"rooms\") is not None:\n polls = Poll.query \\\n .filter(Room.id.in_(session.get(\"rooms\"))) \\\n .filter(Poll.visible.is_(True)).all()\n else:\n polls = []\n\n for poll in polls:\n join_room(poll.id)", "def get_all_sales_ids_for_customer_ids():\n\n # your code" ]
[ "0.6057764", "0.52192557", "0.5219104", "0.50897825", "0.5034502", "0.5022137", "0.4999737", "0.49886566", "0.49597502", "0.4957497", "0.49499336", "0.49414816", "0.49347013", "0.489892", "0.4863161", "0.48234197", "0.4823024", "0.48136124", "0.481186", "0.47834936", "0.47805104", "0.4776128", "0.47657222", "0.47245422", "0.46960744", "0.46852517", "0.46827942", "0.46762693", "0.46762693", "0.46762693", "0.46762693", "0.46680117", "0.465237", "0.46488672", "0.4617659", "0.461558", "0.46139842", "0.459997", "0.4589027", "0.45839208", "0.4573639", "0.457147", "0.45671266", "0.45636365", "0.45562562", "0.4554957", "0.4548187", "0.45444953", "0.45399064", "0.45389327", "0.4537893", "0.45318225", "0.4529455", "0.45271006", "0.45240393", "0.45082757", "0.4497998", "0.44924277", "0.4473781", "0.44630134", "0.44603854", "0.4459361", "0.4458521", "0.4453736", "0.44527102", "0.4446501", "0.44412082", "0.44274247", "0.44209823", "0.44173467", "0.44128957", "0.4403729", "0.43945527", "0.43933132", "0.4393024", "0.43890673", "0.43847972", "0.43815556", "0.437899", "0.43781674", "0.4377798", "0.43652064", "0.43611714", "0.4361118", "0.43562657", "0.4353474", "0.43496227", "0.43488166", "0.43448853", "0.43346524", "0.4330084", "0.43261623", "0.43260196", "0.43221858", "0.43195", "0.43146113", "0.4309163", "0.4308891", "0.43050364", "0.43048948" ]
0.69877815
0
Use lead_ids from the wizard and set them to the new stage
def action_multi_lead_stage(self, cr, uid, ids, context=None): if context is None: context = {} wizard = self.browse(cr, uid, ids[0], context=context) lead_ids = wizard.lead_ids if lead_ids: for lead in lead_ids: self.pool.get('crm.lead').write(cr, uid, [lead.id], {'stage_id':wizard.stage_id.id},context) return {'type': 'ir.actions.act_window_close'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onchange_stage_id_values(self, stage_id):\n if not stage_id:\n return {}\n print('1111')\n\n call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))\n call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))\n contact_meeting = len(self.env['contact.meeting'].browse(self.contact_meeting_ids))\n # file_attached = len(self.env['ir.attachment'].search([('res_model','=','res.partner'),('res_id','=',self.id)]))\n msg=''\n ## file attached\n file_attached = len(\n self.env['ir.attachment'].search([('res_model', '=', 'res.partner'), ('res_id', '=', self.id)]))\n if self.stage_id.id in (8, 16) and file_attached == 0:\n msg = msg + ' - Upload at least one file \\n'\n ##\n if self.stage_id.id == 2 and call_attempt == 0:\n msg = msg + ' - Call Attempt \\n'\n\n if self.stage_id.id == 3 and call_pitch == 0:\n msg = msg + ' - Call Pitch \\n'\n\n if self.stage_id.id == 9 and self.date_call_back_one == False:\n msg = msg + ' - Date (callback) '\n\n if self.stage_id.id == 10 and self.date_meeting_set == False:\n msg = msg + ' - Date (meeting set) \\n'\n\n if self.stage_id.id == 6 and self.date_preagreement == False:\n msg = msg + ' - Date (pre_agreement) \\n'\n\n ## individual and company contact\n if self.stage_id.id in (8,16) and self.mobile == False:\n msg = msg + ' - Mobile \\n'\n if self.stage_id.id in (8,16) and self.email == False:\n msg = msg + ' - Email \\n'\n if self.stage_id.id in (8, 16) and self.street == False:\n msg = msg + ' - Street in Adress \\n'\n if self.stage_id.id in (8,16) and self.lang == False:\n msg = msg + ' - Language \\n'\n if self.stage_id.id in (8, 16) and self.business_developer_id == False:\n msg = msg + ' - Business Developer \\n'\n if self.stage_id.id in (8,16) and self.vat == False:\n msg = msg + ' - TIN \\n'\n\n ## individual contact\n if self.stage_id.id in (8,16) and self.parent_id and self.parent_id.street== False:\n msg = msg + ' - Invoicing Address (Company Adress) \\n'\n if self.stage_id.id in (8,16) and self.inami == False:\n msg = msg + ' - INAMI \\n'\n if self.stage_id.id in (8,16) and self.subscription_type == False:\n msg = msg + ' - Subscription Type \\n'\n if self.stage_id.id in (8,16) and not self.title and self.is_company != True:\n msg = msg + ' - Title \\n'\n if self.stage_id.id in (8,16) and self.specialization == False:\n msg = msg + ' - Specialization \\n'\n ### Prospection process\n if self.stage_id.id in (8,16) and self.date_signed == False:\n msg = msg + ' - Date(Signed) \\n'\n if self.stage_id.id in (8, 16) and self.bd_signed == False:\n msg = msg + ' - Business Developer (Signed) \\n'\n if self.stage_id.id in (8, 16) and self.comment_signed == False:\n msg = msg + ' - Comment (Signed) \\n'\n\n ### Subscription details\n if self.stage_id.id in (8,16) and self.subscription_month == False:\n msg = msg + ' - Monthly subscription \\n'\n if self.stage_id.id in (8,16) and self.subscription_commitment == False:\n msg = msg + ' - Commitment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_payment == False:\n msg = msg + ' - Upfront Payment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_turnover == False:\n msg = msg + ' - Upfront turnover \\n'\n if self.stage_id.id in (8,16) and self.subsciption_part_condition == False:\n msg = msg + ' - Particular Conditions \\n'\n\n ## stage activated and only individuals\n if self.stage_id.id == 16 and self.doctor_admin == False:\n msg = msg + ' - Doctor AdminID \\n'\n ### stage account managment\n if self.stage_id.id == 16 and 
self.first_email == False:\n msg = msg + ' - 1st email (activation) \\n'\n if self.stage_id.id == 16 and self.service_completed == False:\n msg = msg + ' - Services completed \\n'\n if self.stage_id.id == 16 and self.price_completed == False:\n msg = msg + ' - Prices completed \\n'\n if self.stage_id.id == 16 and self.cv_completed == False:\n msg = msg + ' - CV/experiences completed \\n'\n if self.stage_id.id == 16 and self.duration_completed == False:\n msg = msg + ' - Duration completed \\n'\n if self.stage_id.id == 16 and self.personal_message_completed == False:\n msg = msg + ' - Personal message completed \\n'\n if self.stage_id.id == 16 and self.profile_picture == False:\n msg = msg + ' - Profile picture \\n'\n if self.stage_id.id == 16 and self.photo_practice == False:\n msg = msg + ' - Photo Practice \\n'\n if self.stage_id.id == 16 and self.marketing_kit == False:\n msg = msg + ' - Marketing kit \\n'\n if self.stage_id.id == 16 and self.synchronisation_completed == False:\n msg = msg + ' - Synchronization \\n'\n if self.stage_id.id == 16 and self.backlink == False:\n msg = msg + ' - Backlink \\n'\n if self.stage_id.id == 16 and self.google_profile == False:\n msg = msg + ' - Google profile \\n'\n if self.stage_id.id == 16 and self.voicemail == False:\n msg = msg + ' - Voicemail \\n'\n if self.stage_id.id == 16 and self.mail_signature == False:\n msg = msg + ' - Mail signature \\n'\n if self.stage_id.id == 16 and self.email_to_patient == False:\n msg = msg + ' - Email to patient \\n'\n if self.stage_id.id == 16 and self.translation == False:\n msg = msg + ' - Translation \\n'\n if self.stage_id.id == 16 and self.business_card == False:\n msg = msg + ' - Manuel Sent \\n'\n if self.stage_id.id == 16 and self.manuel_sent == False:\n msg = msg + ' - Business cards \\n'\n if self.stage_id.id == 16 and self.widget == False:\n msg = msg + ' - Widget \\n'\n if self.stage_id.id == 16 and self.voice_mail == False:\n msg = msg + ' - Voicemail + email signature \\n'\n if self.stage_id.id == 16 and self.website_ok == False:\n msg = msg + ' - Website \\n'\n if self.stage_id.id == 16 and self.customer_service_number == False:\n msg = msg + ' - Customer service number on google profile \\n'\n if self.stage_id.id == 16 and self.website_backlink == False:\n msg = msg + ' - Backlink on website \\n'\n\n ## Lost paying, tab lost\n if self.stage_id.id == 17 and self.date_lost == False:\n msg = msg + ' - Lost Date \\n'\n if self.stage_id.id == 17 and self.reason_lost == False:\n msg = msg + ' - Lost Reason \\n'\n\n\n\n\n ##\n if msg:\n raise ValidationError('To move to this step you first need to fill those fields : \\n' + msg)\n\n return {}", "def increment_stage_in_forms(forms):\n for index, form in enumerate(forms.all(), 1):\n form.stage = index\n form.save(update_fields=['stage'])", "def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n record_ids = context.get('active_ids', False)\n res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)\n\n if record_ids:\n opp_ids = []\n opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)\n for opp in opps:\n opp_ids.append(opp.id)\n if 'lead_ids' in fields:\n res.update({'lead_ids': opp_ids})\n\n return res", "def stage(self, id, title = None):\r\n if id != self.lastStage:\r\n if title:\r\n REGISTRY['CIF'].write('Step %s: %s' % (id, title))\r\n self.callstack.setStage(id,title)\r\n else:\r\n REGISTRY['CIF'].write('Step %s' % id)\r\n self.callstack.setStage(id,\" \")\r\n \r\n 
if self.stepByStep:\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )\r\n self.lastStage = id", "def setup(self, stage: Optional[str] = None) -> None:", "def _stage(self):\n\n pass", "def stage(self, stage: osbuild.Stage):", "def _read_group_stage_ids(self, stages, domain, order):\n stage_ids = self.env['salon.stage'].search([])\n return stage_ids", "def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0):\n for vdi_uuid in vdi_uuids:\n source = os.path.join(sr_path, \"%s.vhd\" % vdi_uuid)\n link_name = os.path.join(staging_path, \"%d.vhd\" % seq_num)\n _link(source, link_name)\n seq_num += 1", "def update_tracking_tool(self, new):\n print(f\"Update tracking_tool in preproc/reg stages to {new}\")\n self.stages[\"Preprocessing\"].config.tracking_tool = new\n self.stages[\"Registration\"].config.tracking_tool = new", "def step_workflow(self):\n from odoo.addons.vneuron_workflow_odoo import workflow\n for res_id in self.ids:\n workflow.trg_write(self._uid, self._name, res_id, self._cr)\n return True", "def update_preprocessing_act(self, new):\n print(f\"Update act_tracking in preproc/reg stages to {new}\")\n self.stages[\"Preprocessing\"].config.act_tracking = new\n self.stages[\"Registration\"].config.act_tracking = new\n if not new:\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = False\n self.stages[\"Registration\"].config.gmwmi_seeding = False", "def stage(self, stage):\n self._stage = stage\n self._layer = Sdf.Layer.CreateAnonymous()\n self._stage.GetSessionLayer().subLayerPaths.append(self._layer.identifier)", "def set_stage(stage):\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"w\")\n f.write(\"%s\\n\" % stage)\n f.close()\n logger.debug(\"set stage: %s\" % (stage))\n except:\n raise AssertionError(\"Unable to save setup/teardown stage! 
%s\" % (sys.exc_info()[1]))\n return stage", "def stage(self):\n pass", "def expand_sdf(stages, context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n transform = only_transform(stage.transforms)\n if transform.spec.urn == common_urns.primitives.PAR_DO.urn:\n\n pardo_payload = proto_utils.parse_Bytes(\n transform.spec.payload, beam_runner_api_pb2.ParDoPayload)\n\n if pardo_payload.restriction_coder_id:\n\n def copy_like(protos, original, suffix='_copy', **kwargs):\n if isinstance(original, str):\n key = original\n original = protos[original]\n else:\n key = 'component'\n new_id = unique_name(protos, key + suffix)\n protos[new_id].CopyFrom(original)\n proto = protos[new_id]\n for name, value in kwargs.items():\n if isinstance(value, dict):\n getattr(proto, name).clear()\n getattr(proto, name).update(value)\n elif isinstance(value, list):\n del getattr(proto, name)[:]\n getattr(proto, name).extend(value)\n elif name == 'urn':\n proto.spec.urn = value\n elif name == 'payload':\n proto.spec.payload = value\n else:\n setattr(proto, name, value)\n if 'unique_name' not in kwargs and hasattr(proto, 'unique_name'):\n proto.unique_name = unique_name(\n {p.unique_name\n for p in protos.values()},\n original.unique_name + suffix)\n return new_id\n\n def make_stage(base_stage, transform_id, extra_must_follow=()):\n # type: (Stage, str, Iterable[Stage]) -> Stage\n transform = context.components.transforms[transform_id]\n return Stage(\n transform.unique_name, [transform],\n base_stage.downstream_side_inputs,\n union(base_stage.must_follow, frozenset(extra_must_follow)),\n parent=base_stage.name,\n environment=base_stage.environment)\n\n main_input_tag = only_element(\n tag for tag in transform.inputs.keys()\n if tag not in pardo_payload.side_inputs)\n main_input_id = transform.inputs[main_input_tag]\n element_coder_id = context.components.pcollections[\n main_input_id].coder_id\n # Tuple[element, restriction]\n paired_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n element_coder_id, pardo_payload.restriction_coder_id\n ]))\n # Tuple[Tuple[element, restriction], double]\n sized_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n paired_coder_id,\n context.add_or_get_coder_id(\n # context can be None here only because FloatCoder does\n # not have components\n coders.FloatCoder().to_runner_api(None), # type: ignore\n 'doubles_coder')\n ]))\n\n paired_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_paired',\n coder_id=paired_coder_id)\n pair_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/PairWithRestriction',\n urn=common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,\n outputs={'out': paired_pcoll_id})\n\n split_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_split',\n coder_id=sized_coder_id)\n split_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/SplitAndSizeRestriction',\n urn=common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,\n inputs=dict(transform.inputs, **{main_input_tag: paired_pcoll_id}),\n outputs={'out': split_pcoll_id})\n\n reshuffle_stage = None\n if common_urns.composites.RESHUFFLE.urn in context.known_runner_urns:\n 
reshuffle_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_reshuffle',\n coder_id=sized_coder_id)\n reshuffle_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Reshuffle',\n urn=common_urns.composites.RESHUFFLE.urn,\n payload=b'',\n inputs=dict(transform.inputs, **{main_input_tag: split_pcoll_id}),\n outputs={'out': reshuffle_pcoll_id})\n reshuffle_stage = make_stage(stage, reshuffle_transform_id)\n else:\n reshuffle_pcoll_id = split_pcoll_id\n reshuffle_transform_id = None\n\n if context.is_drain:\n truncate_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_truncate_restriction',\n coder_id=sized_coder_id)\n # Lengthprefix the truncate output.\n context.length_prefix_pcoll_coders(truncate_pcoll_id)\n truncate_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/TruncateAndSizeRestriction',\n urn=common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}),\n outputs={'out': truncate_pcoll_id})\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: truncate_pcoll_id}))\n else:\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}))\n\n yield make_stage(stage, pair_transform_id)\n split_stage = make_stage(stage, split_transform_id)\n yield split_stage\n if reshuffle_stage:\n yield reshuffle_stage\n if context.is_drain:\n yield make_stage(\n stage, truncate_transform_id, extra_must_follow=[split_stage])\n yield make_stage(stage, process_transform_id)\n else:\n yield make_stage(\n stage, process_transform_id, extra_must_follow=[split_stage])\n\n else:\n yield stage\n\n else:\n yield stage", "def stage_set_send_note(self, cr, uid, ids, stage_id, context=None):\n stage_name = self.pool.get('crm.case.stage').name_get(cr, uid, [stage_id], context=context)[0][1]\n return self.message_post(cr, uid, ids, body= _(\"Stage changed to <b>%s</b>.\") % (stage_name), context=context)", "def stage(self, stage_id):\r\n return pipelines.Stage(self, stage_id)", "def stages(self, stages):\n if stages is None:\n self._stages = None\n else:\n self._stages = stages if isinstance(stages, list) else [stages] * len(self.pidevice.allaxes)\n debug('ControllerStartup.stages = %s', itemstostr(self._stages))", "def step_impl_the_ru_is_set_to(context, business_id):\n context.bdd_helper.message_data[\"business_id\"] = business_id", "def create_stage(self, ApiId: str, StageName: str, AccessLogSettings: Dict = None, ClientCertificateId: str = None, DefaultRouteSettings: Dict = None, DeploymentId: str = None, Description: str = None, RouteSettings: Dict = None, StageVariables: Dict = None) -> Dict:\n pass", "def on12Lead(self, event): # wxGlade: DAQPanel.<event_handler>\n CreateDialog2 = Lead12Dialog2(self,self)\n CreateDialog2.ShowModal()", "def prepareFinishSlot(self):\r\n \r\n self.lockIndex = self._wizard.targetIndexes[0]\r\n self._targetRepositoryModel.lock([self.lockIndex])", "def setValuesInStep(\n self, stepName: str, 
interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def Move_Stage(self):\n for i in range(3):\n if self.set_pos[i] == 0:\n continue\n print \"Moving stage %s by %s steps\\n\"%(self.POS_NAME[i], self.set_pos[i])\n self.ser.write('F,C'+self.STEPPER_NAME[i]+str(self.set_pos[i])+',R')\n time.sleep(0.5)\n time.sleep(0.5)\n return", "def set_fill_stages(self: _SelfType, val: Tuple[str]) -> _SelfType:\n self._fill_stages = val\n return self", "def setup_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.setup()", "def test_workflows_id_replace_post(self):\n pass", "def post_stage(self):\n\n\t\tif self.stage == self.stages.declarations:\n\t\t\t# Prepare for output waveform generators.\n\t\t\tfor output in [var for var, type in self.variables.items() if type == 'output']:\n\t\t\t\tself.generators[output] = None\n\t\t\t\tself.waveforms[output] = None\n\n\t\t\t# Generate labels for all necessary values.\n\t\t\tself.all_values = set()\n\t\t\tfor name, type in self.variables.items():\n\t\t\t\tif type == 'pulse':\n\t\t\t\t\tfor attr in ['amplitude', 'length', 'shape']:\n\t\t\t\t\t\tself.all_values.add((name, attr))\n\t\t\t\telif type == 'acq_marker':\n\t\t\t\t\tfor attr in ['marker_num', 'output']:\n\t\t\t\t\t\tself.all_values.add((name, attr))\n\t\t\t\telif type != 'output':\n\t\t\t\t\tself.all_values.add((name,))\n\t\telif self.stage == self.stages.waveforms:\n\t\t\t# Finalize waveform creation.\n\t\t\tfor output in self.generators:\n\t\t\t\tself.waveforms[output] = self.generators[output].waveform", "def advance_stage(self):\n if self.stage == 0:\n self.curr_i = self.I\n elif self.stage == 1:\n self.curr_d = self.D\n elif self.stage == 2:\n self.curr_r == self.R", "def parse_parliament_steps(self, response):\n inquiry_item = response.meta['inquiry_item']\n\n phases = INQUIRY.PHASES.xt(response)\n\n for phase in phases:\n # Create phase if we don't have it yet\n phase_item, created = Phase.objects.get_or_create(\n title=phase['title'])\n if created:\n log.msg(u\"Created Phase {}\".format(\n green(u'[{}]'.format(phase_item.title))),level=log.DEBUG)\n\n # Create steps\n for step in phase['steps']:\n step_item, created = Step.objects.update_or_create(\n title=step['title']['text'],\n sortkey=step['sortkey'],\n date=step['date'],\n protocol_url=step['protocol_url'],\n law=inquiry_item,\n phase=phase_item,\n source_link=response.url\n )\n step_item.save()\n if created:\n log.msg(u\"Created Step {}\".format(\n green(u'[{}]'.format(step_item.title))),level=log.DEBUG)\n\n # Save statements for this step, if applicable\n if 'statements' in step['title']:\n for stmnt in step['title']['statements']:\n # Find the person\n pq = Person.objects.filter(\n source_link__endswith=stmnt['person_source_link'])\n if pq.exists() and pq.count() == 1:\n person_item = pq.first()\n st_data = {\n 'speech_type': stmnt['statement_type'],\n 'protocol_url': stmnt['protocol_link']\n }\n st_item, st_created = Statement.objects.update_or_create(\n index=stmnt['index'],\n person=person_item,\n step=step_item,\n defaults=st_data)\n if st_created:\n log.msg(u\"Created Statement by {} on {}\".format(\n green(\n u'[{}]'.format(person_item.full_name)),\n step_item.date),level=log.DEBUG)\n else:\n log.msg(u\"Updated Statement by {} on {}\".format(\n green(\n u'[{}]'.format(person_item.full_name)),\n step_item.date),level=log.DEBUG)\n else:\n # We can't save statements if we can't find 
the\n # Person\n self.logger.warning(\n red(u\"Skipping Statement by {}: Person with source_link {} does{} exist{}\").format(\n green(\n u'[{}]'.format(stmnt['person_name'])),\n blue(\n \"[{}]\".format(stmnt['person_source_link'])),\n red(\"{}\").format(\n \"\" if pq.exists() else \" not\"),\n \"\" if pq.count() > 1 else \", but {} persons matching found!\".format(\n pq.count())\n ))\n continue", "def set_site_id(self):\n self.site_id = entities.sites['next id']\n entities.sites['object list'].append(self)\n entities.sites['next id'] += 1", "def define_custom_mapping(self, custom_last_stage):\n # start by disabling all stages\n for stage in self.ordered_stage_list:\n self.stages[stage].enabled = False\n # enable until selected one\n for stage in self.ordered_stage_list:\n print(\"Enable stage : %s\" % stage)\n self.stages[stage].enabled = True\n if stage == custom_last_stage:\n break", "def set_context(self, objects, data, ids, report_type=None):\n #new_ids = data['form']['chart_account_id']\n\n # account partner memoizer\n # Reading form\n main_filter = self._get_form_param('filter', data, default='filter_no')\n filter_selection_line = self._get_form_param('selection_line', data)\n #target_move = self._get_form_param('target_move', data, default='all')\n start_date = self._get_form_param('date_from', data)\n stop_date = self._get_form_param('date_to', data)\n #start_period = self._get_form_param('period_from', data)\n #stop_period = self._get_form_param('period_to', data)\n start_period = self.get_start_period_br(data)\n stop_period = self.get_end_period_br(data)\n partner_ids = self._get_form_param('partner_ids', data)\n contract_ids = self._get_form_param('contract_ids', data)\n analytic_journal_ids = self._get_form_param('analytic_journal_ids', data)\n show_cost = self._get_form_param('cost', data)\n show_price = self._get_form_param('price', data)\n detail_by = self._get_form_param('detail_by', data)\n #detail_by = 'journal' # da fare su wizard -> possibile scegliere anche x data\n '''>>>>>>>>>>>>>\n fiscalyear = self.get_fiscalyear_br(data)\n result_selection = self._get_form_param('result_selection', data)\n chart_account = self._get_chart_account_id_br(data)\n \n if main_filter == 'filter_no' and fiscalyear:\n start_period = self.get_first_fiscalyear_period(fiscalyear)\n stop_period = self.get_last_fiscalyear_period(fiscalyear)\n <<<<<<<<<'''\n # Retrieving accounts\n '''>>>>>>>>>>\n filter_type = ('payable', 'receivable')\n if result_selection == 'customer':\n filter_type = ('receivable',)\n if result_selection == 'supplier':\n filter_type = ('payable',)\n <<<<<<<<<'''\n\n #contracts = self.get_all_analytic_accounts(new_ids, exclude_type=['view', 'template'],\n # only_type=filter_type)\n contracts = self.get_all_analytic_accounts(contract_ids, partner_ids, exclude_type=['view', 'template'], \n only_type=None)\n \n if not contracts:\n raise osv.except_osv(_('Error'), _('No contracts to print.'))\n\n if main_filter == 'filter_date':\n start = start_date\n stop = stop_date\n else:\n start = start_period\n stop = stop_period\n\n # when the opening period is included in the selected range of periods and\n # the opening period contains move lines, we must not compute the initial balance from previous periods\n # but only display the move lines of the opening period\n # we identify them as:\n # - 'initial_balance' means compute the sums of move lines from previous periods\n # - 'opening_balance' means display the move lines of the opening period\n '''>>>>>>>>>>>\n init_balance = 
main_filter in ('filter_no', 'filter_period')\n initial_balance_mode = init_balance and self._get_initial_balance_mode(start) or False\n\n initial_balance_lines = {}\n if initial_balance_mode == 'initial_balance':\n initial_balance_lines = self._compute_partners_initial_balances(contracts,\n start_period,\n partner_filter=partner_ids,\n exclude_reconcile=False)\n <<<<<<<'''\n ledger_lines = self._compute_contract_ledger_lines(contracts,\n main_filter,\n #target_move,\n start,\n stop,\n partner_filter=partner_ids,\n analytic_journal_filter=analytic_journal_ids, \n filter_selection_line=filter_selection_line, \n detail_by=detail_by)\n objects = []\n for contract in self.pool.get('account.analytic.account').browse(self.cursor, self.uid, contracts):\n contract.ledger_lines = ledger_lines.get(contract.id, {})\n \n ledg_lines_pids = ledger_lines.get(contract.id, {}).keys()\n if detail_by == 'journal':\n contract.elements_order = self._order_journals(ledg_lines_pids)\n #contract.elements_order = self._order_partners(ledg_lines_pids, init_bal_lines_pids)\n #contract.elements_order = ledg_lines_pids\n else:\n contract.elements_order = self._order_dates(ledg_lines_pids)\n objects.append(contract)\n \n self.localcontext.update({\n #'fiscalyear': fiscalyear,\n 'start_date': start_date,\n 'stop_date': stop_date,\n 'start_period': start_period,\n 'stop_period': stop_period,\n 'partner_ids': partner_ids,\n #'chart_account': chart_account,\n #'initial_balance_mode': initial_balance_mode,\n })\n\n return super(ContractLedgerWebkit, self).set_context(objects, data, contract_ids,\n report_type=report_type)", "def set_stage_in_byblo_conf_file(filename, stage_id):\n with open(filename) as inf:\n lines = [x.strip() for x in inf.readlines()]\n stages = {\n 0: '', # run the entire Byblo pipeline\n 1: ['--stages', 'enumerate,count,filter'], # run the first part only\n 2: ['--stages', 'allpairs,knn,unenumerate'] # run the second part only\n }\n\n # remove the current stages setting, may be multiple\n while True:\n try:\n index = lines.index('--stages')\n lines.pop(index)\n lines.pop(index)\n except ValueError:\n # '--stages' is not in list, nothing more to do\n break\n\n with open(filename, \"w\") as outf:\n for line in lines:\n outf.write(line)\n outf.write('\\n')\n for line in stages[stage_id]:\n outf.write(line)\n outf.write('\\n')", "def test_02_check_from_draft_to_done_with_steps(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n filter_done = self.create_filter_done(cr, uid)\r\n self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to open and check that responsible doen't change\"\"\"\r\n new_lead.write({'state': 'open'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'open')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to pending and check that responsible doen't change\"\"\"\r\n new_lead.write({'state': 'pending'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'pending')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to cancel and check that responsible doen't change\"\"\"\r\n 
new_lead.write({'state': 'cancel'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'cancel')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n \"\"\" change the state of new_lead to done and check that responsible doen't change \"\"\"\r\n new_lead.write({'state': 'done'})\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'done')\r\n self.assertEquals(new_lead.user_id.id, self.admin)\r\n self.delete_rules(cr, uid)", "def stepStarted(build, step):", "def stage(self, stage):\n if stage is None:\n raise ValueError(\"Invalid value for `stage`, must not be `None`\")\n allowed_values = [\"starting\", \"completed\", \"in_progress\"]\n if stage not in allowed_values:\n raise ValueError(\n \"Invalid value for `stage` ({0}), must be one of {1}\"\n .format(stage, allowed_values)\n )\n\n self._stage = stage", "def __startStage(self):\n try:\n self.stage = LIBS_2AxisStage(self.comval.get(), self.baudval.get(), self.startfile.get(), self.owd).start()\n except Exception as e:\n self.stagelabel.config(text='Could not start stage', fg='Red')\n print(e)\n self.window.after(5000, lambda : self.stagelabel.config(text='Stage Control Launcher', fg='Black'))", "def merge_action_stages(env):\n stage_draft = env.ref('mgmtsystem_action.stage_draft')\n stage_open = env.ref('mgmtsystem_action.stage_open')\n stage_close = env.ref('mgmtsystem_action.stage_close')\n\n old_stage_draft_id = env.ref('crm_claim.stage_claim1').id\n old_stage_open_id = env.ref('crm_claim.stage_claim5').id\n old_stage_close_id = env.ref('crm_claim.stage_claim2').id\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_draft_id)\n ]).write({'stage_id': stage_draft.id})\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_open_id)\n ]).write({'stage_id': stage_open.id})\n\n env['mgmtsystem.action'].search([\n ('stage_id', '=', old_stage_close_id)\n ]).write({'stage_id': stage_close.id})\n\n env['mgmtsystem.action.stage'].browse([\n old_stage_draft_id, old_stage_open_id, old_stage_close_id\n ]).unlink()", "def templatize(self):\n self.sanitize_ids()\n del self.steps[1:]\n self.current_step = None", "def _finish(self):\n self.stage = {}", "def commonWorkflow(context):\n setup = getToolByName(context, 'portal_setup')\n setup.runAllImportStepsFromProfile(PROFILE)\n portal_workflow = getToolByName(context, 'portal_workflow')\n portal_workflow.updateRoleMappings()", "def _patch_update_stages(\n self,\n stage_changes_list: list[dict[str, Any]],\n changed_fields: CHANGED_FIELDS_LIST_TYPE\n ) -> bool:\n stages: list[Stage] = []\n for change_info in stage_changes_list:\n stage_was_updated = False\n # Check if valid ID is provided and fetch stage if it exists.\n if 'id' not in change_info:\n self.abort(400, msg='Missing stage ID in stage updates')\n id = change_info['id']\n stage = Stage.get_by_id(id)\n if not stage:\n self.abort(400, msg=f'Stage not found for ID {id}')\n\n # Update stage fields.\n for field, field_type in api_specs.STAGE_FIELD_DATA_TYPES:\n if field not in change_info:\n continue\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(stage, field)\n new_value = change_info[field]['value']\n self._update_field_value(stage, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n\n # Update milestone fields.\n milestones = stage.milestones\n for field, field_type in api_specs.MILESTONESET_FIELD_DATA_TYPES:\n if field not in 
change_info:\n continue\n if milestones is None:\n milestones = MilestoneSet()\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(milestones, field)\n new_value = change_info[field]['value']\n self._update_field_value(milestones, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n stage.milestones = milestones\n\n if stage_was_updated:\n stages.append(stage)\n\n # Save all of the updates made.\n # Return a boolean representing if any changes were made to any stages.\n if stages:\n ndb.put_multi(stages)\n return True\n return False", "def form_valid(self, form):\n old_track = CaseTrack.objects.get(id=self.kwargs['pk'])\n old_track.done = True\n old_track.save()\n\n print self.kwargs['pk']\n self.object = form.save(commit=False)\n self.object.case = old_track.case\n self.object.user_from = old_track.user_to\n self.object.save()\n old_track.case.assigned = self.object.user_to\n old_track.case.save()\n\n\n return super(ModelFormMixin, self).form_valid(form)", "def RunStages(self):\n self._RunStage(build_stages.InitSDKStage)\n self.RunSetupBoard()\n self._RunStage(report_stages.RefreshPackageStatusStage)", "def change_view(self, request, object_id, form_url='', extra_context=None):\n section = models.Section.objects.filter(pk=object_id)\\\n .prefetch_related(\"facility__experiment\",\n \"participants\")\\\n .first()\n exp_id = section.facility.experiment.id\n # create bulk forms\n bulk_add_change_frm = create_bulk_add_change_form(request, exp_id)\n bulk_del_frm = create_bulk_delete_form(request)\n # attach site id and bulk forms to 'extra_context'\n extra_context = extra_context or {}\n extra_context['section_id'] = object_id\n extra_context[\"bulk_add_change_form\"] = bulk_add_change_frm\n extra_context['bulk_delete_form'] = bulk_del_frm\n # print extra_context\n return super(SectionAdmin, self).change_view(\n request, object_id, form_url, extra_context=extra_context)", "def save(self):\n self._initial_vms = self.selected_vms", "def _create_partner(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n lead = self.pool.get('crm.lead')\n lead_ids = context and context.get('active_ids') or []\n data = self.browse(cr, uid, ids, context=context)[0]\n partner_id = data.partner_id and data.partner_id.id or False\n partner_ids = lead.convert_partner(cr, uid, lead_ids, data.action, partner_id, context=context)\n return partner_ids[lead_ids[0]]", "def _update_stages(self, stages: Set[str], deployment_id: Optional[str]) -> Set[str]:\n prev_dep_ids = set()\n for stage in stages:\n # Collects previous deployment IDs to clean up\n response_get = cast(Dict, self._api_client.get_stage(restApiId=self._api_physical_id, stageName=stage))\n prev_dep_id = response_get.get(\"deploymentId\")\n if prev_dep_id:\n prev_dep_ids.add(cast(str, prev_dep_id))\n\n # Updates the stage with newest deployment\n LOG.debug(\"%sTrying to update the stage %s through client\", self.log_prefix, stage)\n response_upd = cast(\n Dict,\n self._api_client.update_stage(\n restApiId=self._api_physical_id,\n stageName=stage,\n patchOperations=[{\"op\": \"replace\", \"path\": \"/deploymentId\", \"value\": deployment_id}],\n ),\n )\n LOG.debug(\"%sUpdate Stage Result: %s\", self.log_prefix, response_upd)\n\n # Flushes the cache so that the changes are calleable\n self._api_client.flush_stage_cache(restApiId=self._api_physical_id, stageName=stage)\n 
self._api_client.flush_stage_authorizers_cache(restApiId=self._api_physical_id, stageName=stage)\n return prev_dep_ids", "def _prepare_first_step(self):\n if self.townhalls:\n self._game_info.player_start_location = self.townhalls.first.position\n self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()", "def _init_wizard(self, request):\n if self.id not in request.session:\n request.session[self.id] = WizardState(\n steps=self.base_steps[:], # Copies the list\n current_step=self.base_steps[0],\n form_data={})\n\n self.initialize(request, request.session[self.id])", "def _execute_stage(self, index, stage, stop):\n if stop.is_set():\n _LOGGER.info(\"Stopped pipeline on group %s\", self._group)\n return\n _LOGGER.info(\" -> Running stage '%s' on group %s\", stage, self._group)\n if stage.name == 'on':\n self._group.on = True\n elif stage.name == 'off':\n self._group.on = False\n elif stage.name == 'hue':\n self._group.hue = stage.args[0]\n elif stage.name == 'saturation':\n self._group.saturation = stage.args[0]\n elif stage.name == 'color':\n self._group.color = Color(*stage.args)\n elif stage.name == 'brightness':\n self._group.brightness = stage.args[0]\n elif stage.name == 'temperature':\n self._group.temperature = stage.args[0]\n elif stage.name == 'transition':\n self._group.transition(*stage.args, **stage.kwargs)\n elif stage.name == 'white':\n self._group.white()\n elif stage.name == 'white_up':\n self._group.white_up()\n elif stage.name == 'white_down':\n self._group.white_down()\n elif stage.name == 'red_up':\n self._group.red_up()\n elif stage.name == 'red_down':\n self._group.red_down()\n elif stage.name == 'green_up':\n self._group.green_up()\n elif stage.name == 'green_down':\n self._group.green_down()\n elif stage.name == 'blue_up':\n self._group.blue_up()\n elif stage.name == 'blue_down':\n self._group.blue_down()\n elif stage.name == 'night_light':\n self._group.night_light()\n elif stage.name == 'link':\n self._group.link()\n elif stage.name == 'unlink':\n self._group.unlink()\n elif stage.name == 'flash':\n self._group.flash(**stage.kwargs)\n elif stage.name == 'repeat':\n self._repeat(index, stage, stop)\n elif stage.name == 'wait':\n time.sleep(*stage.args)\n elif stage.name == 'callback':\n stage.args[0](*stage.args[1:], **stage.kwargs)", "def reset_stage():\n return set_stage('')", "def check_stage(self):\n\n #Initalize target and goal_stage to stage1 values\n target = 3\n goal_stage = 2\n\n # Set target and goal_stage if current stage is not 1\n if self.current_stage == 2:\n target = 7\n goal_stage = 3\n elif self.current_stage == 3:\n target = 11\n goal_stage = 4\n\n # Check the stage goals\n if self.die_a.value + self.die_b.value == target and not self.cheating:\n self.current_stage = goal_stage", "def test_user_can_change_viewed_steps(self):\n\n lesson_with_steps = Lesson.objects.annotate(num_steps=Count('steps')).filter(num_steps__gt=3).order_by('-num_steps')[0]\n lesson_steps_ids = lesson_with_steps.steps.values_list('id', flat=True)\n LessonState.objects.get_or_create( #enroll to lesson\n project_state=ProjectState.objects.get_or_create(user=self.global_user_1, project=lesson_with_steps.project)[0],\n lesson=lesson_with_steps\n )\n api_lesson_state_enrolled_url = reverse('api:project-lesson-state-detail', kwargs={\n 'project_pk': lesson_with_steps.project.id,\n 'lesson_pk': lesson_with_steps.id\n })\n\n # Make sure the user is enrolled to this lesson.\n self.client.force_authenticate(self.global_user_1)\n 
resp = self.client.get(api_lesson_state_enrolled_url)\n self.assertEqual(resp.status_code, 200)\n\n def patch_steps(viewed_steps):\n resp = self.client.patch(api_lesson_state_enrolled_url, json.dumps({\n \"viewedSteps\": viewed_steps\n }), content_type='application/json')\n invalid_viewed_steps = set(viewed_steps) - set(lesson_steps_ids)\n if not invalid_viewed_steps:\n self.assertEqual(resp.status_code, 200)\n self.assertSetEqual(set(resp.data['viewedSteps']), set(lesson_with_steps.steps.filter(pk__in=resp.data['viewedSteps']).values_list('id', flat=True))) #viewedSteps are all in lesson steps\n self.assertEqual(len(resp.data['viewedSteps']), len(set(viewed_steps))) #viewedSteps has no duplicates\n else:\n self.assertEqual(resp.status_code, 400)\n self.assertIn('viewedSteps', resp.data)\n\n patch_steps([lesson_steps_ids[i] for i in [0,0,0]])\n patch_steps(lesson_steps_ids[:1] + [None])\n patch_steps(lesson_steps_ids[:1] + list(Step.objects.exclude(lesson=self.lesson_obj).values_list('id', flat=True)[:1]))\n patch_steps(lesson_steps_ids[0:max(1, len(lesson_steps_ids)-2)])\n patch_steps(lesson_steps_ids[0:max(1, len(lesson_steps_ids)):2])", "def test_scenes_scene_id_activate_post(self):\n pass", "def start_missions(self, farm_shifter_bios=False):\n logger.info(f\"{self.mode_name}: {self.stages} stages available.\")\n if self.stages > 0:\n self.game.select_mode(self.mode_name)\n stage_1_num, stage_2_num = self.separate_stages\n logger.info(f\"{self.mode_name}: available stages: {stage_1_num} and {stage_2_num}\")\n if stage_1_num + stage_2_num > self.stages:\n logger.debug(f\"Stages count {self.stages} is lesser than available stages. Second stage is locked.\")\n stage_2_num = 0\n if stage_1_num > 0 or stage_2_num > 0:\n while stage_1_num > 0 and self.stages > 0:\n stage_1_num = self.start_stage(self.stage_1_ui, stage_1_num, farm_shifter_bios=farm_shifter_bios)\n self.stages = stage_1_num + stage_2_num\n if stage_2_num > 0 and self.game.is_main_menu():\n self.game.select_mode(self.mode_name)\n while stage_2_num > 0 and self.stages > 0:\n stage_2_num = self.start_stage(self.stage_2_ui, stage_2_num, farm_shifter_bios=farm_shifter_bios)\n self.stages = stage_1_num + stage_2_num\n logger.info(f\"No more stages for {self.mode_name}.\")", "def reactivate(self):\n self.write({'active': True, 'state': 'running'})\n STAGE = self.env['anytracker.stage']\n for ticket in self:\n starts = STAGE.search([('method_id', '=', ticket.method_id.id),\n ('progress', '=', 0)])\n if len(starts) != 1:\n raise except_orm(\n _('Configuration error !'),\n _('One and only one stage should have a 0% progress'))\n # write stage in a separate line to recompute progress & risk\n ticket.write({'stage_id': starts[0].id})\n self.recompute_parents()", "def start_missions(self, farm_shifter_bios=False):\n logger.info(f\"{self.mode_name}: {self.stages} stages available.\")\n if self.stages > 0:\n self.game.select_mode(self.mode_name)\n stage_num = self.stages\n while stage_num > 0:\n stage_num = self.start_stage(self.mode_label_ui, stage_num, farm_shifter_bios=farm_shifter_bios)\n logger.info(f\"No more stages for {self.mode_name}.\")", "def handle_partner_assignation(self, action='create', partner_id=False):\n partner_ids = {}\n for lead in self:\n if lead.partner_id:\n partner_ids[lead.id] = lead.partner_id.id\n continue\n if action == 'create':\n partner = lead._create_lead_partner()\n partner_id = partner.id\n partner.team_id = lead.team_id\n if partner_id:\n lead.partner_id = partner_id\n lead.published_customer = 
partner_id\n partner_ids[lead.id] = partner_id\n return partner_ids", "def handle_partner_assignation(self, action='create', partner_id=False):\n partner_ids = {}\n for lead in self:\n if lead.partner_id:\n partner_ids[lead.id] = lead.partner_id.id\n continue\n if action in ('create', 'nothing'):\n partner = lead._create_lead_partner()\n partner_id = partner.id\n partner.team_id = lead.team_id\n if partner_id:\n lead.partner_id = partner_id\n partner_ids[lead.id] = partner_id\n return partner_ids", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = self.dataset_class(path)", "def office_edit_process_view(request):\n authority_required = {'verified_volunteer'} # admin, verified_volunteer\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n\n try:\n if office_on_stage_found:\n # Update\n # Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if office_name is not False:\n office_on_stage.office_name = office_name\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n messages.add_message(request, messages.INFO, 'Office updated.')\n google_civic_election_id = office_on_stage.google_civic_election_id\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n office_on_stage.save()\n messages.add_message(request, messages.INFO, 'New office saved.')\n\n # Come back to the \"Create New Office\" page\n return HttpResponseRedirect(reverse('office:office_new', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n except Exception as e:\n 
handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office.')\n\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n \"?google_civic_election_id=\" + google_civic_election_id)", "def step(self):\n self.driver.step()", "def set_workflow_steps(self, steps_list):\n self._data_dict[self.KEY_WF_STEPS] = steps_list", "def install_legislative_process_integration(self):\n profile = 'profile-interlegis.portalmodelo.pl:default'\n setup_tool = api.portal.get_tool('portal_setup')\n setup_tool.runAllImportStepsFromProfile(profile)", "def return_fnc(self, cr, uid, ids, context=None):\n\n wf_service = netsvc.LocalService(\"workflow\")\n for rec in self.browse(cr, uid, ids, context=context):\n lines = []\n if rec.state == 'recieved':\n for line in rec.spares_ids:\n if line.recieved_quantity != 0:\n \n lines_dict = {\n 'name': line.product_id.name[:250],\n #'picking_id': picking_id,\n 'product_id': line.product_id.id,\n 'product_qty': line.recieved_quantity,\n 'product_uom': line.product_id.uom_id.id,\n 'product_uos_qty':line.recieved_quantity,\n 'product_uos': line.product_id.uom_id.id,\n 'location_id': rec.damage_line_id.department_id.location_dest_id.id ,\n 'location_dest_id': rec.damage_line_id.department_id.stock_location_id.id,\n #'exchange_line_id': line.id,\n 'tracking_id': False,\n 'state': 'draft',\n 'note': '',\n 'price_unit': line.product_id.standard_price or 0.0,\n 'move_type': 'one',\n } \n \n lines.append([0, False, lines_dict])\n\n if lines:\n piking_dict = {\n 'name': '/',\n #'origin': order.name,\n #'request': order.id,\n 'date': time.strftime('%Y-%m-%d'),\n 'type': 'in',\n 'state': 'draft',\n #'exchange_id': order.id,\n 'job_id': rec.id,\n 'maintenance': True,\n 'note': '',\n 'department_id':rec.damage_line_id.department_id.department_id.id,\n #'stock_journal_id':order.stock_journal_id and order.stock_journal_id.id,\n 'invoice_state': 'none',\n 'move_lines': lines,\n 'state': 'draft'\n }\n new_id = self.pool.get('stock.picking.in').create(cr, uid, piking_dict, context)\n wf_service.trg_validate(uid, 'stock.picking', new_id, 'button_confirm', cr)\n self.write(cr, uid, ids, {'state': 'return','picking_id':new_id})\n continue\n else:\n self.write(cr, uid, ids, {'state': 'canceled'})\n\n return True", "def switch_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.switch()", "def finishSlot(self):\r\n\r\n self._targetRepositoryModel.performImport(self._wizard.sourceIndexes, \r\n self.lockIndex, \r\n self._wizard.properties)", "def trip_started(self, id):\n self.current_trip.set_id(id)", "def go_to_record_home(self, obj_id):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/r/{}/view\".format(url, obj_id)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])", "def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)", "def _collect_stages(self) -> Set[str]:\n # Get the stage name associated with the previous deployment and update stage\n # Stage needs to be flushed so that new changes will be visible immediately\n api_resource = get_resource_by_id(self._stacks, ResourceIdentifier(self._api_identifier))\n 
stage_resources = get_resource_ids_by_type(self._stacks, AWS_APIGATEWAY_STAGE)\n deployment_resources = get_resource_ids_by_type(self._stacks, AWS_APIGATEWAY_DEPLOYMENT)\n\n stages = set()\n # If it is a SAM resource, get the StageName property\n if api_resource:\n if api_resource.get(\"Type\") == AWS_SERVERLESS_API:\n # The customer defined stage name\n stage_name = api_resource.get(\"Properties\", {}).get(\"StageName\")\n if stage_name:\n stages.add(cast(str, stage_name))\n\n # The stage called \"Stage\"\n if stage_name != \"Stage\":\n response_sta = cast(Dict, self._api_client.get_stages(restApiId=self._api_physical_id))\n for item in response_sta.get(\"item\"): # type: ignore\n if item.get(\"stageName\") == \"Stage\":\n stages.add(\"Stage\")\n\n # For both SAM and ApiGateway resource, check if any refs from stage resources\n for stage_resource in stage_resources:\n # RestApiId is a required field in stage\n stage_dict = get_resource_by_id(self._stacks, stage_resource)\n if not stage_dict:\n continue\n rest_api_id = stage_dict.get(\"Properties\", {}).get(\"RestApiId\")\n dep_id = stage_dict.get(\"Properties\", {}).get(\"DeploymentId\")\n # If the stage doesn't have a deployment associated then no need to update\n if dep_id is None:\n continue\n # If the stage's deployment ID is not static and the rest API ID matchs, then update\n for deployment_resource in deployment_resources:\n if deployment_resource.resource_iac_id == dep_id and rest_api_id == self._api_identifier:\n stages.add(cast(str, stage_dict.get(\"Properties\", {}).get(\"StageName\")))\n break\n\n return stages", "def get_context_data(self, **kwargs):\n context= super(AddSprintView,self).get_context_data(**kwargs)\n\n context['current_action'] = 'Agregar'\n context['formset'] = self.TeamMemberInlineFormSet(self.request.POST if self.request.method == 'POST' else None)\n self.__filtrar_formset__(context['formset'])\n return context", "def clone_update(self, *args):\r\n if self.advancedProjType.get():\r\n ##print self.advBrdSelect.get()\r\n userBoard = self.localSDK.brdList.index(self.advBrdSelect.get()) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n self.localSDK.get_projects(self.newProj.board[1])\r\n try:\r\n self.widgetList[36]['values'] = self.localSDK.demoLst\r\n self.widgetList[36].current(0)\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])", "def before_update(mapper, conn, target):\n if not target.id_:\n dataset = ObjectNumber.parse(target.d_id)\n target.id_ = str(PartitionNumber(dataset, target.sequence_id))", "def setup_next_dialog(self, dialog_set, action_data):\n\t\tsource = self.get_source()\n\t\tdialog_key = action_data[1]\n\t\taction = GameAction(source.__class__.change_current_dialog, 0, source, dialog_key)\n\t\tdialog_set[-1].add_next_action(action)\n\t\treturn dialog_set", "def post_hook(self):\n self.mk_rg1()\n self.mk_rg2()\n self.mk_rg3()", "def _action_done(self):\n for ml in self:\n if ml.lot_name_in == ml.lot_name_repeat:\n if ml.lot_id and ml.lot_name_in and ml.product_id.tracking == 'serial':\n ml.lot_id.name = ml.lot_name_in\n ml.lot_id.lot_name_chasis = ml.lot_name\n ml.lot_id.edicion = ml.move_id.edicion\n ml.lot_id.colorinterno = ml.move_id.colorinterno.id\n ml.lot_id.colorexterno = ml.move_id.colorexterno.id\n ml.lot_id.n_llaves = ml.n_llaves\n ml.lot_id.cant_llaves = ml.cant_llaves\n ml.lot_id.n_caja = ml.n_caja\n ml.lot_id.mot_desarmada = ml.mot_desarmada\n ml.lot_name = ml.lot_name_in\n ml.lot_id.embarque = ml.picking_id.embarque\n for incidence in 
ml.incidencia:\n ml.lot_id.incidencia = [(4, incidence.id)]\n for incid in ml.lot_id.incidencia:\n incid.lot_id = ml.lot_id.id\n else:\n raise ValidationError(_(\n 'El numero de chasis \"%s\" no esta igual que el repetido') % ml.lot_name_in)\n\n super(StockMoveLine, self)._action_done()", "def set_view_steps(self, steps_list):\n self._data_dict[self.KEY_VIEW_STEPS] = steps_list", "def stage_and_commit(self):\n self.stage_all()\n self.commit()", "def create_lead_test_1(self, cr, uid, context=None):\r\n return self.model.create(cr, uid, {\r\n 'name': \"Lead Test 1\",\r\n 'user_id': self.admin,\r\n }, context=context)", "def update_start_values(self, params):\n allparwids = {}\n for comp in self.fit_components.values():\n if comp.usebox is not None and comp.usebox.IsChecked():\n for name, parwids in comp.parwids.items():\n allparwids[name] = parwids\n\n for pname, par in params.items():\n if pname in allparwids:\n allparwids[pname].value.SetValue(par.value)", "def set_virtual_stage(self, virtual_stage: int) -> None:\n self.virtual_stage = virtual_stage", "def steps(self, steps):\n\n self._steps = steps", "def add_stage(self, stage_name: str) -> \"CdkStage\":\n return jsii.invoke(self, \"addStage\", [stage_name])", "def _do_upgrade_step(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def workflow_step(self, workflow_step):\n\n self._workflow_step = workflow_step", "def build_from_step_list(self, step_list: list):\n for step in step_list:\n self.run[step.name] = step", "def add_current_parameters(self, stepname):\n self.stepnames.append(stepname)\n if stepname in self.default:\n self.current.append(deepcopy(self.default[stepname]))\n else:\n self.current.append(ParameterSet())", "def do_step(self) -> None:", "def test_workflows_id_patch(self):\n pass", "def step():\n \n step = models.Step(action=u\"goto\", target=u\"http://www.joesfunerals.com\")", "def _do_upgrade_step(self, step):\n request = self.layer['request']\n request.form['profile_id'] = PROFILE\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "def switch_virtual_stage(self, virtual_stage: int) -> None:\n old_stage = self.virtual_stage\n try:\n self.set_virtual_stage(virtual_stage)\n yield\n finally:\n self.set_virtual_stage(old_stage)", "def set_environment(request, run_id):\n run = get_object_or_404(model.Run, pk=run_id)\n\n try:\n current = int(request.GET.get(\"environment\", None))\n except (TypeError, ValueError):\n current = None\n try:\n build = int(request.GET.get(\"build\", None))\n except (TypeError, ValueError):\n build = None\n\n form_kwargs = {\n \"current\": current,\n \"environments\": run.environments.all().select_related()\n }\n\n # the run could be an individual, or a series.\n # if it's a series, we need to use the right form\n # that will prompt them for a build number.\n # if a run for this series exists with that build number\n # already, then use that id, otherwise clone this run,\n # set it active and\n # create a new one with the build id set.\n if run.is_series:\n form_kwargs[\"run\"] = run\n form_kwargs[\"build\"] = build\n form_kwargs[\"user\"] = request.user\n form_class = EnvironmentBuildSelectionForm\n else:\n form_class = EnvironmentSelectionForm\n\n if request.method == \"POST\":\n # user responding to this form with their selections\n # they may or may not be valid\n form = form_class(request.POST, **form_kwargs)\n\n if 
form.is_valid():\n result = form.save()\n\n # @@@ Carl: seems like there may be a better pattern for this than\n # what I'm doing here. Any ideas?\n try:\n # If a runid WAS returned, then that would be the new run\n # created for the build of the runseries.\n envid, runid = result\n except TypeError:\n # if no runid was returned, then this is not a runseries, and\n # we should just use the run id from this run.\n envid = result\n runid = run_id\n return redirect(\"runtests_run\", run_id=runid, env_id=envid)\n else:\n # run just specified, prompt user for env and possibly build\n form = form_class(**form_kwargs)\n\n return TemplateResponse(\n request,\n \"runtests/environment.html\",\n {\n \"envform\": form,\n \"run\": run,\n }\n )", "def test_handle_create_as_team_lead(self, mock_uuid):\r\n mock_uuid.uuid4.return_value = \"1\"\r\n team = Team(\"GTID\", \"team-name\", \"name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n project = Project(\"GTID\", [\"repo-link\"])\r\n project_attach = [project.get_attachment()]\r\n with self.app.app_context():\r\n resp, code = \\\r\n self.testcommand.handle(\"project create repo-link team-name\",\r\n user)\r\n expect = {'attachments': project_attach}\r\n self.assertDictEqual(resp, expect)\r\n self.assertEqual(code, 200)\r\n self.mock_facade.query.assert_called_once_with(Team,\r\n [(\"github_team_name\",\r\n \"team-name\")])\r\n self.mock_facade.store.assert_called_once_with(project)", "def get_initial(self):\n initial = super(SkillsView, self).get_initial()\n ai = get_ai(\n self.request.session.get('token', False),\n self.kwargs['aiid']\n )\n initial = {\n 'skills': ai['linked_bots']\n }\n return initial" ]
[ "0.58470714", "0.55518115", "0.5546699", "0.5379987", "0.53570205", "0.5289893", "0.5229131", "0.5181128", "0.5130612", "0.5128466", "0.50979745", "0.5055515", "0.505321", "0.50443345", "0.50421935", "0.50367343", "0.503288", "0.49982783", "0.497238", "0.49644795", "0.49617615", "0.49610013", "0.49151117", "0.4867824", "0.4867824", "0.4816936", "0.48104182", "0.4807522", "0.48037204", "0.47937816", "0.4780702", "0.47791693", "0.47723955", "0.4761293", "0.47594255", "0.4721381", "0.4714676", "0.47141972", "0.47019705", "0.46960598", "0.46930104", "0.46854302", "0.46735677", "0.46728426", "0.46694246", "0.46689594", "0.46563312", "0.4652285", "0.46501204", "0.464455", "0.46378177", "0.46173722", "0.4616464", "0.46107703", "0.4609571", "0.46064794", "0.46050146", "0.4594014", "0.4581285", "0.45785064", "0.45707083", "0.45674613", "0.45654786", "0.45620573", "0.45558563", "0.454098", "0.45407447", "0.45355543", "0.4527916", "0.45201346", "0.45195475", "0.4510655", "0.45090595", "0.45059103", "0.450394", "0.44942954", "0.44812503", "0.4479629", "0.4479209", "0.44741726", "0.44708496", "0.44658154", "0.44627652", "0.44613886", "0.44588172", "0.44543615", "0.4438866", "0.44381768", "0.44341248", "0.4432642", "0.443034", "0.44280472", "0.44229212", "0.44224074", "0.44200382", "0.4413568", "0.4412257", "0.44101608", "0.44090226", "0.440878" ]
0.64389664
0
Return json format of results
def display_json(self, results, verbose): print(json.dumps(results))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def format_results(results):\n parsed = json.loads(results.to_json())\n json_result = json.dumps(parsed, indent=4)\n return json_result", "def format_parsed_json(results):\n json_result = json.dumps(results, indent=4)\n return json_result", "def get_json_accessibility_result(self):\n axe_result = json.dumps(self.results, indent = 3)\n logger.info(axe_result)\n return axe_result", "def as_json(self):", "def print_json(results):\r\n import json\r\n stats = calc_stats(results)\r\n print(json.dumps(stats._asdict()))", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def result(self):\n if self.__json:\n return self.__json[\"result\"]\n else:\n return {}", "def stat_ret():\n count_am = storage.count(\"Amenity\")\n count_ct = storage.count(\"City\")\n count_pl = storage.count(\"Place\")\n count_re = storage.count(\"Review\")\n count_st = storage.count(\"State\")\n count_us = storage.count(\"User\")\n return jsonify({\"amenities\": count_am,\n \"cities\": count_ct,\n \"places\": count_pl,\n \"reviews\": count_re,\n \"states\": count_st,\n \"users\": count_us})", "def to_json(self, exclude_vectors=True):\n json_repr = vars(self)\n json_repr[\"results\"] = [\n r.to_json(exclude_vectors=exclude_vectors) for r in json_repr[\"results\"]]\n return json_repr", "def parse_query_results(self):\n # TODO: nicely parsed needs defining; may depend on query\n return self.json_result", "def json_all_builder(self, customer_count, invoice_count, invl_count ):\n json_result = '{\\n'\n json_result += '\\t \"_results\":[\\n'\n json_result += '\\t\\t{ \"customer_count\": \"' + str(customer_count)\n json_result += ', \"invoice_count\": \"' + str(invoice_count)\n json_result += ', \"invl_count\": \"' + str(invl_count)\n json_result += '}\\n'\n json_result += '\\n\\t]\\n}'\n return json_result", "def search_json(request):\n query = request.GET.get('q')\n books = []\n authors = []\n sections = []\n if len(query) >= 3:\n for book in Book.objects.filter(title__icontains=query):\n books.append({\n 'title': book.title,\n 'url': book.get_absolute_url(),\n })\n for author in Author.objects.filter(name__icontains=query):\n authors.append({\n 'title': author.name,\n 'url': author.get_absolute_url(),\n })\n for section in Section.objects.filter(title__icontains=query):\n sections.append({\n 'title': section.title,\n 'url': section.get_absolute_url(),\n })\n\n return JsonResponse({\n 'results': {\n 'books': {\n 'name': 'Books',\n 'results': books,\n },\n 'authors': {\n 'name': 'Authors',\n 'results': authors,\n },\n 'sections': {\n 'name': 'Sections',\n 'results': sections,\n },\n }\n })", "def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n 
record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')", "def result(request):\n\n books = list()\n\n try:\n query = request.POST.get('query')\n \n \n qs = Response.response_front(query)\n\n for book in qs:\n books.append(qs)\n\n result = {\n 'picture':Response.build(books[0]['picture']),\n 'title':books[0]['title']\n }\n \n return JsonResponse(result,safe=False)\n except:\n print('error')", "def get_json(self):\n json_item = {\"id: \": self.id,\n \"question: \": self.question,\n \"documents: \": self.documents,\n \"document_ids: \": self.document_ids,\n \"gold answers: \": self.gold}\n return json_item", "def make_json(result):\n new_result = result.to_dict()\n json_result = json.dumps(new_result, indent=4)\n return json_result", "def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)", "def results(request):\n q = {k:v for k,v in request.GET.iteritems()}\n\n # q = request.GET.getlist('h')\n # if q is None:\n # return JsonResponse({'Error':'provide query data with e.g. /?query={}'})\n\n # allow for selection based on result type\n thetype = q.pop('type',None)\n if thetype is not None:\n q['hb_taskname'] = q.get('hbtaskname',False) or tasks_that_make(thetype)\n\n rr = models.HBTask.objects.filter(status__gt=models.HBTask.NO_STATUS,**q)\n\n if rr:\n res = defaultdict(list)\n for r in rr:\n res[r.resulttype].append(r)\n\n res2 = {}\n for k in res.keys():\n res2[k] = [r.description for r in res[k]]\n else:\n res2 = None\n\n return JsonResponse( {'results':res2} )", "def json_friendly(self):", "def getResults():", "def to_json(self):\n return requests.get(self.__url).json()", "def output_result(self):\n output = {}\n output['draw'] = str(int(self.params['draw']))\n output['recordsTotal'] = str(self.cardinality)\n output['recordsFiltered'] = str(self.cardinality_filtered)\n if self.error:\n output['error'] = self.error\n return output\n\n output['data'] = self.results\n for k, v in self.yadcf_params:\n output[k] = v\n return output", "def to_json(self):\n pass", "def print_json(res, ctx):\n\n return json.dumps(res)", "def f1results():\n\n FIELDS = {'_id': False, }\n\n with MongoClient(MONGO_URI) as conn:\n collection = conn[DBS_NAME][COLLECTION_NAME]\n results = collection.find(projection=FIELDS)\n return json.dumps(list(results))", "def response(self):\n response = {}\n if self.stats is not None:\n response = self.stats\n\n return json.dumps(response)", "def json_out(self, data):\n\t\treturn json.dumps(data)", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def as_json(self):\n\n return {\n \"name\": self.name,\n \"summary\": self.summary.as_json(),\n \"cases\": [case.as_json() for case in self.cases]\n }", "def Results(self):\n return self.data", "def Results(self):\n return self.data", "def __format_results__(self, result_rows):\n for result in result_rows:\n self.return_dict['results']['items'].append(\n OrderedDict([\n ('count', result['count']),\n ('year', result['year']),\n ('country', {\n 'id': result['country_id'],\n 'link': self.request.route_url(\n 'api_v1_country_resource',\n api_version=self.api_version_string,\n url_name=result['country_url_name']\n ),\n },\n ),\n ])\n )", "def __format_results__(self, result_rows):\n for row in result_rows:\n self.return_dict['results']['items'].append(\n {\n 'name': row['name'],\n 'link': self.request.route_url(\n 'api_v1_disease_resource',\n 
api_version=self.api_version_string,\n url_name=row['id']\n ),\n 'search_link': self.request.route_url(\n 'api_v1_disease_search',\n api_version=self.api_version_string,\n url_name=row['id']\n ),\n 'information_link': row['info_link']\n }\n )", "def results(self, **kwargs):\n\t\ttry:\n\t\t\tdata = self.json(**kwargs)\n\t\texcept TypeError:\n\t\t\traise exceptions.InvalidIcinga2ApiResponseError()\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn tuple(data[\"results\"])\n\t\t\texcept KeyError:\n\t\t\t\treturn tuple()", "def serialize(self) -> dict:\n return {\n \"parameters\": self.parameters,\n \"results\": self.results,\n }", "def results(self):\n\n\t\tresults = {'answer':42}\n\n\t\treturn results", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def __format_results__(self, result_rows):\n for row in result_rows:\n self.return_dict['results']['items'].append(\n {\n 'name': row['name'],\n 'link': self.request.route_url(\n 'api_v1_country_resource',\n api_version=self.api_version_string,\n url_name=row['url_name']\n )\n }\n )", "def dict(self):\n\t\treturn self.json", "def json(self):\n return {\n 'id': self.id,\n 'name': self.name\n }", "def get_all_data():\n return jsonify(service.get_all_data())", "def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)", "def cat_results_react():\n\n cats = petfinder.search_data_map()\n cats = list(cats.values())\n\n return jsonify(cats)", "def results(self):\n pass", "def results(self):\r\n pass", "def to_multiple_jsons(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_jsons')\n else:\n self.output('multiple_jsons')", "def _convert_result_to_json(self, lines):\n if not lines:\n return {'status': 'Failed',\n 'msg': 'Final result is not available.'}\n\n lines = lines.split('\\n')\n n = len(lines)\n\n if n == 1:\n return self._result_to_dict(lines[0])\n\n return {'count': n,\n 'nodes': [self._result_to_dict(line) for line in lines]}", "def print_json(results, number, concurrency):\n import json\n stats = calc_stats(results, number, concurrency)\n print(json.dumps(stats))", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def get_data(self):\n return self.data.to_json()", "def to_single_json(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('single_json')\n else:\n self.output('single_json')", "def json(self):\n return self.__json", "def _format_query_response(self):\n output = self._initialize_response_output(self.parameters)\n output[\"data\"] = self.query_data\n\n self.query_sum = self._pack_data_object(self.query_sum, **self._mapper.PACK_DEFINITIONS)\n output[\"total\"] = self.query_sum\n\n if self._delta:\n output[\"delta\"] = self.query_delta\n\n return output", "def query():\n rows = []\n data = db.get()\n\n for calc in data:\n rows.append({\"ip\" : calc.ip, \"text\":calc.text})\n\n return jsonify(rows)", "def json_api():\n if 'category' in request.args:\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category)\\\n .filter_by(name=request.args['category']).first()\n items = 
sqlsession.query(Item).filter_by(category_id=category.id)\\\n .all()\n return json.dumps({'category_id': category.id,\n 'category_name': category.name,\n 'items': [item.serialize() for item in items]})\n elif 'item' in request.args:\n sqlsession = SQLSESSION()\n items = sqlsession.query(Item).filter_by(name=request.args['item'])\\\n .all()\n return json.dumps([item.serialize() for item in items])\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n items = sqlsession.query(Item).all()\n return json.dumps(\n {'categories': [cat.serialize() for cat in categories],\n 'items': [item.serialize() for item in items]})", "def results(self, period_start=None, period_end=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/results'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def json_out(self):\n temp_json = json.dumps(self.ecat_info, indent=4)\n print(temp_json)", "def generateJsonString(self) -> str:\n try:\n if self.lastResult is not None and len(self.lastResult) != 0:\n for result in self.lastResult:\n result['SAPMON_VERSION'] = PAYLOAD_VERSION\n result['PROVIDER_INSTANCE'] = self.providerInstance.name\n result['METADATA'] = self.providerInstance.metadata\n resultJsonString = json.dumps(\n self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder)\n self.tracer.debug(\"[%s] resultJson=%s\" % (self.fullName,\n str(resultJsonString)))\n except Exception as e:\n self.tracer.error(\"[%s] Could not format lastResult=%s into JSON (%s)\", self.fullName,\n self.lastResult,\n e, exc_info=True)\n raise\n return resultJsonString", "def get_results(self):\n return self.result", "def to_json(self):\n return [\"population\", self.species_index, self.card_trade_index]", "def format_result(data, total_count=None, status_code=STATUS_CODE_SUCCESS, success=True):\n result = {\n \"data\": data,\n \"status_code\": status_code,\n \"success\": success\n }\n if total_count and total_count >= 0:\n result['total_count'] = total_count\n return jsonify(result)", "def get_saved_result(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n for i in objs:\n data.append({'sentence':i.sentence, 'head':i.head, 'tail':i.tail, 'pred_relation':i.pred_relation, 'pred_sentiment':i.sentiment, 'conf':i.conf})\n \n return HttpResponse(\n json.dumps({'data':data}),\n content_type=\"application/json\"\n )", "def get_json(self):\n url = 'http://lkd.to/api/' + self.user\n response = requests.get(url)\n return response.json()", "def index_json():\n\n response = views.get_feature_collection_metadata(config)\n\n return make_response(jsonify(response))", "def get_results(self):\n return self.results", "def get_results(self):\n return self.results", "def get_query_result(self):\n\n self.construct_query()\n\n uri = \"https://uts-ws.nlm.nih.gov\"\n content_endpoint = \"/rest/search/{0}?string={1}&sabs={2}&returnIdType={3}\".format(\n self.version, self.identifier, self.source, self.returntype)\n\n self.query = {'ticket':self.AuthClient.getst()}\n\n r = requests.get(uri+content_endpoint, params=self.query)\n\n items = json.loads(r.text)\n self.jsonData = items[\"result\"]\n\n #print(self.jsonData)\n\n ##uncomment the print statment if you want the raw json output, or you can just look at the documentation :=)\n #https://documentation.uts.nlm.nih.gov/rest/concept/index.html#sample-output\n #https://documentation.uts.nlm.nih.gov/rest/source-asserted-identifiers/index.html#sample-output\n #print 
(json.dumps(items, indent = 4))", "def text_json(request):\n query = str()\n\n if request.method == 'GET':\n query = request.GET.get('q')\n\n results = list()\n\n for c in search.tokenSearch(query):\n tmp = {'category':'課程代號','title':c.token}\n results.append(tmp)\n \n for c in search.zhNameSearch(query):\n tmp = {'category':'課程名稱','title':c.name_zh}\n results.append(tmp)\n\n \n for c in search.engNameSearch(query):\n tmp = {'category':'Course Name','title':c.name_eng}\n results.append(tmp)\n \n for t in Teacher.objects.filter(name_zh__icontains=query):\n tmp = {'category':'老師','title':t.name_zh}\n results.append(tmp)\n \n for d in Department.objects.filter(name_zh__icontains=query):\n tmp = {'category':'開課單位','title':d.name_zh}\n results.append(tmp)\n\n tmp = {'results':results}\n\n return HttpResponse(json.dumps(tmp))", "def to_json(self) -> JSON:\n pass", "def to_json(self):\n return json.dumps(self.for_json())", "def get_results(self, match_type, source) -> List[dict]:\n results = db.session.query(TestSearchResult).filter(\n TestSearchResult.search_id == self.id,\n TestSearchResult.match_type == match_type,\n TestSearchResult.source == source\n ).order_by(TestSearchResult.index.asc())\n\n results_json = []\n for result in results:\n results_json.append(result.json)\n return results_json", "def get(self):\n matches = Match.select()[:]\n\n if not matches:\n return 'No matches available!', 200\n\n result = { \"data\": [match.to_dict() for match in matches] }\n return result, 200", "def scorejson(request):\n team1 = Team.objects.filter(team_id='team1')\n team2 = Team.objects.filter(team_id='team2')\n score1 = Score.objects.filter(team__team_id='team1')\n score2 = Score.objects.filter(team__team_id='team2')\n data = {}\n score = Score.objects.all()\n if score:\n data['success']=1\n data['message']=\"Current Score Available\"\n data['score'] = []\n for i in range(len(score)):\n data['score'].append(\n {'score':score[i].score,\n 'team_name':score[i].team.team,\n 'team_id':score[i].team.team_id,\n })\n return JsonResponse(data)\n else:\n data['success']=0\n data['message']='no score available'\n return JsonResponse(data)", "def groups_json(request):\n resp = []\n group_list = ResearchGroup.objects.order_by('name')\n for group in group_list:\n resp.append({'name': group.name, 'id': group.id})\n return HttpResponse(json.dumps(resp, ensure_ascii=False), content_type=\"application/json; charset=utf-8\")", "def create_json(self, request, qs):\n\n j = Work.objects.get_dict(qs)\n\n response = JsonResponse(j, json_dumps_params={'indent': 4})\n name = '{}{}'.format(\n settings.PUBLISHER_CODE, datetime.now().toordinal())\n cd = 'attachment; filename=\"{}.json\"'.format(name)\n response['Content-Disposition'] = cd\n return response", "def get_json_results(result):\n if result.status_code == 200 and is_json(result.text):\n return json.loads(result.text)\n\n else:\n print(f\"The result code not successful. 
The error code is: {result.status_code}\")\n return False", "def get(cls):\n return {'products': [product.to_json() for product in ProductModel.find_all()]}", "def to_json(self):\n return json.dumps({\"data\": self._data.tolist(),\n \"header\": self._header.tolist(),\n \"dates\": self._dates.tolist()})", "def get_result(user_id):\n user = UserModel.query.get(user_id)\n results = user.results\n time_practiced = sum(r.time for r in results) if results else 0\n overall_wpm = (sum(r.wpm for r in results) / len(results)) if results else 0\n overall_acc = (sum(r.accuracy for r in results) / len(results)) if results else 0\n recent_wpm = results[-1].wpm if results else 0\n recent_acc = results[-1].accuracy if results else 0\n return jsonify(username=user.username,\n time_practiced=time_practiced,\n overall_wpm=overall_wpm,\n overall_acc=overall_acc,\n recent_acc=recent_acc,\n recent_wpm=recent_wpm), 200", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def get_stats_array_per_usecase(request):\n\n mode = request.GET.get('mode',None)\n usern = request.GET.get('member',request.session['username'])\n username = User.objects.get(username=usern, ns_id=mode)\n language = request.GET.get('language',request.session['language'])\n institute = request.GET.get('institute',request.session['institute'])\n batch = request.GET.get('batch',request.session['batch'])\n json_dict = {}\n js = {}\n js['original'] = {}\n js['percent'] = {}\n json_dict['medtag'] = get_array_per_usecase(username,mode,language,institute,batch)\n json_dict['pubmed'] = get_array_per_usecase_PUBMED(username,mode,language,institute,batch)\n\n\n # print(json_dict)\n return JsonResponse(json_dict)", "def jsonify_all(cls):\n return jsonify(accounts=[account.as_dict() for account in cls.query.all()])", "def get(self):\n return_status = None\n result = {}\n try:\n log.debug(\"Summary info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": sql }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n temp_d={}\n temp_d.update(rec['tags'])\n temp_d.update(dict(zip(rec['columns'],element)))\n result_d.append(temp_d)\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching aggregate data')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while aggregating the data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching aggregate data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def GET(self, *args):\n if not args:\n self.logger.error('No arguments were given')\n return json.dumps({\"results\":{}})\n return json.dumps(self.get_single_new(args[0]))", "def prcp():\n \"\"\"Return the 
JSON representation of your dictionary.\"\"\"\n session = Session(engine)\n results_prcp = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n\n prcp_scores = []\n for p in prcp_query:\n my_dict = {}\n my_dict[prcp_query[0]] = prcp_query[1]\n prcp_scores.append(my_dict)\n\n return jsonify(prcp_scores)", "def as_json(self):\n # if we don't convert it to a dict we'll get a whole bunch of 'can't be serialized' things\n # match = self.__dict__\n # match.pop('_sa_instance_state', None)\n # for k in match:\n #\n # match['date'] = match['date'].isoformat()\n m = self.__dict__\n m['explosions'] = self.explosions.all()\n m['deaths'] = self.deaths.all()\n m['antagobjs'] = self.antagobjs.all()\n m['uplinkbuys'] = self.uplinkbuys.all()\n m['badassbuys'] = self.badassbuy.all()\n m['populationstats'] = self.populationstats.all()\n\n return dict_to_json(m)", "def wrap_results(response):\n if isinstance(response.data, list):\n return {\"results\": response.data}\n\n return response.data", "def json(self):\n return {\n '_id' : self._id,\n 'name' : self.name,\n 'description' : self.description,\n }", "def to_json(self):\n return None", "def toJSON(self):\r\n\r\n jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n if len(self.slctData) > 100:\r\n self.getSimMatSummary(100)\r\n jsonToRet.append(self.summaryOrdering)\r\n for i in range(0,len(self.simMatSmm)):\r\n for n in self.simMatSmm[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n\r\n jsonToRet.append(self.patchOrdering)\r\n # jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n for i in range(0,len(self.simMat)):\r\n for n in self.simMat[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n return jsonToRet", "def suggestion(request):\n #raw_data = None\n return_data = serializers.serialize('json', 0)\n return HttpResponse(return_data, mimeType='application/json')", "def tags_JSON(request):\n tags_as_json = serializers.serialize('json', Tag.objects.all())\n return HttpResponse(json.dumps(tags_as_json), content_type='json')", "def get_chart_one(request):\r\n json_str = []\r\n \r\n usuarios = Usuario.objects.all()\r\n for usuario in usuarios:\r\n peticiones = Peticion.objects.filter(usuario=usuario)\r\n json_str.append({ \r\n 'name': u'%s %s' % (usuario.persona.nombre,\r\n usuario.persona.apellidos),\r\n 'data': len(peticiones)\r\n }) \r\n json_obj = json.dumps(json_str, sort_keys=True, indent=4)\r\n response = HttpResponse(json_obj, mimetype=\"application/json\") \r\n return response", "def mongo_jsonify(query):\n return current_app.response_class(query.to_json(),\n mimetype='application/json')", "def index():\n\n return jsonify()", "def encode(self):\n return json.dumps(self.get_data(), indent=4)", "def get_chart_two(request):\r\n json_str = []\r\n \r\n usuarios = Usuario.objects.all()\r\n for usuario in usuarios:\r\n peticiones = CasoPrueba.objects.filter(usuario=usuario)\r\n total = CasoPrueba.objects.all()\r\n json_str.append({ \r\n 'name': u'%s %s' % (usuario.persona.nombre,\r\n usuario.persona.apellidos),\r\n 'data': len(peticiones),\r\n 'total': len(total)\r\n }) \r\n json_obj = json.dumps(json_str, sort_keys=True, indent=4)\r\n response = HttpResponse(json_obj, mimetype=\"application/json\") \r\n return response", "def get_data(request, *args, **kwargs):\n \n data = {\n \"sales\": 100,\n \"customers\": 10,\n }\n return JsonResponse(data) # http response" ]
[ "0.7852767", "0.7314439", "0.715554", "0.7062592", "0.7062298", "0.68037283", "0.6758764", "0.6735392", "0.66818523", "0.6601434", "0.6596625", "0.65821993", "0.6529765", "0.6528932", "0.6514097", "0.6506638", "0.6498062", "0.6485245", "0.64679945", "0.64450043", "0.64401627", "0.64366865", "0.64290035", "0.6424255", "0.641819", "0.63854563", "0.6384362", "0.6375571", "0.6325881", "0.63241434", "0.6313915", "0.6313915", "0.6303889", "0.6297818", "0.6285726", "0.6269618", "0.62612265", "0.6258941", "0.6256349", "0.62425053", "0.620866", "0.61944556", "0.61887795", "0.61850834", "0.61841655", "0.6156033", "0.61538285", "0.6150455", "0.61365503", "0.61224437", "0.6118196", "0.6117467", "0.6112627", "0.6109965", "0.61085266", "0.610297", "0.6097092", "0.6096595", "0.6096581", "0.6092886", "0.60910743", "0.6085522", "0.60848856", "0.6079239", "0.6076855", "0.6067738", "0.6067738", "0.605856", "0.60491544", "0.6047038", "0.60425794", "0.6039257", "0.6036704", "0.60308737", "0.60285974", "0.60222703", "0.6013552", "0.60135126", "0.60116", "0.6010661", "0.60106146", "0.6007354", "0.60024756", "0.60004014", "0.59987307", "0.59949815", "0.59926206", "0.59898126", "0.5987913", "0.59846264", "0.5979609", "0.59728956", "0.5969387", "0.59631306", "0.59571993", "0.5949431", "0.5947601", "0.59439", "0.5943417", "0.5936445" ]
0.7285609
2
r"""The initial call to start propagating messages.
def propagate(self, z_agg, edge_index, **kwargs): # assert aggr in ['add', 'mean', 'max'] # agg_list = self.search_space['agg'] kwargs['edge_index'] = edge_index size = None message_args = [] for arg in self.message_args: if arg[-2:] == '_i': # If arguments ends with _i then include indic tmp = kwargs[arg[:-2]] # Take the front part of the variable | Mostly it will be 'x', size = tmp.size(0) message_args.append(tmp[edge_index[0]]) # Lookup for head entities in edges elif arg[-2:] == '_j': tmp = kwargs[arg[:-2]] # tmp = kwargs['x'] size = tmp.size(0) message_args.append(tmp[edge_index[1]]) # Lookup for tail entities in edges else: message_args.append(kwargs[arg]) # Take things from kwargs update_args = [kwargs[arg] for arg in self.update_args] # Take update args from kwargs out = self.message(*message_args) # out = scatter_(z_agg_hard, self.search_space['agg'], out, edge_index[0], dim_size=size) out = scatter_(z_agg, self.search_space['agg'], out, edge_index[0], dim_size=size) out = self.update(out, *update_args) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n if self._pumping:\n return\n self._pumping = True\n self._global_reactor.callLater(0, self._pump_once)", "def beginStep(self, message=''):\n if not self.initialized:\n self.start(message)", "def __enter__(self):\n print(self.msg)\n self.start = self()\n return self", "def _start(self):\n pass", "def start(self):\n self._msg_disp.start()\n self._msg_disp.process_message(DhsStart())", "def run_and_propagate(self):\n pass", "def starting(self):\n ident = self.ident()\n print('{} starting & consuming \"{}\".'.format(ident, self.to_consume))\n\n if self.max_tasks:\n print('{} will die after {} tasks.'.format(ident, self.max_tasks))\n else:\n print('{} will never die.'.format(ident))", "def start(self) -> None:\n logger.log(self.log_level, f'Start {self.name}...')\n self.started = True\n super().start()", "def startComponent(self):\n\n # create message service instance\n self.ms = MessageService()\n\n # register\n self.ms.registerAs(\"MergeAccountant\")\n\n # subscribe to messages\n self.ms.subscribeTo(\"MergeAccountant:StartDebug\")\n self.ms.subscribeTo(\"MergeAccountant:EndDebug\")\n self.ms.subscribeTo(\"MergeAccountant:Enable\")\n self.ms.subscribeTo(\"MergeAccountant:Disable\")\n self.ms.subscribeTo(\"JobSuccess\")\n self.ms.subscribeTo(\"GeneralJobFailure\")\n self.ms.subscribeTo(\"MergeAccountant:SetJobCleanupFlag\")\n\n # set trigger access for cleanup\n self.trigger = Trigger(self.ms)\n\n # set message service instance for PM interaction\n File.ms = self.ms\n\n # wait for messages\n while True:\n\n\n # get message\n messageType, payload = self.ms.get()\n self.ms.commit()\n # create session object\n Session.set_database(dbConfig)\n Session.connect()\n\n # start transaction\n Session.start_transaction()\n\n # process it\n self.__call__(messageType, payload)\n self.ms.commit()\n\n # commit and close session\n Session.commit_all()\n Session.close_all()", "def begin(self):\n self._logger.debug(\"Begin\")", "def init(self):\n self.dispatcher.start()\n self.replyer.start()", "def start():\n log(\"=========== hook: start ===========\")", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start(self):\n ...", "def Start(self):\n for unused_i in range(0, self.args.message_count):\n self.CallClient(\n standard.ReadBuffer, offset=0, length=100, next_state=\"Process\")", "def start(self) -> None:\n self.__enter__()", "def _start(self):", "def start(self):\n self.sender.start()\n self.receiver.start()", "def start(self):\n self.sender.start()\n self.receiver.start()", "def start(self):\r\n pass", "def start (self):\n pass", "def start (self):\n pass", "def start(self):\n self._connect()\n self._init_exchange()\n self._init_queue()\n self._bind_queue()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def pre_start_hook(self):\n\n LOG.debug(_('XManager pre_start_hook...'))\n\n pass", "def fstart(wrapper: MessageDispatcher, message: str):\n channels.Main.send(messages[\"fstart_success\"].format(wrapper.source))\n wrapper.target = channels.Main\n start(wrapper, forced=True)", "def start(self) -> None:", "def start(self) -> None:", "def begin(self):\n pass", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. 
\"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def start(self):\n self.reset()\n self.on_start()", "def Start(self) :\n\t\t...", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def pre_start(self) -> None:\n pass", "def starting(self) -> None:\n log_title(\"Start resolving requirements\")\n for req in self.requirements:\n stream.logger.info(\"\\t\" + req.as_line())", "def start(self):\n raise NotImplementedError(\"(%s).start\" % self)", "def start(self, annealer):\n # chain up\n super().start(annealer=annealer)\n # build a cooling step to hold the state of the problem\n self.step = self.CoolingStep.start(annealer=annealer)\n # all done\n return self", "def start(self):\n self.listener.start()\n # No need to start broadcaster, it just sends when necessary", "def started(self):", "def start(self):\n log.startLoggingWithObserver(self.emit, setStdout=0)", "def teleopInit(self):\n self.globalInit()\n self.teleop.start()", "def start(self):\n if self.debug:\n print(\"%s start\" % self.name)", "def run_simulation(self):\n print(\"# Starting propagation simulation using {} propagtion routine\".format(\n self.__class__.__name__))\n self.propagate()\n print(\"# Finished propagation simulation\")", "def start(self):\n super().start()\n loop = IOLoop.current()\n # Collect and send all IOPub messages, for all time\n # TODO: Check errors from this loop and restart as needed (or shut down the kernel)\n loop.add_callback(self.relay_iopub_messages)", "def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True", "def start(self):\n self._state = 'Started'", "def start(self):\n\t\tself.stream.start_stream()", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def on_begin(self, args, kwargs):\n self.last_msg = datetime.datetime.utcnow()", "def start(self):\n self.parent.start(auto_terminate=False)\n self.started = True", "def run(self):\n self.started()", "def activate(self):\n self.start()", "def start(self):\n self.p.start()", "def do_start_messaging(self, *arg):\n print_info(\"Starting messaging\")\n\n # Send commands to POCS via this publisher\n try:\n self.cmd_publisher = PanMessaging.create_publisher(\n self.cmd_pub_port)\n print_info(\"Command publisher started on port {}\".format(\n self.cmd_pub_port))\n except Exception as e:\n print_warning(\"Can't start command publisher: {}\".format(e))\n\n try:\n self.cmd_subscriber = PanMessaging.create_subscriber(\n self.cmd_sub_port)\n print_info(\"Command subscriber started on port {}\".format(\n self.cmd_sub_port))\n except Exception as e:\n print_warning(\"Can't start command subscriber: {}\".format(e))\n\n # Receive messages from POCS via this subscriber\n try:\n self.msg_subscriber = PanMessaging.create_subscriber(\n self.msg_sub_port)\n print_info(\"Message subscriber started on port {}\".format(\n self.msg_sub_port))\n except Exception as e:\n print_warning(\"Can't start message subscriber: {}\".format(e))\n\n # Send messages to PAWS\n try:\n self.msg_publisher = PanMessaging.create_publisher(\n self.msg_pub_port)\n print_info(\"Message publisher started on port {}\".format(\n 
self.msg_pub_port))\n except Exception as e:\n print_warning(\"Can't start message publisher: {}\".format(e))", "def notify_started(self):\n raise NotImplementedError", "def start_publishing(self):\n print(f\"{self._connection_param}: Issuing consumer related RPC commands\")\n # self._channel.confirm_delivery(self.on_delivery_confirmation)\n self.schedule_next_message(self.SLOW_SEND)", "def start(self):\n\t\tif self._send_greenlet is None:\n\t\t\tself._send_greenlet = gevent.spawn(self._send_loop)", "def start(self):\n assert not self.state is CallState.finished\n\n self.state = CallState.started\n self.start_time = time()", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "def start_procedure(self):\n pass", "def enter(self):\n # lets not immediately run the animation\n assert self.notify.debugStateCall(self)\n self.node.postFlatten()\n # for some reason phaseIvals must be created here, doesn't work in __init__\n self.createPhaseIntervals()\n AnimatedProp.AnimatedProp.enter(self)\n\n # make it look like the other props by forcing pose 0\n defaultAnim = self.node.getAnimControl('anim')\n numFrames = defaultAnim.getNumFrames()\n self.node.pose('phase0', 0)\n self.accept(\"%sZeroPhase\" % self.propString, self.handleNewPhase)\n self.accept(\"%sZeroIsRunning\" % self.propString, self.handleNewIsRunning)\n self.startIfNeeded()", "def start():", "def start():", "def start():", "def start():", "def start(self):\n # Start view\n # Since it can generate events, it doesn't need\n # to know about the Interactor instance\n self.view.start()\n # Finish coupling view events to presenter callbacks\n self.interactor.start(self, self.view)\n\n # Start model\n # Since it can't generate events, give it an Observer\n # instance to do so.\n self.model.start(self.observer)\n # Finish Coupling model events to presenter callbacks\n self.observer.start(self)\n\n # Derived Presenter will then start()", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def setupStarted(self, *args, **kwargs): # real signature unknown\n pass", "def start(self) -> None:\n cb = self._callback\n if cb is not None:\n self._callback = None\n propagate(from_=ensure_future(cb()), to=self._future)", "def start( *args, **kwargs ):", "def event_start(self, **kwargs):\n del kwargs\n self.start()", "def _init_start(self):\n def start(core, args):\n task = ' '.join(args.task) if args.task else ''\n return core.start(task=task)\n\n usage = 'stl start [task]'\n desc = (\n 'make a log that you are starting to work'\n )\n\n subp = self.subparsers.add_parser(\n 'start', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'task', nargs=argparse.REMAINDER,\n help='the task that you are about to start working on')\n\n subp.set_defaults(func=start)", "def _onStart(self, name):\n logging.debug(\"onStart...\")", "def start(self):\r\n start_thread(self._extract_thread_func, \"message sorter thread\")\r\n self.debug(\"### initialized stream sorter with %g s time window\"\r\n % (self.delay))", "def initial(self):\n self.update_panel_displays()\n yield 0\n #\n if self.options.initial_state:\n self.started = True\n self.nextState(getattr(self, self.options.initial_state)())\n else:\n 
self.nextState(self.start_screen())", "def test_Integrator_Propagator_full(annealing_steps=10):\n from qmlify.propagation import Propagator\n pdf_state, pdf_state_subset, integrator, ani_handler, atom_map, particle = propagator_testprep()\n\n propagator = Propagator(openmm_pdf_state = pdf_state,\n openmm_pdf_state_subset = pdf_state_subset,\n subset_indices_map = atom_map,\n integrator = integrator,\n ani_handler = ani_handler,\n context_cache=None,\n reassign_velocities=True,\n n_restart_attempts=0)\n\n particle_state, _return_dict = propagator.apply(particle.state, n_steps = annealing_steps, reset_integrator=True, apply_pdf_to_context=True)\n\n #assert that the iteration is equal to the total number of iterations\n assert propagator._iteration == propagator._n_iterations\n\n #the length of the state works must be the annealing step length + 1 since the first work is defaulted as 0.\n assert len(propagator.state_works[0]) == annealing_steps + 1\n\n #check to make sure that the particle state is maintained in memory\n assert particle_state == particle.state\n\n #the work should be negative\n assert propagator.state_works[0][-1] < 0.", "def BEGIN(self):\n raise self.SEND()", "def start_execution(self):\n self.send_message(\"control.start\",None)", "def begin(self):\n self.wakeup()", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def start(self):\n self.j_pump.start()\n return self", "def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)", "def startRep(self, rep):\n \n pass", "def startLogging (self):\n self.isLogging = True\n self.startCallback ()", "def _fire(self):\r\n if not self._canceled:\r\n self.__call__(self, None)\r\n if not (self._canceled or self._one_shot):\r\n self._start()", "def start(self):\n self.events[0].record()\n self.cur = 1" ]
[ "0.6312824", "0.61891603", "0.606415", "0.60143113", "0.591316", "0.5910299", "0.5900974", "0.5852277", "0.5843216", "0.5824698", "0.58228225", "0.57953435", "0.5789157", "0.5789157", "0.57543075", "0.5754194", "0.5743585", "0.5737249", "0.5731807", "0.5731807", "0.57178223", "0.5709688", "0.5709688", "0.5709607", "0.57008106", "0.57008106", "0.57008106", "0.57008106", "0.57008106", "0.57008106", "0.57008106", "0.57008106", "0.56933993", "0.56933993", "0.56933993", "0.5685031", "0.5681007", "0.56767565", "0.56767565", "0.5638049", "0.5625256", "0.5620031", "0.5612811", "0.5595092", "0.5595092", "0.5595092", "0.5595092", "0.5587519", "0.5581424", "0.5577469", "0.5566691", "0.5565889", "0.55597085", "0.5556399", "0.55515885", "0.5527551", "0.55259436", "0.55163026", "0.550779", "0.5507098", "0.55034107", "0.5495736", "0.5495736", "0.54892975", "0.5489179", "0.54805076", "0.54778075", "0.54665095", "0.54662603", "0.5464592", "0.5463323", "0.54472446", "0.54292876", "0.54184824", "0.5414015", "0.54134816", "0.5394428", "0.5394428", "0.5394428", "0.5394428", "0.5394196", "0.5387789", "0.536351", "0.53604025", "0.5359454", "0.5352721", "0.53503597", "0.53494453", "0.53481877", "0.5346652", "0.53376794", "0.5327664", "0.5326944", "0.53160375", "0.5307037", "0.53002673", "0.5291418", "0.5289933", "0.5287688", "0.5282573", "0.52780604" ]
0.0
-1
r"""Updates node embeddings in analogy to
def update(self, aggr_out): # pragma: no cover return aggr_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_embeddings(self):", "def back_entities_embedding(self, entity):\n self.ent_embs.ent_embs.weight.data[entity] = self.source_entity", "def generate_embeddings_with_prev(self, old_emb, dims):\n self.embeddings = old_emb\n for node in self.nx_graph.nodes_iter():\n if self.nx_graph.degree(node) == 0:\n continue\n if node not in self.embeddings:\n nbr_vecs = []\n for nbr in self.nx_graph.neighbors(node):\n if nbr in self.embeddings:\n nbr_vecs.append(self.embeddings[nbr])\n\n if len(nbr_vecs):\n self.embeddings[node] = np.mean(nbr_vecs, axis=0)\n else:\n self.embeddings[node] = self._rand_vec(dims)", "def node_embedding(self, type):\n raise Exception(\" not implemented in base model\")", "def update_entity_embedding(self, entity, ims, mu):\n self.source_entity = self.ent_embs.ent_embs.weight.data[entity]\n self.ent_embs.ent_embs.weight.data[entity] = mu * self.source_entity + (1 - mu) * torch.mean(ims, dim=0)", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def set_embedding(self, embedding):\n assert self._embedding.weight.size() == embedding.size()\n self._embedding.weight.data.copy_(embedding)\n # self._embedding.weight.requires_grad = False", "def _update_embedding_param(self):\n for layer, ids in self._tls._unique_ids_all_layers.items():\n value = self._get_embedding_variable(layer).numpy()\n self._update_embedding_func(layer, ids, value)\n\n for slot in self._allowed_slot_names:\n value = self._get_slot_variable(layer, slot).numpy()\n slot_table_name = get_slot_table_name(layer, slot)\n self._update_embedding_func(slot_table_name, ids, value)", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, 
self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def update_knowledge(self):\n pass", "def update_weights(self):\n gained_vocab = len(self.wv.vocab) - len(self.wv.syn0)\n newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)\n\n # randomize the remaining words\n for i in range(len(self.wv.syn0), len(self.wv.vocab)):\n # construct deterministic seed from word AND seed argument\n newsyn0[i - len(self.wv.syn0)] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))\n\n # Raise an error if an online update is run before initial training on a corpus\n if not len(self.wv.syn0):\n raise RuntimeError(\n \"You cannot do an online vocabulary-update of a model which has no prior vocabulary. 
\"\n \"First build the vocabulary of your model with a corpus before doing an online update.\"\n )\n\n self.wv.syn0 = vstack([self.wv.syn0, newsyn0])\n\n if self.hs:\n self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])\n if self.negative:\n self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])\n self.wv.syn0norm = None\n\n # do not suppress learning for already learned words\n self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning", "def update_embeddings(self, retriever):\n\n docs = self.get_all_documents()\n passages = [d.text for d in docs]\n logger.info(f\"Updating embeddings for {len(passages)} docs ...\")\n embeddings = retriever.embed_passages(passages)\n\n assert len(docs) == len(embeddings)\n\n doc_updates = []\n for doc, emb in zip(docs, embeddings):\n update = {\"_op_type\": \"update\",\n \"_index\": self.index,\n \"_id\": doc.id,\n \"doc\": {self.embedding_field: emb.tolist()},\n }\n doc_updates.append(update)\n\n bulk(self.client, doc_updates, request_timeout=300)", "def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def _warm_cache(self):\n for word, index in self.word_to_index.items():\n self.embedding_layer.weight.data[index].copy_(torch.from_numpy(self.embedder.get_word_vector(word)))", "def update_embedding_layer(line_tokens, model, dictionary):\n\n global elmo_embedder\n\n out_of_corpus_vocab = [word for word in line_tokens if word not in dictionary.word2idx]\n if len(out_of_corpus_vocab) == 0:\n return\n\n print(\"OOV words found:\", out_of_corpus_vocab, file=sys.stderr)\n\n if model.using_pretrained == \"fasttext\":\n pretrained_emb_model = vc.FastText()\n elif model.using_pretrained == \"glove\":\n pretrained_emb_model = vc.GloVe()\n elif model.using_pretrained == \"elmo_top\" or model.using_pretrained == \"elmo_avg\":\n dirname = os.path.dirname(__file__)\n options_file = os.path.join(dirname, '../elmo-config/elmo_2x4096_512_2048cnn_2xhighway_options.json')\n weight_file = os.path.join(dirname, '../elmo-config/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5')\n # Retrieve cached version if already loaded, saves a lot of time if this code path is executed multiple times\n if elmo_embedder is None:\n elmo_embedder = ElmoEmbedder(options_file, weight_file)\n else:\n raise Exception(\"Unsupported embedding model:\", model.using_pretrained)\n\n print(\"Using Pretrained embedding:\", model.using_pretrained)\n\n if model.using_pretrained in ['fasttext', 'glove']:\n\n pretrained_vectors = pretrained_emb_model.vectors\n pretrained_stoi = pretrained_emb_model.stoi\n\n elif 'elmo' in model.using_pretrained:\n # We get the embeddings for all line_tokens which includes both known and unknown, if any.\n # As elmo is character level we can find embedding of any word\n reduced_vocab = [token for token in line_tokens if token not in\n [\"#\", \"<EOT>\", \"<EOL>\", \"</s>\", \"<eos>\", \"<P>\", \"<unk>\"]]\n pretrained_stoi = {v: k for k, v in enumerate(reduced_vocab)}\n elmo_embeddings = elmo_embedder.embed_sentence(reduced_vocab)\n if 'top' in model.using_pretrained:\n pretrained_vectors = elmo_embeddings[-1]\n elif 'avg' in model.using_pretrained:\n pretrained_vectors = np.average(elmo_embeddings, axis=0)\n pretrained_vectors = torch.from_numpy(pretrained_vectors)\n out_of_corpus_vocab = [word for word in line_tokens if 
word not in dictionary.word2idx and word in pretrained_stoi]\n\n if len(out_of_corpus_vocab) == 0:\n return\n\n # Update for only unknown/new word\n new_vectors = []\n for word in out_of_corpus_vocab:\n dictionary.add_word(word)\n new_vectors.append(pretrained_stoi[word])\n\n new_vectors = torch.index_select(pretrained_vectors, 0, torch.LongTensor(new_vectors))\n\n model.embedder = torch.nn.Embedding.from_pretrained(torch.cat([model.embedder.weight, new_vectors]))\n if model.tie_weights:\n model.decoder.weight = model.encoder.weight", "def init_emb(self):\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)", "def add_embedding(self, token, embedding):\n self.word2idx[token] = self.vocab_size\n self.vocab_size += 1\n\n self.embedding = np.vstack((self.embedding, embedding))", "def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)", "def add_embedding(self, prefix=''):\n with tf.variable_scope(prefix + 'embed'):\n if self.cfg.fix_emb:\n assert (hasattr(self.cfg, 'W_emb'))\n W_emb = pkl.load(open(self.cfg.W_emb_path, 'rb'))\n W = tf.get_variable('W', initializer= W_emb, trainable=True)\n print(\"iniitalize word embedding finished\")\n else:\n weightInit = tf.random_uniform_initializer(-0.001, 0.001)\n vocab = pkl.load(open(self.cfg.vocab_path, 'rb'))\n W = tf.get_variable('W', [len(vocab), self.cfg.emb_size], initializer=weightInit)\n if hasattr(self.cfg, 'relu_w') and self.cfg.relu_w:\n W = tf.nn.relu(W)\n return W", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n 
self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")", "def set_tied(self):\n self.lm_head.set_embeddings_weights(self.transformer.wte.weight)", "def _embed(self):\n batch_size = tf.shape(self.p)[0]\n with tf.variable_scope(\"emb\"):\n with tf.variable_scope(\"char\"):\n pc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.pc), \n [batch_size * self.max_p_len, self.max_w_len, self.vocab.char_embed_dim])\n qc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.qc), \n [batch_size * self.max_q_len, self.max_w_len, self.vocab.char_embed_dim])\n cell_fw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, pc_emb, self.pc_length, dtype=tf.float32)\n pc_emb = tf.concat([state_fw, state_bw], axis=1)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, qc_emb, self.qc_length, dtype=tf.float32)\n qc_emb = tf.concat([state_fw, state_bw], axis=1)\n pc_emb = tf.reshape(pc_emb, [batch_size, self.max_p_len, 2 * self.char_hidden_size])\n qc_emb = tf.reshape(qc_emb, [batch_size, self.max_q_len, 2 * self.char_hidden_size])\n\n with tf.name_scope(\"word\"):\n p_emb = tf.nn.embedding_lookup(self.word_embed, self.p)\n q_emb = tf.nn.embedding_lookup(self.word_embed, self.q)\n\n with tf.name_scope(\"pos\"):\n p_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.p_pos)\n q_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.q_pos)\n \n with tf.name_scope(\"em\"):\n sh = tf.shape(self.p_em)\n resh = [sh[0], sh[1], 1]\n p_em_feat = tf.reshape(tf.cast(self.p_em, dtype=tf.float32), shape=resh)\n\n self.p_emb = tf.concat([p_emb, pc_emb, p_pos_emb, p_em_feat], axis=2)\n self.q_emb = tf.concat([q_emb, qc_emb, q_pos_emb], axis=2)", "def class_weights_embedding():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'class_weights_embedding_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 1, 1: 2}, {0: 15, 1: 85}]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(use_word_emb=1)\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + 
\"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n set_params(preproc_data_id=new_model_id)", "def add_embedding(self):\n #with tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings", "def main():\n logging.basicConfig(level=logging.DEBUG)\n custom_embedding = True\n\n # Download embeddings'\n if custom_embedding:\n embedding_path = '../data/custom_embedding.pkl'\n embedding_index_path = '../data/custom_vocab_index.pkl'\n logging.info('Pulling custom embedding from: {}, and custom vocab from: {}'.format(embedding_path, embedding_index_path))\n embedding_matrix = pickle.load(open(embedding_path, 'rb'))\n embedding_index_lookup = pickle.load(open(embedding_index_path, 'rb'))\n\n else:\n logging.warning('Downloading embedding. If downloading for the first time, this make take 5-10 minutes.')\n embedding_url = 'https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz'\n embedding_path = '~/nlp_example/'\n embedding_filename = 'GoogleNews-vectors-negative300.bin.gz'\n lib.download_file(embedding_url, embedding_path, embedding_filename)\n\n # Unpack embedding\n model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path + '/' + embedding_filename, binary=True)\n embedding_matrix = model.syn0\n embedding_index_lookup = dict([(k, v.index) for k, v in model.vocab.items()])\n\n # Create thesaurus\n thesaurus = Thesaurus(embedding_matrix, embedding_index_lookup)\n\n # Find nearest neighbors for examples\n print(thesaurus.synonyms('day'))\n print(thesaurus.synonyms('top'))\n print(thesaurus.synonyms('bottom'))\n print(thesaurus.synonyms('cat'))\n print(thesaurus.synonyms('grown'))\n\n\n pass", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))", "def embedding_updater_model(variables, rank,\n n_slots,\n init_params=None,\n n_ents=None,\n init_noise=0.0,\n loss=total_loss_logistic,\n scoring=multilinear,\n reg=0.0):\n qc, yc, wc, q, y, local_voc = variables\n n_data = y.get_shape()[0].value\n # model definition\n # initialization\n 
if init_params is not None:\n emb0_val = init_params[0]\n if len(init_params) > 1:\n step_size = init_params[1]\n else:\n step_size = 1.0\n emb0_val += np.random.randn(n_ents, rank) * init_noise\n else:\n emb0_val = np.random.randn(n_ents, rank)\n step_size = 1.0\n\n emb0 = tf.Variable(np.array(emb0_val, dtype=np.float32))\n if local_voc is not None:\n emb0 = tf.gather(emb0, local_voc)\n\n # emb0 = tf.tile(tf.reshape(tf.Variable(np.array(emb0_val, dtype=np.float32)), (1, n_ents, rank)), (n_data, 1, 1))\n\n # reading and answering steps\n emb1 = reader(emb0=emb0, step_size=step_size, context=(qc, yc), weights=wc, n_slots=n_slots,\n loss_grad=loss_quadratic_grad)\n pred = answerer(emb1, q, scoring=scoring)\n objective = loss(pred, y)\n if reg > 0:\n objective += reg * tf.nn.l2_loss(emb0)\n\n return objective, pred, y", "def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored", "def findOptimalEmbedding(tree, embedding_dim=2, lr=1e-3, num_updates=5000):\n num_vertices = len(tree) \n \n # initialize euclidean embedding tensor ~ uniform distribution in range [0, 1)\n euclid_embedding = Variable(torch.rand(num_vertices, embedding_dim).type(torch.FloatTensor), requires_grad=True)\n \n # initialize euclidean embedding tensor ~ uniform distribution in range [0, 0.1)\n hyp_embedding = Variable(torch.div(torch.rand(num_vertices, embedding_dim), 10).type(torch.FloatTensor), requires_grad=True)\n \n print('Finding Optimal Embedding with dim = %i, lr = %f, total number of updates = %i' %(embedding_dim, lr, num_updates))\n for t in range(num_updates):\n \n # l2_loss function is the sum of l2 norm (no sqrt) between the space distance and tree distance \n euclid_loss = l2_loss(euclid_embedding, tree, euclid_dist)\n hyp_loss = l2_loss(hyp_embedding, tree, hyp_dist)\n \n # print out loss in console\n sys.stdout.write('\\r' + ('%i: euclid loss = %f, hyperbolic loss = %f' % (t, euclid_loss.data[0], hyp_loss.data[0])))\n sys.stdout.flush() \n \n # using autograd, get gradients for embedding tensors\n euclid_loss.backward()\n hyp_loss.backward()\n \n # Update weights using gradient descent\n euclid_embedding.data -= lr * euclid_embedding.grad.data\n hyp_embedding.data -= lr *inverse_metric_tensor(hyp_embedding)*hyp_embedding.grad.data\n\n # Manually zero the gradients after updating weights\n euclid_embedding.grad.data.zero_()\n hyp_embedding.grad.data.zero_() \n \n print('\\n finished optimization!')\n np.save('euclid_embedding.npy', euclid_embedding.data.numpy())\n np.save('hyp_embedding.npy', hyp_embedding.data.numpy())\n print('Saved Euclidean embedding to euclidean_embedding.npy and hyperbolic embedding to hyp_embedding.npy !')", "def prepare_emb(self):\n with tf.variable_scope(\"PrepEmb\", reuse=tf.AUTO_REUSE):\n self.src_ten = tf.cast(tf.convert_to_tensor(self.src_ten), tf.float32)\n self.tgt_ten = tf.cast(tf.convert_to_tensor(self.tgt_ten), tf.float32)\n # Mapping\n self.src_ten = tf.matmul(self.src_ten, self.W)\n # Normalization\n self.src_ten = tf.nn.l2_normalize(self.src_ten, axis=1)\n self.tgt_ten = tf.nn.l2_normalize(self.tgt_ten, axis=1)", "def update(self, X_query, y_query, X_query_embedded):\n #print(\"X_query.shape: {}\".format(X_query.shape))\n #print(\"y_query.shape: {}\".format(y_query.shape))\n\n # add new rows of data\n self.X.append(X_query)\n self.y = torch.cat([self.y, torch.Tensor([[y_query]])], axis=0)\n\n # Append to (n x d_embedding) Tensor\n self.X_embedded = torch.cat([self.X_embedded.float(),\n X_query_embedded.float()],\n dim=0)\n\n #print(\"self.X_embedded.shape: {}\".format(self.X_embedded.shape))\n #print(\"self.y.shape: {}\".format(self.y.shape))\n self.model = get_fitted_model(self.X_embedded.float(),\n self.y.float())", "def update_node(self, node):\n return node.update()", "def axiom_embedding(self, axioms):\n raise NotImplementedError('Use a derived model')", "def load_pretrained_embedding(self, pre_embeddings):\n assert (pre_embeddings.size()[1] == self.embedding_dim)\n self.word_embeds.weight = nn.Parameter(pre_embeddings)", "def self_attention(target, source_neighbors, embed, embed_unsigned, new2old, type=\"distance\"):\r\n if target in source_neighbors:\r\n raise Exception(\"Known (source, target) relation cannot be used, it's cheating!\")\r\n weight = {}\r\n embed_result = np.zeros(shape=(embed.shape[1]))\r\n target_old = 
int(new2old[target][\"id\"])\r\n total_weight = 0\r\n for n in source_neighbors:\r\n n_old = int(new2old[n][\"id\"])\r\n if type == \"distance\":\r\n distance = np.linalg.norm(embed_unsigned[target_old, :] - embed_unsigned[n_old, :])\r\n weight[n] = np.exp(-distance)\r\n elif type == \"product\":\r\n product = np.inner(embed_unsigned[target_old, :], embed_unsigned[n_old, :])\r\n weight[n] = np.exp(product)\r\n total_weight += weight[n]\r\n # normalize weight of neighbors to sum() = 1\r\n for n in source_neighbors:\r\n embed_result += (weight[n] / total_weight) * embed[int(n), :]\r\n return embed_result", "def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:\n if embeddings is None:\n # initialize embedding layer with the uniform distribution\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n else:\n # initialize embedding layer with pre-trained embeddings\n self.embedding.weight = nn.Parameter(embeddings, requires_grad=fine_tune)", "def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def embed(self, x):\n if self.embedding is None:\n return x\n else:\n return self.embedding(x)", "def reset_weights(self):\n np.random.seed(self.seed)\n self.node_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n self.context_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def link(self, input):\n self.input = input\n self.output = self.embeddings[self.input]\n return self.output", "def edge_embedding(self, type):\n raise Exception(\" not implemented in base model\")", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def add_word_embedding_op(self):\n if self.pos:\n print(\"adding pos embeddings\")\n with tf.variable_scope(\"pos\"):\n _pos_embeddings = tf.Variable(self.pos_embeddings,\n name=\"la_pos_embeddings\",\n dtype=tf.float32, trainable=False)\n pos_embeddings = tf.nn.embedding_lookup(_pos_embeddings, self.pos_ids,\n name=\"pos_embeddings\")\n self.pos_vecs = pos_embeddings\n print(\"adding word_embeddings\")\n with tf.variable_scope(\"words\"):\n _word_embeddings = tf.Variable(self.embeddings, name=\"_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids,\n name=\"word_embeddings\")\n if self.use_window:\n print(\"Concatenating word vectors of context words\")\n word_embeddings_sl = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sl,\n name=\"word_embeddings_sl\")\n word_embeddings_sr = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sr,\n name=\"word_embeddings_sr\")\n word_embeddings = tf.concat([word_embeddings_sr, word_embeddings,\n word_embeddings_sl], axis=-1)\n if self.use_char_embeddings:\n print(\"adding CNN for char embeddings\")\n with tf.variable_scope(\"chars\"):\n _char_embeddings = tf.get_variable(name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.char_count, \n self.c_dim_input])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings, \n self.char_ids, \n name=\"char_embeddings\")\n s = char_embeddings.shape\n # the shape of our char_embeddings is now (batch_size, max number of words\n # in each sentence, max number of chars in each word, self.c_dim )\n char_filter = tf.get_variable(\"char_filter\", dtype=tf.float32,\n shape=[self.c_filter_width, \n self.c_filter_height,\n self.c_dim_input,\n self.c_dim_output])\n print(\"adding 2d convolution layer\")\n char_conv_layer = tf.nn.conv2d(char_embeddings, char_filter, \n strides=[1, 1, 1, 1], \n padding=\"SAME\")\n char_conv_layer = tf.nn.tanh(char_conv_layer)\n print(\"adding 2d pooling layer\")\n char_conv_layer = tf.layers.max_pooling2d(char_conv_layer, \n 1, \n strides=1)\n char_output = tf.reshape(char_conv_layer, shape=[-1, self.max_len, \n self.max_word_length*\n self.c_dim_output])\n word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)\n if self.pos and self.concat_pos:\n print(\"concatenating pos with word_embeddings\")\n word_embeddings = tf.concat([word_embeddings, pos_embeddings], axis=-1)\n self.word_embeddings = word_embeddings\n if self.use_additional and self.hybrid:\n print(\"using additional embeddings\")\n _word_embeddings_2 = tf.Variable(self.additional_embeddings,\n name=\"two_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings_2 = tf.nn.embedding_lookup(_word_embeddings_2,\n self.word_ids,\n name=\"two_word_embeddings\")\n self.word_embeddings_2 = 
word_embeddings_2", "def _embed_result(self, embedding):\n # project original embedding\n project_weight = self.project.weight # (o, c)\n project_embedding = embedding.permute(0, 2, 1).unsqueeze(-1) \\\n * project_weight.permute(1, 0) # (n, e, c, 1) * (c, o) -> (n, e, c, o)\n project_embedding = project_embedding.permute(0, 3, 2, 1) # (n, o, c, e)\n # interaction\n square_of_sum = torch.sum(project_embedding, dim=2) ** 2\n sum_of_square = torch.sum(project_embedding ** 2, dim=2)\n embed_result = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)\n return embed_result", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = 
tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n # stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), 
tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', [self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, 
initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. 
note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)", "def test_embedding_attend(self):\n features = 5\n embed = layers.Embed(num_embeddings=10, features=features)\n inputs = np.array([[1]], dtype=np.int64)\n variables = embed.init(jax.random.PRNGKey(0), inputs)\n query = np.ones(features, dtype=np.float32)\n result = embed.apply(variables, query, method=embed.attend)\n expected = np.sum(variables['params']['embedding'], -1)\n np.testing.assert_array_almost_equal(result, expected)", "def node2vec_embedding(\n G_training,\n dimensions=64,\n walk_length=10,\n num_walks=10,\n p=1,\n q=1.2\n):\n node2vec = Node2Vec(\n G_training,\n dimensions=dimensions,\n walk_length=walk_length,\n num_walks=num_walks,\n p=p,\n q=q\n )\n print(\"Fitting node2vec model...\")\n # Using skip-gram algorithm and negative sampling\n # instead of hierarchical softmax\n model = node2vec.fit(window=5, min_count=1, sg=1, hs=0)\n return model", "def visualise_embedding(embedding_dir, perplexity, learning_rate, image_path_name, label_mapping):\n tsne = TSNE(\n n_components=2,\n random_state=0,\n perplexity=perplexity,\n learning_rate=learning_rate,\n n_iter=20000\n )\n\n with open(embedding_dir, 'r') as emb_file:\n embedding_list = emb_file.readlines()\n\n print('Number of subword units: {}'.format(len(embedding_list)))\n\n embedding_dict = {}\n vector_emb = []\n subword_labels = []\n # TODO: Make this a clean solution\n # Start at 2 to skip the random </s> character which is coming through (may need to be 3 for Georgian)\n for embedding in embedding_list[2:]:\n segmented_embedding = embedding.split()\n subword_labels.append(label_mapping[segmented_embedding[0]])\n embedding_vector = [float(dim) for dim in segmented_embedding[1:]]\n vector_emb.append(embedding_vector)\n embedding_dict[segmented_embedding[0]] = embedding_vector\n\n emb_2d = tsne.fit_transform(vector_emb)\n\n datapoint_indices = range(len(emb_2d))\n fig, ax = plt.subplots()\n for i, subword_label in zip(datapoint_indices, subword_labels):\n ax.scatter(emb_2d[i, 0], emb_2d[i, 1], c='c', label=subword_label)\n\n\n for i, subword_label in enumerate(subword_labels):\n ax.annotate(subword_label, (emb_2d[i, 0], emb_2d[i, 1]))\n\n plt.savefig(image_path_name)\n return embedding_dict", "def refresh_index(self):\n synchronize()\n # TODO: add logger call here\n self._compute_embeddings()", "def update_weights(self):\n\t\tpass", "def forward(self, x):\n return x + self.pos_embedding", "def word2vec2embed(word2vec, word2idx):\n\temb_dim = word2vec['the'].shape[0]\n\temb = torch.nn.Embedding(len(word2idx), emb_dim) \n\temb_matrix = []\n\tfor w, idx in word2idx.items():\n\t\tif w in word2vec:\n\t\t\temb_matrix.append(word2vec[w])\n\t\telse:\n\t\t\temb_matrix.append(np.zeros(emb_dim,))\n\temb.weight.data.copy_(torch.from_numpy(np.array(emb_matrix)))\n\treturn emb", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n 
inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model", "def mix_embeddings(ndata, proj):\n h = ndata['h']\n c = proj(ndata['content'])\n ndata['h'] = h + c[:, :h.shape[1]]", "def __init__(self, vocab_size, embedding_size, context_size, hid_dim, out_dim):\n super(Net, self).__init__()\n self.E = nn.Embedding(vocab_size, embedding_size) # Embedding matrix\n self.after_embed_size = embedding_size * context_size\n self.lin = nn.Linear(self.after_embed_size, hid_dim)\n self.lin2 = nn.Linear(hid_dim, out_dim)", "def SNEmbedding(*args, **kwargs):\n return spectral_norm(nn.Embedding(*args, **kwargs))", "def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)", "def embedding_setup(self, embedding, emb_trainable):\n if emb_trainable == True:\n emb_variable = tf.get_variable(\n name=\"embedding_matrix\", shape=embedding.shape,\n initializer = tf.constant_initializer(embedding))\n return emb_variable\n else:\n return embedding", "def update_neighbours(self, iteration, iterations, input_vector, bmu):\n\n t = iteration / iterations\n learning_rate = self.learning_rate(t)\n for node in self.codebook:\n influence = self.codebook.neighbourhood(node, bmu, t)\n node.update(learning_rate, influence, input_vector, bmu)", "def init_emb(self):\r\n initrange = 0.5 / self.embedding_dim\r\n self.embeddings.weight.data.uniform_(-initrange, initrange)\r\n self.affine.weight.data.uniform_(-0, 0)\r\n self.affine.bias.data.zero_()", "def _update_feature_vec(fvec, word, tag_ngram):", "def comp_edge_embeddings(edges, node_embeddings):\n\n logging.info(\"Computing edge embeddings.\")\n\n edge_embeddings = np.zeros([len(edges), config.EMBED_DIM])\n\n for i in tqdm(range(len(edges))):\n cur_edge = edges[i]\n edge_embeddings[i] = np.multiply(node_embeddings[cur_edge[0] - 1], node_embeddings[cur_edge[1] - 1])\n\n return edge_embeddings", "def product_update(self, action):\n\n # if not isinstance(action, Action):\n # raise TypeError\n\n worlds = []; to_remove = [] # to_remove will be used to remove edges from tensor product\n name = 0\n for world in self.worlds:\n for event in action.events:\n assignment = copy.deepcopy(world.assignment)\n if event.precondition.semantic(self, 
world):\n if not event.postcondition == None:\n for i in event.postcondition.keys():\n assignment[i] = event.postcondition[i]\n world = World(name, assignment)\n worlds.append(world)\n if self.point == world.name and action.point == event.name:\n self.point = name # point in modified Kripke model\n name += 1\n else:\n to_remove.append((world.name, event.name))\n self.worlds = worlds\n\n for agent in self.agents:\n event_adj = list2mat(action.relations[agent]) # adj corresponds to adjacency matrix\n world_adj = list2mat(self.relations[agent])\n updated_adj = np.kron(world_adj, event_adj) # updated Kripke relations\n for w_e in to_remove:\n i = w_e[0]*len(action.events) + w_e[1] # index of corresponding (world, event) pair in kronecker matrix\n for j in range(updated_adj.shape[0]):\n updated_adj[i][j] = updated_adj[j][i] = 0 # deleting edges to the removed nodes / worlds\n self.relations[agent] = mat2list(updated_adj)\n\n return", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def TransformerTokenEmbedding(\n num_embeddings, embedding_dim, padding_idx, freeze_embed=False\n):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n if freeze_embed:\n m.weight.requires_grad = False\n return m", "def embeddings(self):\n self._ensure_is_connected()\n return self._embeddings", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def _add_flag_to_node_and_appdesc(self, object_embed, appdesc_embed,\n obj_shape):\n node_label = tf.constant([[[1, 0]]], tf.float32)\n app_desc_label = tf.constant([[[0, 1]]], tf.float32)\n\n object_embed = tf.concat(\n [object_embed,\n tf.tile(node_label, [obj_shape[0], obj_shape[1], 1])], -1)\n appdesc_embed = tf.concat(\n [appdesc_embed,\n tf.tile(app_desc_label, [obj_shape[0], 1, 1])], -1)\n\n return object_embed, appdesc_embed", "def update(self, initial, follows):", "def embed(self, features, feature_name, params):\n with tf.variable_scope(\"%s_embed\" % feature_name):\n embed_matrix = tf.get_variable(\"embedding_matrix\",\n [getattr(params, \"%s_vocab_size\" % feature_name), \n getattr(params, \"%s_embed_size\" % feature_name)])\n return tf.nn.embedding_lookup(embed_matrix, features[feature_name])", "def stage2EmbeddingTest(originalGraph, originalSubGraph, originalEmbedding):\n\tgraph = 
cloneGraph(originalGraph)\n\tsubgraph = cloneGraph(originalSubGraph)\n\tembedding = cloneGraph(originalEmbedding)\n\tpotentialEdges = getEdgeTuples(graph)\n\tcurrEdges = getEdgeTuples(subgraph)\n\tcurrVertices = getVertices(subgraph)\n\tfaces = cloneGraph(embedding)\n\n\tif not checkSubGraph(graph, subgraph):\n print \"NOT A SUBGRAPH\"\n return False\n if graphsEqual(graph, subgraph):\n# print \"GRAPHS EQUAL\"\n return embedding\n\n# print \"currVertices: \" + str(currVertices )\n\t[fragments, allFragmentFaces, fragmentVOAs] = getFragmentFaces2(potentialEdges, currEdges, currVertices, faces)\n\tpotentialFaces = cloneGraph(allFragmentFaces)\n\n# print \"allFragmentFaces: \" + str(allFragmentFaces)\n# print \"faces: \" + str(faces)\n\tfor currFragment in fragments:\n\t\tfragmentFaces = allFragmentFaces[currFragment]\n\t\tfor currFace in fragmentFaces:\n currFaceEmbeddingGraph = getGraphFromVerticesInOrder(embedding[currFace])\n\n if type(currFragment[0]) is tuple:\n currFragmentGraph = getGraphFromEdgeTupleList(list(currFragment))\n else:\n currFragmentGraph = getGraphFromEdgeTupleList([currFragment])\n\n# currFragmentGraph = getGraphFromEdgeTupleList([currFragment])\n currGraph = joinGraphs(currFragmentGraph, currFaceEmbeddingGraph)\n tempFace = {}\n tempFace[1] = embedding[currFace] #Not 100% sure about this!\n# print \"\\n\\n\\n\\nIT HAPPENS HERE\\n\\n\\n\\n\"\n# print \"currGraph: \" + str(currGraph)\n# print \"currFaceEmbeddingGraph: \" + str(currFaceEmbeddingGraph)\n# print \"tempFace: \" + str(tempFace)\n if not planarityTest3(currGraph, currFaceEmbeddingGraph, tempFace): #embedding[currFace]\n potentialFaces[currFragment].remove(currFace)\n# print \"\\n\\n ==============\\ngraph: \" + str(graph)\n# print \"subgraph: \" + str(subgraph)\n# print \"faces: \" + str(faces)\n# print \"fragments: \" + str(fragments)\n# print \"potentialFaces: \" + str(potentialFaces)\n# print \"fragmentVOAs: \" + str(fragmentVOAs)\n# print \"NOW CALLING EMBEDDING TEST \\n ==============\\n\\n\"\n\treturn embeddingTest(graph, subgraph, faces, fragments, potentialFaces, fragmentVOAs)", "def __init__(self, embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n \n self.embed_size = embed_size\n self.char_embed_size = 50\n self.max_word_len = 21\n self.dropout_rate = 0.3\n self.vocab = vocab \n \n ## A4 code\n pad_token_idx = vocab.char2id['<pad>']\n self.embedding = nn.Embedding(num_embeddings =len(vocab.char2id),\n embedding_dim =self.char_embed_size,\n padding_idx =pad_token_idx,)\n \n self.CNN = CNN(char_embed_size=self.char_embed_size,\n num_filters=embed_size,\n max_word_length=self.max_word_len,)\n self.Highway = Highway(word_embed_size=self.embed_size)\n self.dropout = nn.Dropout(p=self.dropout_rate)\n ## End A4 code\n\n ### YOUR CODE HERE for part 1j\n\n\n ### END YOUR CODE", "def testEmbeddings(self):\n input_data = {\n \"x\":\n constant_op.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n }\n\n class EmbeddingModel(keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n \"weights\",\n shape=(2000, 300),\n dtype=dtypes.float32,\n initializer=init_ops.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(20), dtype=dtypes.int32)\n ])\n def func(self, x):\n return array_ops.gather(self.shared_weights, x)\n\n model = EmbeddingModel()\n root, output_func = self._freezeModel(model.func)\n self._testConvertedFunction(root, root.f, 
output_func, input_data)", "def modify_to_return_embeddings(net, model_name):\n if model_name in [\"vgg_face_dag\", \"vgg_m_face_bn_dag\"]:\n net.fc8 = torch.nn.Sequential()\n else:\n msg = \"{} not yet supported\".format(model_name)\n raise NotImplementedError(msg)\n return net", "def reset_weight_zero(self):\n self.node_embedding = np.random.uniform(low=-0.5, high=0.5, size=(self.vocab_size, self.layer1_size)).astype(\n np.float32)\n self.context_embedding = np.zeros((self.vocab_size, self.layer1_size), dtype=np.float32)\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)\n log.info(\"reset communities data| k: {}\".format(self.k))", "def update():", "def update():", "def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 
1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out", "def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings", "def init_embedding_weights(self, dictionary, embeddings_index, embedding_dim):\r\n pretrained_weight = np.empty([len(dictionary), embedding_dim], dtype=float)\r\n for i in range(len(dictionary)):\r\n if dictionary.idx2word[i] in embeddings_index:\r\n pretrained_weight[i] = embeddings_index[dictionary.idx2word[i]]\r\n else:\r\n pretrained_weight[i] = helper.initialize_out_of_vocab_words(embedding_dim)\r\n # pretrained_weight is a numpy matrix of shape (num_embeddings, embedding_dim)\r\n if isinstance(self.embedding, nn.Sequential):\r\n self.embedding[0].weight.data.copy_(torch.from_numpy(pretrained_weight))\r\n else:\r\n self.embedding.weight.data.copy_(torch.from_numpy(pretrained_weight))", "def rnn_with_embedding(self,cell,init_state,input_seq,\n input_seq_len,reuse=None,\n scope=\"RNN\"): \n with tf.variable_scope(scope,reuse=reuse) as vs:\n log(vs.name+\"/Encoding sequences\")\n with tf.device('/cpu:0'):\n emb = tf.get_variable(\"emb\",\n [self.vocab_size,self.hidden_size],\n dtype=tf.float32)\n un_emb = tf.get_variable(\"unemb\",\n [self.hidden_size,self.vocab_size],\n tf.float32)\n # We need a bias\n un_emb_b = tf.get_variable(\"unemb_b\",\n [self.vocab_size],\n dtype=tf.float32)\n \n assert scope+\"/emb:0\" in emb.name,\\\n \"Making sure the reusing is working\"\n emb_input_seq = tf.nn.embedding_lookup(\n emb,input_seq)\n emb_input_list = tf.unpack(\n tf.transpose(emb_input_seq,[1,0,2]))\n \n # RNN pass\n if init_state is None:\n init_state = cell.zero_state(\n tf.shape(emb_input_list[0])[0],tf.float32)\n \n emb_output_list, final_state = tf.nn.rnn(\n 
cell,emb_input_list,initial_state=init_state,\n sequence_length=input_seq_len)\n\n # We shift the predicted outputs, because at\n # each word we're trying to predict the next.\n emb_output_list = emb_output_list[:-1]\n \n # Unembedding\n output_list = [tf.matmul(t,un_emb) + un_emb_b\n for t in emb_output_list]\n outputs = tf.transpose(tf.pack(output_list),[1,0,2])\n\n return outputs, final_state", "def update_output(self, latent_mat, weight_mat, y_list):", "def update_weights(self):\r\n\r\n inedges=self.in_edges\r\n for edge in inedges:\r\n weight=edge.weight+self.learning_rate*self.delta*(edge.source.activation)\r\n edge.change_weight(weight)", "def _use_embeddings(self, word):\n if word == \"@PAD@\":\n return torch.zeros(self.embeddings_dim)\n else:\n return self.embeddings[word]", "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def get_movie_embedding(self):\n raise NotImplementedError(\"has to be overwritten\")", "def embedded(self, word_ids, embedding_tensor, scope=\"embedding\"):\n with tf.variable_scope(scope):\n with tf.device(\"/cpu:0\"):\n inputs = tf.nn.embedding_lookup(embedding_tensor, word_ids)\n return inputs", "def instantiate_weights(self):\n with tf.variable_scope(\"embedding_projection\"), tf.device('/cpu:0'): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],\n initializer=self.initializer)\n # self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size],\n # dtype=tf.float32) # ,initializer=self.initializer\n # self.W_projection = tf.get_variable(\"W_projection\", shape=[self.sequence_length * self.d_model, self.num_classes],\n # initializer=self.initializer) # [embed_size,label_size]\n # self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])", "def word_embedding_forward(x, W):\n out, cache = None, None\n ##############################################################################\n # TODO: Implement the forward pass for word embeddings. #\n # #\n # HINT: This should be very simple. #\n ##############################################################################\n out = W[x,:]\n cache = x, W\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return out, cache" ]
[ "0.6827302", "0.6605902", "0.6546911", "0.6507674", "0.6356848", "0.63033694", "0.6274012", "0.6221701", "0.60340726", "0.59661525", "0.59410673", "0.58897674", "0.5863574", "0.5854144", "0.5821187", "0.5804857", "0.5790733", "0.57406765", "0.5728432", "0.5712224", "0.5701808", "0.5695617", "0.56831414", "0.5663827", "0.56628793", "0.5619715", "0.5611502", "0.5602671", "0.5579829", "0.55687684", "0.5545164", "0.5541559", "0.5534769", "0.55226386", "0.54965454", "0.5495209", "0.5492549", "0.54872483", "0.5465521", "0.5454428", "0.54484344", "0.5443793", "0.5440879", "0.5420443", "0.54194725", "0.5415529", "0.540595", "0.5387698", "0.5387694", "0.53832614", "0.5366934", "0.536396", "0.536317", "0.5341826", "0.53335774", "0.53243506", "0.5322804", "0.53174615", "0.5316945", "0.5316154", "0.53146136", "0.5313458", "0.53083915", "0.5301649", "0.52999645", "0.5298846", "0.5297891", "0.5293451", "0.5290825", "0.5281062", "0.527861", "0.52630013", "0.5255398", "0.5252593", "0.5248571", "0.5245144", "0.5241434", "0.52376884", "0.52346414", "0.5221294", "0.5212387", "0.5210623", "0.52029276", "0.520034", "0.5200052", "0.51995003", "0.51995003", "0.51986945", "0.51976854", "0.51973003", "0.5193391", "0.518273", "0.5181841", "0.51812214", "0.5176196", "0.51719683", "0.5171516", "0.51711434", "0.51710725", "0.5161736", "0.5158422" ]
0.0
-1
A view to show all products, including the ability to search
def portfolio(request): projects = Project.objects.all() categories = None if request.GET: if 'category' in request.GET: categories = request.GET['category'].split(',') projects = projects.filter(category__name__in=categories) categories = ProjectCategory.objects.filter(name__in=categories) context = { 'projects': projects, 'current_categories': categories, } return render(request, 'portfolio/portfolio.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})", "def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})", "def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })", "def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)", "def product_list_view(request):\n queryset = Product.objects.all()\n context = {\n \"object_list\": queryset\n }\n\n return render(request, \"products/product_list.html\", context)", "def all_products(request):\n\n products_list = Product.objects.all().order_by('id')\n query = None\n collections = None\n collection_page = None\n sort = None\n direction = None\n query_page = None\n \n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products_list = products_list.annotate(lower_name=Lower('name'))\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products_list = products_list.order_by(sortkey)\n\n if 'collection' in request.GET:\n collections = request.GET['collection'].split(',')\n products_list = products_list.filter(collection__name__in=collections)\n collections = Collection.objects.filter(name__in=collections)\n collection_page = request.GET['collection']\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\\\n You were automatically redirected to All Products page.\")\n return redirect(reverse('products'))\n query_page = request.GET['q']\n \n queries = Q(name__icontains=query) | Q(description__icontains=query)\n products_list = products_list.filter(queries)\n\n if 'on_sale' in request.GET:\n products_list = products_list.filter(on_sale=True)\n \n current_sorting = f'{sort}_{direction}'\n total = len(products_list)\n paginator = Paginator(products_list, 12)\n page_number = request.GET.get('page')\n products = paginator.get_page(page_number)\n\n context = {\n 'products': products,\n 'current_collections': collections,\n 'collection_page': collection_page,\n 'search_term': query,\n 'query_page': query_page,\n 'current_sorting': current_sorting,\n 'sort': sort,\n 'direction': direction,\n 'total': total,\n }\n\n return render(request, 'products/products.html', context)", "def shop_products(request):\n\n shop = Shop.objects.get(user=request.user)\n products = Products.objects.filter(shop_rel=shop)\n paginator = pagination.PageNumberPagination()\n paginator.page_size = 7\n result_page = paginator.paginate_queryset(products, request=request)\n serializer = ProductSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)", "def get_all_products(self):\n\t\tpass", "def all_products(request):\n\n products = Product.objects.all()\n categories = None\n query = None\n sort = None\n direction = None\n heading = 'Products & Services'\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = 
products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n if len(categories) == 1:\n heading = categories[0].friendly_name\n else:\n for category in categories:\n if 'products' in category.name:\n heading = 'Products & Services'\n break\n else:\n if 'services' in category.name:\n heading = 'Services'\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"Please enter search criteria!\")\n return redirect(reverse('home'))\n\n queries = Q(\n name__icontains=query) | Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n 'heading': heading,\n }\n\n return render(\n request, 'products/products.html', context)", "def products(request):\n search = ''\n if request.method == 'POST':\n # if 'search' in request.session:\n # del(request.session['search'])\n search_form = SearchForm(request.POST)\n if search_form.is_valid():\n search = search_form.cleaned_data.get('search_product')\n request.session['search'] = search\n else:\n return HttpResponseRedirect(request.path_info)\n\n elif 'search' in request.session:\n search = request.session['search']\n\n else:\n search_form = SearchForm()\n return render(request, 'store/products.html')\n\n products_list = Products.objects.filter(product_name__icontains=search).order_by('-nutrition_grade_fr')\n paginator = Paginator(products_list, 6)\n page = request.GET.get('page')\n try:\n products_found = paginator.get_page(page)\n except PageNotAnInteger:\n # If page not an Integer then deliver first page.\n products_found = paginator.get_page(1)\n except EmptyPage:\n # If page over the last result page, then deliver last result page.\n products_found = paginator.get_page(paginator.num_pages)\n context = {\n 'search': search,\n 'products_found': products_found,\n 'paginate': True\n }\n messages.info(request, \"Nous avons trouvé {0} produits pour votre requête.\"\n .format(len(products_list)))\n\n return render(request, 'store/products.html', context)", "def products():\n\n\treturn render_template(\"products.html\")", "def all_products(request):\n\n products = Product.objects.filter(is_holiday=False)\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search critera.\\\n 
Please try again.\")\n return redirect(reverse('products'))\n\n queries = Q(name__icontains=query) | \\\n Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'products/all_products.html', context)", "def product_list(request):\n if request.method == 'GET':\n _products = Product.objects.all()\n serializer = ProductSerializer(_products, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)", "def index(request):\n\n products = Top_selling_product.objects.all()\n context = {'products':products}\n\n return render(request, 'home/index.html',context)", "def product_list(request):\n error = {\n 'status': False,\n 'name': None,\n 'text': None,\n 'level': None,\n 'debug': None\n }\n\n limit, error = get_limit(request, error)\n\n serializer = FreshSerializer()\n queryset = Product.objects.all()[:limit]\n\n if not queryset:\n error = {\n \"status\": True,\n \"name\": \"No Products\",\n \"text\": \"No Products found\",\n \"level\": \"Information\",\n \"debug\": \"\"\n }\n\n data = {\n \"products\": json.loads(serializer.serialize(queryset)),\n \"error\": error\n }\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")", "def list_products(self):\n return self._make_get_request(self._urls['products'])", "def listProducts(request):\n form_product = forms.ProductForm()\n Products = productBll.listProduct()\n paginator = Paginator(Products, LIST_COUNT)\n \n page = request.GET.get('page')\n if page == None :\n page=1\n \n try:\n ProductList = paginator.page(page)\n except PageNotAnInteger:\n ProductList = paginator.page(1)\n except EmptyPage:\n ProductList = paginator.page(paginator.num_pages)\n return render_to_response('admin/product/listProducts.html',{'form': form_product,'IMAGE_FILE_PATH':IMAGE_FILE_PATH,'ProductList':ProductList},context_instance=RequestContext(request))", "def all_products_view(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page_number = request.GET.get('page', 1)\n page = paginator.page(page_number)\n\n \"\"\"render a products html page and within that page we will have access to products, so all_products\"\"\"\n\n if page.has_next():\n\n next_url = f'?page={page.next_page_number()}'\n\n else:\n\n next_url = ''\n\n if page.has_previous():\n\n prev_url = f'?page={page.previous_page_number()}'\n\n else:\n\n prev_url = ''\n\n \n return render(request, 'products.html', {'page': page, 'next_page_url': next_url, 'prev_page_url': prev_url})", "def products(request):\n\n products = Product.objects.all()\n query = None\n category = None\n sort = None\n direction = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request,\n \"Ups, you didn't enter\\\n any search criteria!\")\n return redirect(reverse('products'))\n\n queries = (\n Q(name__icontains=query) |\n Q(description__icontains=query)\n )\n products = products.filter(queries)\n # add sorting products ascending\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 
'category':\n sortkey = 'category__name'\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_phrase': query,\n 'selected_category': category,\n 'sorting': sorting,\n }\n\n return render(request, 'products/products.html', context)", "def list_products(self):\n url = self.base_url\n # TODO add filtering support when holvi api supports it\n obdata = self.connection.make_get(url)\n return ProductList(obdata, self)", "def get(self):\n return ProductModel.query.all()", "def do_search(request):\n products = Product.objects.filter(title__icontains=request.GET['q'])\n paginator = Paginator(products, 4) # Show 4 products per page\n \n page = request.GET.get('page')\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n return render(request, \"products.html\", {\"products\": products})", "def get(self):\n return Products().get_all_products()", "def do_search(request):\n products = Product.objects.filter(title__icontains=request.GET['q'])\n return render(request, \"products.html\", {\"products\": products})", "def index(self, user):\n\n cart_products = CartProduct.index(user)\n CartProductsView.index(cart_products)", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def get_all_products():\n products = app.session.query(models.Product).all()\n return products", "def do_search(request):\n products = Product.objects.filter(name__icontains=request.GET['q'])\n return render(request, \"search_results.html\", {\"products\": products})", "def shop_all(request):\n shop_items = Product.objects.all()\n categories = None\n query = None\n sort = None\n direction = None\n queried_category = None\n\n if request.GET:\n # checks whether a sort parameter exists and orders by selected\n # criteria if so\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey == 'lower_name'\n shop_items = shop_items.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n shop_items = shop_items.order_by(sortkey)\n\n # checks whether category parameter exists and splits categories\n # into a list and filters each one if it does\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n shop_items = shop_items.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n # Renders the category name on the pagewhen user views all items\n # in one category\n for category in categories:\n queried_category = category.friendly_name\n\n # checks whether search query exists and returns results containing\n # keywords\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search terms!\")\n return redirect(reverse('shop'))\n\n queries = Q(friendly_name__icontains=query) | Q(description__icontains=query)\n shop_items = 
shop_items.filter(queries)\n\n sort_by = f'{sort}_{direction}'\n\n context = {\n 'shop_items': shop_items,\n 'search_term': query,\n 'categories': categories,\n 'sort_by': sort_by,\n 'queried_category': queried_category,\n }\n\n return render(request, 'shop/shop.html', context)", "def all_products(request):\n products = Product.objects.all()\n query = None\n categories = None\n sort= None\n direction = None\n\n if request.GET:\n\n # if 'sort' in request.GET:\n # sortkey = request.GET['sort']\n # if sortkey == \"price\":\n # products = products.order_by('-price').reverse()\n\n # if 'sort' in request.GET:\n # sortkey = request.GET['sort']\n # if sortkey == \"rating\":\n # products = products.order_by('-rating')\n\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n # Returns a list\n # categories = list(request.GET['category'].split(','))\n # Convert into list to compare below\n categories = request.GET['category'].split(',')\n # __name__: Looking for name field in category model since related by foreign key\n # category is present in products field but with number reference, this method allows us to use the actual name\n # instead of number by referencing the category model using foreign key in models.\n # using filter since object already queried\n # category_name obtained from foreignkey defined in Product model/lookups that span relationships\n # Obtaining query set for html(category__name: double undrscore since refering to foeignkey)\n # https://docs.djangoproject.com/en/3.1/topics/db/queries/#lookups-that-span-relationships\n # The __in refers to list. 
Returns all products with categories in category list as queryset\n # https://docs.djangoproject.com/en/3.1/topics/db/queries/#the-pk-lookup-shortcut\n products = products.filter(category__name__in=categories)\n # Get all categories where name in catgories list as a queryset\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n # Query is blank query= \"\"\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\")\n return redirect(reverse('products'))\n else:\n queries = Q(name__icontains=query) | Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n return render(request, 'products/products.html', context)", "def get(self):\n return Product.query.all()", "def get_queryset(self):\n queryset = super(ProductListView, self).get_queryset()\n queryset = Services.objects.all()\n return queryset", "def all_items(request):\n\n items = Product.objects.all()\n query = None\n category = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n items = items.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n items = items.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n items = items.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"Please input the item you are looking for\")\n return redirect(reverse('items'))\n \n queries = Q(name__icontains=query) | Q(description__icontains=query)\n items = items.filter(queries)\n \n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': items,\n 'search_term': query,\n 'current_categories': category,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'products/items.html', context)", "def specialoccasion(request):\n products = Product.objects.all()\n return render(request, \"specialoccasion.html\", {\"products\": products})", "def Product_list(request):\n if request.method == 'GET':\n tasks = Product.objects.all()\n serializer = ProductSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def products(self):\r\n return products.Products(self)", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def products():\n username = session['username']\n api_key = 
session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)", "def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()", "def product_management():\n sort_by = request.args.get(\"sort\")\n\n \"\"\"\n Sort method is from https://docs.mongodb.com/manual/reference/\n method/cursor.sort/index.html\n \"\"\"\n if sort_by:\n products = list(mongo.db.products.find().sort(sort_items(sort_by)))\n\n else:\n products = list(mongo.db.products.find().sort('name', 1))\n\n \"\"\"\n Pagination code is from https://gist.github.com/mozillazg/\n 69fb40067ae6d80386e10e105e6803c9\n \"\"\"\n page, per_page, offset = get_page_args(\n page_parameter='page', per_page_parameter='per_page', per_page=10)\n pagination_products = paginate_items(products, offset, per_page)\n pagination = paginate(products, page, per_page)\n\n return render_template(\n \"product_management.html\",\n page_title=\"Product Management\",\n products=pagination_products,\n pagination=pagination)", "def searchresult(request):\n query = request.GET.get('query','')\n print(query)\n\n if not query:\n title = \"Aucun champ rempli, affichage des 10 premiers produits\"\n product = Product.objects.all()[0:10]\n context = {\n 'product' : product,\n 'title': title\n }\n else:\n product = Product.objects.filter(name__contains=query)[:6]\n # cat = Category.product.all()\n\n if not product:\n title = \"Votre recherche, \" + query + \", n'a donné aucun résultat, affichage des 10 premiers produits\"\n product = Product.objects.all()[0:10]\n context ={\n 'title': title,\n 'product': product\n }\n else:\n title = str(\"Votre recherche est :\" + \" \" + query)\n context = {\n 'title': title,\n 'product': product\n }\n print(product)\n # print(cat)\n # return render(request,\"search_result.html\",context)\n return render(request,\"search_result.html\",context)", "def all_products(request):\n products = Product.objects.all()\n departments = None\n\n if request.GET:\n if 'department' in request.GET:\n departments = request.GET['department'].split(',')\n products = products.filter(department__name__in=departments)\n departments = Department.objects.filter(name__in=departments)\n\n context = {\n 'products': products,\n 'current_departments': departments,\n }\n\n return render(request, 'products/products.html', context)", "def products(self):\r\n return Products(self)", "def product_list(request):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = ProductSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def filter(request):\n product = Product.objects.filter(name__icontains=request.GET['q']).filter(brand__icontains=request.GET['brand']).filter(year__icontains=request.GET['year'])\n return render(request, 
\"search_results.html\", {\"products\": product})", "def list(self, request):\n order_products = Order_Products.objects.all()\n\n order = self.request.query_params.get('order', None)\n product = self.request.query_params.get('product', None)\n payment = self.request.query_params.get('payment', None)\n\n if product is not None:\n orderproducts = orderproducts.filter(product__id=product)\n if order is not None:\n orderproducts = orderproducts.filter(order_payment=None)\n\n\n serializer = Order_Products_Serializer(\n order_products, many=True, context={'request': request}\n )\n return Response(serializer.data)", "def sheetcakes(request):\n products = Product.objects.all()\n return render(request, \"sheetcakes.html\", {\"products\": products})", "def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)", "def search(request):\n # en, copier sur celinelever formulaire django et ecrire le mien\n query = request.GET.get(\"query\")\n # Query Html escape\n user_product = escape(query)\n if not query:\n context = {\"attention\": \"Vous devez renseigner un produit!!\"}\n return render(request, \"products/index.html\", context)\n\n else:\n # Product contains the query is and query is not sensitive to case.\n product = Products.objects.filter(product_name_fr__icontains=user_product)[:1]\n\n if not product.exists():\n try:\n new_product = insert_products_if_not_in_found_in_data_base(user_product)#new_feature\n product = Products.objects.filter(product_name_fr__icontains=new_product)[:1]\n\n return redirect(\"products_list\", product=product[0].product)\n except:\n context = {\n \"attention\": \"Produit non trouvé, essayer de chercher un autre produit svp!!\"\n }\n return render(request, \"products/index.html\", context)\n else:\n product = product[0]\n\n return redirect(\"products_list\", product=product.product)\n\n return render(request, \"products/search_product.html\", context)", "def get_all_products(self):\n try: \n data = [ProductService.self_json(product) \n for product in self.productModel.query.all()] \n \n return Response(\n response=json.dumps(data),\n status=200, \n mimetype='application/json')\n \n except SQLAlchemyError as err:\n \n return Response(\n response=json.dumps({\"Error\": str(err.args[0])}),\n status=500,\n mimetype='application/json')\n \n finally:\n self.db.session.close()", "def products(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/products'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def index(request):\n\n return render(request, \"products/index.html\")", "def get_queryset(self):\n q = self.get_search_query()\n if q:\n # Send signal to record the view of this product\n product_search.send(sender=self, query=q, user=self.request.user)\n \n return Item.browsable.filter(title__icontains=q)\n else:\n return Item.browsable.all()", "def get_queryset(self, *args, **kwargs):\n return Product.objects.featured()", "def get_all_product():\r\n\r\n with mysql.db_session() as session:\r\n product = session.query(Product).all()\r\n\r\n if not product:\r\n return response.create_not_found_response()\r\n\r\n response_data = [each.to_dict() for each in product]\r\n\r\n return response.Response(message=response_data)", "def list_products(admin):\n fields = [\n \"id\",\n \"name\",\n \"price\",\n 
\"barcode\",\n \"active\",\n \"countable\",\n \"purchase_sum\",\n \"replenishment_sum\",\n \"balance_score\",\n \"revocable\",\n \"imagename\",\n \"tags\",\n \"creation_date\",\n ]\n\n query = QueryFromRequestParameters(Product, request.args, fields)\n result, content_range = query.result()\n products = convert_minimal(result, fields)\n for product in products:\n product[\"tags\"] = [t.id for t in product[\"tags\"]]\n response = jsonify(products)\n response.headers[\"Content-Range\"] = content_range\n return response", "def get_all_products():\n data = order_obj.get_all_products()\n return data", "def shop(request):\n\n products = Product.objects.all()\n sizes = Size.objects.all()\n forsixs = Forsix.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sort = request.GET['sort']\n if sort == 'category':\n sort = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sort = f'-{sort}'\n products = products.order_by(sort)\n\n if 'category' in request.GET:\n category_name = request.GET['category'].split(',')\n products = products.filter(category__name__in=category_name)\n categories = Category.objects.filter(name__in=category_name)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"Please enter search words\")\n return redirect(reverse('shop'))\n\n queries = Q(\n name__icontains=query) | Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'forsixs': forsixs,\n 'sizes': sizes,\n 'search_words': query,\n 'categories_selected': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'products/shop.html', context)", "def get_all_products(request, *args, query_str=''):\n\n active_filters = []\n products = Product.objects.all()\n product_fields = (\n (\"size\", \"options\"),\n (\"price\", \"range\"),\n (\"colours\", \"options\"),\n (\"year\", \"range\"),\n (\"collection\", \"equals\")\n )\n field_ranges = []\n for field, filter_type in product_fields:\n if filter_type == \"range\":\n (min_val) = products.filter().values_list(field).order_by(field)[0]\n (max_val) = products.filter().values_list(field).order_by\\\n (f'-{field}')[0]\n obj = {}\n obj['min_val'] = int(min_val[0])\n obj['max_val'] = int(max_val[0])\n obj['field'] = field\n field_ranges.append(obj)\n\n # if filter_type == \"options\":\n\n\n\n if request.GET:\n for key in request.GET:\n if \"__range\" in key:\n val = request.GET.getlist(key)\n val[:] = [int(x) for x in val]\n active_filters.append(\n [key.split(\"__\")[0], key.split(\"__\")[1], val]\n )\n obj = {}\n obj[key] = val\n query = Q(**obj)\n products = products.filter(query)\n\n\n if 'collection' in request.GET:\n collection_pk = request.GET['collection']\n if not collection_pk or not collection_pk.isnumeric():\n if query:\n return redirect(\n reverse('products'),\n kwargs={'query_str': query}\n )\n else:\n return redirect(reverse('products'))\n\n products = products.filter(collection=collection_pk)\n\n if 'q' in request.GET:\n query = request.GET['q']\n query_str = query\n if not query:\n return redirect(reverse('products'))\n\n queries = Q(display_name__icontains=query) | \\\n Q(name__icontains=query)\n products = products.filter(queries)\n\n\n context = {\n 'products': products,\n 'MEDIA_URL': settings.MEDIA_URL,\n 'search_term': query_str,\n 'filters': 
product_fields,\n 'field_ranges': field_ranges,\n 'active_filters': active_filters\n }\n\n return render(request, 'products/products.html', context)", "def listall():\n # I like to define the query separately.\n query = db.product\n\n # List of additional links.\n links = []\n \n links.append(\n dict(header = \"Profit\",\n body = lambda row : produce_profit(row)\n )\n )\n links.append(\n dict(header = \"\",\n body = lambda row : produce_star_btn(row.id)\n )\n )\n links.append(\n dict(header = \"\",\n body = lambda row : produce_pls_minus_btn(row)\n )\n )\n links.append(\n dict(header='',\n body = lambda row : produce_poster_btns(row.id)\n \n )\n )\n \n # Let's get rid of some fields in the add form.\n if len(request.args) > 0 and request.args[0] == 'new':\n db.product.prod_poster.readable = False\n db.product.prod_post_time.writable = False\n db.product.prod_sold.writable = False\n db.product.prod_starred.readable, db.product.prod_starred.writable =False, False\n # Grid definition.\n grid = SQLFORM.grid(\n query, \n field_id = db.product.id, # Useful, not mandatory.\n fields = [db.product.id, db.product.prod_name,\n db.product.prod_in_stock, db.product.prod_sold,\n db.product.prod_price, db.product.prod_cost], \n headers = {'product.prod_name': 'Product Name',\n 'product.prod_in_stock':'In Stock',\n 'product.prod_sold':'Sold', \n 'product.prod_price':'Price', \n 'product.prod_cost':'Cost'},\n links = links,\n # And now some generic defaults.\n details=False,\n create=True, editable=False, deletable=False,\n csv=False, \n user_signature=True, # We don't need it as one cannot take actions directly from the form.\n )\n return dict(grid=grid)", "def list(self, request):\n product_category = ProductCategory.objects.all()\n\n # Support filtering ProductCategorys by area id\n # name = self.request.query_params.get('name', None)\n # if name is not None:\n # ProductCategories = ProductCategories.filter(name=name)\n\n serializer = ProductCategorySerializer(\n product_category, many=True, context={'request': request})\n return Response(serializer.data)", "def product_detail(request, product_id):\n product = get_object_or_404(Product, pk=product_id)\n print(request.path)\n template = './product_detail.html'\n context = {\n 'product': product,\n }\n\n # products = Product.objects.all()\n\n return render(request, template, context)", "def list(self, request):\n product_types = ProductType.objects.all()\n serializer = ProductTypeSerializer(product_types, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def index(request):\n products = Product.objects.all()\n highlights = Original.objects.filter(status='h')\n context = {\n \"index_page\": \"active\",\n \"products\": products,\n \"highlights\": highlights,\n \"title\": \"Home\"\n }\n return render(request, \"index.html\", context)", "def listProducts(self):\n response = self.productClient.list_products(parent=self.locationPath)\n return [ProductSearch.Product._fromResponse(self.productSearch, x) for x in response]", "def api_all():\r\n\tconn = sqlite3.connect('Shopify_products.db')\r\n\tconn.row_factory = dict_factory\r\n\tcur = conn.cursor()\r\n\tall_products = cur.execute('SELECT * FROM products WHERE inventory_count>0;').fetchall()\r\n\treturn jsonify(all_products)", "def list(self, request):\n skin_type = self.request.query_params.get(\"skin_type\")\n queryset = self.filter_queryset(self.get_queryset())\n if isinstance(queryset, Response): # exception (invalid query parameters)\n return queryset\n products_list = 
SortProducts.sort_products(param=skin_type, querySet=queryset)\n page = self.paginate_queryset(products_list)\n if len(page) != 0:\n serializer = self.get_serializer(page, many=True)\n # self.get_paginated_response(serializer.data)\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(\"Can't find data\", status=status.HTTP_404_NOT_FOUND)", "def products_view(request, product_id):\n if not product_id:\n return JsonResponse({\"error\": \"Product id is not provided\"}, 400)\n if request.method == \"GET\":\n response, status_code = get_products(request, product_id)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=200, safe=False)\n else:\n response, status_code = update_product(request, product_id)\n if status_code != 204:\n return JsonResponse(response, status=status_code)\n serialize_data = ProductSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)", "def search_products_as_admin_single_page(self, **kwargs):\n return slurp(\n 'search_products_as_admin',\n self.search_products_as_admin,\n 'ProductViewDetails',\n **kwargs\n )", "def GetProductDirect(self, model):\n items = []\n\n def xml_parser(body_xml_etree):\n for product in body_xml_etree.find('Products').findall('Product'):\n sku = product.find('Skus').find('Sku')\n attrs = product.find('Attributes')\n\n images = []\n for img in sku.find('Images').findall('Image'):\n if img.text:\n imgurl = string.replace(img.text, 'catalog.jpg', 'zoom.jpg')\n images.append(imgurl)\n\n p = {\n 'name': attrs.find('name').text,\n 'description': attrs.find('short_description').text,\n 'model': sku.find('SellerSku').text,\n 'stocks': int(sku.find('Available').text) or int(sku.find('quantity').text),\n 'price': float(sku.find('price').text),\n 'images': images,\n 'weight': float(sku.find('package_weight').text) or 0.9,\n # 'category': 'PENDING',\n # 'logistics': 'PENDING', # Not in lazada\n }\n items.append(p)\n\n result = self._Request(_LIST_PRODUCTS_ACTION,\n search=model, body_xml_parser=xml_parser)\n if result.error_code:\n raise CommunicationError(\n 'Error communicating: %s' % result.error_description)\n\n items = [x for x in items if x['model'] == model]\n if len(items) == 0:\n raise NotFoundError('No results for %s' % model)\n elif len(items) > 1:\n raise MultipleResultsError('Multiple results for %s' % model)\n\n return items[0]", "def show_all_products():\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock FROM catalogue\"\"\").fetchall()\n\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Cost\", \"Stock\"]))", "def get(cls):\n return {'products': [product.to_json() for product in ProductModel.find_all()]}", "def test_list_products_filtered_by_keyword(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?name=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n 
bucket=product_catalog.BUCKET)", "def product_list(request, format=None):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n if 'id' in request.DATA:\n del request.DATA['id']\n if 'ordered' in request.DATA:\n del request.DATA['ordered']\n serializer = ProductSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def product_search(obj, query):\n client = get_client(obj)\n\n pgs = client.product_list(q=query)\n\n print(json.dumps(pgs, indent=4))", "def products(self):\n return list(Product.select())", "def products(self):\n from hubspot3.products import ProductsClient\n\n return ProductsClient(**self.auth, **self.options)", "def all_prints(request):\n prints = Product.objects.filter(department=\"1\")\n\n context = {\n 'prints': prints,\n }\n\n return render(request, 'products/prints.html', context)", "def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')", "def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1", "def product_detail(request, product_id):\n # Search for product in Product Model using pk identifier obtained from project_id\n product = get_object_or_404(Product, pk=product_id)\n context = {\n 'product': product,\n }\n return render(request, 'products/product_detail.html', context)", "def products(self):\r\n return self._products", "def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()", "def get_shop_products(request, slug, cat):\n try:\n shop = Shop.objects.get(slug=slug)\n products = Products.objects.filter(shop_rel=shop).order_by('?')\n shop_slugs = list(map(lambda x: x[\"slug\"], shop.categories))\n if not shop.categories:\n products = []\n elif cat in shop_slugs:\n products = products.filter(genre__slug=cat)\n paginator = pagination.PageNumberPagination()\n paginator.page_size = 12\n result_page = paginator.paginate_queryset(products, request=request)\n serializer = ProductSerializer(result_page, many=True)\n return paginator.get_paginated_response(serializer.data)\n except shop.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)", "def search():\n\n products = current_app.products\n\n try:\n count = int(request.args.get('count'))\n except ValueError as e:\n log.error('Error while trying to cast count argument to int. 
{}'\n .format(e))\n raise InvalidParameterException('Parameter {} is invalid'\n .format('count'))\n try:\n radius = int(request.args.get('radius')) # radius in meters\n except ValueError as e:\n log.error('Error while trying to cast count argument to int. {}'\n .format(e))\n raise InvalidParameterException('Parameter {} is invalid'\n .format('radius'))\n try:\n lat = float(request.args.get('lat'))\n except ValueError as e:\n log.error('Error while trying to cast lat argument to float. {}'\n .format(e))\n raise InvalidParameterException('Parameter {} is invalid'\n .format('lat'))\n try:\n lng = float(request.args.get('lng'))\n except ValueError as e:\n log.error('Error while trying to cast lng argument to float. {}'\n .format(e))\n raise InvalidParameterException('Parameter {} is invalid'\n .format('lng'))\n tags = request.args.get('tags')\n\n log.debug('Request with arguments ' +\n 'count: {}, radius: {}, lat: {}, lng: {}, tags: {}'\n .format(count, radius, lat, lng, tags))\n matching_products = product_filter.get_matching_products(\n products,\n lat,\n lng,\n radius,\n tags\n )\n\n log.debug('Found {} matching products'\n .format(len(matching_products)))\n log.debug('Sorting products by popularity...')\n matching_products.sort(key=lambda product: product.popularity,\n reverse=True)\n return jsonify({'products': matching_products[:count]})", "def weddingcakes(request):\n products = Product.objects.all()\n return render(request, \"weddingcakes.html\", {\"products\": products})", "def view_product(cls, product_id):\n product = Product.get_by_id(product_id)\n print(f'Product ID: {product.product_id}')\n print(f'Product Name: {product.product_name}')\n print(f'Quantity: {product.product_quantity}')\n print(f'Price: ${product.product_price / 100:.2f}\\n')", "def get_queryset(self):\n queryset = Producto.objects.all()\n nombre = self.request.query_params.get('nombre')\n print(nombre)\n if nombre is not None:\n queryset = queryset.filter(nombre__icontains=nombre)\n return queryset\n return queryset", "def product_detail(request, product_id):\n\n product = get_object_or_404(Product, pk=product_id)\n options = None\n\n if 'option' in request.GET:\n options = request.GET['option']\n options = list(Option.objects.filter(name__in=options))\n\n context = {\n 'product': product,\n 'options': options,\n }\n\n return render(request, 'products/product_detail.html', context)", "def get(self, request, search_string=None):\n query = SearchQuery(search_string)\n\n name_vector = SearchVector('name', weight='A')\n description_vector = SearchVector('description', weight='B')\n vectors = name_vector + description_vector\n qs = Product.objects\n qs = qs.annotate(search=vectors).filter(search=query)\n qs = qs.annotate(rank=SearchRank(vectors, query)).order_by('-rank')\n print(qs)\n return Response(ProductSerializer(qs, many=True).data)", "def get_products():\n products = db.session.query(Product).all()\n product_details = {}\n\n for product in products:\n product_details[product.product_id] = product.name\n\n return jsonify(product_details)", "def test_view_all_products(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product 
successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('NY_denims', str(reply['products']))\n self.assertEqual(resp.status_code, 200)", "def get(self, request, **kwargs):\n elementos_list= Elementos.objects.all()\n return render(request, 'alchemy/index.html', {})", "def recommend_products(request):\n response, status_code = get_recommend_products(request)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n else:\n serialize_data = ProductSerializer(response, many=True).data\n return JsonResponse(serialize_data, status=200, safe=False)", "def search_product(self):\n cat = []\n product = open_products()\n radio = self.radiobutton_check()\n search = self.lineEdit_search.text()\n _translate = QtCore.QCoreApplication.translate\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n o=0\n if len(self.lineEdit_search.text()) == 0:\n self.show_product()\n else:\n for r in range(0, len(product)):\n if search.upper() in str(product[r][radio]).upper():\n cat.append(product[r])\n for i in range(0, len(cat)):\n for c in range(0, 5):\n item = self.tableWidget.item(i, c)\n item.setText(_translate(\"MainWindow\", str(cat[i][c])))\n o+=1\n else:\n for c in range(0, 5):\n item = self.tableWidget.item(r, c)\n item.setText(_translate(\"MainWindow\", \"\"))\n if o == 0:\n self.frame_3.show()\n self.label_16.setText('PRODUCT NOT FOUND!')", "def product_detail(request, product_id):\n \n product = get_object_or_404(Product, pk=product_id)\n review_form = ReviewForm()\n reviews = Review.objects.filter(product_id=product_id).order_by('-created_at')\n\n context = {\n 'product': product,\n 'review_form': review_form,\n 'reviews': reviews,\n }\n\n return render(request, 'products/product_detail.html', context)", "def products(self):\n return self._products", "def search_read(self, filters=None):\n return self._call('%s.list' % self._shopware_model, [filters])", "def products_list(request, product):\n product_found = get_object_or_404(Products, product=product)\n\n nut = product_found.nutrition_grade_fr\n\n query_set_product = (\n Products.objects.filter(category=product_found.category)\n .filter(\n Q(nutrition_grade_fr__lte=nut) \n ) # propose products with value less or equal at the search product\n .exclude(product=product_found.product)\n )\n\n if len(query_set_product) >= 6:\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly\n \n else:\n query_set_product = Products.objects.filter(\n Q(nutrition_grade_fr__lte=nut) \n ).exclude(product=product_found.product)\n\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly \n\n\n if \"submit\" in request.POST: # do something with interview_HTML button is clicked\n save_product = request.POST.get(\"submit\")\n save_product = Products.objects.get(product=save_product)\n if not request.user.is_authenticated:\n return redirect(\"%s?next=%s\" % (settings.LOGIN_URL, request.path))\n user = request.user\n\n user = CustomUser.objects.get(email=user)\n\n save = History(\n user=user,\n chosen_product=product_found,\n remplacement_product=save_product,\n )\n save.save()\n\n context = {\n \"proposed_product\": product_found,\n \"products\": random_six_products,\n }\n\n return render(request, \"products/products.html\", context)", 
"def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products" ]
[ "0.82876235", "0.8283801", "0.78482085", "0.76325524", "0.7614649", "0.7584741", "0.74856204", "0.74783254", "0.74655133", "0.7386778", "0.7367176", "0.7330932", "0.72938323", "0.7253225", "0.72413594", "0.7230313", "0.72217464", "0.720815", "0.711731", "0.7079289", "0.7042373", "0.7028177", "0.70093006", "0.7000184", "0.6945874", "0.69368106", "0.69047356", "0.6901446", "0.68892926", "0.6871828", "0.68513656", "0.6850784", "0.6805938", "0.67736834", "0.675656", "0.6690695", "0.6676956", "0.6665751", "0.6665126", "0.6658981", "0.6657857", "0.6634716", "0.66325015", "0.6611833", "0.6603269", "0.656776", "0.656687", "0.65624213", "0.65375406", "0.65341586", "0.6531938", "0.6525054", "0.6507184", "0.65003103", "0.64951515", "0.64484364", "0.64111954", "0.6401471", "0.63982344", "0.63963634", "0.6388473", "0.6373166", "0.6350433", "0.6342107", "0.63372904", "0.63347876", "0.63315004", "0.63235456", "0.63097113", "0.6307378", "0.63046503", "0.6303535", "0.630236", "0.6287682", "0.6275252", "0.6266468", "0.62559295", "0.6254655", "0.62545097", "0.6238129", "0.6237431", "0.621093", "0.62092817", "0.6206755", "0.6203278", "0.62026775", "0.6199228", "0.61872774", "0.6180744", "0.61788607", "0.61374116", "0.6130865", "0.6127162", "0.6100423", "0.6063022", "0.60580295", "0.6034422", "0.60250944", "0.60194886", "0.6016035", "0.60156065" ]
0.0
-1
A view to add a new portfolio project
def add_project(request):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            project = form.save()
            messages.success(request, 'Project added successfully!')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to add project.\
                # Please ensure the form is valid')
    else:
        form = ProjectForm()

    form = ProjectForm()
    template = 'portfolio/add_project.html'
    context = {
        'form': form,
    }

    return render(request, template, context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")", "def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def get_add_project_form():\n\n return render_template(\"project_add.html\")", "def create_project(request):\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp)\n\n # check whether it's valid:\n if form.is_valid():\n prj_obj = form.save(commit=False)\n # prj_obj.description = bleach.clean(prj_obj.description, strip=True)\n # fint the user profile object based on the email in session\n user_profile = UserProfile.objects.get(email=request.session['email'])\n prj_obj.user = user_profile\n # Save the project object - project needs to exist before\n # manytomany field is accessed.\n prj_obj.save()\n # get the list of tag objects to add to project\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n prj_obj.tags.add(tag_object)\n for article_object in article_object_list:\n 
prj_obj.articles.add(article_object)\n prj_obj.save()\n return HttpResponse(str(prj_obj.id))\n # return HttpResponseRedirect('/projects/' + str(prj_obj.id))\n else:\n print form.errors.as_data()\n else:\n # Remove when front end updated.\n form = ProjectForm()\n return render(request, 'projects/create_project.html', {'form': form})", "def get_project_add_form():\n\n return render_template(\"project_add.html\")", "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return", "def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def add_project(project, taglist):\n if anonymize:\n import random\n project['name'] = 'Anonimized Project ' + str(project['id'])[-3:]\n project['client'] = 'Anonimized Client'\n\n wf.add_item(title=project['name'],\n subtitle='Client: ' +\n project['client'] +\n ' Hit ENTER to show menu, press ALT for more info.',\n modifier_subtitles={\n 'alt': 'Tags: ' + ', '.join(taglist),\n },\n arg=str(project['id']),\n valid=True,\n icon='icons/project_{0}.png'.format(\n project['project_state']).lower(),\n copytext=project['name'])", "def get_projects_route():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n if post_data is not None:\n add_project(post_data)\n response_object['message'] = 'Project added!'\n else:\n response_object['projects'] = get_projects()\n return jsonify(response_object)", "def edit_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n \n if 
project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/'+str(project_id))\n else:\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp, instance=project)\n # form = ProjectForm(request.POST, instance=project)\n # check whether it's valid:\n if form.is_valid():\n #clear any previously stored tags to fix the bug \n #where we remove the tags and its not reflected\n try:\n project.tags.clear()\n project.articles.clear()\n except:\n pass\n m = form.save(commit=False)\n # m.description = bleach.clean(m.description, strip=True)\n m.save()\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n m.tags.add(tag_object)\n for article_object in article_object_list:\n m.articles.add(article_object)\n m.save()\n # return HttpResponseRedirect('/projects/' + str(m.id))\n # return project_detail(request, m.id)\n return HttpResponse(str(m.id))\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n # return render(request, 'projects/error_edit.html', {'form': form})\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n return project_detail(request, project_id)", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def create_project(self, **kwargs):\n _url = f\"{self.base_url}/projects\"\n if \"name\" not in kwargs:\n raise ValueError(\"Parameter 'name' is mandatory\")\n return self.http_call(\"post\", _url, json_data=kwargs).json()", "def projects_view(request):\n\n # The projects to be displayed. 
Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add(self, name, project):\n self.projects[name] = project", "def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p", "def project_clone(request, proj_id=None):\n\n if not proj_id or not request.user.is_authenticated():\n raise Http404\n\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n project.pk = None\n project.user = request.user\n project.save()\n\n for scenario in Scenario.objects \\\n .filter(project_id=proj_id) \\\n .order_by('created_at'):\n scenario.pk = None\n scenario.project = project\n scenario.save()\n\n return redirect('/project/{0}'.format(project.id))", "def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. 
Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )", "def edit_project_view(request, project_id):\n\n # Use to tell to the template that the user want to edit a project\n is_new = False\n\n # Retrieve the project to be edited or raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the logged in user is allowed to edit this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if the view receive data from the form\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Manually update the field using the data from form\n project.name = form.cleaned_data[\"name\"]\n project.members.set(form.cleaned_data[\"members\"])\n # Save the project. 
Does not creat a new project as long as the project's id is not modified\n project.save()\n return redirect(\"projects\")\n else:\n form = ProjectForm(user=request.user, instance=project)\n return render(request, 'newProject.html', locals())\n else:\n return redirect(\"projects\")\n return redirect(\"projects\")", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def NewProject (projectname):\n\tif projectname == \"\" or projectname == None:\n\t\tnewprojcode(projectname)\n\telse:\n\t\tnewprojCode_withNamed()", "def openproject():\n\n # POST\n if request.method == \"POST\":\n\n # Validate form submission\n if not request.form.get(\"projectname\"):\n return apology(\"missing project name\")\n elif not request.form.get(\"link\"):\n return apology(\"missing project link\")\n\n\n # Record project in the database\n db.execute(\"\"\"INSERT INTO projects (projectname, link)\n VALUES(:projectname, :link)\"\"\", projectname=request.form.get(\"projectname\"), link=request.form.get(\"link\"))\n\n # Display that the project has been opened\n flash(\"Opened!\")\n return redirect(\"/\")\n\n # GET\n else:\n return render_template(\"openproject.html\")", "def newProject(self):\n dialog = NewProjectDialog()\n if not dialog.name is None and not dialog.path is None:\n self._app.createProject(str(dialog.name), str(dialog.path))", "def _post_project(prj=None):\n template_path = (os.path.join(\n os.path.split(__file__)[0], \"post_project_template.xml\"))\n with open(template_path, 'r') as file:\n template = Template(file.read())\n response_xml = template.render(\n name=f\"Project_TEST_{datetime.now()}\",\n open_date=str(datetime.today().date()),\n res_uri=f\"{LIMS_API.tools.api.host}researchers/1\")\n\n prj_response = LIMS_API.tools.api.post(\n f\"{LIMS_API.tools.api.host}projects\", response_xml)\n\n prj_response_soup = BeautifulSoup(\n prj_response, \"xml\").find(\"prj:project\")\n prj = api_types.Project(\n prj_response_soup.find(\"name\"),\n DEFAULT_RES,\n datetime.today().date(),\n [],\n prj_response_soup[\"uri\"])\n\n return prj", "def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = project['deadline'].strftime('%d/%m/%Y')\n form.brief.data = project['brief']\n form.note.data = project['note']\n return render_template('pages/editproject.html', form=form, project=project, legend='Edit your project')", "def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))", "def project_clone_view(user_data, cache):\n return ProjectCloneCtrl(cache, 
user_data, dict(request.json)).to_response()", "def user_project_view(cls, user, project):\r\n pass", "def add_project(self, proj, i):\r\n self.__projects[i] = proj", "def post(self, request, formal=None):\n serializers = ProjectSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data, status=status.HTTP_201_CREATED)\n permission_classes=(IsAdminOrReadOnly)\n return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)", "def add_task():\n found = False\n project_id = None\n task = request.form['task']\n project = request.form['project']\n \n if not task:\n return redirect('/')\n\n if not project:\n project = 'Tasks'\n\n projects = Projects.query.all()\n\n for proj in projects:\n if proj.project_name == project:\n found = True\n\n # add the project if not in database already\n if not found:\n add_project = Projects(project, True)\n db.session.add(add_project)\n db.session.commit()\n projects = Projects.query.all()\n\n # set the active tab\n for proj in projects:\n if proj.project_name == project:\n project_id = proj.project_id\n proj.active = True\n else:\n proj.active = False\n\n status = bool(int(request.form['status']))\n\n # add the new task\n new_task = Tasks(project_id, task, status)\n db.session.add(new_task)\n db.session.commit()\n return redirect('/')", "def index():\n return render_template('project.html')", "def update_project(id):\n if request.method == \"POST\":\n result = update_project_to_db(\n id,\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n project = get_project(id)\n return render_template(\"edit_project.html\", **project)", "def new_project(self, project_name: str) -> str:\n if project_name in [NO_PROJECT_NAME, \"\"]:\n raise MephistoDBException(f'Invalid project name \"{project_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"INSERT INTO projects(project_name) VALUES (?);\", (project_name,)\n )\n project_id = str(c.lastrowid)\n return project_id\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException()\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(\n f\"Project {project_name} already exists\"\n )\n raise MephistoDBException(e)", "def create(self, request):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project()\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n project.lotId = lot\n #projectNote=projectNote\n\n\n try:\n project.save()\n serializer = ProjectSerializer(project, context={'request': request}) #converting data into json\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "def createProject(self, payLoad):\n\n uri = \"/v1/projects/\" \n response = self.client.post(uri, payLoad)\n return response", "def edit_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES, instance=project)\n if form.is_valid():\n form.save()\n 
messages.success(request, 'Successfully updated project')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to update project. \\\n # Please ensure the form is valid.')\n else:\n form = ProjectForm(instance=project)\n messages.info(request, f'You are editing {project.name}')\n\n template = 'portfolio/edit_project.html'\n context = {\n 'form': form,\n 'project': project,\n }\n\n return render(request, template, context)", "def create(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save(permission=\"contributor\", role=\"Contributor\")\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def project(self, request):\n return self._project(request, 'project')", "def create(request, proj_id):\n \n project = get_object_or_404(Project, pk=proj_id)\n \n # require permission to proceed\n must_have_permission(request.user, project, \"can_create_roles\")\n \n obj_permissions = ObjectPermission.objects.filter_from_instance(project)\n \n project_url = reverse(\"project_detail\", args=[project.id])\n \n def pre_save(instance, created):\n instance.project = project\n \n return generic_crud(\n request,\n obj_id=None,\n model=ProjectRole,\n template=TEMPLATE_PATH+\"/create.html\",\n redirect=lambda instance: project_url,\n form_class=ProjectRoleForm,\n pre_save=pre_save,\n extra_form_params={\n \"obj_permissions\": obj_permissions,\n },\n extra_context={\n \"project\": project,\n \"breadcrumbs\": (\n (\"Home\", reverse(\"home\")),\n (\"Project %s\" % project.name,\n project_url),\n (\"Create Role\", request.path),\n )\n }\n )", "def newtask_view(request, project_id):\n # Use to tell to the template that user want to creat a new task\n is_new = True\n\n # Retrieve the task, raise an error if the task does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the user is allowed to add a task to this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if a form has been submitted\n if request.method == \"POST\":\n # Pass project to the form. Set the task's project fields with this project (initialize and never modify)\n form = TaskForm(project, request.POST)\n if form.is_valid():\n task = form.save(commit=True)\n task.last_modification = datetime.datetime.now() # it's probably not necessary\n task.save()\n\n return redirect(\"task\", task_id=task.id)\n else:\n # Pass project to the form. 
Set the task's project fields with this project (initialize and never modify)\n form = TaskForm(project)\n else:\n return redirect(\"projects\")\n return render(request, \"newtask.html\", locals())", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def post(self, request):\n body = request.body.decode(\"utf-8\")\n print(body)\n print(request.META)\n if not body:\n return HttpResponse(status=HTTPStatus.BAD_REQUEST)\n\n data = json.loads(body)\n project_name = data['name']\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n existing_projects = [project['name'] for project in serializer.data]\n if project_name in existing_projects:\n return Response(status=HTTPStatus.CONFLICT)\n\n project_location = os.path.join(PROJECTS_FOLDER, project_name+'.aedt')\n project = Project.objects.create(name=project_name, project_location=project_location)\n project.save()\n return HttpResponse(HTTPStatus.OK)", "def user_project_view(cls, user, project):\n pass", "def create_project(self, pool, project, arg):\n self.verify_pool(pool)\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n svc = self.projects_path % pool\n ret = self.rclient.post(svc, arg)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error creating project: '\n '%(project)s on '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'project': project,\n 'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.ShareBackendException(msg=exception_msg)", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def add_new_project(title, description, max_grade):\n QUERY = \"\"\"INSERT into Projects (title, description, max_grade) VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (title, description, max_grade))\n db_connection.commit()\n print \"Success! 
Add %s project, and here is the description: %s, and max grade: %s\"\\\n %(title, description, max_grade)", "def test_add_project(self):\n pass", "def get_project_form():\n\n return render_template(\"project_search.html\")", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def project_assign(request, project_code):\n if request.user.is_authenticated:\n projects = Project.objects.all()\n context = {'projects': projects}\n selected_project = get_object_or_404(Project, code=project_code)\n try:\n selected_project.status = 2 # project is assigned\n selected_project.save()\n\n # getting the head\n assigned_head = User.objects.get(department=selected_project.department,\n role__name__iexact=role_department_head)\n # create new task history object\n task_history = TaskHistory()\n task_history.project = selected_project\n task_history.description = (model_to_dict(selected_project))\n task_history.status = 'New Project'\n task_history.user = assigned_head\n task_history.save()\n\n \"\"\" Setting notification as project is assigned to a head \"\"\"\n assigned_head.notification_count += 1\n assigned_head.save()\n selected_project.assigned_at = datetime.now() # setting the assigned time\n selected_project.save()\n # print(assigned_head, '------------------------------------------*********************',\n # assigned_head.notification_count)\n messages.success(request, f\"Project '{selected_project.name}' is assigned to the department head.\")\n return redirect('project-list')\n except Exception as e:\n # print('error at assign project ====', e)\n messages.error(request, f\"Error: {e}\")\n return render(request, 'projectmanager/project_list.html', context)", "def test_project_view(self):\n with self.app.app_context():\n p = project(save=True)\n\n response = self.client.get('/project/%s' % p.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/project/not-a-real-project')\n eq_(response.status_code, 404)", "def test_projects_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def add_project_to_groups(projectname):\n groups = request.get_json().get(\"groups\", [])\n return jsonify(\n admin.add_project_to_groups(\n current_app.scoped_session(), username, groups=groups\n )\n )", "def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)", "def create_new_project(project_name, token=None):\n session = konfuzio_session(token)\n url = create_new_project_url()\n new_project_data = {\"name\": project_name}\n r = session.post(url=url, json=new_project_data)\n return r", "def projectdetails(http_request, project_id=0):\n\tp = get_object_or_404(Project, pk=project_id)\n\treturn render_to_response('project_detail.html', {'project': p})", "def _on_new_project(self):\n lang = self.ddnGuiLanguage.get()\n projectfile = filedialog.asksaveasfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['BibTerms2Dict project'], \\\n defaultextension='.prj')\n if 
os.path.exists(projectfile):\n messagebox.showwarning(LOCALIZED_TEXT[lang]['New Project'], \\\n LOCALIZED_TEXT[lang]['{} already exist choose another name.'].\\\n format(os.path.basename(projectfile)))\n return\n else:\n newfile = codecs.open(fileout, mode='w', encoding='utf-8')\n newfile.close()\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n self.ddnCurProject.set(os.path.basename(projectfile)[:-4])\n self.update\n\n pass", "def on_add(self, project, name, **kwargs):\n pass", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def project_detail(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n project.description = markdown.markdown(bleach.clean(project.description, strip=True), extensions=['markdown.extensions.fenced_code'])\n p2 = Project.objects.get(pk=project_id)\n user_profile = UserProfile.objects.get(email=request.session['email'])\n submissions_list = Submission.objects.filter(project=project)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n context = {'project': project, 'submissions_list':submissions_list, 'current_user': request.session['email'], 'user_profile': user_profile}\n return render(request, 'projects/details.html', context)", "def create_project(self, name):\n project = self._post('/projects', data={'name': name})\n self.create_project_hook(project['id'], self.webhook_url + name)\n return project", "def project():", "def project():", "def project():", "def project_create(project):\n client.project.create(project)", "def test_project_list_with_projects(self):\n # Add test projects.\n first_project = add_project(title='Title 1', description='Description 1')\n second_project = add_project(title='Title 2', description='Description 2')\n\n # Check that project list contains test projects.\n response = self.client.get(reverse('portfolio:project_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, first_project.title)\n self.assertContains(response, first_project.description)\n self.assertContains(response, second_project.title)\n self.assertContains(response, second_project.description)", "def projects():\n \n if 'username' in session:\n current_user = mongo.db.user.find_one({'username': session['username']}) \n projects = mongo.db.projects.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/projects.html', title='Projects', projects=projects, current_user=current_user)\n \n flash('Please login to view user projects.', 'warning')\n return redirect(url_for('login'))", "def save(self, project_id=None):\r\n if project_id is not None:\r\n project = Project.objects.get(pk=int(project_id))\r\n else:\r\n project = Project()\r\n # Fill out the data of the given project and prepare it\r\n # for saving into database.\r\n project.Name = self.cleaned_data['name']\r\n project.ProjectClient = self.cleaned_data['project_client']\r\n project.Start = self.cleaned_data['start']\r\n project.End = self.cleaned_data['end']\r\n project.ProjectManager = self.cleaned_data['project_manager']\r\n project.QualityAssurance = 
self.cleaned_data['quality_assurance']\r\n project.Price = self.cleaned_data['price']\r\n project.Segment = self.cleaned_data['segment']\r\n project.Type = self.cleaned_data['type']\r\n project.save()\r\n # If the item was just created, set up workflow for it\r\n if project_id is None:\r\n workflow = Workflow.objects.get(name='Project')\r\n utils.set_workflow(project, workflow)\r\n state = utils.get_state(project)\r\n project.Status = state\r\n project.save()\r\n return project", "def project():\n\n return M(c=\"project\", f=\"task\")(\n M(\"Tasks\", f=\"task\")(\n M(\"Create\", m=\"create\"),\n M(\"My Open Tasks\", vars={\"mine\":1}),\n ),\n )", "def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])", "def get_project():\n\n title = request.args.get('title')\n if not title:\n return \"Please enter a title!\"\n\n project = hackbright.get_project_by_title(title)\n\n grades = hackbright.get_grades_by_title(title)\n\n if not project:\n return \"There is no project with title \\\"{}\\\".\".format(title)\n\n title, description, max_grade = project\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n grades=grades)", "def test_01_portfolio_create_new(self):\n p = Portfolio.create_new(slug=\"test\",\n description=\"Portfolio to be used in JPMorgan tests\",\n user=\"automated unit tester\",)\n self.assertTrue(isinstance(p, Portfolio),\n msg=\"Portfolio was NOT successfully created\")\n print(\"Portfolio '{}' - '{}' was successfully created by '{}' at '{}'\".format(\n p.slug,\n p.description,\n p.created_by,\n p.created_at\n ))\n\n \"\"\"Is portfolio create new not returning a Portfolio duplicated instance?\"\"\"\n p = Portfolio.create_new(slug=\"test\",\n description=\"Portfolio to be used in JPMorgan tests AGAIN\",\n user=\"automated unit tester\",)\n self.assertFalse(isinstance(p, Portfolio),\n msg=\"Duplicated portfolio was successfully created\")\n print(\"Portfolio 'test' was NOT created AGAIN\")", "def project(self, project_id):\r\n return p.Project(self, project_id)", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def new_project(self, rootdir=None):\n if rootdir is None:\n rootdir = Ui.instance().select_directory(user.home)\n if not os.path.exists(rootdir):\n os.makedirs(rootdir)\n\n print 'Weld.new_project in ', rootdir\n project = Project(rootdir)\n\n project.save()\n self.project = project\n self.current_project_path = rootdir\n Ui.instance().set_resources_draggable(True)\n Ui.instance().show_status('new project created')", "def OnNew(self, e):\n self.mainparent.statusbar.SetStatusText(\"New Project\", 0)\n\n filename = \"__new_project__\"\n self.mainparent.input_file = InputFile(filename, read=False)\n self.mainparent.statusbar.SetStatusText(\"---New Project---\", 2)\n self.mainparent.file_loaded = True\n\n # reset menus and such\n self.mainparent.reset_namelist_menu()\n 
self.mainparent.nmlpanel.reset(unset_namelist=True)", "def upload_project(request):\n current_user = request.user\n current_user_name = current_user.username\n # project_ratings=Rating.objects.filter(id=project_id)\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project_post = form.save(commit=True) \n else:\n raise Http404 \n \n return redirect(view_projects)\n else: \n project_form=ProjectForm()\n \n return render(request, 'upload_project.html', {'project_form':project_form})", "def add_project(self, project=None):\n is_project = type(project) is Project\n id_exists = project.client_id in [c.client_id for c in self.client_list]\n pid_exists = project.project_id() in [p.project_id() for p in self.project_list]\n\n # cancel if it's no project or the client_id does not exist\n # or the project_id already exists\n if not is_project or not id_exists or pid_exists:\n return False\n\n # add the project\n self.project_list.append(project)\n self.save_project_to_file(project=project)\n return True", "def test_project_view(self):\n response = self.client.get('/projects/')\n self.assertEqual(response.status_code, 200)", "def addProject(self, project):\n\n result = Project.getProjectDependencies(project, \"external\", self.__updateRepositories)\n for project in result:\n\n Console.info(\"Adding %s...\", Console.colorize(project.getName(), \"bold\"))\n Console.indent()\n\n # Append to session list\n self.__projects.append(project)\n\n # Import library methods\n libraryPath = os.path.join(project.getPath(), \"jasylibrary.py\")\n if os.path.exists(libraryPath):\n self.loadLibrary(project.getName(), libraryPath, doc=\"Library of project %s\" % project.getName())\n\n # Import command methods\n commandPath = os.path.join(project.getPath(), \"jasycommand.py\")\n if os.path.exists(commandPath):\n self.loadCommands(project.getName(), commandPath)\n\n # Import project defined fields which might be configured using \"activateField()\"\n fields = project.getFields()\n for name in fields:\n entry = fields[name]\n\n if name in self.__fields:\n raise UserError(\"Field '%s' was already defined!\" % (name))\n\n if \"check\" in entry:\n check = entry[\"check\"]\n if check in [\"Boolean\", \"String\", \"Number\"] or isinstance(check, list):\n pass\n else:\n raise UserError(\"Unsupported check: '%s' for field '%s'\" % (check, name))\n\n self.__fields[name] = entry\n\n\n Console.outdent()", "def post(self):\n try:\n draft_project_dto = DraftProjectDTO(request.get_json())\n draft_project_dto.user_id = token_auth.current_user()\n draft_project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": \"Unable to create project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n draft_project_id = ProjectAdminService.create_draft_project(\n draft_project_dto\n )\n return {\"projectId\": draft_project_id}, 201\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n except (InvalidGeoJson, InvalidData) as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def test_create_project_request(self):\n pass", "def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))", "def 
project(projectname,targetamount):\n if (validatename(projectname) and validatenum(targetamount)):\n targetamount=float(targetamount)\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n click.echo(\"Project name already exists!\")\n sys.exit()\n cur.execute(\"INSERT INTO projects (Name, Tamount) VALUES (?, ?)\", (projectname, targetamount))\n click.echo(\"Added %s project with target of $%-.2f\" % (projectname, targetamount))", "def add_project(self, name, branches):\n prj_e = self._doc.createElement('project')\n prj_e.setAttribute('name', name)\n for branch in branches:\n br_e = self._doc.createElement('branch')\n for key, val in branch.iteritems():\n br_e.setAttribute(key, val)\n prj_e.appendChild(br_e)\n self._doc.firstChild.appendChild(prj_e)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def create_new_project(self,\n customer_name,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget,\n project_actst=None,\n project_actend=None,\n project_cost=None):\n\n customer_info = self.query_customer(cus_name=customer_name)\n\n if customer_info:\n # Search for project manager in the same region as the customer.\n customer_region_id = customer_info[0][1]\n get_employee_query = \"select employee.emp_id, emp_lname, emp_fname from employee, \" \\\n \"empskill, skill, region where employee.emp_id = \" \\\n \"empskill.emp_id and empskill.skill_id = \" \\\n \"skill.skill_id and skill.skill_descrpt = \" \\\n \"'Project Manager' and region.region_id = \" \\\n \"employee.region_id and region.region_id = '{}' \"\n try:\n self.dbCursor.execute(\n get_employee_query.format(customer_region_id))\n employee_info = self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n if len(employee_info) == 0:\n ErrorMessageWindow(\"No suitable project manager found!\")\n else:\n if customer_info and employee_info:\n if len(customer_info) > 1:\n MultiRowScreen(customer_info, \"project\")\n else:\n cus_id = customer_info[0][0]\n emp_id = employee_info[0][0]\n optional_inputs = [project_actst, project_actend,\n project_cost]\n\n query = \"insert into project(cus_id, emp_id, proj_date, \" \\\n \"proj_descrpt, proj_estdatest, proj_estdateend, \" \\\n \"proj_estbudget) values ('{}', '{}', '{}', '{}', \" \\\n \"'{}', '{}', '{}') \".format(cus_id,\n emp_id,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget)\n\n yes_options = False\n for item in optional_inputs:\n if item != \"\":\n yes_options = True\n\n if yes_options is False:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n option_names = [\"proj_actdatest\",\n \"proj_actdateend\",\n \"proj_actcost\"]\n options_index = []\n filled_options = []\n\n index = 0\n for item in optional_inputs:\n if item != \"\":\n options_index.append(index)\n filled_options.append(item)\n index += 1\n update_query = \"update project set \"\n\n j = 0\n for i in options_index:\n if j < len(filled_options) - 
1:\n update_query += \"{}='{}', \".format(\n option_names[i], filled_options[j]\n )\n else:\n update_query += \"{}='{}' \".format(\n option_names[i], filled_options[j]\n )\n j += 1\n\n try:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n\n self.dbCursor.execute(update_query)\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Customer not found!\")" ]
[ "0.7893704", "0.7302018", "0.7279046", "0.7119255", "0.70725876", "0.7025085", "0.6988927", "0.6972085", "0.6608903", "0.65972", "0.659065", "0.6497519", "0.64028376", "0.6389602", "0.6371995", "0.63446647", "0.6344435", "0.6330495", "0.63034815", "0.6294346", "0.6290368", "0.6288127", "0.6288127", "0.6283533", "0.62677646", "0.6265085", "0.6257913", "0.62073946", "0.6194536", "0.6187606", "0.61775106", "0.6166407", "0.61476856", "0.6136868", "0.61219186", "0.6120963", "0.6118594", "0.61117285", "0.6108015", "0.60943335", "0.6085318", "0.60622597", "0.6057076", "0.6045134", "0.60414654", "0.6030329", "0.60225195", "0.59964067", "0.5989335", "0.5988574", "0.5977206", "0.5961754", "0.5946682", "0.59365654", "0.59323156", "0.59203655", "0.59137565", "0.5892128", "0.5890914", "0.5889899", "0.58826995", "0.5882668", "0.5881947", "0.58779955", "0.5870084", "0.58603084", "0.5850107", "0.58384925", "0.5836607", "0.5830717", "0.5819316", "0.5812822", "0.5800021", "0.5787887", "0.5785875", "0.5785875", "0.5785875", "0.57811517", "0.5772172", "0.5766269", "0.57647204", "0.57596266", "0.5754242", "0.5728003", "0.5704901", "0.56961846", "0.5685776", "0.5685731", "0.56767833", "0.567561", "0.56728", "0.5670572", "0.56564", "0.56556606", "0.56515044", "0.56490403", "0.5644292", "0.564166", "0.5636555", "0.5617504" ]
0.7959094
0
A view to edit a portfolio project
def edit_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES, instance=project)
        if form.is_valid():
            form.save()
            messages.success(request, 'Successfully updated project')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to update project. '
                                    'Please ensure the form is valid.')
    else:
        form = ProjectForm(instance=project)
        messages.info(request, f'You are editing {project.name}')

    template = 'portfolio/edit_project.html'
    context = {
        'form': form,
        'project': project,
    }

    return render(request, template, context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = project['deadline'].strftime('%d/%m/%Y')\n form.brief.data = project['brief']\n form.note.data = project['note']\n return render_template('pages/editproject.html', form=form, project=project, legend='Edit your project')", "def edit_project_view(request, project_id):\n\n # Use to tell to the template that the user want to edit a project\n is_new = False\n\n # Retrieve the project to be edited or raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the logged in user is allowed to edit this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if the view receive data from the form\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Manually update the field using the data from form\n project.name = form.cleaned_data[\"name\"]\n project.members.set(form.cleaned_data[\"members\"])\n # Save the project. Does not creat a new project as long as the project's id is not modified\n project.save()\n return redirect(\"projects\")\n else:\n form = ProjectForm(user=request.user, instance=project)\n return render(request, 'newProject.html', locals())\n else:\n return redirect(\"projects\")\n return redirect(\"projects\")", "def edit_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n \n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/'+str(project_id))\n else:\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp, instance=project)\n # form = ProjectForm(request.POST, instance=project)\n # check whether it's valid:\n if form.is_valid():\n #clear any previously stored tags to fix the bug \n #where we remove the tags and its not reflected\n try:\n project.tags.clear()\n project.articles.clear()\n except:\n pass\n m = form.save(commit=False)\n # m.description = bleach.clean(m.description, strip=True)\n m.save()\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n m.tags.add(tag_object)\n for article_object in article_object_list:\n m.articles.add(article_object)\n m.save()\n # return HttpResponseRedirect('/projects/' + str(m.id))\n # return project_detail(request, m.id)\n return HttpResponse(str(m.id))\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n # return render(request, 'projects/error_edit.html', {'form': form})\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n return project_detail(request, project_id)", "def update_project(id):\n if request.method == \"POST\":\n result = update_project_to_db(\n id,\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n project = get_project(id)\n return render_template(\"edit_project.html\", **project)", "def edit_project(request, game_project_id):\n\n profile = get_object_or_404(Profile, 
user=request.user)\n game_project = get_object_or_404(GameProject, pk=game_project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if game_project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n game_project_form = ProjectForm(\n request.POST,\n request.FILES,\n instance=game_project\n )\n if game_project_form.is_valid():\n game_project_form.save(commit=False)\n game_project.owner = profile\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n game_project.save()\n messages.success(request, 'Successfully updated project!')\n return redirect(reverse('project_detail', args=[game_project.id]))\n else:\n messages.error(\n request,\n 'Failed to update project. Please ensure the form is valid.'\n )\n else:\n game_project_form = ProjectForm(instance=game_project)\n messages.info(request, f'You are editing {game_project.title}')\n\n template = 'gameproject/edit_project.html'\n context = {\n 'game_project_form': game_project_form,\n 'game_project': game_project,\n }\n\n return render(request, template, context)", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def project_detail(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n project.description = markdown.markdown(bleach.clean(project.description, strip=True), extensions=['markdown.extensions.fenced_code'])\n p2 = Project.objects.get(pk=project_id)\n user_profile = UserProfile.objects.get(email=request.session['email'])\n submissions_list = Submission.objects.filter(project=project)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n context = {'project': project, 'submissions_list':submissions_list, 'current_user': request.session['email'], 'user_profile': user_profile}\n return render(request, 'projects/details.html', context)", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def user_project_view(cls, user, project):\r\n pass", "def projects_view(request):\n\n # The projects to be displayed. 
Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")", "def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def edit_project_activated(self):\n if self.project:\n self.edit_project(EDIT)\n else:\n QMessageBox.warning(self, programName, \"There is no project to edit\")", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, 
save_url=save_url)", "def user_project_view(cls, user, project):\n pass", "def projectdetails(http_request, project_id=0):\n\tp = get_object_or_404(Project, pk=project_id)\n\treturn render_to_response('project_detail.html', {'project': p})", "def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)", "def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu", "def edit(self, **kwargs):\n ...", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def update_project(project_id):\n\n project = mongo.db.projects\n project.find_one_and_update({'_id': ObjectId(project_id) },\n {'$set':\n {'title': request.form.get('title'),\n 'status': request.form.get('status'),\n 'deadline': datetime.strptime(request.form.get('deadline'), '%d/%m/%Y'),\n 'note': request.form.get('note'),\n 'brief': request.form.get('brief')}})\n return redirect(url_for('projects'))", "def 
get_add_project_form():\n\n return render_template(\"project_add.html\")", "def edit_form():\n return template (\"edit\")", "def get_project_form():\n\n return render_template(\"project_search.html\")", "def openproject():\n\n # POST\n if request.method == \"POST\":\n\n # Validate form submission\n if not request.form.get(\"projectname\"):\n return apology(\"missing project name\")\n elif not request.form.get(\"link\"):\n return apology(\"missing project link\")\n\n\n # Record project in the database\n db.execute(\"\"\"INSERT INTO projects (projectname, link)\n VALUES(:projectname, :link)\"\"\", projectname=request.form.get(\"projectname\"), link=request.form.get(\"link\"))\n\n # Display that the project has been opened\n flash(\"Opened!\")\n return redirect(\"/\")\n\n # GET\n else:\n return render_template(\"openproject.html\")", "def edit(self):\n\n pass", "def project(self, request):\n return self._project(request, 'project')", "def test_project_view(self):\n with self.app.app_context():\n p = project(save=True)\n\n response = self.client.get('/project/%s' % p.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/project/not-a-real-project')\n eq_(response.status_code, 404)", "def post(self, project_slug):\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n args = PROJECT_EDIT_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n\n if args.get('utility', project.utility):\n ensure_target_registry(False)\n\n set_target_registry(args)\n return self.handle_write(project, data=args)", "def get_project_add_form():\n\n return render_template(\"project_add.html\")", "def edit_parterre(id):\n parterre = get_parterre(id)\n form = ParterreForm(parterre)\n return render_template(\"create-parterre.html\",\n title= parterre.get_name()+\" - edit\",\n form = form,\n parterre = parterre,\n param = \"modif\")", "def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. 
Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def get_project():\n\n title = request.args.get('title')\n if not title:\n return \"Please enter a title!\"\n\n project = hackbright.get_project_by_title(title)\n\n grades = hackbright.get_grades_by_title(title)\n\n if not project:\n return \"There is no project with title \\\"{}\\\".\".format(title)\n\n title, description, max_grade = project\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n grades=grades)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])", "def index():\n return render_template('project.html')", "def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))", "def edit_form(pagename):\n\n articles = get_articles()\n\n edit_article = None\n for article in articles:\n if article[\"title\"] == pagename:\n edit_article = article\n\n if edit_article == None:\n return template(\"skapa-artikel\")\n\n else:\n return template(\"edit\", article=edit_article)", "def edit(request, company_id=None):\n if company_id:\n company = get_object_or_404(Company, id=company_id)\n if request.POST and company.owner == request.user:\n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/companies')\n if company.owner != request.user:\n return HttpResponseForbidden()\n form = CompanyForm(instance=company)\n context = dict(form=form)\n return render(request, 'companies/edit.html', context)\n else:\n companies = Company.objects.filter(owner=request.user)\n context = dict(companies=companies)\n return render(request, 'companies/companies_by_user.html', context)", "def edit(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"../\"\n \n pp = PoseePermiso('redefinir tipo item',\n id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value( \\\n values={'id_atributos_por_tipo_item': int(args[0])})\n value['_method'] = 'PUT'\n page = 
\"Atributo {nombre}\".format(nombre=value[\"nombre\"])\n return dict(value=value, \n page=page, \n atras=url_action)", "def viewProject(self, projectId=None,size=None):\n\n uri = \"/v1/projects/\"\n if projectId:\n uri = uri + str(projectId)\n if size==0:\n uri =uri + \"?size=0\"\n response = self.client.get(uri)\n return response", "def do_project_show(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n _, project = cs.projects.get(id)\n utils.print_dict(project)", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def project_overview(project_name):\n if not db_find_project(project_name):\n abort(404)\n\n _project = Project.objects(project_name=project_name).first()\n # _forks = ProjectFork.objects(project_name=project_name, file_list__ne=[], total_changed_line_number__ne=0)\n _forks = ProjectFork.objects(project_name=project_name, total_changed_line_number__ne=0)\n\n # TODO _all_tags could be opted by AJAX\n _all_tags = {}\n if current_user.is_authenticated:\n _project_tags = ForkTag.objects(project_name=project_name, username=current_user.username)\n for tag in _project_tags:\n _all_tags[tag.fork_full_name] = tag.tags\n\n if current_user.is_authenticated:\n print('View: ', current_user.username, project_name)\n\n return render_template('project_overview.html', project=_project, forks=_forks, all_tags=_all_tags)", "def edit_plante(id):\n plante = get_plante(id)\n form = PlanteForm(plante)\n return render_template(\n \"create-plante.html\",\n title = plante.get_name()+\" - edit\",\n form = form,\n plante = plante,\n param = \"modif\")", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def edittask_view(request, task_id):\n\n # Use to tell to the template tha the user want to edit an already existing task\n is_new = False\n\n # Retrieve the task, raise an error if the task does not exist\n task = get_object_or_404(Task, id=task_id)\n project = task.projet\n # Check if logged in user is allowed to modify the task\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Check if the form has been submitted\n if request.method == \"POST\":\n form = TaskForm(project, request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n # 
Manually set the project id. Otherwise a new task would be created\n task.id = task_id\n task.last_modification = datetime.datetime.now()\n task.save()\n\n return redirect(\"task\", task_id=task.id)\n else:\n # Initialize the form with the task\n form = TaskForm(project, instance=task)\n else:\n return redirect(\"projects\")\n return render(request, \"newtask.html\", locals())", "def list(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n return super().list(request, args, kwargs)", "def get_project_info():\n\n title = request.args.get('project')\n\n project_info_list = hackbright.get_project_by_title(title)\n\n html = render_template(\"project_info.html\",\n project_info_list=project_info_list)\n return html", "def create_project(request):\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp)\n\n # check whether it's valid:\n if form.is_valid():\n prj_obj = form.save(commit=False)\n # prj_obj.description = bleach.clean(prj_obj.description, strip=True)\n # fint the user profile object based on the email in session\n user_profile = UserProfile.objects.get(email=request.session['email'])\n prj_obj.user = user_profile\n # Save the project object - project needs to exist before\n # manytomany field is accessed.\n prj_obj.save()\n # get the list of tag objects to add to project\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n prj_obj.tags.add(tag_object)\n for article_object in article_object_list:\n prj_obj.articles.add(article_object)\n prj_obj.save()\n return HttpResponse(str(prj_obj.id))\n # return HttpResponseRedirect('/projects/' + str(prj_obj.id))\n else:\n print form.errors.as_data()\n else:\n # Remove when front end updated.\n form = ProjectForm()\n return render(request, 'projects/create_project.html', {'form': form})", "def project_clone_view(user_data, cache):\n return ProjectCloneCtrl(cache, user_data, dict(request.json)).to_response()", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def project_view(request, project_id):\n\n # Retrieve the project to to be displayed. 
Raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n if request.method == 'GET':\n\n filters = Q()\n list_of_key = []\n query_string = request.META['QUERY_STRING']\n query_tab = query_string.split('&')\n filter_id_tab = []\n filter_dic = {}\n\n print(query_tab)\n\n if (query_tab != ['']):\n for query in query_tab:\n query_arg = query.split('=')\n id = query_arg[0]\n\n if not (id in filter_id_tab):\n filter_id_tab.append(id)\n try:\n filter_dic[id].append(query_arg[1])\n except KeyError:\n filter_dic[id] = [query_arg[1]]\n\n for key in request.GET:\n list_of_key.append(key)\n\n print(list_of_key)\n filters = creat_filters_rec(project, filter_dic, filter_id_tab)\n else:\n filters = Q()\n\n #\n # for key in filter_id_tab:\n #\n #\n # entry = filter_dic[key]\n #\n # if (len(entry) != 3):\n # continue\n #\n # filters = add_filter(filters, entry)\n\n tasks = project.task_set.filter(filters).order_by('-priority')\n else:\n # Retrieve all the task of the project and order them\n tasks = project.task_set.all().order_by('-priority')\n\n # Check if the logged in user is allowed to see this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n status = Status.objects.all()\n users = project.members.all()\n return render(request, 'project.html', locals())\n else:\n return redirect(\"projects\")", "def edit_task_page(request):\n data = {}\n try:\n tasklist = request.GET.get(\"tasklist\")\n task = request.GET.get(\"task\")\n data[\"tasklist\"] = tasklist\n\n task_obj = Todo.objects.get(title=task)\n data[\"data\"] = task_obj\n\n return render(request, \"pages/update-task.html\", data)\n except Exception as ex:\n return HttpResponse(ex)", "def upload_project(request):\n current_user = request.user\n current_user_name = current_user.username\n # project_ratings=Rating.objects.filter(id=project_id)\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project_post = form.save(commit=True) \n else:\n raise Http404 \n \n return redirect(view_projects)\n else: \n project_form=ProjectForm()\n \n return render(request, 'upload_project.html', {'project_form':project_form})", "def update_project_info(data):\n\tif 'pk' in data:\n\t\tif data['pk'] is not None:\n\t\t\tproject = get_or_none(ProjectInfo, pk=data['pk'])\n\t\t\tif project:\n\t\t\t\tproject.name = data['name']\n\t\t\t\tproject.description = data['description']\n\t\t\t\tproject.start_date = data['start_date']\n\t\t\t\tproject.end_date = data['end_date']\n\t\t\t\tproject.save()\n\t\t\t\tprint ('Updated')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False\n\n\telse:\n\t\tprint (\"please provide pk for updating\")\n\t\treturn False", "def task_edit(request, pk):\n task_manager = TaskManager.objects.get(id=pk)\n task = task_manager.task\n if request.method == 'POST':\n \ttask_form = TaskForm(request.POST)\n \ttask_owner = request.user\n\n \tif task_form.is_valid():\n \t\ttask_name = task_form.cleaned_data.get('task_name')\n \t\ttask_description = task_form.cleaned_data.get('task_description')\n\n \t\tif task_manager.task_owner == task_owner:\n \t\t\ttask.task_name = task_name\n \t\t\ttask.task_description = task_description\n \t\t\ttask.save()\n \t\t\treturn redirect('task_list')\n else:\n \tform = TaskForm(instance=task)\n\n context = {'form': form, 'task_manager':task_manager}\n return render(request, 'tasker/task_edit.html', context)", "def test_projects_patch(self):\n project = 
Project()\n response = self.client.open('/project-tracker/projects',\n method='PATCH',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def edit():", "def projects():\n \n if 'username' in session:\n current_user = mongo.db.user.find_one({'username': session['username']}) \n projects = mongo.db.projects.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/projects.html', title='Projects', projects=projects, current_user=current_user)\n \n flash('Please login to view user projects.', 'warning')\n return redirect(url_for('login'))", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))", "def edit_deployment(request, deployment, **_kwargs):\n pass", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def put(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n # collect the json from the request\n project_json = simplejson.loads(self.request.body)\n # update the project record\n project = helpers.apply_json_to_model_instance(project, project_json)\n # save the updated data\n project.put()\n \n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(project_json))\n \n else:\n self.response.set_status(404, \"Project not found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n \n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def update(self, oid, name=None, domain=None, enabled=None, \n description=None):\n data = {\"project\": {}}\n \n if name is not None:\n data['project']['name'] = name\n if domain is not None:\n data['project']['domain_id'] = domain\n if enabled is not None:\n data['project']['enabled'] = enabled\n if description is not None:\n data['project']['description'] = description\n \n path = '/projects/%s' % oid\n res = 
self.client.call(path, 'PATCH', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Update openstack project: %s' % truncate(res))\n return res[0]['project']", "def project_clone(request, proj_id=None):\n\n if not proj_id or not request.user.is_authenticated():\n raise Http404\n\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n project.pk = None\n project.user = request.user\n project.save()\n\n for scenario in Scenario.objects \\\n .filter(project_id=proj_id) \\\n .order_by('created_at'):\n scenario.pk = None\n scenario.project = project\n scenario.save()\n\n return redirect('/project/{0}'.format(project.id))", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def update(self):\n update_url = f'{self._tower.api}/projects/{self.id}/update/'\n response = self._tower.session.post(update_url)\n if not response.ok:\n self._logger.error(f\"Error updating the project '{self.name}'. response was: {response.text})\")\n return response.json() if response.ok else {}", "def project_detail(request, project_id):\n\n game_project = get_object_or_404(GameProject, pk=project_id)\n donation_options = Donation.objects.all()\n profile = get_object_or_404(Profile, user=request.user)\n\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n\n template = 'gameproject/project_detail.html'\n context = {\n 'game_project': game_project,\n 'donation_options': donation_options,\n 'profile': profile,\n }\n return render(request, template, context)", "def index():\n active = None\n projects = Projects.query.all()\n tasks = Tasks.query.all()\n\n if len(projects) == 1:\n projects[0].active = True\n active = projects[0].project_id\n db.session.commit()\n\n if projects:\n for project in projects:\n if project.active:\n active = project.project_id\n if not active:\n projects[0].active = True\n active = projects[0].project_id\n else:\n projects = None\n\n if projects:\n return render_template('clamytoe.html', tasks=tasks, projects=projects, active=active)\n else:\n return render_template('clamytoe.html', tasks=tasks, active=active)", "def rename_project(request):\n data = json.loads(request.body.decode('utf-8'))\n try:\n proj = models.Project.objects.get(pk=data['projid'])\n except models.Project.DoesNotExist:\n return JsonResponse({'error': f'Project with that ID does not exist in DB'}, status=404)\n # check if new project not already exist, and user have permission for all dsets\n proj_exist = models.Project.objects.filter(name=data['newname'])\n if proj_exist.count():\n if proj_exist.get().id == proj.id:\n return JsonResponse({'error': f'Cannot change name to existing name for project {proj.name}'}, status=403)\n else:\n return JsonResponse({'error': f'There is already a project by that name {data[\"newname\"]}'}, status=403)\n if is_invalid_proj_exp_runnames(data['newname']):\n return JsonResponse({'error': f'Project name cannot contain characters except {settings.ALLOWED_PROJEXPRUN_CHARS}'}, status=403)\n dsets = models.Dataset.objects.filter(runname__experiment__project=proj)\n if not all(check_ownership(request.user, ds) for ds in dsets):\n return JsonResponse({'error': f'You do not have the rights to change all datasets in this project'}, status=403)\n # queue jobs to rename project, update project name after that since it is 
needed in job for path\n create_job('rename_top_lvl_projectdir', newname=data['newname'], proj_id=data['projid'])\n proj.name = data['newname']\n proj.save()\n return JsonResponse({})", "def get_project(projectname):\n return jsonify(admin.get_project_info(current_app.scoped_session(), projectname))", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))", "def search(request):\n if 'find_project' in request.GET and request.GET['find_project']:\n project_name=request.GET.get('find_project')\n \n searched_project=Project.search_project(project_name)\n \n return render(request,'search_results.html',{'searched_project':searched_project})", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def edit(self, args: EditArgs) -> List[Task]:\n try:\n original_task, new_task = self.tasks.edit(args.index, args.name, args.label,\n args.project, args.due_date, args.time_spent)\n\n self.snapshots.update([original_task, new_task])\n return self.display_tasks(QueryResult([new_task]))\n except TaskKeyError:\n return self.display_invalid_index_error(args.index)", "def project_assign(request, project_code):\n if request.user.is_authenticated:\n projects = Project.objects.all()\n context = {'projects': projects}\n selected_project = get_object_or_404(Project, code=project_code)\n try:\n selected_project.status = 2 # project is assigned\n selected_project.save()\n\n # getting the head\n assigned_head = User.objects.get(department=selected_project.department,\n role__name__iexact=role_department_head)\n # create new task history object\n task_history = TaskHistory()\n task_history.project = selected_project\n task_history.description = (model_to_dict(selected_project))\n task_history.status = 'New Project'\n task_history.user = assigned_head\n task_history.save()\n\n \"\"\" Setting notification as project is assigned to a head \"\"\"\n assigned_head.notification_count += 1\n assigned_head.save()\n selected_project.assigned_at = datetime.now() # setting the assigned time\n selected_project.save()\n # print(assigned_head, '------------------------------------------*********************',\n # assigned_head.notification_count)\n messages.success(request, f\"Project '{selected_project.name}' is assigned to the department head.\")\n return redirect('project-list')\n except Exception as e:\n # print('error at assign project ====', e)\n messages.error(request, f\"Error: {e}\")\n return render(request, 'projectmanager/project_list.html', context)", "def edit_page(self, document_data: dict):\n wiki_obj = WikiService()\n token = wiki_obj.get_token()\n wiki_obj.check_token(token)\n\n project_wikitext_data = self.generate_page_sections_dict(\n document_data\n )\n\n updated_text = wiki_obj.generate_page_text_from_dict(\n self.project_page_template,\n f\"=={self.page_initial_section}==\",\n project_wikitext_data,\n self.users_list_section\n )\n\n project_page_name = f\"{document_data['project']['name']}\"\n\n wiki_obj.edit_page(\n token,\n project_page_name,\n updated_text\n )", "def edit(id):\n r = requests.get(API_ROUTE + '/' + 
str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def competitors_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n competitors_reference = get_object_or_404(Competitors, id=id,company=company)\n\n return render_to_response('competitors_form.html', \n {'details': competitors_reference,'info':competitors_reference},\n context_instance=RequestContext(request))", "def test_project_view(self):\n response = self.client.get('/projects/')\n self.assertEqual(response.status_code, 200)", "def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)", "def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')", "def test_handle_edit_name(self):\r\n project = Project(\"GTID\", [\"a\", \"b\"])\r\n project.display_name = \"name1\"\r\n project_id = project.project_id\r\n self.mock_facade.retrieve.return_value = project\r\n with self.app.app_context():\r\n resp, code = self.testcommand.handle(\r\n \"project edit %s --name name2\" % project_id, user)\r\n project.display_name = \"name2\"\r\n project_attach = [project.get_attachment()]\r\n expect = {'attachments': project_attach}\r\n self.assertDictEqual(resp, expect)\r\n self.assertEqual(code, 200)\r\n self.mock_facade.retrieve.assert_called_once_with(Project, project_id)\r\n self.mock_facade.store.assert_called_once_with(project)", "def document_edit_view(document_id):\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n return render_template('admin/documents/edit.html', document=doc, path='/admin/documents')", "def update_project(self, project_id, project):\n\n with self._transaction.cursor() as cur:\n # ensure this project exists\n cur.execute(\n \"SELECT project_id \"\n \"FROM barcodes.project \"\n \"WHERE project_id=%s;\",\n (project_id,))\n\n row = cur.fetchone()\n if row is None:\n raise NotFound(\"No project with ID %s\" % project_id)\n\n query = f\"\"\"\n UPDATE barcodes.project\n SET 
{p.DB_PROJ_NAME_KEY}=%s,\n {p.SUBPROJECT_NAME_KEY}=%s,\n {p.ALIAS_KEY}=%s,\n {p.IS_MICROSETTA_KEY}=%s,\n {p.SPONSOR_KEY}=%s,\n {p.COORDINATION_KEY}=%s,\n {p.CONTACT_NAME_KEY}=%s,\n {p.ADDTL_CONTACT_NAME_KEY}=%s,\n {p.CONTACT_EMAIL_KEY}=%s,\n {p.DEADLINES_KEY}=%s,\n {p.NUM_SUBJECTS_KEY}=%s,\n {p.NUM_TIMEPOINTS_KEY}=%s,\n {p.START_DATE_KEY}=%s,\n {p.BANK_SAMPLES_KEY}=%s,\n {p.PLATING_START_DATE_KEY}=%s,\n {p.DISPOSITION_COMMENTS_KEY}=%s,\n {p.COLLECTION_KEY}=%s,\n {p.IS_FECAL_KEY}=%s,\n {p.IS_SALIVA_KEY}=%s,\n {p.IS_SKIN_KEY}=%s,\n {p.IS_BLOOD_KEY}=%s,\n {p.IS_OTHER_KEY}=%s,\n {p.DO_16S_KEY}=%s,\n {p.DO_SHALLOW_SHOTGUN_KEY}=%s,\n {p.DO_SHOTGUN_KEY}=%s,\n {p.DO_RT_QPCR_KEY}=%s,\n {p.DO_SEROLOGY_KEY}=%s,\n {p.DO_METATRANSCRIPTOMICS_KEY}=%s,\n {p.DO_MASS_SPEC_KEY}=%s,\n {p.MASS_SPEC_COMMENTS_KEY}=%s,\n {p.MASS_SPEC_CONTACT_NAME_KEY}=%s,\n {p.MASS_SPEC_CONTACT_EMAIL_KEY}=%s,\n {p.DO_OTHER_KEY}=%s,\n {p.BRANDING_ASSOC_INSTRUCTIONS_KEY}=%s,\n {p.BRANDING_STATUS_KEY}=%s\n WHERE project_id=%s;\"\"\"\n\n cur.execute(query,\n (\n project.project_name,\n project.subproject_name,\n project.alias,\n project.is_microsetta,\n project.sponsor,\n project.coordination,\n project.contact_name,\n project.additional_contact_name,\n project.contact_email,\n project.deadlines,\n project.num_subjects,\n project.num_timepoints,\n project.start_date,\n project.bank_samples,\n project.plating_start_date,\n project.disposition_comments,\n project.collection,\n project.is_fecal,\n project.is_saliva,\n project.is_skin,\n project.is_blood,\n project.is_other,\n project.do_16s,\n project.do_shallow_shotgun,\n project.do_shotgun,\n project.do_rt_qpcr,\n project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec,\n project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status,\n project_id\n ))\n return cur.rowcount == 1", "def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. 
Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )", "def edit_person(self, pk):" ]
[ "0.7728187", "0.73595434", "0.7352115", "0.7184356", "0.703075", "0.67557067", "0.6421052", "0.6396073", "0.6380097", "0.6355604", "0.6334676", "0.63148147", "0.62868273", "0.6275144", "0.6263248", "0.62387496", "0.61801016", "0.6175357", "0.61719286", "0.61687547", "0.6160295", "0.6104489", "0.60983706", "0.6026067", "0.60135716", "0.6008381", "0.60005915", "0.5997544", "0.59944415", "0.5984973", "0.59822226", "0.5979926", "0.5928679", "0.5844091", "0.5842516", "0.582869", "0.5816892", "0.5816443", "0.58147395", "0.58054", "0.5798585", "0.5795871", "0.57902956", "0.5778561", "0.5750723", "0.5749431", "0.5747605", "0.5735695", "0.57333153", "0.5728052", "0.57152444", "0.57132083", "0.5701975", "0.57006085", "0.56966984", "0.5687032", "0.5672844", "0.56677186", "0.5665184", "0.5634963", "0.562724", "0.56078976", "0.56018686", "0.55676913", "0.5565488", "0.5563438", "0.55581224", "0.55520487", "0.55289525", "0.55275816", "0.55264467", "0.551728", "0.5504753", "0.54873836", "0.5483457", "0.5482077", "0.54646075", "0.5454579", "0.5451395", "0.544742", "0.5436783", "0.5434722", "0.5431706", "0.54284996", "0.5422912", "0.5418355", "0.5417564", "0.54132783", "0.5406909", "0.54025143", "0.5400909", "0.5400001", "0.5391841", "0.5390675", "0.53892034", "0.5386067", "0.5379042", "0.53745574", "0.5367516", "0.53583515" ]
0.7640755
1
A view to delete a project from the portfolio
def delete_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    project.delete()
    messages.success(request, 'Project deleted!')
    return redirect(reverse('portfolio'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))", "def delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet, id=id)\n\n # Check if the logged in user is allowed to delete this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Eventually delete the project\n project.delete()\n\n return redirect(\"projects\")", "def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))", "def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response", "def destroy(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n project.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Project.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def delete_project(project_id):\n \n project = mongo.db.projects\n project.delete_one({'_id': ObjectId(project_id)})\n flash('Your project has been deleted.', 'success')\n return redirect(url_for('projects'))", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def delete_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/' + str(project_id))\n else:\n if request.method == \"POST\":\n if project:\n project.delete()\n return HttpResponseRedirect('/projects/')\n else:\n return render(request, 'projects/delete_project.html',\n {'project': project})\n return render(request, 'projects/delete_project.html', {'project': project})", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def delete_project(project):\n with 
BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()", "def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project Deleted Successfully.\",\n }", "def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))", "def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )", "def delete_project(arn=None):\n pass", "def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def project_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True", "def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()", "def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n try:\n project.delete()\n 
self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True", "def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))", "def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response", "def _on_del_project(self):\n project = self.ddnCurProject.get()\n# if len(project) > 0:\n if project:\n if '.prj'!= project[-4:]:\n project += '.prj'\n if os.path.exists(self.BibTerm + '/'+ project):\n os.remove(self.BibTerm + '/'+ project)\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n# if len(self.list_projects) > 0:\n if self.list_projects:\n self.ddnCurProject.set(self.list_projects[0])\n else:\n self.ddnCurProject.set('')\n pass", "def delete(self, project_id):\n project_model = ProjectDBModel.query.get(project_id)\n if not project_model:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n try:\n data = request.get_json()\n # cambiarlo cuando se vuelva a tener dos PKs\n deleted = FavoritesProjectDBModel.delete(\n data['user_id'], project_id)\n if deleted:\n users = \\\n FavoritesProjectDBModel.get_favorites_of_project_id(\n project_id)\n response_object = {\n \"project_id\": project_id,\n \"users_id\": users,\n }\n return response_object, 200\n else:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n except KeyError:\n ns.abort(404, status=MISSING_VALUES_ERROR)", "def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)", "def delete_project(self, project_id):\n _url = f\"{self.base_url}/projects/{project_id}\"\n self.http_call(\"delete\", _url)\n return", "def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)", "def post_project_delete(self, resource_id, resource_dict):\n pass", "def delete_project(self, project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_delete.click()\n\n page_projects.form_delete_project_confirm.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_absence()", "def purge_project(request):\n data = json.loads(request.body.decode('utf-8'))\n if 'item_id' not 
in data or not data['item_id']:\n return JsonResponse({'state': 'error', 'error': 'No project specified for reactivating'}, status=400)\n projquery = models.Project.objects.filter(pk=data['item_id'], active=False)\n if not projquery:\n return JsonResponse({'state': 'error', 'error': 'Project does not exist or is still active'}, status=403)\n dsetowners = models.DatasetOwner.objects.filter(dataset__runname__experiment__project_id=data['item_id'], dataset__purged=False).select_related('dataset')\n if not request.user.is_staff:\n return JsonResponse({'state': 'error', 'error': 'User has no permission to purge this project, must be staff'}, status=403)\n result = {'errormsgs': []}\n for dso in dsetowners.distinct('dataset'):\n purged = delete_dataset_from_cold(dso.dataset)\n if purged['state'] == 'error':\n result.update({'state': 'error', 'error': 'Not all project datasets could be purged'})\n result['errormsgs'].append(purged['error'])\n # if any dataset cannot be purged, report it, do not mark proj as purged\n if result['errormsgs']:\n result['error'] = '{} Errors: {}'.format(result['error'], '; '.join(result.pop('errormsgs')))\n return JsonResponse(result, status=500)\n else:\n projquery.update(active=False)\n return JsonResponse({})", "def test_remove_project(self):\n pass", "def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")", "def delete(self, project_id):\n try:\n authenticated_user_id = token_auth.current_user()\n if not ProjectAdminService.is_user_action_permitted_on_project(\n authenticated_user_id, project_id\n ):\n raise ValueError()\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n try:\n ProjectAdminService.delete_project(project_id, authenticated_user_id)\n return {\"Success\": \"Project deleted\"}, 200\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def delete_keystone_v3_project(self, project_id, domain_id):\n LOG_OBJ.debug(\"Disable the project.\")\n kwargs = {\"project_id\": project_id, \"enabled\": False}\n self.set_keystone_v3_project(**kwargs)\n\n LOG_OBJ.debug(\"Deleting the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(project_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the project\")\n print (\"No response from Server while deleting the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Deleting project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def delete_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, 
project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member\n pm = ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n # Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n record = get_object_or_404(models.Record, pk=pk)\n # Delete record\n models.Record.objects.filter(project=get_object_or_404(models.Project, slug=slug), pk=pk).delete()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # Access denied...\n return HttpResponse(\"You don't have the permission to do this\")", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "async def delete(self, ctx, project_name: str) -> None:\n if not ctx.projects.find_project(project_name):\n channel = discord.utils.get(\n ctx.guild.channels, name=f\"{project_name}-project\")\n\n if channel and channel.category.name == \"Flux Projects\":\n if ctx.author.permissions_in(channel).manage_channels:\n message = await ctx.send(\"That project doesn't appear to\"\n \" exist in my database, but the \"\n \"channel still exists. \"\n \"Would you like to delete it?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\",\n check=lambda reaction, user: (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n await channel.delete(reason=\"Project not found.\")\n await ctx.send(\"The channel was deleted sucessfully.\")\n return\n\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the channel.\")\n return\n\n else: # If author doesn't have access to deleting channels.\n await ctx.send(\"That project does not appear to be in my \"\n \"database, but the channel for it still \"\n \"exists. Please have someone with\"\n \" manage channels run this chommand.\"\n )\n return\n else:\n await ctx.send(\"I could not find this project.\")\n return\n\n if str(ctx.author.id) != ctx.projects.find_project(project_name).get(\n \"owner\"):\n await ctx.send(\"Only the project owner \"\n \"can delete this project.\")\n return\n message = await ctx.send(\"This action __cannot__ be undone. \"\n \"Once you do this, everything is gone. 
\"\n \"Are you sure you want to continue?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\", check=lambda reaction, user:\n (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n channel = ctx.projects.find_project(\n project_name).get(\"channel\")\n channel = discord.utils.get(ctx.guild.channels,\n id=int(channel))\n ctx.projects.delete_project(project_name)\n if channel:\n await channel.delete(reason=\"Project deleted.\")\n await ctx.send(\"The project has been deleted.\")\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the project.\")", "def delete_project(self):\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n task_status = tasks[self.tasks_view.currentRow()].Status\n\n if task_status is 0:\n warning = Warning(\n \"<html><head/><body><p align=\\\"center\\\"><span style=\\\" font-weight:600;\\\">\"\n \"Unable delete Task. \"\n \"Make sure the Task is Done\"\n \"</span></p></body></html>\"\n )\n warning.exec_()\n else:\n self.tasks_flow.delete_task(task_id)\n self.write_tasks_table()", "def remove_project(project_id):\n response_object = {'status': 'success'}\n with database.engine.begin() as connection:\n\n stmt = select([models.projects.c.path]).where(\n models.projects.c.project_id == project_id)\n project = connection.execute(stmt).first()\n\n if project:\n app = flask.current_app\n project_path = os.path.join(\n app.root_path, app.config['DATA_DIRECTORY'], project['path'])\n if 'morphocut' in project_path and app.config['DATA_DIRECTORY'] in project_path:\n print('removing project with id {}'.format(project_id))\n if os.path.exists(project_path):\n helpers.remove_directory(project_path)\n\n stmt = models.projects.delete().where( # pylint: disable=no-value-for-parameter\n models.projects.c.project_id == project_id)\n\n connection.execute(stmt)\n\n return jsonify(response_object)", "def trash(request):\n return Task.objects.select_related('project').filter(user=request.user, folder='trash')", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete','projects'):\n abort(403)", "def DelProject(projname):\n\tif projname == \"\" or projname == None:\n\t\tpjnm = raw_input(\"\\nNombre del proyecto: \").lower()\n\t\tif pjnm == \"\" or pjnm == None:\n\t\t\tcancel()\n\telse:\n\t\t# Proceso para borrar todo el proyecto\n\t\tpass\n\n\tpa = open(\"author_name.txt\", \"r\")\t#Abre el archivo con el nombre del autor\n\tpa.read()\n\tpc = open(\"project_code.txt\", \"r\")\t#Abre el archivo con el codigo de proyecto\n\tpc.read()\n\n\tuserpa = raw_input(\"Ingrese el nombre del autor: \").lower()\n\tuserpc = raw_input(\"Ingrese el codigo del proyecto: \").lower()\n\n\tif userpa == pa and userpc == pc:\t#Se verifica que userpa(nombre del autor por el usuario) sea igual a pa(nombre original del autor) y lo mismo con el codigo del proyecto\n\t\tprint \"Iniciando el Borrado del Proyecto...\"\n\t\tpcommands.del_project()\n\t\tprint \"El proyecto se ha borrado con exito!\"\n\telse:\n\t\tprint \"El codigo del proyecto o el nombre del autor no es correcto.\"\n\t\tcancel()", "def es_delete(project=None):\n if project is not None:\n 
script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()", "def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()", "def pre_project_delete(self, resource_id):\n pass", "def delete(self, git_repo_id: int):\n self.datastore.delete(document_id=git_repo_id)\n return None, 204", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if not task == None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"deleteTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': self.request.params['UUID'], 'action': \"deleteTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n task.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_project_by_name(self, project_name):\n with self._transaction.cursor() as cur:\n # delete associations between this project and any barcodes\n cur.execute(\"DELETE FROM barcodes.project_barcode \"\n \"WHERE project_id in (\"\n \"SELECT project_id FROM barcodes.project \"\n \"WHERE project = %s)\",\n (project_name,))\n\n # now delete the project itself\n cur.execute(\"DELETE FROM barcodes.project WHERE project = %s\",\n (project_name,))\n return cur.rowcount == 1", "def delete(self, name, project=None):\n qlist = self._list(project)\n key = self._queue(project, name)\n self._db.delete(key)\n self._db.zremrangebyscore(qlist, -1, 1)", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete_portfolio(self, name, user):\n if self.exist_portfolio(name, user) and self.db_tool.is_connected():\n portfolio = self.get_portfolio_object()\n if portfolio and portfolio.id is not None:\n for order in portfolio.orders:\n self.db_tool.delete(order)\n self.db_tool.delete(portfolio)\n self.__remove_from_bulk(portfolio, \"portfolio_update\")\n self.__remove_from_bulk(portfolio, \"portfolio\")", "def competitors_delete(request, slug,id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return 
HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n competitors_reference = get_object_or_404(Competitors, id=id,company=company)\n\n #deletes the view and redirects to the page.\n competitors_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete(self, team_id, project_id):\n try:\n if not ProjectAdminService.is_user_action_permitted_on_project(\n token_auth.current_user, project_id\n ):\n raise ValueError()\n TeamService.delete_team_project(team_id, project_id)\n return {\"Success\": True}, 200\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403", "def delete(self, request, project):\n ProcessingIssue.objects.discard_all_processing_issue(project=project)\n return Response(status=200)", "def delete_remote_project(profile, project):\n return delete_remote_project_worker.delay(profile_id=profile.id,\n project_id=project.id)", "def delete(request, todo_id):\n\n todo = get_object_or_404(Todo, pk=todo_id)\n todo.delete()\n\n return redirect('index')", "def project_delete_event(self, proj_info):\n\n LOG.debug(\"Processing project_delete_event...\")\n proj_id = proj_info.get('resource_info')\n proj_name = self.get_project_name(proj_id)\n if proj_name:\n try:\n self.dcnm_client.delete_project(proj_name,\n self.cfg.dcnm.\n default_partition_name)\n except dexc.DfaClientRequestFailed:\n # Failed to delete project in DCNM.\n # Save the info and mark it as failure and retry it later.\n LOG.error(_LE(\"Failed to create project %s on DCNM.\"),\n proj_name)\n self.update_project_info_cache(proj_id, name=proj_name,\n opcode='delete',\n result=constants.DELETE_FAIL)\n else:\n self.update_project_info_cache(proj_id, opcode='delete')\n LOG.debug('Deleted project:%s', proj_name)\n self.project_delete_notif(proj_id, proj_name)", "def delete(request):\n return render(request, 'modify.html')", "def project_clear_files(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n project.clear_project_folder()\n return Response(status=status.HTTP_200_OK)\n else:\n raise PermissionDenied", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")", "def delete_project(self, name=None, delete_dir=False):\n victim = name or self.current\n if victim not in self:\n raise ValueError(\"{} is not a project\".format(victim))\n\n if len(self) == 1:\n raise ValueError(\"Can't delete only remaining project\")\n\n ProjectDataset.delete().where(ProjectDataset.name == victim).execute()\n\n if delete_dir:\n dir_path = self._base_data_dir / safe_filename(victim)\n assert dir_path.is_dir(), \"Can't find project directory\"\n shutil.rmtree(dir_path)\n\n if name is None or name == self.current:\n if \"default\" in self:\n self.set_current(\"default\")\n else:\n self.set_current(next(iter(self)).name)\n return self.current", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def delete_bucket_from_project(projectname, bucketname):\n return jsonify(\n admin.delete_bucket_on_project(\n current_app.scoped_session(), projectname, bucketname\n )\n )", "def delete_favorite(request):\n company_id = request.data.get('id')\n company = 
Company.objects.get(id=company_id)\n\n request.user.profile.companies.remove(company)\n return Response({'favorite': False})", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def numeros_by_id_delete_view(request):\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):\n raise exc.HTTPForbidden()\n\n settings = request.registry.settings\n projet_id = int(settings['numero_projet_id'])\n abandonne_id = int(settings['numero_abandonne_id'])\n\n # Get numero by id\n id = request.matchdict['id']\n query = request.dbsession.query(Numero).filter(\n Numero.id == id).first()\n\n if query:\n if query.etat_id == projet_id:\n query.etat_id = abandonne_id\n elif query.etat_id == abandonne_id:\n query.etat_id = projet_id\n\n return Utils.get_data_save_response(Constant.SUCCESS_DELETE.format(Numero.__tablename__))", "def delete_namespaced_project(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def funding_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n #deletes the view and redirects to the page.\n funding_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete_all_projects():\n client = 
RequestManager()\n client.set_method(\"GET\")\n client.set_endpoint(\"/projects\")\n response = client.execute_request()\n for project in response.json():\n try:\n ProjectHelper.delete_project(project[\"id\"])\n except TypeError:\n LOGGER.info(project)", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)", "def destroy(self,request,pk = None):\n return Response({'http_method':'DELETE'})", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def destroy(self, request, pk=None): #delete a specific object\n return Response({'http_method': 'DELETE'})", "def delete_plante(id):\n plante = get_plante(id)\n nom=plante.get_name()\n db.session.delete(plante)\n get_parterre(plante.get_parterre()).delete_plante(plante)\n p = Actions(\n contenu = \"Suppression de la plante \"+nom + \" au parterre \"+ get_parterre(plante.get_parterre()).get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.commit()\n return redirect(url_for(\"parterre\"))", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "def community_post_delete_view(request, slug):\n post = CommunityPostModel.objects.get(slug=slug) # Get the post\n\n if request.method == 'POST': # If the form has been submitted...\n post.delete() # Delete the object from the database\n return redirect('community-home') # Redirect to the home page\n\n context = {'post': post} # Pass the variables to the template\n return render(request,\n 'pages/patient-community/community-delete-post.html',\n context) # render the patient community delete page", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def test_projects_id_contacts_delete(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete(request):\n issue = request.issue\n tbd = [issue]\n for cls in [models.PatchSet, models.Patch, models.Comment,\n models.Message, models.Content]:\n tbd += cls.query(ancestor=issue.key)\n ndb.delete_multi(entity.key for entity in tbd)\n return HttpResponseRedirect(reverse(mine))", "def edit_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n \n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/'+str(project_id))\n else:\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp, instance=project)\n # form = ProjectForm(request.POST, instance=project)\n # check whether it's valid:\n if form.is_valid():\n #clear any previously stored 
tags to fix the bug \n #where we remove the tags and its not reflected\n try:\n project.tags.clear()\n project.articles.clear()\n except:\n pass\n m = form.save(commit=False)\n # m.description = bleach.clean(m.description, strip=True)\n m.save()\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n m.tags.add(tag_object)\n for article_object in article_object_list:\n m.articles.add(article_object)\n m.save()\n # return HttpResponseRedirect('/projects/' + str(m.id))\n # return project_detail(request, m.id)\n return HttpResponse(str(m.id))\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n # return render(request, 'projects/error_edit.html', {'form': form})\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n return project_detail(request, project_id)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def destroy(self, request, pk=None):\n try:\n deleted_team = self.controller.delete_team(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist:\n return Response(ObjectDoesNotExist, status=status.HTTP_400_BAD_REQUEST)", "def record_destroy_for_project(project_id):\n session = get_session()\n with session.begin():\n session.query(models.ProjectAccountRecord).\\\n filter_by(project_id=project_id).\\\n update({'deleted': True,\n 'deleted_at': datetime.datetime.utcnow(),\n 'updated_at': datetime.datetime.utcnow()})", "def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))" ]
[ "0.78291446", "0.77674896", "0.7705836", "0.7590049", "0.757986", "0.7458269", "0.7449735", "0.7428579", "0.7415701", "0.725367", "0.72272354", "0.71767354", "0.7172579", "0.6984525", "0.69331306", "0.69194317", "0.6911854", "0.6911854", "0.6897094", "0.6887393", "0.68611", "0.68117636", "0.6803068", "0.67991966", "0.6786162", "0.67531735", "0.6700644", "0.66726637", "0.6611343", "0.6611032", "0.65562284", "0.65472", "0.6533511", "0.6516196", "0.65016884", "0.6477105", "0.6443327", "0.6440251", "0.64313793", "0.6429306", "0.63233715", "0.6319385", "0.63159955", "0.6287224", "0.6208052", "0.62073386", "0.6191444", "0.6183393", "0.61755466", "0.61725193", "0.6153007", "0.6150377", "0.61462975", "0.60955584", "0.6092837", "0.6091273", "0.60727257", "0.6020462", "0.60187733", "0.60114574", "0.598109", "0.5956386", "0.5949785", "0.59414303", "0.5938845", "0.5933843", "0.5924895", "0.5908738", "0.5902762", "0.5873901", "0.58605635", "0.58569944", "0.5854838", "0.5841832", "0.5840724", "0.5828175", "0.58222026", "0.58169943", "0.58084637", "0.58062696", "0.5800368", "0.57869565", "0.57747006", "0.5763242", "0.57616496", "0.57532346", "0.5746531", "0.5746459", "0.57398874", "0.5734883", "0.57193136", "0.57159775", "0.5713112", "0.57063174", "0.57009834", "0.57009834", "0.569696", "0.56941456", "0.5657629", "0.5649738" ]
0.8196082
0
wraps the builtin print for additional extensibility
def log(*arg):
    context = str(*arg)
    print("[Texture Builder] {}".format(context))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print(*args, **kwargs):\n with P_LOCK:\n __builtins__.print(*args, **kwargs)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def print(self, *args, **kwargs):\n print(*args, **kwargs)", "def _redefine_print(is_main):\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop(\"force\", False)\n if is_main or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print", "def hook_print():\n sys.stdout = PrintHook()", "def _print_custom(self):\n pass", "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_(self, s: str) -> None:", "def print_and_return(*args, **kwargs):\n print(*args, end=\"\\r\", **kwargs)", "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "def static_print(*args, __p=print, **kwargs):\n __p(*args, **kwargs)", "def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper", "def print_func(self, *args):\n return _ida_hexrays.Hexrays_Hooks_print_func(self, *args)", "def print(self):\n # Your implementation here", "def _default_vprint_worker(*args, **kwargs):\r\n print(*args, **kwargs)", "def _print(self, *args):\n return _ida_hexrays.qstring_printer_t__print(self, *args)", "def _printable(self):\n pass", "def print(text):\n\n return builtin_print('{} | {}'.format(\n time.strftime('%H:%M:%S', time.gmtime()),\n text\n ))", "def print(*args, sep=\" \"):\n pass", "def _print(self, *args, **kwargs) -> None:\n # Only print in verbose mode\n if self._verbose:\n arglist = list(args)\n arglist[0] = f\"[buddy-{self._experiment_name}] {args[0]}\"\n print(*arglist, **kwargs)", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def printv(self, *arg):\n if self.verbose:\n print(*arg)", "def print_cust(self, msg):\n print(msg, end='')", "def out(*args):\r\n print(*args)", "def printout(*args, **kwargs):\n console_print(sys.stdout, *args, **kwargs)", "def test_print(chikin):\n chikin.print()", "def _print(self, *args):\n return _ida_hexrays.cnumber_t__print(self, *args)", "def print_out():\n pass", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def _Print(self, t):\n self.RaiseError(t, \"Print not supported\")", "def _p(self, *args, level=2, **kwargs):\n if self._verbosity >= level:\n print(*args, **kwargs)", "def eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)", "def printc(*a, **kw):\n print(*a, **kw)", "def create_print(prefix):\n def inner(*args):\n print prefix + str(args)\n return inner", "def print_cmd(ctx, klass=None):\n connecter = ScalingoInterface(ctx.obj)\n connecter.print_cmd()", "def verbose_print(verbose, print_function=None):\n\n if verbose:\n return print_function or print\n else:\n def vprint(*args, **kwars):\n pass\n return vprint", "def print_msg(*vargs, **kwargs):\n print(*vargs, **kwargs)", "def Print(self, text):\n pass", "def adv_print(*args, start='', in_file = False, **kwargs):\n max_line = kwargs.pop('max_line', False)\n print(kwargs)\n old_stdout = sys.stdout\n value = StringIO()\n sys.stdout = value\n 
print(*args, **kwargs)\n sys.stdout = old_stdout\n value = value.getvalue()\n value = start + value\n if max_line:\n value = value[:max_line] + '\\n' + value[max_line:]\n if in_file:\n if 'filename' in kwargs:\n filename = kwargs['filename']\n else:\n filename = 'output.txt'\n with open(filename, 'w') as f:\n f.write(value)\n print(value)", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)", "def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)", "def xprint(*args, **kwargs):\n # Compile text\n if 'prefix' in kwargs:\n xprint.prefix = kwargs['prefix']\n del kwargs['prefix']\n kwargs['flush'] = kwargs.get('flush', xprint.flush_by_default)\n sep = kwargs.get('sep', ' ')\n text = \"\"\n if xprint.prefix:\n text += xprint.prefix_function()\n for idx, arg in enumerate(args):\n text += sep + str(arg) if idx > 0 else str(arg)\n\n # Print text to stdout\n print(text, **kwargs)\n\n # Fetch end of line\n end = kwargs['end'] if 'end' in kwargs else '\\n'\n\n # Print text to log file\n if xprint.log_file and not xprint.log_file.closed:\n xprint.log_file.write(text + end)\n xprint.log_file.flush()\n\n # Prepare next printout\n xprint.prefix = end.rstrip(' \\t\\r').endswith('\\n')", "def print_function():\n print(\"I'm {}, and I'm printing now\".format(print_function.__name__))", "def setPrint():\n (e,d,sr,sw) = codecs.lookup('utf-8')\n unicode_to_utf8 = sw(sys.stdout)\n sys.stdout = unicode_to_utf8", "def vprint (*args, take_action=False, **kwargs):\n\n take_action = take_action and not opts.take_action\n\n if opts.verbose or take_action:\n print (*args, **kwargs)\n\n return take_action", "def print(self,\n *args,\n bullet: Union[str, bool] = None,\n style: StyleOptions = None,\n key_style: StyleOptions = None,\n **kwargs) -> None:\n if len(args) == 0:\n print(self.flush(), **kwargs)\n else:\n print(self.format(*args,\n bullet=bullet,\n style=style,\n key_style=key_style),\n **kwargs)", "def print_(*input_x):\n print_op = _get_cache_prim(Print)()\n return print_op(*input_x)", "def cprint(*arg):\n\tif sys.version_info[:2] == (3,2):\n\t\tprint(3.2)\n\telif sys.version_info[:2] == (2,7):\n\t\tprint(2.7)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if 
isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)", "def do_print(self, cmd):\n try:\n print(self.EvalExpression(cmd))\n except:\n pass", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def test_03_pass_print(self):\n print('Hello World!')", "def d_print(level, *args, **kwargs):\n if not isinstance(level, int):\n print(level, *args, **kwargs)\n elif debug >= level:\n print(*args, **kwargs)", "def displayhook(self, obj):\n # reproduce the behavior of the standard displayhook, not printing None\n if obj is not None:\n print >> self.stdout, repr(obj)", "def print_old(self, text, *args, **kwargs):\n if 'file' in kwargs:\n _builtin_print(text, *args, **kwargs)\n return self\n\n warnings.showwarning = _builtin_warning\n\n if 'sep' in kwargs:\n sep = kwargs['sep']\n else:\n sep = ' '\n\n if 'wrap' in kwargs:\n wrap = 
kwargs['wrap']\n else:\n wrap = None\n\n if wrap is None:\n if self.file is None:\n wrap = True\n else:\n wrap = False\n\n # join arguments to one string\n text = sep.join([str(arg) for arg in (text,) + args])\n\n # wrap text ----------------------------------\n if wrap:\n if isinstance(wrap, bool):\n wrap = self.get_wrap_length()\n\n lines = textwrap.wrap(\n text, wrap, break_on_hyphens=False\n )\n else:\n lines = [text]\n\n if len(lines) == 0:\n lines = ['']\n # --------------------------------------------\n\n # print --------------------------------------\n for nline, line in enumerate(lines):\n if nline == 0:\n prefix = self.prefix_for_issue()\n else:\n prefix = ' ' * self.prefix_length()\n self.update_screen(prefix + line)\n self.Nlines_last_message = 0\n self.show(force=True)\n # --------------------------------------------\n warnings.showwarning = self.custom_warning\n\n return self", "def debug_print(*args, sep=' ', end='\\n', file=sys.stdout, flush=False, lvl=1):\n if debuglvl >= lvl:\n print(*args, sep=sep, end=end, file=file, flush=flush)", "def print(self):\n\n print(self)", "def use_pypprint_for_implicit_print(self) -> None:\n if self.implicit_print is not None:\n self.implicit_print.func.id = \"pypprint\" # type: ignore\n # Make sure we import it later\n self.undefined.add(\"pypprint\")", "def _print(txt):\n\n # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Style: DIM, NORMAL, BRIGHT, RESET_ALL\n print('{0}{1}'.format(Style.BRIGHT + txt, Fore.RESET + Back.RESET + Style.RESET_ALL))", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def debug_print(fmt: str, *args, ordered=False, **kwargs) -> None:\n effect = DebugEffect.ORDERED_PRINT if ordered else DebugEffect.PRINT\n debug_callback(functools.partial(_format_print_callback, fmt), effect, *args,\n **kwargs)", "def displayhook(arg):\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def printf(str):\r\n print(str, flush=True)", "def printer(end,message):\n\n 
sys.stdout.write('\\r'+message+'\\t')\n sys.stdout.flush()\n if end: sys.stdout.write('\\n')", "def vprint(*args, **kwargs ):\n\n forceprint = False\n for key in kwargs:\n if key == \"forceprint\":\n forceprint =kwargs[key]\n \n line = ''\n if debug or forceprint : \n for arg in args:\n line += str(arg) +\" \"\n log = open(exepath + 'pyframe.log', 'a') \n log.write(line + \"\\n\")\n log.close() \n print line", "def prnt(printstring, silent=False):\n if not silent:\n stdout.write(printstring)", "def verbose_print(text,verbose_level):\n if Args.verbose >= verbose_level:\n print '\\t' * (verbose_level-1) + text", "def console_print(out, *args, **kwargs):\n const_charset = stream_encoding(out)\n out.write(' '.join([a.encode(cons_charset, 'replace') for a in args]))\n if kwargs.get('newline', True):\n out.write('\\n')", "def eprint(*pargs, **kargs):\n print('\\u001b[31m', end='', file=sys.stderr)\n print(*pargs, file=sys.stderr, **kargs)\n print('\\u001b[0m', end='', file=sys.stderr)", "def printv(self, string, **kwargs):\n if self.verbose:\n print(string, **kwargs)", "def _default_eprint_worker(*args, **kwargs):\r\n kwargs[\"file\"] = sys.stderr\r\n print(*args, **kwargs)", "def init_printing(pretty_print=True, order=None, use_unicode=None):\n if pretty_print:\n stringify_func = lambda arg: pretty(arg, order=order, use_unicode=use_unicode)\n else:\n stringify_func = sstrrepr\n\n try:\n import IPython\n\n ip = IPython.ipapi.get()\n\n if ip is not None:\n def result_display(self, arg):\n \"\"\"IPython's pretty-printer display hook.\n\n This function was adapted from:\n\n ipython/IPython/hooks.py:155\n\n \"\"\"\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)\n\n ip.set_hook('result_display', result_display)\n return\n except ImportError:\n pass\n\n import __builtin__, sys\n\n def displayhook(arg):\n \"\"\"Python's pretty-printer display hook.\n\n This function was adapted from:\n\n http://www.python.org/dev/peps/pep-0217/\n\n \"\"\"\n if arg is not None:\n __builtin__._ = None\n print stringify_func(arg)\n __builtin__._ = arg\n\n sys.displayhook = displayhook", "def print(self):\n self.print_avec_separateur(\" \")", "def v_print(msg):\n if (VERBOSE == 1):\n print(msg)", "def _print(self, *args):\n return _ida_hexrays.cinsn_t__print(self, *args)", "def print(self):\r\n self.print_avec_separateur()", "def pr(string, verbose):\n if(verbose):\n print(string)", "def pypprint(*args, **kwargs): # type: ignore\n from typing import Iterable\n\n if len(args) != 1:\n print(*args, **kwargs)\n return\n x = args[0]\n if isinstance(x, dict):\n for k, v in x.items():\n print(f\"{k}:\", v, **kwargs)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n for i in x:\n print(i, **kwargs)\n else:\n print(x, **kwargs)", "def l_print_no_barrier(*args):\n print(comm.rank, ':', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()", "def DEBUG_PRINT(msg, obj='', suffix=''):\n if PRINT_DEBUGS:\n print msg, obj, suffix", "def printer(msg):\r\n sys.stdout.write(\"\\r\" + msg)\r\n sys.stdout.flush()", "def pflush(*args, **kwargs):\n print(*args, **kwargs)\n sys.stdout.flush()", "def func_printed(self, *args):\n return _ida_hexrays.Hexrays_Hooks_func_printed(self, *args)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def repl_print_statements():\n pass", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def dprint(msg, 
debug):\n if debug:\n six.print_(msg)" ]
[ "0.72718763", "0.7198888", "0.7185116", "0.71198493", "0.69697016", "0.6957658", "0.6845616", "0.6798065", "0.6773647", "0.67016596", "0.66754776", "0.6661273", "0.66153914", "0.6614028", "0.6603238", "0.65869075", "0.6565293", "0.65613693", "0.65500474", "0.64926285", "0.6460793", "0.6431964", "0.64197546", "0.6395954", "0.6385862", "0.63802344", "0.6374293", "0.6356939", "0.6286463", "0.62329024", "0.62145734", "0.62140906", "0.62128764", "0.620452", "0.6202523", "0.6193259", "0.6179521", "0.6177883", "0.6143498", "0.60999745", "0.6072949", "0.6070835", "0.60347307", "0.60322136", "0.6030267", "0.6023803", "0.6023556", "0.6012794", "0.60004866", "0.5979838", "0.5978379", "0.596329", "0.596329", "0.596329", "0.596329", "0.596329", "0.596329", "0.595476", "0.59530514", "0.59523326", "0.5951161", "0.59494257", "0.59494257", "0.59478295", "0.58875185", "0.58714426", "0.58703566", "0.58651197", "0.5857485", "0.585079", "0.5846356", "0.58425707", "0.5834892", "0.5823359", "0.58211356", "0.58169866", "0.58106875", "0.5809231", "0.58066976", "0.58041865", "0.58030725", "0.57968", "0.577658", "0.5774002", "0.57703084", "0.5765934", "0.576067", "0.5753861", "0.57514834", "0.57505924", "0.57493633", "0.57465845", "0.5745778", "0.57408595", "0.57407117", "0.5739785", "0.5731534", "0.5727604", "0.5723444", "0.5721377", "0.57183343" ]
0.0
-1
start observer in another separate thread, and the WatchDog thread only monitors it
def run(self):
    observer = Observer()
    observer.schedule(self.ehandler, "./gl", True)
    observer.start()
    observer.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def __init__(self):\r\n super().__init__()\r\n self.daemon = True", "def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()", "def _start_monitor(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n logger_ic.info(\"ipmi-console monitor thread starts to run.\")\n monitor_thread = threading.Thread(target=monitor, args=(instance,))\n monitor_thread.setDaemon(True)\n monitor_thread.start()", "def start(self):\n\t\tif self._send_greenlet is None:\n\t\t\tself._send_greenlet = gevent.spawn(self._send_loop)", "def run(self):\n self.monitor.start()", "def start_monitoring(self):\n pass", "def start(self):\n logging.info(\"ICMPecho health monitor plugin: Starting to watch \"\n \"instances.\")\n\n self.monitor_thread = threading.Thread(target = self.start_monitoring,\n name = self.thread_name)\n self.monitor_thread.daemon = True\n self.monitor_thread.start()", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def __init__(self):\n super(MemoryMonitoringThread, self).__init__()\n self.daemon = True", "def start():\n\n start_server()", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def start(self):\n try:\n server = Thread(target=self._thread(self._server), name=\"server\")\n server.setDaemon(True)\n server.start()\n for i in range(0, 10):\n client = Thread(target=self._thread(self._client), name=\"client\")\n client.setDaemon(True)\n client.start()\n\n print \"Bugsnag Agent started. 
http://%s:%s -> %s\" % (self.listen, self.port, self.endpoint)\n while True:\n sleep(1000)\n except KeyboardInterrupt:\n # give threads time to print exceptions\n sleep(0.1)", "def start(self):\n gevent.spawn_later(self._period, self._run)", "def start(self):\n self._watchdog_thread.start()", "def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()", "def start(self):\n gevent.spawn(self.run)", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def start(self):\n self._isAlive = True\n super(ThreadedServer, self).start()\n logger.debug(\"Threaded Server has been started.\")", "def start(self):\n if self.driver:\n eventlet.spawn_n(self.driver.monitor_events)", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def run(self):\n self.logger.info(\"start consuming api calls\")\n while not self.shutdown:\n self.rpc.listen()", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def start(self, _):\n logger.debug(\"Spawning metric & span reporting threads\")\n self.should_threads_shutdown.clear()\n self.sensor.start()\n instana.singletons.tracer.recorder.start()", "def _start(self, host):\n pass", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def start(self):\n self.build_client_snapshot()\n 
self.load_local_dir_state()\n\n # Operations necessary to start the daemon\n self.create_observer()\n self.observer.start()\n self.sync_with_server()\n\n self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.listener_socket.bind((self.cfg['cmd_address'], self.cfg['cmd_port']))\n self.listener_socket.listen(self.cfg['backlog_listener_sock'])\n r_list = [self.listener_socket]\n self.daemon_state = 'started'\n self.running = 1\n polling_counter = 0\n try:\n while self.running:\n r_ready, w_ready, e_ready = select.select(r_list, [], [], self.cfg['timeout_listener_sock'])\n\n for s in r_ready:\n\n if s == self.listener_socket:\n # handle the server socket\n client_socket, client_address = self.listener_socket.accept()\n r_list.append(client_socket)\n else:\n # handle all other sockets\n length = s.recv(Daemon.INT_SIZE)\n if length:\n # i need to do [0] and cast int because the struct.unpack return a tupla like (23234234,)\n # with the length as a string\n length = int(struct.unpack('!i', length)[0])\n message = json.loads(s.recv(length))\n for cmd, data in message.items():\n if cmd == 'shutdown':\n raise KeyboardInterrupt\n self.conn_mng.dispatch_request(cmd, data)\n else:\n s.close()\n r_list.remove(s)\n\n # synchronization polling\n # makes the polling every 3 seconds, so it waits six cycle (0.5 * 6 = 3 seconds)\n # maybe optimizable but now functional\n polling_counter += 1\n if polling_counter == 6:\n self.sync_with_server()\n polling_counter = 0\n\n except KeyboardInterrupt:\n self.stop(0)\n self.observer.stop()\n self.observer.join()\n self.listener_socket.close()", "def connect(self):\n self.start()", "def start_socket_thread(self):\n self.socket_thread = BCPServer(self, self.receive_queue,\n self.sending_queue)\n self.socket_thread.daemon = True\n self.socket_thread.start()", "def start_daemon(self, *args, **kwargs):\n pass", "def start(self):\n listening_thread = Thread(\n target=self.sock.start_listening, daemon=True)\n listening_thread.start()\n sending_thread = Thread(target=self.sock.start_sending, daemon=True)\n sending_thread.start()\n\n ack_watch_thread = Thread(target=self.watch_for_acks, daemon=True)\n ack_watch_thread.start()\n\n ack_timeout_thread = Thread(\n target=self.watch_for_ack_timeout, daemon=True)\n ack_timeout_thread.start()\n\n self.report()", "def start_server(self):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n\n # The two services we added in the proto. You can find these functions in\n # jellybeanrobot_pb2_grpc.py.\n jellybeanrobot_pb2_grpc.add_JellyServicer_to_server(Robot(), server)\n\n # Start listening on a port.\n server.add_insecure_port(\"localhost:%d\" % self.port)\n print \"Listening on localhost:%d!\\n\" % self.port\n server.start()\n\n try:\n while True:\n time.sleep(3600) # one hour. 
\n except KeyboardInterrupt:\n server.stop(0)", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def run(self):\n\n listen_port = DEBUGGER_PORT if \"RENPY_DEBUGGER_PORT\" not in os.environ else os.environ[\"RENPY_DEBUGGER_PORT\"]\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((\"0.0.0.0\", listen_port))\n server.listen(0)\n\n while True:\n client, client_address = server.accept()\n self.attach_one_client(client)", "def init_client_seeker():\n client_seeker = threading.Thread(target=seek_for_client)\n client_seeker.daemon = True\n client_seeker.start()", "def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]", "def run(self):\n self.connect()\n self.run_forever()", "def daemon_main():\n # handle SIGTERM gracefully\n signal.signal(signal.SIGTERM, sigterm)\n\n try:\n dispatcher = dispatcher_type(args.server,\n args.dispatch_uri,\n daemon.logger,\n args.cafile)\n except Exception as e:\n daemon.logger.error(\"Startup error: {}\".format(e))\n sys.exit(1)\n mochad_client = MochadClient(args.server, daemon.logger, dispatcher)\n global loop\n loop = asyncio.get_event_loop()\n # dispatcher.watchdog() runs continuously to monitor the dispatcher's health\n # and act on any problems asyncronously\n asyncio.async(dispatcher.watchdog(loop))\n asyncio.async(mochad_client.worker(loop))\n loop.run_forever()", "def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()", "def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())", "def start(self):\n self.synchronizer = SyncThread(self.api, self.sync_dir)\n self.synchronizer.start()\n self.tray.on_login()", "async def _start_service_monitor(cls):\n cls.service_monitor = Monitor()\n await cls.service_monitor.start()", "def start_sync(self):\n\n if 'daemon_log' in self.data:\n self.log_to_daemonlog()\n\n self.start_listener()\n\n self.update_state(pid=os.getpid())\n\n try:\n self.keep_alive()\n except errors.StopProcess:\n self._terminate()\n finally:\n self.update_state(pid=None)\n self.finished_event.set()\n self._running = False\n if self._listener:\n try:\n send_action(self.name, 'exitloop')\n except:\n pass", "def start_non_blocking(self):\n self._start_thread(self.start, daemon=True)", "def _start_polling(self):\n self._handle = asyncio.get_event_loop().create_task(self._poll())", "def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()", "def main(argv):\n try:\n # log device initialized successfully\n print \"Device initialized for the configuration updates\"\n #Start a thread to do the polling\n t = Thread(target=poll_snabb)\n t.daemon = True\n t.start()\n opw = OpServer()\n server.register_instance(opw)\n print (\"Starting the reactor\")\n server.serve_forever()\n\n except Exception as e:\n # log device initialization failed\n print(\"JET app exiting due to exception: %s\" %str(e.message))\n os._exit(0)\n return", "def _connect(self) -> None:\n if self._agent_thread.is_alive():\n raise ValueError(\"Agent already running.\")\n self._agent_thread.start()\n\n while not 
self._agent.runtime.is_running: # check agent completely running\n time.sleep(0.01)", "def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def startLoop():\n patchAsyncio()", "def start(self):\n self.socket_manager.start()\n\n if self.poc != None:\n self._start_thread(self.contact_poc, daemon=True)\n self.send_discovery_message(self.poc)\n self._start_thread(self.watch_for_discovery_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_messages, daemon=True)\n self._start_thread(self.send_heartbeat_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_timeouts, daemon=True)\n self._start_thread(self.watch_for_rtt_messages, daemon=True)\n self._start_thread(self.calculate_rtt_timer, daemon=True)\n self._start_thread(self.watch_for_app_messages, daemon=True)\n\n while True: # Blocking. Nothing can go below this\n self.check_for_inactivity()", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def start(self):\n thread.start_new_thread(Pyro4.naming.startNSloop, tuple())\n\n self.ns = Pyro4.locateNS()\n if self.ns == None:\n logging.error('Cannot locate Pyro NS.')\n return\n\n daemon = export(self)\n thread.start_new_thread(daemon.requestLoop, tuple())\n thread.start_new_thread(self.healthcheck, tuple())\n logging.info('%s started' % self.name)", "def start(self):\n self.thread.start()", "def on_start(self):\n self.run_in_background(self.__run_client)", "def peer_server(self):\n try:\n listener_thread = threading.Thread(target=self.peer_server_listener)\n listener_thread.setDaemon(True)\n\n operations_thread = threading.Thread(target=self.peer_server_host)\n operations_thread.setDaemon(True)\n\n listener_thread.start()\n operations_thread.start()\n\n threads = []\n threads.append(listener_thread)\n threads.append(operations_thread)\n\n for t in threads:\n t.join()\n except Exception as e:\n print \"Peer Server Error, %s\" % e\n sys.exit(1)", "def main():\n s = start_server()\n accept_connection(s)", "def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)", "def run(self):\n ioloop.IOLoop.current().start()", "def start():\n Networker.stop()\n Networker.Instance = Networker()", "def main():\n # Get the current event loop\n loop = asyncio.get_event_loop()\n # While the moonitor is running\n with aiomonitor.start_monitor(loop=loop):\n # Keep the loop working\n loop.run_forever()", "def start(self):\n waiting_for_clients = Thread(target=self.accept_client)\n waiting_for_clients.start()", "def _hook_on_ping_and_motd(self):\n self._join()", "def setUpZServerThread(self):\n\n from ZServer import zhttp_server, zhttp_handler, logger\n from cStringIO import StringIO\n\n zlog = logger.file_logger(StringIO())\n\n zserver = zhttp_server(ip=self.host,\n port=self.port, \n resolver=None,\n logger_object=zlog)\n zhandler = zhttp_handler(module=bobo_app_name, uri_base='')\n zserver.install_handler(zhandler)\n\n self.zserver = zserver\n name = self.__class__.__name__\n self.zthread = 
ZServerThread(name=\"%s server\" % name)\n self.zthread.start()", "def start_monitoring(self, widget, data):\n\t\t#cambio le impostazioni dei locks\n\t\tself.RunFlag = True\n\t\tself.SetLocks()\n\n\t\tprint \"### Sending start signal to Monitor...\"\n\t\tself.start_monitor()\n\t\ttime.sleep(Configure.Interval)", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def server():", "def server():", "async def server_main(loop, proxy_config, server_config):\n\n controller = Controller(\n MessageProxy(proxy_config),\n hostname=server_config['listen']['addr'],\n port=server_config['listen']['port'],\n )\n controller.start()", "def thread_serve(self):\n self.threaded_server = StoppableThread(target=self.start)\n self.threaded_server.start()\n\n while not self.threaded_server.stopped():\n time.sleep(1)\n\n # Stop the listeners...\n self.dp.qprint(\"setting b_stopThread on all listeners...\")\n for i in range(0, self.listeners):\n self.dp.qprint(\"b_stopThread on listener %d and executing join()...\" % i)\n self.l_listener[i].b_stopThread = True\n self.l_listener[i].join()\n\n # Stop the fileIO\n self.fileIO.b_stopThread = True\n self.dp.qprint(\"b_stopThread on fileIO executing join()...\")\n self.fileIO.join()\n\n self.dp.qprint(\"Shutting down the zmq infrastructure...\")\n try:\n self.dp.qprint('calling self.socket_back.close()')\n self.socket_back.close()\n except:\n self.dp.qprint('Caught exception in closing back socket')\n\n try:\n self.dp.qprint('calling self.socket_front.close()')\n self.socket_front.close()\n except zmq.error.ZMQError:\n self.dp.qprint('Caught exception in closing front socket...')\n\n self.dp.qprint('calling zmq_context.term()')\n # self.zmq_context.term()\n\n self.dp.qprint(\"calling join() on all this thread...\")\n self.threaded_server.join()\n self.dp.qprint(\"shutdown successful...\")", "def start(self):\n self._thread.start()", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def run(self):\n self._connection = self.connect()\n self._connection.ioloop.start()", "def __start_client(self, clientsocket, ip, port):\n #create synchronizer\n if self.profile[u'log_file_path']:\n self.logger.debug(u'Create synchronizer with log file \"%s\" handling' % self.profile[u'log_file_path'])\n synchronizer = SynchronizerExecEnv(ip, port, clientsocket, self.profile[u'mappings'], self.profile[u'log_file_path'], self.debug)\n elif self.remote_logging:\n self.logger.debug(u'Create synchronizer with internal application log (lib mode) handling')\n synchronizer = SynchronizerExecEnv(ip, port, clientsocket, self.profile[u'mappings'], None, self.debug)\n else:\n self.logger.debug(u'Create synchronizer with no log handling')\n synchronizer = SynchronizerExecEnv(ip, port, clientsocket, self.profile[u'mappings'], False, self.debug)\n synchronizer.start()\n\n #create filesystem watchdogs on each mappings\n for src in list(self.profile[u'mappings'].keys()):\n dest = self.__clean_path(self.profile[u'mappings'][src][u'dest'])\n if not os.path.exists(dest):\n #create missing directory to be able to watch changes\n os.makedirs(dest)\n drop_files = [self.profile[u'log_file_path']]\n self.logger.debug(u'Create filesystem observer for dir \"%s\"' % dest)\n observer = Observer()\n observer.schedule(\n RequestFileCreator(synchronizer.add_request, dest, 
mappings=self.profile[u'mappings'], drop_files=drop_files),\n path=dest,\n recursive=True)\n observer.start()\n\n self.__observers.append(observer)\n\n return synchronizer", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def startListening(self):\n \n self.listener_thread = threading.Thread(target=self.listening, daemon=True)\n self.listener_thread.start()\n\n # stateupdate = threading.Thread(target=self.showStatus, daemon=True)\n # stateupdate.start()\n\n # Main App Loop (Keeps the Client opened)\n while self.listener_thread.is_alive():\n time.sleep(1)\n else:\n print('Shutting Main Thread-1')\n sys.exit()", "def start(self) -> None:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.wserver = websockets.serve(self.__producer_handler, port=self.port, loop=loop)\n try:\n # run server forever\n self.server = asyncio.get_event_loop()\n self.server.run_until_complete(self.wserver)\n self.server.run_forever()\n except Exception:\n self.close()\n\n loop.run_forever()", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def start(self):\n\n self.__new_bus_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__bus_stations_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__ipv4 = (socket.gethostbyname(socket.gethostname()))\n if self.__telegram_bot == None:\n print(\"telegram bot connection is not set yet\")\n return\n if self.__message_sender == None:\n print(\"message sender connection is not set yet\")\n return\n new_bus_receiver = threading.Thread(target=self.__new_bus_reciever, args=(), name=\"new_bus_reciever\")\n new_bus_receiver.start()\n updates_tracker = threading.Thread(target=self.__track_updates, args=(), name=\"updates_tracker\")\n updates_tracker.start()\n heart_beat = threading.Thread(target=self.__heart, args=(), name=\"Heart beats\")\n heart_beat.start()", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()", "def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")", "def run_service(self):\n self.alerts_queue = Queue(maxsize=SERVICE_ALERT_QUEUE_SIZE)\n self.thread_server = Thread(target=self._on_server_start)\n # self.thread_server.daemon = True\n self.thread_server.start()\n\n try:\n while self.thread_server.is_alive():\n try:\n new_alert = self.alerts_queue.get(timeout=1)\n self.emit(**new_alert)\n except Empty:\n continue\n except KeyboardInterrupt:\n self.logger.debug(\"Caught KeyboardInterrupt, shutting service down gracefully\")\n raise\n except Exception as exc:\n self.logger.exception(exc)\n finally:\n self._on_server_shutdown()", "def setUp(self):\n self.server = ResourceManagerServer(log_to_screen=False)\n self._server_thread = Thread(target=self.server.start)\n\n self._server_thread.start()\n time.sleep(self.SERVER_STARTUP_TIME)", "def start(self):\n try:\n self.getEverything()\n self._watchFolder()\n except Unauthorized, e:\n self.authorize()\n self.start()\n \n #TODO: make this work\n #self._setPeriodicSync()\n \n print 'stopped'", "def start(self):\n self.serve_forever()", "def start(self):\n self.serve_forever()", "async def start(self):", "async def start(self):", "def start(self):\n self.listener.listen(self.backlog)\n h, p = self.listener.getsockname()\n self.logger.info(\"server started on 
%s:%s\", h, p)\n self.active = True\n if self.auto_register:\n t = THG.Thread(target = self._bg_register)\n t.setDaemon(True)\n t.start()\n #if sys.platform == \"win32\":\n # hack so we can receive Ctrl+C on windows\n self.listener.settimeout(0.5)\n try:\n try:\n while True:\n self.accept()\n except EOFError:\n pass # server closed by another thread\n except SystemExit:\n self.logger.warn(\"System exit\")\n except KeyboardInterrupt:\n self.logger.warn(\"keyboard interrupt!\")\n finally:\n self.logger.info(\"server has terminated\")\n self.close()" ]
[ "0.6764007", "0.661172", "0.6549474", "0.64551026", "0.6381024", "0.6361601", "0.63481355", "0.63394445", "0.6275483", "0.62610716", "0.6260074", "0.62537545", "0.6252852", "0.6233397", "0.6223834", "0.61703527", "0.61615974", "0.6160586", "0.61547226", "0.6127885", "0.6118095", "0.61140704", "0.60949445", "0.6087372", "0.6073718", "0.6038754", "0.60223246", "0.60071206", "0.599459", "0.598858", "0.597729", "0.5971403", "0.59650934", "0.5964582", "0.596332", "0.5956303", "0.59546655", "0.5951894", "0.59405714", "0.5927838", "0.5925236", "0.5909828", "0.5894637", "0.588672", "0.5885857", "0.58818966", "0.58746105", "0.5866355", "0.5864581", "0.5863937", "0.58558", "0.5854648", "0.58534616", "0.58530736", "0.58475244", "0.5843592", "0.5843592", "0.58418685", "0.5824628", "0.58230525", "0.5822391", "0.5819539", "0.5812334", "0.5812219", "0.5808145", "0.5796879", "0.57858247", "0.5783615", "0.57815576", "0.57768166", "0.5774128", "0.5771238", "0.57690144", "0.5766545", "0.57552135", "0.57539916", "0.57455516", "0.57455516", "0.5736498", "0.5733698", "0.57309264", "0.57248014", "0.571503", "0.571503", "0.57145405", "0.57137245", "0.5712776", "0.570964", "0.5705027", "0.5704386", "0.5703952", "0.5702155", "0.57008797", "0.56994194", "0.56993467", "0.56981456", "0.5696055", "0.5696055", "0.5692154", "0.5692154", "0.5691958" ]
0.0
-1
generate simplest screen filling quad
def screen_vao(cls, gl, program):
    vbo = [
        -1.0, -1.0,
        +1.0, -1.0,
        -1.0, +1.0,
        +1.0, +1.0,
    ]
    vbo = np.array(vbo).astype(np.float32)
    vbo = [(gl.buffer(vbo), "2f", "in_pos")]

    ibo = [0, 1, 2, 1, 2, 3]
    ibo = np.array(ibo).astype(np.int32)
    ibo = gl.buffer(ibo)

    vao = gl.vertex_array(program, vbo, ibo)
    return vao
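The document above builds a screen-filling quad from four clip-space corners and two indexed triangles (0-1-2 and 1-2-3). A minimal usage sketch follows, assuming gl is a moderngl context and program is a shader program whose vertex stage reads in_pos as a vec2; the shader sources, the standalone context, and the framebuffer size are illustrative assumptions, not part of the dataset record.

import numpy as np  # required by screen_vao above
import moderngl

# Illustrative pass-through shaders (not from the record); in_pos matches the "2f", "in_pos" attribute.
VERT = """
#version 330
in vec2 in_pos;
void main() { gl_Position = vec4(in_pos, 0.0, 1.0); }
"""
FRAG = """
#version 330
out vec4 frag_color;
void main() { frag_color = vec4(1.0, 0.5, 0.2, 1.0); }
"""

gl = moderngl.create_standalone_context()       # off-screen context for the sketch
program = gl.program(vertex_shader=VERT, fragment_shader=FRAG)
vao = screen_vao(None, gl, program)              # cls is unused by the function above
fbo = gl.simple_framebuffer((640, 480))
fbo.use()
fbo.clear(0.0, 0.0, 0.0, 1.0)
vao.render()                                     # the two indexed triangles cover the whole viewport

Since the vertex order is already strip-friendly, the same quad could be drawn without an index buffer as a triangle strip; the record keeps an explicit ibo and plain triangles instead.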
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_rhombus(self, screen):\n pygame.gfxdraw.filled_polygon(screen, self.list_of_coordinates, self.color)\n\n return screen", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, (255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)", "def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)", "def fill_grid(self, gx, gy, color=Color['white']):\n area = [gx * self.px, gy * self.py, self.px, self.py]\n pygame.draw.rect(self.display, color, area)", "def dessinerRectangle(p0, p1, p2, p3,texture=None, textureRepeat = True, color = (0,1,0)):\n \n \n if texture == None:\n r,v,b = color\n glDisable(GL_TEXTURE_2D)\n glColor3f(r,v,b)\n glBegin(GL_QUADS)\n glVertex3f(p0[0],p0[1],p0[2])\n glVertex3f(p1[0],p1[1],p1[2])\n glVertex3f(p2[0],p2[1],p2[2])\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n glEnable(GL_TEXTURE_2D)\n else:\n\n if textureRepeat:\n a = fabs(p0[0] - p1[0])\n b = fabs(p0[1] - p1[1])\n c = fabs(p0[2] - p1[2])\n\n if a >= b and a >= c:\n d = a\n elif b >= a and b >= c:\n d = b\n elif c >= a and c >= b:\n d = c\n else:\n d = a\n\n a = fabs(p1[0] - p2[0])\n b = fabs(p1[1] - p2[1])\n c = fabs(p1[2] - p2[2])\n\n if a >= b and a >= c:\n e = a\n elif b >= a and b >= c:\n e = b\n elif c >= a and c >= b:\n e = c\n else:\n e = a\n\n del a\n del b\n del c\n\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(d,0.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(d,e)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(0,e)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n else:\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(0.0,1.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(1.0,1.0)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(1.0,0.0)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()", "def test_plot_quad(geometry):\n ## get index arrays\n rows, cols = geometry.get_pixel_coord_indexes('QUAD:V1', 1, pix_scale_size_um=None, xy0_off_pix=None, do_tilt=True)\n\n # get intensity array\n arr = cspad_ndarr(n2x1=rows.shape[0])\n arr.shape = (8,185,388)\n amp_range = (0,185+388)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def drawRectangle(width, height, tilt, penColor, fillColor):\n Lucia.color(penColor,fillColor)\n Lucia.seth(tilt)\n Lucia.begin_fill()\n for i in range(2):\n Lucia.forward(width)\n Lucia.left(90)\n Lucia.forward(height)\n Lucia.left(90)\n Lucia.end_fill()", "def draw_square(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.rect(\n display, color, (left + quarter, top + quarter, half, half))\n return", "def draw(self, screen):\n halfScale = int(self.screenScale / 2)\n\n x = 
int(self.x)\n y = int(self.y)\n for i in range(-halfScale, halfScale):\n for j in range(-halfScale, halfScale):\n\n pygame.Surface.set_at(\n screen, (x * self.screenScale + i, y * self.screenScale + j), self.color)", "def drawSquare(t, sz):\n\n t.shape(\"turtle\")\n while 1:\n\t if sz > 200:\n\t \tbreak\n\t for j in range (36):\n\t \tt.left(10)\n\t \tsz = sz + 1 \n\n\t \tif j%2 == 1:\n\t \t\tt.color(\"red\")\n\t \telse:\n\t \t\tt.color(\"blue\")\n\t \tfor i in range(4):\n\t \t\tt.forward(sz)\n\t \t\tt.left(90)\n\t sz = sz + 1", "def make_square(turt,sz):\n for i in range(4):\n turt.forward(sz)\n turt.left(90)", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def drawBackground(self,screen):\n pygame.draw.rect(screen,(240,240,240),(self.basepos[0],self.basepos[1],204,504))\n pygame.draw.rect(screen,(0,0,0),(self.basepos[0]+2,self.basepos[1]+2,200,500))", "def draw_pavement():\n\n roberto.penup()\n roberto.goto(-345, -100)\n roberto.pendown()\n roberto.begin_fill()\n for i in range(4): # this loop draws a big black rectangle that is positioned at the bottom part of the screen\n roberto.forward(684)\n roberto.right(90)\n roberto.end_fill()", "def draw_rectangle(t, w, h):\r\n for i in range(2):\r\n t.forward(w)\r\n t.left(90)\r\n t.forward(h)\r\n t.left(90)", "def draw_offscreen(context):\n offscreen = SprytileGui.offscreen\n target_img = SprytileGui.texture_grid\n tex_size = SprytileGui.tex_size\n\n offscreen.bind()\n glClear(GL_COLOR_BUFFER_BIT)\n glDisable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0, tex_size[0], 0, tex_size[1])\n\n def draw_full_quad():\n texco = [(0, 0), (0, 1), (1, 1), (1, 0)]\n verco = [(0, 0), (0, tex_size[1]), (tex_size[0], tex_size[1]), (tex_size[0], 0)]\n glBegin(bgl.GL_QUADS)\n for i in range(4):\n glTexCoord2f(texco[i][0], texco[i][1])\n glVertex2f(verco[i][0], verco[i][1])\n glEnd()\n\n glColor4f(0.0, 0.0, 0.0, 0.5)\n draw_full_quad()\n\n if target_img is not None:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n target_img.gl_load(0, GL_NEAREST, GL_NEAREST)\n glBindTexture(GL_TEXTURE_2D, target_img.bindcode[0])\n # We need to backup and restore the MAG_FILTER to avoid messing up the Blender viewport\n old_mag_filter = Buffer(GL_INT, 1)\n glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n 
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glEnable(GL_TEXTURE_2D)\n draw_full_quad()\n glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n\n # Translate the gl context by grid matrix\n grid_matrix = sprytile_utils.get_grid_matrix(SprytileGui.loaded_grid)\n matrix_vals = [grid_matrix[j][i] for i in range(4) for j in range(4)]\n grid_buff = bgl.Buffer(bgl.GL_FLOAT, 16, matrix_vals)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glLoadMatrixf(grid_buff)\n\n glDisable(GL_TEXTURE_2D)\n\n # Get data for drawing additional overlays\n grid_size = SprytileGui.loaded_grid.grid\n padding = SprytileGui.loaded_grid.padding\n margin = SprytileGui.loaded_grid.margin\n curr_sel = SprytileGui.loaded_grid.tile_selection\n is_pixel_grid = sprytile_utils.grid_is_single_pixel(SprytileGui.loaded_grid)\n is_use_mouse = context.scene.sprytile_ui.use_mouse\n is_selecting = SprytileGui.is_selecting\n\n glLineWidth(1)\n\n # Draw box for currently selected tile(s)\n # Pixel grid selection is drawn in draw_tile_select_ui\n sprytile_data = context.scene.sprytile_data\n is_not_base_layer = sprytile_data.work_layer != \"BASE\"\n draw_outline = sprytile_data.outline_preview or is_not_base_layer\n if draw_outline and is_selecting is False and not is_pixel_grid:\n if is_not_base_layer:\n glColor4f(0.98, 0.94, 0.12, 1.0)\n elif SprytileGui.is_moving:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n else:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n curr_sel_min, curr_sel_max = SprytileGui.get_sel_bounds(\n grid_size, padding, margin,\n curr_sel[0], curr_sel[1],\n curr_sel[2], curr_sel[3]\n )\n SprytileGui.draw_selection(curr_sel_min, curr_sel_max)\n\n # Inside gui, draw appropriate selection for under mouse\n if is_use_mouse and is_selecting is False and SprytileGui.cursor_grid_pos is not None:\n\n cursor_pos = SprytileGui.cursor_grid_pos\n # In pixel grid, draw cross hair\n if is_pixel_grid and SprytileGui.is_moving is False:\n glColor4f(1.0, 1.0, 1.0, 0.5)\n glBegin(GL_LINE_STRIP)\n glVertex2i(0, int(cursor_pos.y + 1))\n glVertex2i(tex_size[0], int(cursor_pos.y + 1))\n glEnd()\n\n glBegin(GL_LINE_STRIP)\n glVertex2i(int(cursor_pos.x + 1), 0)\n glVertex2i(int(cursor_pos.x + 1), tex_size[1])\n glEnd()\n # Draw box around selection\n elif SprytileGui.is_moving is False:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n cursor_min, cursor_max = SprytileGui.get_sel_bounds(grid_size, padding, margin,\n int(cursor_pos.x), int(cursor_pos.y),)\n SprytileGui.draw_selection(cursor_min, cursor_max)\n\n glPopMatrix()\n offscreen.unbind()", "def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def part1(width, height, size, color):\n pass", "def create_quad(scale=(1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n shape = [4, 3]\n rgba_offset = 3\n\n width, height = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n\n vertices = np.array([\n # top right\n ( width, height, 0.0,),\n # top left\n (-width, height, 0.0,),\n # bottom left\n (-width,-height, 0.0,),\n # bottom right\n ( width,-height, 0.0,),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype)\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values 
*= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (4,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (4,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = rgba\n elif rgba.shape == (4,3,):\n rgba_values = rgba\n elif rgba.shape == (4,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.array([0, 1, 2, 0, 2, 3])\n elif type == 'triangle_strip':\n # verify\n indices = np.arange(len(data))\n elif type == 'triangle_fan':\n # verify\n indices = np.arange(len(data))\n elif type == 'quads':\n indices = np.arange(len(data))\n elif type == 'quad_strip':\n indices = np.arange(len(data))\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def draw_4(n: int):\n\n # Top half + middle\n for row in range(n // 2 + (n % 2)):\n cols = n - row * 2\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()\n\n # Bottom half\n for row in range(n // 2):\n cols = (row + 1) * 2 + (n % 2)\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()", "def pygDraw(self):\n x1,y1 = float(self.x), float(self.y) # bottom left\n x2,y2 = float(self.x+self.width), float(self.y) # bottom right\n x3,y3 = float(self.x+self.width), float(self.y+self.height) # Top right \n x4,y4 = float(self.x), float(self.y+self.height) # Top left\n \n glBegin(GL_QUADS)\n glVertex3f(x4, y4, 0.0)\t# Top left\n glVertex3f(x3, y3, 0.0)\t# Top right\n glVertex3f(x2, y2, 0.0)\t# bottom right\n glVertex3f(x1, y1, 0.0)\t# bottom left\n glEnd()", "def draw_rectangle_filled(center_x, center_y, width, height, color,\n tilt_angle=0):\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n if tilt_angle:\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(-width // 2, -height // 2, 0.5)\n GL.glVertex3f(width // 2, -height // 2, 0.5)\n GL.glVertex3f(width // 2, height // 2, 0.5)\n GL.glVertex3f(-width // 
2, height // 2, 0.5)\n GL.glEnd()", "def draw(self, screen):", "def draw():", "def drawFloor(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n sx = width / 2\n ex = -sx\n sz = height / 2\n ez = -sz\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n glBegin(GL_QUADS)\n glTexCoord2f(0, 0)\n glVertex3f(sx, 0, sz)\n glTexCoord2f(0, 1)\n glVertex3f(sx, 0, ez)\n glTexCoord2f(1, 1)\n glVertex3f(ex, 0, ez)\n glTexCoord2f(1, 0)\n glVertex3f(ex, 0, sz)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)", "def moving_square_frame(t):\n w, h = 1280, 720\n s = min(h, 100)\n canvas = np.zeros((h, w, 3), dtype=np.ubyte)\n canvas[:,:,:] = 255\n canvas[(h-s)//2:(h+s)//2,int(w * t / 2):int(w * t / 2) + s,1:3] = 0\n return canvas", "def drawRectangle(x,y,width,height,rounding=0,ucoords=1):\n if ucoords:\n dislin.rlrnd(x,y,width,height,rounding)\n else:\n dislin.rndrec(x,y,width,height,rounding)", "def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()", "def draw_cuboid(self, x_pos, z_pos, half_width, half_depth, height):\n GL.glBegin(GL.GL_QUADS)\n GL.glNormal3f(0, -1, 0)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 1, 0)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(-1, 0, 0)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(1, 0, 0)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 0, -1)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glNormal3f(0, 0, 1)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glEnd()", "def 
square(square_x, square_y, square_width, square_height, square_color):\n arcade.draw_rectangle_filled(square_x, square_y, square_width, square_height, square_color)", "def square(x, y):\n path.up()\n path.goto(x, y)\n path.down()\n path.begin_fill()\n\n for count in range(4):\n path.forward(20)\n path.left(90)\n\n path.end_fill()", "def draw_board(screen):\n colors = [p.Color(\"white\"), p.Color(\"dark gray\")]\n\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n # For all light squares: row + col => even\n # dark squares: row + col => odd\n color = colors[(row + col) % 2]\n p.draw.rect(screen, color, p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()", "def gen_rhombus(width):\n for row in range(1, width +1, 2):\n yield f\"{(STAR * row).center(width)}\"\n\n for row in range(width -2, 0, -2):\n yield f\"{(STAR * row).center(width)}\"", "def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)", "def draw_xywh_rectangle_filled(top_left_x, top_left_y, width, height, color):\n center_x = top_left_x + (width / 2)\n center_y = top_left_y + (height / 2)\n draw_rectangle_filled(center_x, center_y, width, height, color)", "def draw_rect_filled(self, x, y, w, h, color=None, aa=False):\n for i in range(x, x + w):\n self._draw_fast_vline(i, y, h, color, aa)", "def DrawPanel(screen, panel_x, panel_y, panel_len, panel_width):\n pygame.draw.rect(screen, (255,0,0),(panel_x, panel_y, panel_len*2, panel_width*2), 4)", "def drawBackground(self):\n if self.newFrameArrived and not self.reshaping:\n imgHeight, imgwidth, _ = self.currentFrame.shape\n if imgHeight == self.height and imgwidth == self.width:\n glDisable(GL_DEPTH_TEST)\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n #print \"Happy printings1\"\n #glMatrixMode(GL_MODELVIEW)\n #glLoadIdentity()\n\n #print \"Happy printings\"\n glLoadIdentity()\n #print \"Happy printings\"\n glOrtho(0, self.width, 0, self.height, -1.0, 1.0)\n #print \"Happy printings\"\n glViewport(0, 0, self.width, self.height)\n #print \"Happy printings\"\n glDisable(GL_TEXTURE_2D)\n glPixelZoom(1, -1)\n glRasterPos3f(0, self.height-0.5, -1)\n #print \"Happy printings5\"\n glDrawPixels(self.width, self.height, GL_RGB, GL_UNSIGNED_BYTE, self.currentFrame)\n #print \"Happy printings6\"\n # glBegin(GL_QUADS)\n # glTexCoord2f(0.0,0.0); glVertex3f(-4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,0.0); glVertex3f( 4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,1.0); glVertex3f( 4.0, 3.0,-10.0)\n # glTexCoord2f(0.0,1.0); glVertex3f(-4.0, 3.0,-10.0)\n # glEnd()\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()\n glEnable(GL_DEPTH_TEST)\n #self.newFrameArrived = False", "def draw_square(self, surface, color, position):\n rect = pygame.Rect(position, (50, 50))\n pygame.draw.rect(surface, color, rect)", "def draw_main_surface(win, color, dimensions):\n width, height = dimensions\n\n pygame.draw.rect(win, color, (BORDER, BORDER,\n width*CELL_SIZE - BORDER*2,\n height*CELL_SIZE - BORDER*2))", "def drawPyramid( self ):\n glBegin(GL_TRIANGLES);\n glColor3f(1.0,0.0,0.0)\n glVertex3f( 0.0, 1.0, 0.0)\n glColor3f(0.0,1.0,0.0)\n 
glVertex3f(-1.0,-1.0, 1.0)\n glColor3f(0.0,0.0,1.0)\n glVertex3f( 1.0,-1.0, 1.0)\n glColor3f(1.0,0.0,0.0)\n glVertex3f( 0.0, 1.0, 0.0)\n glColor3f(0.0,0.0,1.0)\n glVertex3f( 1.0,-1.0, 1.0);\n glColor3f(0.0,1.0,0.0);\n glVertex3f( 1.0,-1.0, -1.0);\n glColor3f(1.0,0.0,0.0);\n glVertex3f( 0.0, 1.0, 0.0);\n glColor3f(0.0,1.0,0.0);\n glVertex3f( 1.0,-1.0, -1.0);\n glColor3f(0.0,0.0,1.0);\n glVertex3f(-1.0,-1.0, -1.0);\n glColor3f(1.0,0.0,0.0);\n glVertex3f( 0.0, 1.0, 0.0);\n glColor3f(0.0,0.0,1.0);\n glVertex3f(-1.0,-1.0,-1.0);\n glColor3f(0.0,1.0,0.0);\n glVertex3f(-1.0,-1.0, 1.0);\n glEnd()", "def create_screen(self, width, height):", "def draw_a50(self):\r\n\t\tpg.draw.rect(self.image, (100, 200, 100), self.rect)\r\n\t\r\n\t\t#self.display_surface.blit(self.image, self.rect)\r", "def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)", "def generateTransparentBackground(sizex, sizey):\n\tsizex += sizex % 16\n\tsizey += sizey % 16\n\tsingleTileData = (\n\t\t\"GdkP\"\n\t\t\"\\0\\0\\0\\263\"\n\t\t\"\\2\\1\\0\\2\"\n\t\t\"\\0\\0\\0@\"\n\t\t\"\\0\\0\\0\\20\"\n\t\t\"\\0\\0\\0\\20\"\n\t\t\"\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jj\"\n\t\t\"j\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\"\n\t\t\"\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\"\n\t\t\"\\233\\377\\210jjj\\377\\220\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\"\n\t\t\"\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jj\"\n\t\t\"j\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\\210\"\n\t\t\"\\233\\233\\233\\377\\210jjj\\377\\210\\233\\233\\233\\377\\210jjj\\377\"\n\t)\n\tsingleTile = gtk.gdk.pixbuf_new_from_inline(len(singleTileData), singleTileData, False)\n\tbackgroundPixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, sizex, sizey)\n\tfor x in xrange(0, sizex - 8, 16):\n\t\tfor y in xrange(0, sizey - 8, 16):\n\t\t\tsingleTile.copy_area(0, 0, 16, 16, backgroundPixbuf, x, y)\n\treturn backgroundPixbuf", "def fill_vrect(self, x, y, w, h, color):\n if self.is_off_grid(x, y, x + w - 1, y + h - 1):\n return\n chunk_width = 1024 // h\n chunk_count, remainder = divmod(w, chunk_width)\n chunk_size = chunk_width * h\n chunk_x = x\n if chunk_count:\n buf = color.to_bytes(2, 'big') * chunk_size\n for c in range(0, chunk_count):\n self.set_window(chunk_x, y,\n chunk_x + chunk_width - 1, y + h - 1,\n buf)\n chunk_x += chunk_width\n\n if remainder:\n buf = color.to_bytes(2, 'big') * remainder * h\n self.set_window(chunk_x, y,\n chunk_x + remainder - 1, y + h - 1,\n buf)", "def draw_infinity_bg(screen: Surface, image: Surface, rect1: Rect, rect2: Rect) -> None:\n rect1.left += 1\n rect2.left += 1\n\n if rect1.left == WIDTH:\n rect1.left = -WIDTH\n if rect2.left == WIDTH:\n rect2.left = -WIDTH\n\n screen.blit(image, rect1)\n screen.blit(image, rect2)", "def form(x, y, s):\n rnd = int(random(3))\n shuffle(colors) # this is my own implementation of shuffle (rn_utils)\n noStroke()\n fill(colors[0])\n pushMatrix()\n translate(x, y)\n rotate(int(random(4)) * PI * 0.5)\n if random(1) < 0.5:\n rect(0, 0, s + 0.9, s, s, 0, s, s)\n # myShape(s * 0.75, -s * 0.25, s * 0.5, 0);\n else:\n rect(0, 0, s + 0.9, s, s, s, 0, s)\n # myShape(s * 0.75, s * 0.25, s * 0.5, TAU * 0.75);\n\n fill(colors[3])\n ellipse(0, 0, s * 0.8, s * 0.8)\n\n 
fill(colors[1])\n ellipse(0, 0, s * 0.5, s * 0.5)\n\n # if (rnd == 0) drawVortex(0, 0, s * 0.5);\n # if (rnd == 1) ellipse(0, 0, s * 0.5, s * 0.5);\n # if (rnd == 2) {\n # \tfill(colors[1]);\n # \tellipse(0, 0, s * 0.5, s * 0.5);\n # \tdrawHeart(0, s * 0.05, s * 0.35);\n # }\n\n if random(1) < 0.1:\n fill(colors[0])\n arc(0, 0, s, s, PI, TAU)\n\n popMatrix()", "def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)", "def drawGrid(w, rows, surface):\r\n sizeBtwn = w // rows\r\n\r\n x = 0\r\n y = 0\r\n for l in range(rows):\r\n x = x + sizeBtwn\r\n y = y + sizeBtwn\r\n\r\n #line color-white #start end\r\n # pygame.draw.line(surface, (255,255,255), (x,0), (x,w)) #vertical\r\n #pygame.draw.line(surface, (255,255,255), (0,y), (w,y)) #horizontal\r", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.pixels = []\n self.r = 255\n self.g = 0\n self.b = 0\n self.pointSize = 30\n self.vr = 255\n self.vg = 200\n self.vb = 200\n self.glclear()", "def drawGeneration(self,screen):\n screen.blit(self.genSurface,(0,0))", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(50)", "def drawBoard():\t\n\t#draw 64 Rectangles from (MARGINH,MARGINV) with CASESIZE sizes\n\tfor i in range(BOARDSIZE):\n\t\tfor j in range(BOARDSIZE):\n\t\t\tpygame.draw.rect(DISPLAYSURF, BLACK, [MARGINH + (i)*CASESIZE, MARGINV + (j)*CASESIZE, CASESIZE, CASESIZE], 1)", "def render(self,surf,box,r,size=None,offset=None):\n\n if box == 0: return\n\n if is_color(box):\n surf.fill(box,r)\n return\n \n x,y,w,h=r.x,r.y,r.w,r.h\n\n if (size and offset):\n pass\n# destx = x\n# desty = y\n\n # Calculate the size of each tile\n tilew, tileh = int(box.get_width()/3), int(box.get_height()/3)\n xx, yy = x+w, y+h\n src = pygame.rect.Rect(0, 0, tilew, tileh)\n dest = pygame.rect.Rect(0, 0, tilew, tileh)\n\n # Render the interior of the box\n surf.set_clip(pygame.Rect(x+tilew, y+tileh, w-tilew*2, h-tileh*2))\n src.x,src.y = tilew,tileh\n for dest.y in range(y+tileh,yy-tileh,tileh):\n for dest.x in range(x+tilew,xx-tilew,tilew): \n surf.blit(box,dest,src)\n\n # Render the top side of the box\n surf.set_clip(pygame.Rect(x+tilew,y,w-tilew*2,tileh))\n src.x,src.y,dest.y = tilew,0,y\n for dest.x in range(x+tilew, xx-tilew*2+tilew, tilew): \n surf.blit(box,dest,src)\n \n # Render the bottom side\n surf.set_clip(pygame.Rect(x+tilew,yy-tileh,w-tilew*2,tileh))\n src.x,src.y,dest.y = tilew,tileh*2,yy-tileh\n for dest.x in range(x+tilew,xx-tilew*2+tilew,tilew): \n surf.blit(box,dest,src)\n\n # Render the left side\n surf.set_clip(pygame.Rect(x,y+tileh,xx,h-tileh*2))\n src.y,src.x,dest.x = tileh,0,x\n for dest.y in range(y+tileh,yy-tileh*2+tileh,tileh): \n surf.blit(box,dest,src)\n\n # Render the right side\n surf.set_clip(pygame.Rect(xx-tilew,y+tileh,xx,h-tileh*2))\n src.y,src.x,dest.x=tileh,tilew*2,xx-tilew\n for dest.y in range(y+tileh,yy-tileh*2+tileh,tileh): \n surf.blit(box,dest,src)\n\n # Render the upper-left corner\n surf.set_clip()\n src.x,src.y,dest.x,dest.y = 0,0,x,y\n surf.blit(box,dest,src)\n \n # Render the upper-right corner\n src.x,src.y,dest.x,dest.y = tilew*2,0,xx-tilew,y\n surf.blit(box,dest,src)\n \n # Render the lower-left corner\n src.x,src.y,dest.x,dest.y = 0,tileh*2,x,yy-tileh\n 
surf.blit(box,dest,src)\n \n # Render the lower-right corner\n src.x,src.y,dest.x,dest.y = tilew*2,tileh*2,xx-tilew,yy-tileh\n surf.blit(box,dest,src)", "def fill_box(self, x, y, w, h):\n\t\tpass", "def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def rectStreatch(self,(x,y,xs,ys),(u,v,us,vs)):\n # do clipping now:\n \n color = Vec4(1,1,1,1)\n \n w = self.w\n h = self.h\n \n u,v,us,vs = u/w,1-v/h,(u+us)/w,1-(v+vs)/h\n \n self.drawer.rectangle( \n x,y,xs,ys,\n u,v,us-u,vs-v,\n #u/self.w,v/self.h,us/self.w,vs/self.h,\n color)", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def glow_boundary(bound):\n assert bound < 4\n global layout\n temp = len(layout) - 1\n for i in range(bound, bound + len_square(bound)):\n for j in range(bound, bound + len_square(bound)): # TODO: assign this to a variable\t\n layout[i][j] = 1", "def draw_field_divider(self, screen) -> None:\n pygame.draw.rect(screen, self.white,\n (self.width / 2, 0, 3, self.height))", "def draw_parabola_filled(start_x, start_y, end_x, height, color, tilt_angle=0):\n center_x = (start_x + end_x) / 2\n center_y = start_y + height\n start_angle = 0\n end_angle = 180\n width = (start_x - end_x)\n draw_arc_filled(center_x, center_y, width, height, color,\n start_angle, end_angle, tilt_angle)", "def draw_nonogram(self):\n image = Image.new(\"RGB\", (self.nonogram_size * 50, self.nonogram_size * 50), (255, 255, 255))\n draw = ImageDraw.Draw(image)\n\n for index, square in enumerate(reduce(lambda x, y: x+y, self.grid), 0):\n\n #print(square)\n x = index % self.nonogram_size\n y = index // self.nonogram_size\n coord = [(x * 50, y * 50), ((x + 1) * 50, (y + 1) * 50)]\n if square == EMPTY:\n draw.rectangle(coord, fill=(255, 255, 255))\n if square == FILLED:\n draw.rectangle(coord, fill=(0, 0, 0))\n return image", "def clean_area(screen,origin,width,height,color):\r\n ox,oy = origin\r\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\r\n pygame.draw.polygon(screen, color, points, 0)", "def _create_drawing_area(self):\n\n self.drawing_x = -self.size/2 + self.margin\n self.drawing_y = self.size/2 - self.margin\n self.drawing_width = self.size - self.margin * 2\n self.drawing_height = (self.size/2 + self.flat_fragment) - self.margin * 2\n \n self.drawing_x_step = self.drawing_width \n self.drawing_y_step = self.drawing_height", "def draw_board(self):\n for i in range(0, 800, 80):\n if i == 80:\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, (0, 0, 128), (0, i), (720, i), width=5)\n continue\n 
pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, 'black', (0, i), (720, i), width=3)\n for j in range(240, 800, 240):\n pygame.draw.line(self.screen, (0, 0, 128), (j, 80), (j, 800), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, j + 80), (720, j + 80), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, 80), (0, 800), width=5)", "def draw_bg(self):\n for y in range(WIN_HEIGHT/32): #TODO: make sure this process is correct and efficient.\n for x in range(WIN_WIDTH/32):\n self.screen_image.blit(self.bg, (x * 32, y * 32))", "def render_wall(win, color, direction, pos):\n x, y = pos\n\n if direction == 'S':\n width = CELL_SIZE\n height = BORDER\n x = x*CELL_SIZE\n y = (y+1)*CELL_SIZE\n\n elif direction == 'E':\n width = BORDER\n height = CELL_SIZE\n x = (x+1)*CELL_SIZE\n y = y*CELL_SIZE\n\n pygame.draw.rect(win, color, (x, y, width, height))", "def drawE8(\n pl=57, ## Parallel length\n gw=12.5, ## Gauge width\n tw=20, ## Grip width\n tl=50.0, ## Grip length\n rd=12.5, ## Radius\n):\n import numpy as np\n A = tw/2. - gw/2.\n th = -90.-np.arccos((rd-A)/rd)*180./np.pi\n x = rd * np.sin(th*np.pi/180.)\n ## Round...\n th0=-90\n th_delta=np.arccos((rd-A)/rd)*180/np.pi\n th1=th0+th_delta\n ths=np.linspace(th0*np.pi/180.,th1*np.pi/180.)\n xs = rd*np.cos(ths)\n ys = rd*np.sin(ths)\n ## translate xs,ys\n xs = xs + (x-xs[-1])\n ys = ys + (-A+tw-ys[-1])\n xyRound=[xs.tolist(),ys.tolist()]\n \n \n ## parallel\n x0,y0=xs[0],ys[0]\n xyParallel = [[x0-0.5*pl,x0],[y0,y0]]\n \n ## Right grip\n XS=[x+tl,x+tl,x][::-1]\n YS=[-A+0.5*tw,-A+tw,-A+tw][::-1]\n xyRG=[XS,YS]\n \n x=xyParallel[0]+xyRound[0]+xyRG[0]\n y=xyParallel[1]+xyRound[1]+xyRG[1]\n \n xyCoords=np.array([x,y])\n \n # print xyCoords.shape\n \n ## translate the coordinate so that the center of gravity is (0,0)\n xyCoords[0]=xyCoords[0]-xyCoords[0][0]\n xyCoords[1]=xyCoords[1]-xyCoords[1][-1]\n # plot(xyCoords[0],xyCoords[1],'-')\n \n ## Apply 2-fold symmetry.\n sym0 =[[ 1,0],[0, 1]] ## Identical\n sym1 =[[-1,0],[0, 1]] ## Reflect y axis\n sym2 =[[ 1,0],[0,-1]] ## Reflect x axis\n sym3 =[[-1,0],[0,-1]] ## Reflect origin\n \n sym = np.array([sym0,sym2,sym3,sym1])\n # plot(xyCoords[0,0],xyCoords[1,0],'x')\n\n xyTot=[[],[]]\n for i in range(len(sym)):\n symOp = sym[i][:,:]# (2,2)\n temp = np.tensordot(symOp,xyCoords,axes=[1,0])\n if i==1 or i==3:\n temp[0]=temp[0][::-1]\n temp[1]=temp[1][::-1]\n elif i==0 or i==2:\n temp=temp[::]\n\n for j in range(len(temp[0])):\n xyTot[0].append(temp[0][j])\n xyTot[1].append(temp[1][j])\n\n xyTot=np.array(xyTot)\n\n\n x0=min(xyTot[0])\n y0=min(xyTot[1])+tw/2.\n\n xyTot[0] = xyTot[0] - x0\n xyTot[1] = xyTot[1] - y0\n\n return xyTot", "def spirala(t):\n t.penup()\n t.setx(random.randrange(-200,200))\n t.sety(random.randrange(-200,200))\n t.pencolor(random.randrange(0,255),random.randrange(0,255),200)\n t.width(random.randrange(2,13))\n t.pendown()\n\n for i in range(120):\n \tt.forward(20+i)\n \tt.left(30 - i/1.5)", "def drawPlane(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n ex = width / 2\n sx = -ex\n ey = height\n sy = 0\n glBegin(GL_QUADS)\n glNormal3f(0, 0, 1)\n glTexCoord2f(0, 0)\n glVertex3f(sx, sy, 0)\n glTexCoord2f(2, 0)\n glVertex3f(ex, sy, 0)\n glTexCoord2f(2, 2)\n glVertex3f(ex, ey, 0)\n glTexCoord2f(0, 2)\n glVertex3f(sx, ey, 0)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw_grid(self, verbosity=0):\n log.debug(\"Drawing grid\")\n (x0, y0) = self.origin\n color = (191, 191, 191)\n\n (w, h) = self.surface.get_size()\n\n i = x0\n while True:\n (x, ignore) = self.map_to_screen((i, 0))\n if x > w:\n break\n pygame.draw.line(self.surface, color, (x, 0), (x, h), 1)\n i += 10\n\n j = y0\n while True:\n (ignore, y) = self.map_to_screen((0, j))\n if y > h:\n break\n pygame.draw.line(self.surface, color, (0, y), (w, y), 1)\n j -= 10", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def drawRectangle_3():\n\n # Calculate the coordinates for the four corners of the rectangle\n\n x1 = Lucia.xcor()\n y1 = Lucia.ycor()\n\n fourCorners = [(x1 + 50, y1), (x1 + 50, y1 + 100), (x1, y1 + 100), (x1, y1)]\n \n Lucia.color(\"green\", \"yellow\")\n Lucia.begin_fill()\n \n Lucia.goto(fourCorners[0][0], fourCorners[0][1])\n Lucia.goto(fourCorners[1][0], fourCorners[1][1])\n Lucia.goto(fourCorners[2][0], fourCorners[2][1])\n Lucia.goto(fourCorners[3][0], fourCorners[3][1])\n\n Lucia.end_fill()", "def make_grass_field(shape, x, y, z):\n shape.penup()\n shape.speed(10)\n shape.setpos(x,y)\n shape.color(z)\n shape.begin_fill()\n for side in range(2):\n shape.forward(800)\n shape.right(90)\n shape.forward(800)\n shape.right(90)\n shape.end_fill()\n\n # ...", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = 
pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_building():\n\n gerardo.penup()\n gerardo.backward(135)\n gerardo.pendown()\n gerardo.begin_fill()\n for i in range(2): # this loop draws out the rectangle for the building\n gerardo.forward(200)\n gerardo.right(90)\n gerardo.forward(100)\n gerardo.right(90)\n gerardo.end_fill()\n gerardo.hideturtle()", "def draw(self, window, color):\n rect = (self.row*self.size, self.col*self.size, self.size, self.size)\n pygame.draw.rect(window, color, rect)", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. 
y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 
- self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def draw_3(n: int):\n\n # Top half + middle\n for row in range(n // 2):\n for col in range(row + 1):\n print('*', end='')\n print()\n\n # Bottom half\n for row in range(n // 2, n):\n for col in range(n - row):\n print('*', end='')\n print()", "def unitQuad_Edge(lens, N=3):\n template = [ np.array([0,0]), np.array([lens[0], 0]), None, None ] #Template from which to generate other Quad Vertex Lists\n leftDegenerate = template.copy() #Left Limit of quad if you were to rotate edge 3 CCW about the origin until you no longer can\n rightDegenerate = template.copy() #Right Limit of quad if you were to rotate edge 2 CW about point 1 until you no longer can,\n # or alternatively, how far edge 3 can rotate CW until the quad is degenerate\n try:\n leftDegenerate[3] = np.array( circleIntersection(leftDegenerate[0], lens[3], leftDegenerate[1], lens[1]+lens[2]) )\n leftDegenerate[2] = ( lens[1] / (lens[2]+lens[1]) ) * (leftDegenerate[3]-leftDegenerate[1]) + leftDegenerate[1]\n except: \n leftDegenerate[3] = np.array([-lens[3],0])\n leftDegenerate[2] = np.array( circleIntersection(leftDegenerate[3], lens[2], leftDegenerate[1], lens[1]) )\n\n try:\n rightDegenerate[2] = np.array( circleIntersection(rightDegenerate[0], lens[2]+lens[3], rightDegenerate[1], lens[1]) )\n rightDegenerate[3] = ( lens[3] / (lens[3]+lens[2]) ) * rightDegenerate[2]\n except:\n rightDegenerate[2] = np.array([lens[0]+lens[1], 0])\n rightDegenerate[3] = np.array( circleIntersection(rightDegenerate[0], lens[3], rightDegenerate[2], lens[2]))\n \n rightOfOrigin = np.array([1,0]) #Theta = 0 on the Unit Circle\n thetaMin = angle_between(leftDegenerate[3], rightOfOrigin) #Angle of \n thetaMax = angle_between(rightDegenerate[3], rightOfOrigin)\n pitch = (thetaMax - thetaMin) / (N-1)\n\n result = []\n result.append(leftDegenerate) \n for i in range(1, N-1):\n result.append(template.copy())\n result[i][3] = lens[3]*unitCircPt(i*pitch+thetaMin)\n result[i][2] = np.array(circleIntersection( result[i][3], lens[2], result[i][1], lens[1]))\n result.append(rightDegenerate) \n\n return listify(result)", "def borders(w, h):\r\n pygame.draw.line(window, WHITE, [25, 0], [25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [w - 25, 0], [w - 25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [25, h - 50], [w - 25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [25, 25], [w - 25, 25], 6)", "def sketch(self, mpl_axes=None, quad_colors = ['k', 'g', 'purple', 'b']):\n\n pixel_positions = np.squeeze(self.xyz)\n print pixel_positions.shape\n \n if not mpl_axes:\n from matplotlib import pyplot as plt\n import matplotlib.patches as plt_patches\n plt.figure()\n ax = plt.subplot(111)\n else:\n ax = mpl_axes\n\n for i in range(4):\n for j in range(8):\n x = pixel_positions[i,j,:,:,0]\n y = pixel_positions[i,j,:,:,1]\n corners = np.zeros((5,2))\n\n corners[0,:] = np.array([ x[0,0], y[0,0] ]) # bottom left\n corners[1,:] = np.array([ x[0,-1], y[0,-1] ]) # bottom right\n corners[3,:] = np.array([ x[-1,0], y[-1,0] ]) # top left\n corners[2,:] = np.array([ x[-1,-1], y[-1,-1] ]) # top right\n corners[4,:] = np.array([ x[0,0], y[0,0] ]) # make rectangle\n\n 
ax.plot(corners[:,0], corners[:,1], lw=2, color=quad_colors[i])\n ax.scatter(x[0,0], y[0,0])\n \n beam_center = plt_patches.Circle((0, 0), 2, fill=True, lw=1, color='orange')\n ax.add_patch(beam_center)\n \n # mirror x axis for CXI convention\n if not ax.xaxis_inverted():\n ax.invert_xaxis()\n\n if mpl_axes:\n return ax\n else:\n plt.show()\n return", "def gradientRect( window, left_colour, right_colour, target_rect ):\n colour_rect = pygame.Surface( ( 2, 2 ) ) # tiny! 2x2 bitmap\n pygame.draw.line( colour_rect, left_colour, ( 0,0 ), ( 0,1 ) ) # left colour line\n pygame.draw.line( colour_rect, right_colour, ( 1,0 ), ( 1,1 ) ) # right colour line\n colour_rect = pygame.transform.smoothscale( colour_rect, ( target_rect.width, target_rect.height ) ) # stretch!\n window.blit( colour_rect, target_rect ) # paint it", "def init(width, height):\n\tglClearColor(0.0, 0.0, 1.0, 0.0) #blue bg\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tglOrtho(-0.5, 2.5, -1.5, 1.5, -1.0, 1.0)", "def display_board(self, screen):\n for wall in self.cube_walls_list:\n screen = wall.draw_rhombus(screen)\n for tile in self.tile_rhombus_list:\n screen = tile.draw_rhombus(screen)\n\n return screen", "def fill_hrect(self, x, y, w, h, color):\n if self.is_off_grid(x, y, x + w - 1, y + h - 1):\n return\n chunk_height = 1024 // w\n chunk_count, remainder = divmod(h, chunk_height)\n chunk_size = chunk_height * w\n chunk_y = y\n if chunk_count:\n buf = color.to_bytes(2, 'big') * chunk_size\n for c in range(0, chunk_count):\n self.set_window(x, chunk_y,\n x + w - 1, chunk_y + chunk_height - 1,\n buf)\n chunk_y += chunk_height\n\n if remainder:\n buf = color.to_bytes(2, 'big') * remainder * w\n self.set_window(x, chunk_y,\n x + w - 1, chunk_y + remainder - 1,\n buf)", "def drawSimple(self, screen):\r\n self.worlds[0].renderer.render(screen)", "def drawSolu(self): \n vinemaze = MakeMaze()\n solulist = vinemaze.build_maze(self.width-4, 0, (8,24))\n \n #draw starting point square at bottom right corner\n start = pygame.Rect(self.width, self.height, -4, -4)\n pygame.draw.rect(gameView.background, self.color, start)\n dimenlist = []\n \n #determine the rectangle's starting coordinate, width, and height\n for i in range(1, len(solulist)):\n cur_coord = solulist[i]\n prev_coord = solulist[i-1]\n RectW = prev_coord[0] - cur_coord[0]\n RectH = prev_coord[1] - cur_coord[1]\n\n #keep track of coordinates where non-overlapping vines occur \n if RectH > 0:\n dimenlist.append(prev_coord)\n \n #Increase thickness of vines, depending on if they lie vertical or horizontal \n if abs(RectW) < abs(RectH):\n newRectW = self.increasepix(RectW)\n vine = pygame.Rect(cur_coord[0], cur_coord[1], newRectW, RectH) \n else:\n newRectH = self.increasepix(RectH)\n vine = pygame.Rect(cur_coord[0], cur_coord[1], RectW, newRectH)\n pygame.draw.rect(gameView.background, self.color, vine)\n \n #fill in the missing corners due to non-overlapping vines\n for i in range(1, len(dimenlist)):\n prevW = dimenlist[i][0]\n prevH = dimenlist[i][1]\n if prevW > 0:\n fillcoord = (prevW, prevH)\n fill = pygame.Rect(fillcoord[0], fillcoord[1], 4, 4)\n pygame.draw.rect(gameView.background, self.color, fill)\n\n gameView.screen.blit(gameView.background,(0,0))\n pygame.display.update()", "def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)", "def test_mask_quad(geometry, mbits):\n ## get index arrays\n rows, cols = 
geometry.get_pixel_coord_indexes('QUAD:V1', 1, pix_scale_size_um=None, xy0_off_pix=None, do_tilt=True)\n\n # get intensity array\n arr = geometry.get_pixel_mask('QUAD:V1', 1, mbits)\n arr.shape = (8,185,388)\n amp_range = (-1,2)\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows, cols, W=arr, vbase=0.5)\n\n gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def draw_arc_filled(center_x, center_y,\n width, height,\n color,\n start_angle, end_angle,\n tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_TRIANGLE_FAN)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n GL.glVertex3f(0, 0, 0.5)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()" ]
[ "0.6329539", "0.6251077", "0.6159504", "0.61296666", "0.6059617", "0.6056886", "0.60274506", "0.6013795", "0.5928783", "0.5917718", "0.59110874", "0.58925414", "0.5879362", "0.58776313", "0.58744633", "0.58570075", "0.58549136", "0.5837235", "0.5829144", "0.5827169", "0.5818842", "0.5811103", "0.5803942", "0.5798769", "0.579857", "0.5797851", "0.5794164", "0.5767683", "0.57581437", "0.5747002", "0.5734119", "0.5726955", "0.57168335", "0.57028556", "0.5697188", "0.568599", "0.5678771", "0.56738394", "0.5653909", "0.5644867", "0.5641125", "0.56207174", "0.56161714", "0.560538", "0.5604418", "0.55927026", "0.5585115", "0.55733526", "0.55706674", "0.5562168", "0.55539787", "0.5548384", "0.5530851", "0.55290097", "0.55253726", "0.5521985", "0.5517559", "0.55096316", "0.55040175", "0.55036074", "0.5492329", "0.5490379", "0.548658", "0.54822963", "0.5475741", "0.5473994", "0.5472241", "0.54699296", "0.545001", "0.54489", "0.5443898", "0.5442944", "0.54393333", "0.5439235", "0.5431918", "0.54318166", "0.5428018", "0.5427763", "0.5407919", "0.54051614", "0.5402639", "0.53987813", "0.5394848", "0.53928715", "0.5390649", "0.53771544", "0.53761005", "0.5376004", "0.537258", "0.53699017", "0.53666085", "0.5365239", "0.5360682", "0.53512657", "0.53474575", "0.53463227", "0.5338741", "0.53377336", "0.53347075", "0.53336173", "0.53284585" ]
0.0
-1
need better performance here
def serialize_buffer(cls, gl_buffer, w, h):
    data = gl_buffer.read()
    data = np.frombuffer(data, dtype=np.float32)
    data = data.reshape((h, w, 4))
    data = np.multiply(data, 255.0)
    data = data.astype(np.uint8)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self):", "def _optimise(self):\n pass", "def apply(self) -> None:", "def apply(self) -> None:", "def map():", "def regular(self):", "def substantiate():", "def common(self):", "def transform(self):", "def process(self):", "def process(self):", "def process(self):", "def preprocess(self):", "def elems(self):", "def exo2():", "def query3() :", "def result(self):", "def result(self):", "def iterate(self):", "def _collect_all(self):", "def values():", "def CL(self):", "def reduce_run():", "def ncore(self):", "def _regr_basic():", "def count():", "def all(self):", "def all(self):", "def MINET(self):", "def fn():", "def first(self):", "def solution(s):", "def oneIteration(self):\n\t\traise NotImplementedError", "def all(c):", "def process():", "def use(self):", "def algorithm_loop(self):", "def degibber(self):", "def getChunks():", "def nits(self):", "def mezclar_bolsa(self):", "def array(self):", "def performance(self, id):", "def cx():", "def concatenate_data():", "def compute_statistics(self):", "def firstFunction(self):", "def transform():", "def pre_compute(self, e_list):\n\t\tpass", "def segment(data):", "def reconstruct_input(self, ix):", "def test_full_house_flush_ind(self):", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def task4_1(self):\n\n pass", "def hit(self):", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = 
'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def paired_interval_extend(uniq_fragment,fragment_cov,gtf_dic):\n out_dic = {}\n total_reads = 0\n for key in uniq_fragment.keys():\n chr_no = key[0]\n #print (frag_start,frag_end)\n frag_strand = key[3]\n interval_comp = uniq_fragment[key][0]\n complete_info = uniq_fragment[key][1]\n frag_cov = fragment_cov[key]\n total_reads += frag_cov\n geneNA = 'NA'\n geneType = 'NA'\n geneRegion = 'NA'\n flag = 0\n for trans in gtf_dic[(chr_no,frag_strand)]:\n frag_start,frag_end = key[1:3]\n # for trans in gtf_dic[('chr1','-')]:\n # if chr_no == 'chr1' and frag_strand == '-':\n if frag_start > trans[0] and frag_end < trans[1]:\n #print 'Hello!'\n # print (trans)\n geneNA = trans[4]\n geneType = trans[5]\n if geneType == 'protein_coding':\n CDS_start,CDS_end = trans[2:4]\n if frag_start >= CDS_start and frag_end <= CDS_end:\n geneRegion = 'CDS'\n elif frag_strand == '+':\n if frag_end <= CDS_start:\n geneRegion = '5UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = '5UTR-CDS'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = 'CDS-3UTR'\n elif frag_start >= CDS_end:\n geneRegion = '3UTR'\n elif frag_strand == '-':\n if frag_end <= CDS_start:\n geneRegion = '3UTR'\n elif frag_start < CDS_start and frag_end > CDS_start:\n geneRegion = 'CDS-3UTR'\n elif frag_start < CDS_end and frag_end > CDS_end:\n geneRegion = '5UTR-CDS'\n elif frag_start >= CDS_end:\n geneRegion = '5UTR'\n else:\n geneRegion = 'Null'\n # print 
(frag_start,frag_end,CDS_start,CDS_end,geneNA,geneRegion)\n#------------------------------------------------------------------------------ intersect of fragments interval and exons interval\n frag_intersect = interval_comp & trans[-1]\n interval_comp_length = sum([interval_comp[a].upper- interval_comp[a].lower for a in range(0,len(interval_comp))])\n # print (interval_comp)\n # print (frag_intersect)\n#------------------------------------------------------------------------------ fragments located in introns\n if frag_intersect == P.empty(): \n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n if complete_info == 'complete':\n flag = 3\n #print interval_comp\n#------------------------------------------------------------------------------ reduce alignment noise\n frag_intersect_length = sum([frag_intersect[a].upper-frag_intersect[a].lower for a in range(0,len(frag_intersect))])\n absolute_diff = abs(frag_intersect_length-interval_comp_length)\n if absolute_diff == 0:\n#------------------------------------------------------------------------------ \n start_region = []\n length_region = []\n for region in frag_intersect:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n start_region = []\n length_region = []\n for region in interval_comp:\n start_region.append(str(int(region.lower - frag_start)))\n length_region.append(str(int(region.upper - region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),geneNA,geneType,\\\n frag_strand,str(frag_start),str(frag_end),'intron-containing',str(len(start_region)),\\\n ','.join(length_region),','.join(start_region),str(frag_cov),flag,complete_info))\n else:\n #print interval_comp\n #print frag_intersect\n#------------------------------------------------------------------------------ fragments boundaries located in exons\n #print frag_intersect[0][0],frag_start,frag_intersect[-1][1],frag_end\n #print abs_position\n # print (P.closedopen(frag_start,frag_end),trans[-1])\n interval_update = P.closedopen(frag_start,frag_end) & trans[-1]\n # print (interval_update)\n frag_trans_length = sum([interval_update[a].upper-interval_update[a].lower for a in range(0,len(interval_update))])\n absolute_diff = abs(frag_trans_length-interval_comp_length)\n #print absolute_diff\n #print geneRegion\n #print interval_comp\n #print abs_position\n if absolute_diff <= 300: #insert sequence length <=200nt\n #print frag_trans_length,interval_comp_length\n #print geneRegion\n flag = 2\n start_out = []\n length_out = []\n for interval_region in list(interval_update):\n start_out.append(str(int(interval_region.lower - frag_start)))\n 
length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n else:\n # print (trans)\n flag = 1\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic.setdefault((chr_no,frag_start,frag_end,frag_strand),[]).append((chr_no,str(frag_start),str(frag_end),\\\n geneNA,geneType,frag_strand,\\\n str(frag_start),str(frag_end),'intron-containing',str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info))\n if flag == 0:\n start_out = []\n length_out = []\n for interval_region in list(interval_comp):\n start_out.append(str(int(interval_region.lower - frag_start)))\n length_out.append(str(int(interval_region.upper - interval_region.lower)))\n out_dic[(chr_no,frag_start,frag_end,frag_strand)] = [(chr_no,str(frag_start),str(frag_end),'intergenic','intergenic',frag_strand,\\\n str(frag_start),str(frag_end),geneRegion,str(len(start_out)),\\\n ','.join(length_out),','.join(start_out),str(frag_cov),flag,complete_info)]\n print ('Total treated fragments: ' + str(total_reads))\n return out_dic", "def search_id(self,obj):\r\n ##### create the new id ###########\r\n #for x in self.objectValues('Image'):\r\n for x in obj:\r\n liste_id.append(str(x.id())[0:6])\r\n for digit0 in liste_digit:\r\n for digit1 in liste_digit:\r\n for digit2 in liste_digit:\r\n for digit3 in liste_digit:\r\n for digit4 in liste_digit:\r\n for digit5 in liste_digit:\r\n searched_dict=0\r\n searched=str(digit0)+str(digit1)+str(digit2)+str(digit3)+str(digit4)+str(digit5)\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n for x in self_val:\r\n liste_val.append('_'+x+'.jpeg')\r\n for extension in liste_val:\r\n searched_extension=str(searched)\r\n if searched_extension in liste_id:\r\n searched_dict=searched_dict+1\r\n if searched_dict==0:\r\n return searched\r\n return ''", "def calculate(self):", "def __getitem__(self, index):\r\n src_seq = self.src_seqs[index]\r\n trg_seq = self.trg_seqs[index]\r\n index_s = self.index_seqs[index]\r\n index_ans=self.ans_seq[index]\r\n src_plain=self.conv_seq[index]\r\n src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)\r\n trg_seq = self.preprocess(trg_seq, self.trg_word2id)\r\n ir_seq = self.ir_seq[index]\r\n\r\n index_s = self.preprocess_inde(index_s, src_seq)\r\n index_ans=self.preprocess_inde(index_ans, ir_seq)\r\n\r\n conv_ir_seq=self.ir_seq[index]\r\n conv_ir_seq = self.preprocess(conv_ir_seq, self.src_word2id, trg=False)\r\n ID = self.ID[index]\r\n\r\n\r\n return src_seq, trg_seq, index_s, self.max_len, src_plain, self.trg_seqs[index], \\\r\n self.ent[index], ID,index_ans,ir_seq,conv_ir_seq,self.max_r_ans,self.entity_cal[index],self.entity_nav[index],self.entity_wet[index] #ir_seq:word; conv_ir_seq:seq\r", "def support(self):", "def func():", "def compute_debug(self):", "def falcon():", "def pulp_smash():", "def _build_iterable(self):", "def strongest(dict_v, dict_e, dict_branches):\n dict_strongest_branches={}\n branch_no=0\n for endpoint in dict_branches:\n #print \"I am processing : \", 
endpoint\n strongest_branch = 0\n strongest_punctation = 0\n strongest_length = 0\n strongest_intensity = 0\n for branch in dict_branches[endpoint]:\n (punctation, length, intensity) = count_len_int(branch, dict_e)\n if punctation> strongest_punctation:\n strongest_punctation = punctation\n strongest_branch = branch\n strongest_length = length\n strongest_intensity = intensity \n data=dict()\n data['endpoint']=str(endpoint)\n data['image'] = image\n data['branch']=strongest_branch\n data['trunkpoint'] = branch[len(branch)-1]\n data['length']=strongest_length\n data['intensity']=round(strongest_intensity,2)\n data['end_x_y_trunk_x_y'],data['label']=get_ends_coord_label(data['endpoint'],data['trunkpoint'], dict_v)\n data['angle']=get_angle(data['end_x_y_trunk_x_y'],0)\n dict_strongest_branches[branch_no]=data\n branch_no+=1\n return dict_strongest_branches", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def one(self):", "def g():", "def raw_features_extractor(database='./red_cod.db.pkl', sites=-1, elements = -1, maxatoms= -1,\r\n dictionary='diccionario', features='datosrahm.csv'):\r\n \r\n df=create_collection(database=database,sites=sites, elements=elements, maxatoms=maxatoms, \r\n dictionary=dictionary)\r\n \r\n start=time.time()\r\n \r\n datos=pd.read_csv(features)\r\n datos=datos.fillna(-1)\r\n\r\n dicc=dict(datos[['Symbol','Z']].values)\r\n\r\n dicc['D']=1\r\n dicc['Bk']=97\r\n dicc['Cf']=98\r\n dicc['Es']=99\r\n dicc['Fm']=100\r\n dicc['Md']=101\r\n dicc['No']=102\r\n dicc['Lr']=103\r\n \r\n max_sitios = max(df['sitios'].values)\r\n\r\n df=df[df['sitios'] <= max_sitios].reset_index(drop=True)\r\n \r\n X=np.zeros((len(df),max_sitios,104))\r\n y=np.zeros((len(df),1))\r\n mult=np.zeros((len(df),max_sitios))\r\n wyckmul=np.load('support/WyckoffSG_dict.npy').item()['wyckmul']\r\n \r\n for row in range(len(df)):\r\n \r\n item=df['WyckOcc'][row]\r\n sitios=list(item.values()) \r\n sitocc=np.zeros((len(sitios),104))\r\n spacegroup = str(df['sgnum'][row]).zfill(3)\r\n \r\n try:\r\n \r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n sitios] for i in j]\r\n \r\n except:\r\n print('There exists an error concerning with the space group of CIF ', df['cif'][row],'\\n')\r\n print('Please check in www.crystallography.net to provide the correct space group number of that CIF',\r\n '\\n','\\n')\r\n spacegroup=input('Give me the correct spacegroup:'+'\\n'+'\\n')\r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n list(df['WyckOcc'][row].values())] for i in j]\r\n \r\n occs=[]\r\n for i in range(len(sitios)):\r\n\r\n for j in list(sitios[i].values()):\r\n \r\n ocupacion=np.array(list(j.values()))\r\n llaves=[llave.replace('+','').replace('-','').replace('1',\r\n '').replace('2','').replace('3','').replace('4',\r\n '') for llave in np.array(list(j.keys()))]\r\n llaves=[llave.replace('.','') for llave in llaves]\r\n llaves=[llave.replace('5','').replace('6','').replace('7',\r\n '').replace('8','').replace('9','').replace('0',\r\n '') for llave in llaves]\r\n vector=np.zeros((1,104))\r\n occs=[sum(ocupacion)]+occs\r\n \r\n try:\r\n \r\n idx=[dicc[k] for k in llaves]\r\n \r\n except:\r\n \r\n print(' ELEMENTO NO IDENTIFICADO EN LA LISTA ',llaves,'\\n',\r\n 'REVISA EL SIGUIENTE CIF PARA HACER LA CORRECCION:','\\t',df['cif'][row])\r\n \r\n former = input('Elemento Incorrecto: ')\r\n current = input('Elemento Correcto: ')\r\n \r\n llaves=[current if x == former else x for x in llaves]\r\n idx=[dicc[k] for k in 
llaves]\r\n \r\n \r\n for k in idx:\r\n vector[0][k-1] = ocupacion[idx.index(k)]\r\n \r\n \r\n sitocc[i]=vector\r\n \r\n while sitocc.shape[0] != max_sitios:\r\n sitocc=np.concatenate((np.zeros((1,104)),sitocc))\r\n s=[0]+s\r\n \r\n X[row,:,:]=sitocc\r\n y[row]=df['target'][row]\r\n mult[row]=s\r\n \r\n S = np.expand_dims(mult,axis=2)\r\n features=datos.iloc[:,2:].values\r\n x=X[:,:,:96]\r\n \r\n fracsum = np.expand_dims(np.sum(x,axis=2), axis=2)\r\n \r\n x=np.dot(x,features) \r\n\r\n print('Atomic radii and electronegativities for each Wyckoff site extracted in',\r\n round(time.time()-start,2),' s') \r\n \r\n np.save('raw_features', x)\r\n np.save('output_values', y)\r\n np.save('multiplicities', S)\r\n np.save('occupation_fractions', fracsum)\r\n \r\n return x, y, S, fracsum, df", "def stats(self):", "def think(s):", "def call(self):", "def MEM (self,n):", "def process(self, mat):", "def next():", "def next():", "def merge_two_calls(self) -> None:", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def fast_comparison(path = \"Data/data_fronts/\",path1 = \"Results/modified_images/fronts/\"):\n #computes the areas for the first frame in order to normalize the other areas\n pol0dx = grid(path1+\"m_0.png_dx.txt\")\n pol0dx.columns = [\"y\",\"x\"]\n pol0sx = grid(path1+\"m_0.png_sx.txt\")\n pol0sx.columns = [\"y\",\"x\"]\n if pol0dx[\"x\"][0]>100:\n pol0dx = pol0dx.reindex(index=pol0dx.index[::-1])\n if pol0sx[\"x\"][0]<100:\n pol0sx = pol0sx.reindex(index=pol0sx.index[::-1])\n pol0sx = pol0sx.append(pol0dx)\n pol0sx = np.array(pol0sx)\n pol0 = Polygon(pol0sx)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_1_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_1_dx.txt\",l = 633,delimiter ='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n poldx = grid(path1+\"m_\"+str(i)+\".png_dx.txt\")\n poldx.columns = [\"y\",\"x\"]\n polsx = grid(path1+\"m_\"+str(i)+\".png_sx.txt\")\n polsx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n #makes an object polygon in order to compute the area\n\n pol = Polygon(polsx)\n\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",l = 633,delimiter='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx 
= polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def parse(self):", "def revise():", "def executor(self):", "def __call__(self) -> None:", "def __call__():", "def __call__():", "def __call__():" ]
[ "0.5904686", "0.587094", "0.58542585", "0.58542585", "0.57282573", "0.5480052", "0.544143", "0.5374042", "0.5308655", "0.53024113", "0.53024113", "0.53024113", "0.52879596", "0.5280594", "0.5263961", "0.5251992", "0.5239149", "0.5239149", "0.5212692", "0.5189539", "0.51704985", "0.5160695", "0.5115016", "0.51149195", "0.50773865", "0.5065203", "0.5035019", "0.5035019", "0.5033602", "0.50217426", "0.5016145", "0.50157136", "0.5012727", "0.50114864", "0.5002582", "0.4985988", "0.4980663", "0.49713013", "0.4969004", "0.49388212", "0.4909398", "0.4889161", "0.48873943", "0.4873835", "0.4866497", "0.48662347", "0.4864761", "0.48496038", "0.48461065", "0.48417845", "0.48393548", "0.48359343", "0.48280236", "0.48280236", "0.48280236", "0.48280236", "0.48280236", "0.48241413", "0.48163798", "0.4813434", "0.480855", "0.47887436", "0.4786985", "0.47834885", "0.47802183", "0.47788024", "0.47765967", "0.47716615", "0.47699377", "0.47687316", "0.4768281", "0.47595456", "0.47546718", "0.47492442", "0.47399336", "0.47345397", "0.47332147", "0.4731881", "0.4731006", "0.47278035", "0.47275913", "0.47275913", "0.47237325", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.47038898", "0.46990895", "0.4698024", "0.4690078", "0.46880206", "0.46850052", "0.46848908", "0.46848908", "0.46848908" ]
0.0
-1
simple compute shader run after screen rendering
def build_cs(self, gl):
    cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl"))

    u_time = None
    u_width = None
    u_height = None

    if "u_time" in cs:
        u_time = cs["u_time"]
    if "u_width" in cs:
        u_width = cs["u_width"]
    if "u_height" in cs:
        u_height = cs["u_height"]

    buf_in = gl.buffer(reserve=width * height * 4 * 4)
    buf_in.bind_to_storage_buffer(0)
    buf_out = gl.buffer(reserve=width * height * 4 * 4)
    buf_out.bind_to_storage_buffer(1)

    return cs, [u_time, u_width, u_height], [buf_in, buf_out]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _executeShader(self, node, threadsX, threadsY, threadsZ=1):\n sattr = node.get_attrib(ShaderAttrib)\n Globals.base.graphicsEngine.dispatch_compute(\n (threadsX, threadsY, threadsZ), sattr, Globals.base.win.get_gsg())", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n value.render( mode.visible, mode.lighting, mode )\n return True \n return False", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n glBindTexture( GL_TEXTURE_BUFFER, self.texture( mode ) )\n vbo = value.vbo(mode)\n vbo.bind()\n try:\n glTexBuffer( GL_TEXTURE_BUFFER, self.get_format(), int(vbo) )\n finally:\n vbo.unbind()\n return True \n return False", "def _reload_shader(self):\n self.render_pipeline.reload_shaders()\n\n self.render_pipeline.set_effect(self.terrain.get_node(), \"effects/terrain.yaml\", {\n \"render_gbuffer\": True,\n \"render_shadows\": False,\n\n })\n\n self.render_pipeline.set_effect(self.terrain_shadow.get_node(), \"effects/terrain_shadow.yaml\", {\n \"render_gbuffer\": False,\n \"render_shadows\": True,\n }, 5000)", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def use(self):\r\n opengles.glUseProgram(self.program)", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "def render( self, mode, shader=None ):\n renderer = mode.cache.getData(self)\n if renderer is None:\n renderer = self.compile( mode, shader )\n if renderer is False:\n log.warn(\"\"\"%s\"\"\",\n self.compileLog,\n )\n if renderer not in (None,False):\n try:\n GL_shaders.glUseProgram( renderer )\n except error.GLError, err:\n log.error( '''Failure compiling: %s''', '\\n'.join([\n '%s: %s'%(shader.url or shader.source,shader.compileLog)\n for shader in self.shaders\n ]))\n raise\n else:\n for uniform in mode.uniforms:\n uniform.render( self, mode )\n for uniform in self.uniforms:\n uniform.render( self, mode )\n # TODO: retrieve 
maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.render( self, mode, i ):\n i += 1\n else:\n log.warn( 'Renderer for %s was null: %s', self, self.compileLog )\n return True,True,True,renderer", "def update(self):\n\n self.pta_time[0] = 1 + Globals.clock.get_frame_time() * self.options.time_scale\n\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_update,\n Globals.base.win.get_gsg())\n\n self.fftX.execute()\n self.fftY.execute()\n self.fftZ.execute()\n\n # Execute the shader which combines the 3 displacement maps into\n # 1 displacement texture and 1 normal texture. We could use dFdx in\n # the fragment shader, however that gives no accurate results as\n # dFdx returns the same value for a 2x2 pixel block\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_combine,\n Globals.base.win.get_gsg())", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def render( self, shader, mode, location=None ):\n if location is None:\n location = self.location( shader, mode )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n shape = value.shape \n shape_length = len(self.shape)\n if shape[-shape_length:] != self.shape:\n # uninitialized at the Python level, do not set...\n return None\n if shape[:-shape_length]:\n size = reduce( operator.mul, shape[:-shape_length] )\n else:\n size = 1\n if self.NEED_TRANSPOSE is not None:\n return self.baseFunction( location, size, self.NEED_TRANSPOSE, value )\n else:\n return self.baseFunction( location, size, value )\n return None", "def compile(self, mode, shader):\n holder = self.holderDepend( mode.cache.holder(self,None) )\n # TODO: depend on shader.material as well...\n # TODO: the compiled shader needs to depend on *everything* \n # down the set of objects...\n program = glCreateProgram()\n holder.data = program\n subShaders = []\n for shader in self.shaders:\n # TODO: cache links...\n subShader = shader.compile()\n if subShader:\n glAttachShader(program, subShader )\n subShaders.append( subShader )\n elif shader.source:\n log.warn( 'Failure compiling: %s %s', shader.compileLog, shader.url or shader.source )\n if len(subShaders) == len(self.shaders):\n glLinkProgram(program)\n glUseProgram( program )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.bind( self, mode, i ):\n i += 1\n \n glValidateProgram( program )\n validation = glGetProgramiv( program, GL_VALIDATE_STATUS )\n if validation == GL_FALSE:\n self.compileLog += \"\"\"Validation failure (%s): %s\"\"\"%(\n validation,\n glGetProgramInfoLog( program ),\n )\n program = False \n else:\n link_status = glGetProgramiv( program, GL_LINK_STATUS )\n if link_status == GL_FALSE:\n self.compileLog += \"\"\"Link failure (%s): %s\"\"\"%(\n link_status,\n glGetProgramInfoLog( program ),\n )\n program = False\n for 
subShader in subShaders:\n glDeleteShader( subShader )\n holder.data = program\n return program\n else:\n log.debug( 'Not done loading shader source yet' )\n holder.data = 0\n return None", "def setShader(self, *args):\n return _osgAnimation.RigTransformHardware_setShader(self, *args)", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n glCompileShader(vertex_shader)\n result = glGetShaderiv(vertex_shader, GL_COMPILE_STATUS)\n if result:\n print('Vertex shader compilation successful.')\n else:\n print('Vertex shader compilation FAILED:')\n print(glGetShaderInfoLog(vertex_shader))\n sys.exit(-1)\n\n fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fragment_shader, open('shaders/fs-phong-interp.c','r').read())\n glCompileShader(fragment_shader)\n result = glGetShaderiv(fragment_shader, GL_COMPILE_STATUS)\n if result:\n print('Fragment shader compilation successful.')\n else:\n print('Fragment shader compilation FAILED:')\n print(glGetShaderInfoLog(fragment_shader))\n sys.exit(-1)\n\n shaders = glCreateProgram()\n glAttachShader(shaders,vertex_shader)\n glAttachShader(shaders,fragment_shader)\n glLinkProgram(shaders)", "def convert_shaders(self):\n raise NotImplementedError()", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def 
init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def _copy_to_gpu(self):\n self.dispatch('on_texture')", "def initializeGL(self):\n # background color\n gl.glClearColor(0.8, 0.8, 0.8, 0)\n # Make initial data array.\n # compile the vertex shader\n vs = compile_shader(VERTEX, gl.GL_VERTEX_SHADER)\n # compile the geometry shader\n gs = compile_shader(GEOMETRY, gl.GL_GEOMETRY_SHADER)\n # compile the fragment shader\n fs = compile_shader(FRAGMENT, gl.GL_FRAGMENT_SHADER)\n # Link the programs.\n self.render_program = link_shaders(vs, gs, fs)\n # Compile the compute shader\n cs = compile_shader(COMPUTE, gl.GL_COMPUTE_SHADER)\n # Create the compute shader buffers.\n self.makeBuffers()\n #self.vbo = glvbo.VBO(self.attributes)\n self.vbo = gl.glGenBuffers(1)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.attributes.nbytes,\n self.attributes, gl.GL_DYNAMIC_COPY)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n self.ssbo = gl.glGenBuffers(1)\n gl.glBindBufferBase(gl.GL_SHADER_STORAGE_BUFFER, 1, self.ssbo)\n gl.glBufferData(gl.GL_SHADER_STORAGE_BUFFER, self.velocities.nbytes,\n self.velocities, gl.GL_DYNAMIC_COPY)\n self.compute_program = link_shaders(cs)", "def compileShaders(self):\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.activeShader = None\n\n fslgl.glmesh_funcs.compileShaders(self)", "def pre_render(self) -> None:\n self.buffer = Surface((self.render_width, self.render_height), SRCALPHA)\n self.buffer.fill(list(self.halo_texture.surfaces.values())[0].get_at((0, 0)))\n\n self.buffer.fill((0, 0, 0, 0), Rect(\n (self.render_width - self.halo_texture.get_width()) // 
2,\n (self.render_height - self.halo_texture.get_height()) // 2,\n self.halo_texture.get_width(),\n self.halo_texture.get_height()\n ))", "def render(self):\n glPushMatrix()\n glMultMatrixf(np.transpose(self.translation_matrix))\n glMultMatrixf(self.scaling_matrix)\n color = color.COLORS[self.color_index]\n glColor3f(color[0], color[1], color[2])\n\n if self.selected:\n # Emit light\n glMaterialfv(GL_FRONT, GL_EMISSION, [0.0, 0.0, 0.0])\n\n glPopMatrix()", "def render( self, shader, mode ):\n location = shader.getLocation( mode, self.name, uniform=False )\n if location is not None and location != -1:\n vbo = self.buffer.bind( mode )\n glVertexAttribPointer( \n location, self.size, GL_FLOAT, False, self.stride, \n vbo+self.offset\n )\n glEnableVertexAttribArray( location )\n return (vbo,location)\n return None", "def updateShaderState(self):\n raise NotImplementedError('updateShaderState must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r", "def render_aa_t(\n scene, camera, func, HEIGHT=100, WIDTH=100, V_SAMPLES=4,\n H_SAMPLES=4\n):\n output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)\n if not scene or scene.is_empty() or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return output\n total_samples = H_SAMPLES * V_SAMPLES\n # This is for showing progress %\n iterations = HEIGHT * WIDTH\n step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')\n counter = 0\n bar = Bar('Rendering', max=100 / PERCENTAGE_STEP)\n # This is needed to use it in Git Bash\n bar.check_tty = False\n for j in range(HEIGHT):\n for i in range(WIDTH):\n color = np.array([0, 0, 0], dtype=float)\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n # Floats x, y inside the image plane grid\n x = i + ((float(m) + r0) / H_SAMPLES)\n y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)\n # Get x projected in view coord\n xp = (x / float(WIDTH)) * camera.scale_x\n # Get y projected in view coord\n yp = (y / float(HEIGHT)) * camera.scale_y\n pp = camera.p00 + xp * camera.n0 + yp * camera.n1\n npe = utils.normalize(pp - camera.position)\n ray = Ray(pp, npe)\n\n color += func(ray, scene) / float(total_samples)\n counter += 1\n if counter % step_size == 0:\n bar.next()\n output[j][i] = color.round().astype(np.uint8)\n bar.finish()\n return output", "def compile(self, mode=None):\n colorSet = self.colorSet()\n if len(colorSet):\n vertices, colors = self.buildSphere( colorSet )\n glMultMatrixf( mode.matrix )\n first = displaylist.DisplayList()\n first.start()\n try:\n glVertexPointerf(vertices)\n glColorPointerf ( colors )\n glEnableClientState( GL_VERTEX_ARRAY )\n glEnableClientState( GL_COLOR_ARRAY )\n glDrawArrays( GL_TRIANGLE_STRIP, 0, len(vertices))\n finally:\n first.end()\n second = 
displaylist.DisplayList()\n second.start()\n try:\n glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)\n glDisable( GL_DEPTH_TEST ) # we don't want to do anything with the depth buffer...\n glDisable( GL_LIGHTING )\n glEnable( GL_COLOR_MATERIAL )\n glDisable( GL_CULL_FACE )\n \n for index in range( int(SEGMENTS) ):\n first()\n glRotated( 360.0/SEGMENTS, 0,1,0)\n glDisableClientState( GL_VERTEX_ARRAY )\n glDisableClientState( GL_COLOR_ARRAY )\n # now, completely wipe out the depth buffer, so this appears as a \"background\"... no idea how expensive this is\n glClear( GL_DEPTH_BUFFER_BIT )\n\n glEnable( GL_DEPTH_TEST )\n glEnable( GL_LIGHTING )\n glColor( 0.0,0.0,0.0)\n glDisable( GL_COLOR_MATERIAL )\n glEnable( GL_CULL_FACE )\n glFrontFace( GL_CCW )\n \n holder = mode.cache.holder(self, (first, second))\n for field in protofunctions.getFields( self ):\n # change to any field requires a recompile\n if field.name != 'bound':\n holder.depend( self, field )\n return second\n finally:\n second.end()\n holder = mode.cache.holder(self, (None, ()))\n return None", "def Render( self, mode, clear = 1 ):\n if mode.passCount == 0:\n if self.bound:\n dl = mode.cache.getData(self)\n if dl is None:\n dl = self.compile( mode=mode )\n else:\n # see note on compile's return value/store value\n dl = dl[1]\n if clear:\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);\n if dl:\n if callable( dl ):\n dl()\n return 1\n return 0", "def run(self):\r\n ##boucle appellant render() 30fois par seconde\r\n r = 0\r\n while r == 0:\r\n r = self.update()\r\n self.render()\r\n time.sleep(1/30)\r\n return r", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def getCompiled(self):\n if self.isCompiled():\n return self.shader\n else:\n raise Exception(\"el shader no ha sido compilado aun\")", "def step(machine, screen):\n op = machine.step()\n if op[1].startswith('DRAW') or op[1].startswith('CLS'):\n size = screen.get_size()\n image = Image.frombuffer('L', (64, 32), machine.framebuffer)\n image = ImageOps.colorize(image, '#111', '#0a0')\n image = image.resize(size, resample=Image.BOX)\n frame = pygame.image.frombuffer(image.tobytes(), size, 'RGB')\n screen.blit(frame, (0, 0))", "def on_execute(self):\n\n # cannot run without visualization\n if self.on_init() is False:\n self._running = False\n\n # while running listen for events and render these\n while(self._running):\n for event in pygame.event.get():\n self.on_event(event)\n self.on_render()\n\n # after running, quit pygame\n self.on_cleanup()", "def glRender(*args, accumBufferPasses: Union[int, bool]=0, alphaSource: Union[AnyStr, 
bool]=\"\",\n antiAliasMethod: Union[AnyStr, bool]=\"\", cameraIcons: bool=True, clearClr:\n Union[List[float, float, float], bool]=None, collisionIcons: bool=True,\n crossingEffect: bool=True, currentFrame: bool=True, drawStyle: Union[AnyStr,\n bool]=\"\", edgeSmoothness: Union[float, bool]=0.0, emitterIcons: bool=True,\n fieldIcons: bool=True, flipbookCallback: Union[AnyStr, bool]=\"\", frameEnd:\n Union[int, bool]=0, frameIncrement: Union[int, bool]=0, frameStart: Union[int,\n bool]=0, fullResolution: bool=True, grid: bool=True, imageDirectory: Union[AnyStr,\n bool]=\"\", imageName: Union[AnyStr, bool]=\"\", imageSize: Union[List[int, int,\n float], bool]=None, lightIcons: bool=True, lightingMode: Union[AnyStr, bool]=\"\",\n lineSmoothing: bool=True, offScreen: bool=True, renderFrame: Union[AnyStr,\n bool]=\"\", renderSequence: Union[AnyStr, bool]=\"\", sharpness: Union[float,\n bool]=0.0, shutterAngle: Union[float, bool]=0.0, textureDisplay: bool=True,\n transformIcons: bool=True, useAccumBuffer: bool=True, viewport: Union[List[int,\n int, float], bool]=None, writeDepthMap: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def drawSimple(self, screen):\r\n self.worlds[0].renderer.render(screen)", "def start_capture(self):\r\n super(PostProcess, self)._start()\r\n from pi3d.Display import Display\r\n xx = Display.INSTANCE.width / 2.0 * (1.0 - self.scale)\r\n yy = Display.INSTANCE.height / 2.0 * (1.0 - self.scale)\r\n ww = Display.INSTANCE.width * self.scale\r\n hh = Display.INSTANCE.height * self.scale\r\n opengles.glEnable(GL_SCISSOR_TEST)\r\n opengles.glScissor(ctypes.c_int(int(xx)), ctypes.c_int(int(yy)),\r\n ctypes.c_int(int(ww)), ctypes.c_int(int(hh)))", "def compute(self, func):\n idx = 0\n for y in range(self.h):\n for x in range(self.w):\n color = func(float(x)/(self.w-1), float(y)/(self.h-1), self)\n self.temp[idx] = color\n idx = idx+1\n swap = self.data\n self.data = self.temp\n self.temp = swap", "def paintGL(self):\n self._sceneviewer.renderScene()\n # paintGL end", "def needShader(self):\n return (self.threedee or\n (self.draw2DOutlineEnabled() and\n self.opts.vertexData is not None))", "def __init__(self, shader=\"post_base\", mipmap=False, add_tex=None,\r\n scale=1.0, camera=None, divide=1):\r\n super(PostProcess, self).__init__(\"postprocess\")\r\n self.scale = scale\r\n # load shader\r\n self.shader = Shader(shader)\r\n if camera == None:\r\n self.viewcam = Camera.instance() # in case this is prior to one being created\r\n else:\r\n self.viewcam = camera\r\n self.camera = Camera(is_3d=False)\r\n self.sprite = LodSprite(z=20.0, w=self.ix, h=self.iy, n=divide)\r\n self.sprite.set_2d_size(w=self.ix, h=self.iy)\r\n for b in self.sprite.buf:\r\n b.unib[6] = self.scale # ufact\r\n b.unib[7] = self.scale # vfact\r\n b.unib[9] = (1.0 - self.scale) * 0.5 # uoffset\r\n b.unib[10] = (1.0 - self.scale) * 0.5 # voffset\r\n self.alpha = False\r\n self.blend = True\r\n self.mipmap = mipmap\r\n self.tex_list = [self] # TODO check if this self reference causes graphics memory leaks\r\n if add_tex:\r\n self.tex_list.extend(add_tex)", "def dataShader(self):\n\t\treturn self._shader", "def getFragmentShader(self):\n return self.fshader", "def _on_unload_scene_shaders(self):\n\n artellapipe.ShadersMgr().unload_shaders()", "def late_gradient_fusion():\n pass", "def applyShader(name, obj, color=(.5,.5,.5), sType='lambert', sSet='__none__'):\n ##print 'evaluating'\n if sSet=='__none__':\n sSet=name+'SG'\n ##print 'no SG set given'\n\n if 
pm.objExists(name)==0 and pm.objExists(sSet)==0:\n ##print 'creating shader'\n myShader=pm.shadingNode(sType, asShader=1, name=name)\n pm.sets(n=sSet, renderable=1, empty=1, noSurfaceShader=1)\n if sType=='surfaceShader':\n myAt='.outColor'\n else:\n myAt='.color'\n pm.connectAttr(myShader+myAt, sSet+'.surfaceShader')\n pm.setAttr(myShader+myAt, color)\n pm.sets(sSet, fe=obj)\n return name", "def test_visual_2(qtbot, canvas, vertex_shader, fragment_shader):\n\n class MyVisual2(BaseVisual):\n def __init__(self):\n super(MyVisual2, self).__init__()\n self.vertex_shader = vertex_shader\n self.fragment_shader = fragment_shader\n self.set_primitive_type('points')\n self.transforms.add(Scale((.1, .1)))\n self.transforms.add(Translate((-1, -1)))\n self.transforms.add(Range(\n (-1, -1, 1, 1), (-1.5, -1.5, 1.5, 1.5)))\n s = 'gl_Position.y += (1 + 1e-8 * u_window_size.x);'\n self.inserter.insert_vert(s, 'after_transforms')\n self.inserter.add_varying('float', 'v_var', 'gl_Position.x')\n\n def set_data(self):\n self.n_vertices = 1000\n data = np.random.uniform(0, 20, (1000, 2))\n pos = self.transforms.apply(data).astype(np.float32)\n self.program['a_position'] = pos\n\n bounds = subplot_bounds(shape=(2, 3), index=(1, 2))\n canvas.gpu_transforms.add([Subplot((2, 3), (1, 2)), Clip(bounds)])\n\n # We attach the visual to the canvas. By default, a BaseLayout is used.\n v = MyVisual2()\n canvas.add_visual(v)\n v.set_data()\n\n v = MyVisual2()\n canvas.add_visual(v)\n v.set_data()\n\n canvas.show()\n qtbot.waitForWindowShown(canvas)\n # qtbot.stop()", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def main():\n viewer = Viewer()\n shader = Shader(\"Shaders/poisson.vert\", \"Shaders/poisson.frag\")\n shaderLight = Shader(\"Shaders/phong.vert\", \"Shaders/phong.frag\")\n\n # Création du terrain\n node_terrain = Node(transform=translate((-250,-11.7,-250)))\n node_terrain.add(Terrain(\"Terrain/sand.jpg\",viewer.depth))\n viewer.add(node_terrain)\n\n # Chargement de tous les objets\n # Chaque objet est mis dans un node qui permet de définir\n # sa position, rotation et taille et aussi de se rattacher\n # à une autre entité\n\n barnabe_obj = \"Fish/Fish/WhaleShark/WhaleShark.obj\"\n barnabe_png = \"Fish/Fish/WhaleShark/WhaleShark_Base_Color.png\"\n\n hector_obj = \"Fish/Fish/ReefFish5/ReefFish5.obj\"\n hector_png = \"Fish/Fish/ReefFish5/ReefFish5_Base_Color.png\"\n\n susie_obj = \"Fish/Fish/SeaHorse/SeaHorse.obj\"\n susie_png = \"Fish/Fish/SeaHorse/SeaHorse_Base_Color.png\"\n\n edgar_obj = \"Fish/Fish/BlueTang/BlueTang.obj\"\n edgar_png = \"Fish/Fish/BlueTang/BlueTang_Base_Color.png\"\n\n nemo_obj = \"Fish/Fish/ClownFish2/ClownFish2.obj\"\n nemo_png = \"Fish/Fish/ClownFish2/Clownfish2_Base_Color.png\"\n\n caroline_obj = \"Fish/Fish/Turtle/Turtle.obj\"\n caroline_png = \"Fish/Fish/Turtle/Turtle.jpg\"\n\n corail_obj = \"Fish/Fish/Corail/Corail.obj\"\n corail_png = \"Fish/Fish/Corail/Corail.jpg\"\n\n sebastien_obj = \"Fish/Fish/Crab/Crab.obj\"\n sebastien_png = \"Fish/Fish/Crab/Crab.jpg\"\n\n star_obj = \"Fish/Fish/BlueStarfish/BluieStarfish.obj\"\n 
star_png = \"Fish/Fish/BlueStarfish/BlueStarfish_Base_Color.png\"\n\n cube_obj = \"Fish/Fish/Cube/cube.obj\"\n cube_png = \"Fish/Fish/Cube/cube.png\"\n\n suzanne_obj = \"Fish/Fish/Suzanne/Suzanne.obj\"\n\n barnabe_node = Node(2)\n meshes = load_shadowed_texture(barnabe_obj, shader, viewer.depth, barnabe_png, 2)\n for mesh in meshes:\n barnabe_node.add(mesh)\n\n edgar_node = Node(0, transform=translate((1.5, 0.0, 1.5)) @ scale((0.1, 0.1, 0.1)))\n meshes = load_shadowed_texture(edgar_obj, shader, viewer.depth, edgar_png, 0)\n for mesh in meshes:\n edgar_node.add(mesh)\n barnabe_node.add(edgar_node)\n viewer.add(barnabe_node)\n\n cube_node = Node(transform=translate((5.0,-5.0,0.0)))\n meshes = load_shadowed_texture(cube_obj, shader,viewer.depth, cube_png, 1)\n for mesh in meshes:\n cube_node.add(mesh)\n\n suzanne_bubble_node = Node(transform=translate((-0.4,-0.25,0.65)))\n suzanne_node = Node(transform=scale((0.25,0.25,0.25)) @ rotate((0,1,1),-30))\n meshes = load_phong_mesh(suzanne_obj,shaderLight,viewer.depth)\n for mesh in meshes:\n suzanne_node.add(mesh)\n suzanne_bubble_node.add(suzanne_node)\n\n bubble_translate = {0: vec(-0.15,-0.17,0.25), 3: vec(-0.2,0,0.25), 5: vec(-0.15, 0.15, 0.25), 7: vec(-0.175, 0.27, 0.25)}\n bubble_rotate = {0: quaternion()}\n bubble_scale = {0: 0.02, 7: 0.06}\n bubble_node = KeyFrameControlNode(bubble_translate, bubble_rotate, bubble_scale)\n bubble_node.add(Bubble(15))\n suzanne_bubble_node.add(bubble_node)\n cube_node.add(suzanne_bubble_node)\n\n susie_trans = {0: vec(1.2, 1, 0), 2: vec(1.2, 2, 0), 5: vec(1.2, 1, 0)}\n susie_scale = {0: 0.03}\n susie_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=-45)}\n susie_node = KeyFrameControlNode(susie_trans, susie_rotate, susie_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie_node.add(mesh)\n cube_node.add(susie_node)\n\n susie2_trans = {0: vec(1, 2, 0), 3: vec(1, 1.5, 0), 5: vec(1, 2, 0)}\n susie2_scale = {0: 0.05}\n susie2_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=45)}\n susie2_node = KeyFrameControlNode(susie2_trans, susie2_rotate, susie2_scale)\n meshes = load_shadowed_texture(susie_obj, shader, viewer.depth, susie_png, 1)\n for mesh in meshes:\n susie2_node.add(mesh)\n cube_node.add(susie2_node)\n\n nemo_trans = {0: vec(-25, 2, 25), 22: vec(-2, 2, 2), 40: vec(20, 2, -20)}\n nemo_scale = {0: 0.1}\n nemo_rotate = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans, nemo_rotate, nemo_scale)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans2 = {0: vec(-28, 2, 26), 20: vec(0, 2, 3), 40: vec(20, 2, -23)}\n nemo_scale2 = {0: 0.07}\n nemo_rotate2 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans2, nemo_rotate2, nemo_scale2)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans3 = {0: vec(-22, 2, 21), 41: vec(20, 2, -20)}\n nemo_scale3 = {0: 0.07}\n nemo_rotate3 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans3, nemo_rotate3, nemo_scale3)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans4 = {0: vec(-22, 2.3, 21), 39: vec(20, 2.5, -20)}\n 
nemo_scale4 = {0: 0.07}\n nemo_rotate4 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans4, nemo_rotate4, nemo_scale4)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans5 = {0: vec(-22, 2.2, 21), 36: vec(30, 2.2, -20)}\n nemo_scale5 = {0: 0.1}\n nemo_rotate5 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans5, nemo_rotate5, nemo_scale5)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n nemo_trans6 = {0: vec(-20, 1.7, 21), 38: vec(30, 2, -20)}\n nemo_scale6 = {0: 0.1}\n nemo_rotate6 = {0: quaternion_from_axis_angle((0, 1, 0), degrees=120)}\n nemo_node = KeyFrameControlNode(nemo_trans6, nemo_rotate6, nemo_scale6)\n meshes = load_shadowed_texture(nemo_obj, shader, viewer.depth, nemo_png, 0)\n for mesh in meshes:\n nemo_node.add(mesh)\n cube_node.add(nemo_node)\n\n star_node = Node(transform=translate((0,0.5,0)) @ scale((0.3, 0.3, 0.3)))\n meshes = load_shadowed_texture(star_obj, shader, viewer.depth, star_png, 1)\n for mesh in meshes:\n star_node.add(mesh)\n cube_node.add(star_node)\n\n translate_keys = {0: vec(1,-0.5,0.5), 10: vec(1,-0.5,-0.5), 20: vec(1,-0.5,0.5)}\n rotate_keys = {0: quaternion_mul(quaternion_from_axis_angle((1,0,0), degrees=-90), quaternion_from_axis_angle((0,0,1), degrees=90))}\n scale_keys = {0: 0.02}\n sebastien_node = KeyFrameControlNode(translate_keys, rotate_keys, scale_keys)\n meshes = load_shadowed_texture(sebastien_obj, shader, viewer.depth, sebastien_png, 1)\n for mesh in meshes:\n sebastien_node.add(mesh)\n cube_node.add(sebastien_node)\n viewer.add(cube_node)\n\n corail_turtle_node = Node(transform=translate((2.5, -5.0, -5.0)))\n corail_node = Node(transform=scale((0.01,0.01,0.01)))\n meshes = load_shadowed_texture(corail_obj, shader, viewer.depth, corail_png, 1)\n for mesh in meshes:\n corail_node.add(mesh)\n corail_turtle_node.add(corail_node)\n\n hector_trans = {0: vec(-0.5, 1, 0.5)}\n hector_scale = {0: 0.07}\n hector_rotate = {0: quaternion_from_axis_angle((0,1,0), degrees=-90), 1: quaternion_from_axis_angle((0,1,0), degrees=-180), 2: quaternion_from_axis_angle((0,1,0), degrees=-270), 3: quaternion_from_axis_angle((0,1,0), degrees=-360), 4: quaternion_from_axis_angle((0,1,0), degrees=-90)}\n hector_node = KeyFrameControlNode(hector_trans, hector_rotate, hector_scale)\n meshes = load_shadowed_texture(hector_obj, shader,viewer.depth, hector_png, 3)\n for mesh in meshes:\n hector_node.add(mesh)\n corail_turtle_node.add(hector_node)\n\n caroline_node = Node(transform=translate((-0.5, 0.5, 0.0)) @ scale((0.01,0.01,0.01)) @ rotate((1,0,0), 270) @ rotate((0,0,1),315) @ rotate((0,1,0), 45))\n meshes = load_shadowed_texture(caroline_obj, shader,viewer.depth,caroline_png, 0)\n for mesh in meshes:\n caroline_node.add(mesh)\n corail_turtle_node.add(caroline_node)\n viewer.add(corail_turtle_node)\n\n # Commande de clavier\n print(\"\\n\\n ----------------- Les commandes de clavier sont les flèches ------------------- \\n\\n\")\n\n # start rendering loop\n viewer.run()", "def add_fragment_main(self, *args, **kwargs):\n kwargs['shader'] = 'fragment'\n self.add_main(*args, **kwargs)", "def update_carried(self, data):\n self.use()\n gpu_data = np.array(data, dtype=np.float32)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n 
gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)", "def _prepare_gl(self):\n # init gl\n shader = Shader()\n shader.attachShader(GL_VERTEX_SHADER, VERTEX_SHADER)\n shader.attachShader(GL_FRAGMENT_SHADER, FRAGMENT_SHADER)\n shader.linkProgram()\n self.shader = shader\n\n self._gl_uniforms = {}\n # cache uniform locations (much faster)\n self._gl_uniforms['tex'] = self._uloc('tex')\n self._gl_uniforms['color'] = self._uloc('color')\n self._gl_uniforms['mat_projection'] = self._uloc('mat_projection')\n self._gl_uniforms['mat_modelview'] = self._uloc('mat_modelview')\n self._gl_uniforms['mat_real_projection'] = self._uloc('mat_real_projection')\n self.vao_id = glGenVertexArrays(1)\n self.vbo_id = glGenBuffers(2)", "def surfaceShaderList(*args, add: name=None, remove: name=None, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def early_gradient_fusion():\n pass", "def renderizar(self):\n\t\t# Limpiar la pantalla\n\t\tglClear(GL_COLOR_BUFFER_BIT)\n\t\t# Renderizar la escena\n\t\tself.escena.renderizar()\n\t\t# Renderizar los buffers a la pantalla\n\t\tpygame.display.flip()", "def get_shader_codes(self):\n vs = VS_TEMPLATE\n fs = FS_TEMPLATE\n \n # Shader headers\n vs_header = self.get_header('vertex')\n fs_header = self.get_header('fragment')\n \n # Varyings\n for varying in self.varyings:\n s1, s2 = get_varying_declarations(varying)\n vs_header += s1\n fs_header += s2\n \n # vs_header += \"\".join(self.vs_headers)\n # fs_header += \"\".join(self.fs_headers)\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_HEADER%\", vs_header)\n fs = fs.replace(\"%FRAGMENT_HEADER%\", fs_header)\n \n # Vertex and fragment main code\n vs_main = self.get_main('vertex')\n fs_main = self.get_main('fragment')\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_MAIN%\", vs_main)\n fs = fs.replace(\"%FRAGMENT_MAIN%\", fs_main)\n \n # frag color or frag data\n if self.fragdata is None:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragColor = out_color;\"\"\")\n else:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragData[%d] = out_color;\"\"\" % self.fragdata)\n \n # Make sure there are no Windows carriage returns\n vs = vs.replace(b\"\\r\\n\", b\"\\n\")\n fs = fs.replace(b\"\\r\\n\", b\"\\n\")\n \n # OLDGLSL does not know the texture function\n if not OLDGLSL:\n fs = fs.replace(\"texture1D(\", \"texture(\" % 2)\n fs = fs.replace(\"texture2D(\", \"texture(\" % 2)\n \n # set default color\n fs = fs.replace('%DEFAULT_COLOR%', str(self.default_color))\n \n # replace GLSL version header\n vs = vs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n fs = fs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n \n # replace GLSL precision header\n vs = vs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n fs = fs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n \n return vs, fs", "def step(self, action):\r\n new_img, reward, done, info = self.env.step(action)\r\n self.update_buffer(new_img)\r\n return self.framebuffer, reward, done, info", "def finish_render():\n get_window().static_display = True\n get_window().flip_count = 0\n get_window().flip()", "def execute(self):\n self.W = self.X+self.y+self.a\n self.Z = 2*self.W", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def render_mp(scene, camera, height, width):\n output = np.zeros((height, width, RGB_CHANNELS), dtype=np.uint8)\n if not scene or scene.is_empty() or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return 
output\n print(\"Creating rays...\")\n rays = create_rays(camera, height, width)\n pool = mp.Pool(mp.cpu_count())\n print(\"Shooting rays...\")\n ray_colors = pool.map(\n raytrace_mp_wrapper, [(ray, scene) for ray in rays]\n )\n pool.close()\n print(\"Arranging pixels...\")\n for j in range(height):\n for i in range(width):\n output[j][i] = ray_colors[i + j * width]\n return output", "def render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image", "def execute(self):\n self.z = self.x + self.y", "def addCacheableShaderFromSourceCode(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def apply(self, simulation):\n t = simulation.time\n dt = simulation.timeStep\n if main_rank == 0:\n simulation.printState()\n # OpenCL update\n self.numMethod(self.gpu_field.gpu_data[self.component],\n self.color)\n self.window.widget.updateGL()\n if simulation.currentIteration > 1:\n self.window.label.setText(\n self.labelText + \"t={0:6.2f}, fps={1:6.2f}\".format(\n t + dt,\n 1. 
/ (self.timer.f_timers.values()[0].t - self.ctime)))\n self.ctime = self.timer.f_timers.values()[0].t", "def set_shader(self, shader):\r\n\r\n self.shader = shader\r\n for b in self.buf:\r\n b.shader = shader", "def calculateLighting(x,y,z, xnormal, ynormal, znormal):\n dummy = 0\n clr = dislin.getlit(x,y,z,xn,yn,zn,dummy)", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def _load_opengl(self):\r\n pass", "def draw(self):\n\n glEnable(self.texture.target)\n glBindTexture(self.texture.target, self.texture.id)\n if self.mipmaps:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)", "def __init__(self, screen_width, screen_height):\n self.display = (screen_width, screen_height)\n self.screen = pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL)\n self.percpective = gluPerspective(45, (self.display[0]/self.display[1]), 0.1, 50.0)\n self.step_back = glTranslatef(0.0, 0.0, -15)\n self.red_cube = Red_cube()\n self.green_cube = Green_cube()\n self.blue_cube = Blue_cube()\n self.black_cube = Black_cube()\n self.looper()", "def draw(self, screen):", "def test(self):\n with torch.no_grad():\n self.forward()\n self.compute_visuals()", "def _activate(self):\n\n log.debug(\"GPU: Activating program (id=%d)\" % self._id)\n gl.glUseProgram(self.handle)\n\n for uniform in self._uniforms.values():\n if uniform.active:\n uniform.activate()\n\n for attribute in self._attributes.values():\n if attribute.active:\n attribute.activate()", "def draw(self, proj_mat, view_mat, time=0):\n if self.mesh_shader:\n self.mesh_shader.draw(self, proj_mat, view_mat, time=time)", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. 
/ self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def camera_exec():\n pygame.init()\n locals()\n\n plot_num = 0\n running, Clock, font = camera_connect()\n while running:\n Clock.tick(100)\n\n # read framebuffer\n fb = None\n while (True) :\n try:\n fb = pyopenmv.fb_dump()\n break\n except Exception as e:\n # try and reconnect on failure\n camera_connect()\n\n # signal to UArm that camera has connected\n camera_started.set()\n if fb is not None:\n # create image from RGB888\n image = pygame.image.frombuffer(fb[2].flat[0:], (fb[0], fb[1]), 'RGB')\n screen = pygame.display.set_mode((fb[0], fb[1]), pygame.DOUBLEBUF, 32)\n\n fps = Clock.get_fps()\n # blit stuff\n screen.blit(image, (0, 0))\n screen.blit(font.render(\"FPS %.2f\"%(fps), 1, (255, 0, 0)), (0, 0))\n\n # update display\n pygame.display.flip()\n\n # get output from text buffer\n tx_len = pyopenmv.tx_buf_len()\n\n # object was found by camera if there is outputted text\n if tx_len:\n\n '''\n if UArm has signaled to the camera to identify the object and the camera has not already\n assigned values to the global variables associated with the object's location\n '''\n if camera_event.is_set() and (data_ready.is_set() is False):\n\n # read the most recent data at index 0 from the text buffer\n buff = pyopenmv.tx_buf(tx_len).decode()\n split_buff = str(buff).splitlines()\n if h_angle_key in split_buff[0]:\n\n # Most recent line in buff contains needed information\n global h_angle, v_angle, is_centered\n tok = split_buff[0].split()\n\n # set angles to corresponding values determined by camera\n h_angle, v_angle = float(tok[1]), float(tok[3])\n if tok[5] == \"True\":\n is_centered = True\n else:\n is_centered = False\n # signal that global variables have been set\n data_ready.set()\n\n if plot_ready.is_set():\n print(\"success_rate: \", success_history)\n plot_distance(distance_history, plot_num)\n plot_success(success_history, plot_num)\n plot_num += 1\n plot_ready.clear()\n print(\"success rate for \", len(success_history), \" tests: \",\n success_history.count(True) / len(success_history))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_c:\n pygame.image.save(image, \"capture.png\")\n\n pygame.quit()\n pyopenmv.stop_script()", "def Render(self, mode):\n\n shaders.glUseProgram(self.shader)\n try:\n self.vbo.bind()\n try:\n glEnableClientState(GL_VERTEX_ARRAY)\n GLVertexPointer(self.vbo)\n glDrawArrays(GL_TRIANGLES, 0, 9)\n finally:\n self.vbo.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n finally:\n shaders.glUseProgram(0)", "def render(self, proj):\n if self.text == '' or not self.mesh:\n return\n\n model = self.model.getTransformation()\n mvp = proj * self.transform.getTransformation() * model\n\n gl.glEnable(gl.GL_FRAMEBUFFER_SRGB)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n self.shader.bind()\n if self.color:\n self.shader.setUniform('u_color', self.color)\n self.font.bindAtlas()\n self.shader.setUniform('T_MVP', mvp)\n self.mesh.draw()\n 
gl.glDisable(gl.GL_BLEND)\n self.shader.unbind()\n self.font.unbindAtlas()\n gl.glDisable(gl.GL_FRAMEBUFFER_SRGB)", "def paintGL(self):\n print \"Entereing paintGL\"\n if self.bDrawing == True:\n print \"Drawing was true so quit\"\n return\n \n \n self.bDrawing = True\n threadDrawGL = threading.Thread(target = self.drawGLScene)\n threadDrawGL.start()\n #self.drawGLScene()", "def render_mp(scene, camera, height, width, rgb=False):\n output = np.zeros((height, width, RGB_CHANNELS), dtype=np.uint8)\n if not scene or not scene.objects or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return output\n print(\"Creating rays...\")\n rays = create_rays(camera, height, width)\n pool = mp.Pool(mp.cpu_count())\n print(\"Shooting rays...\")\n ray_colors = pool.map(\n raytrace_mp_wrapper, [(ray, scene, rgb) for ray in rays]\n )\n pool.close()\n print(\"Arranging pixels...\")\n for j in range(height):\n for i in range(width):\n output[j][i] = ray_colors[i + j * width]\n return output", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def __call__(self, pred_texture: Image.Image, gt_texture: Image.Image) -> float:\n from plan2scene.evaluation.metric_impl.color_hist import hsl_hist_l1\n return hsl_hist_l1(pred=pred_texture, gt=gt_texture, bins=self.bins)", "def example_BSR():\n pts = [(1,1),(2,2),(3,3)]\n lines = [ [ (1,1), (1,2), (2,1)], [ (6,1), (1,6), (5,-1)] ]\n\n bloody_simple_2drender('2d_render.png', pts=pts, vecs=pts, lines=lines )", "def render_dof(scene, camera, HEIGHT=100, WIDTH=100, V_SAMPLES=6, H_SAMPLES=6):\n output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)\n if not scene or scene.is_empty() or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return output\n total_samples = H_SAMPLES * V_SAMPLES\n # This is for showing progress %\n iterations = HEIGHT * WIDTH * total_samples\n step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')\n counter = 0\n bar = Bar('Raytracing', max=100 / PERCENTAGE_STEP)\n # This is needed to use it in Git Bash\n bar.check_tty = False\n for j in range(HEIGHT):\n for i in range(WIDTH):\n color = np.array([0, 0, 0], dtype=float)\n lens_sample_offsets = []\n n0 = camera.n0\n n1 = camera.n1\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n ap_sx = camera.lens_params.ap_sx\n ap_sy = camera.lens_params.ap_sy\n x_offset = ((r0 - 0.5) * m) / H_SAMPLES * ap_sx\n y_offset = ((r1 - 0.5) * n) / V_SAMPLES * ap_sy\n lens_sample_offsets.append((x_offset, y_offset))\n random_start = np.random.random_integers(0, total_samples - 1)\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n x = i + ((float(m) + r0) / H_SAMPLES)\n y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)\n # Get x projected in view coord\n xp = (x / float(WIDTH)) * camera.scale_x\n # Get y projected in view coord\n yp = (y / float(HEIGHT)) * camera.scale_y\n pp = camera.p00 + xp * camera.n0 + yp * camera.n1\n npe = utils.normalize(pp - camera.position)\n sample_idx = n + m * H_SAMPLES - random_start\n x_offset, y_offset = lens_sample_offsets[sample_idx]\n ps = pp + x_offset * n0 + y_offset * n1\n fp = pp + npe * camera.lens_params.f\n director = utils.normalize(fp - ps)\n ray = Ray(ps, director)\n\n color += raytrace(ray, scene) / float(total_samples)\n counter += 1\n if counter % step_size == 0:\n bar.next()\n output[j][i] = color.round().astype(np.uint8)\n bar.finish()\n 
return output", "def _end(self):\r\n opengles.glBindTexture(GL_TEXTURE_2D, 0)\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, 0)", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def __enter__(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)", "def showshaderlog(self, shader):\r\n N = 1024\r\n log = (ctypes.c_char * N)()\r\n loglen = ctypes.c_int()\r\n opengles.glGetShaderInfoLog(\r\n shader, N, ctypes.byref(loglen), ctypes.byref(log))\r\n print('shader {}, {}'.format(self.shfile, log.value))", "def compile_fragment_shader(source):\n fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)\n gl.glShaderSource(fragment_shader, source)\n gl.glCompileShader(fragment_shader)\n # check compilation error\n result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))\n return fragment_shader", "def setup_view(self, shader_program):\n n = self.normalize(self.eyepoint - self.lookat)\n u = self.normalize(np.cross(self.normalize(self.up), n))\n v = self.normalize(np.cross(n, u))\n\n view_mat = np.array([u[0], v[0], n[0], 0.0,\n u[1], v[1], n[1], 0.0,\n u[2], v[2], n[2], 0.0,\n -np.dot(u, self.eyepoint),\n -np.dot(v, self.eyepoint),\n -np.dot(n, self.eyepoint), 1.0],\n dtype=np.float32)\n\n view_location = glGetUniformLocation(shader_program, \"view\")\n glUseProgram(shader_program)\n glUniformMatrix4fv(view_location, 1, GL_FALSE, view_mat)", "def update_render_passes(self, scene=None, renderlayer=None):\n self.register_pass(scene, renderlayer, \"Combined\", 4, \"RGBA\", 'COLOR')\n\n # Denoiser\n if scene.luxcore.denoiser.enabled:\n self.register_pass(scene, renderlayer, \"DENOISED\", 3, \"RGB\", \"COLOR\")\n\n aovs = renderlayer.luxcore.aovs\n\n # Notes:\n # - It seems like Blender can not handle passes with 2 elements. 
They must have 1, 3 or 4 elements.\n # - The last argument must be in (\"COLOR\", \"VECTOR\", \"VALUE\") and controls the socket color.\n if aovs.rgb:\n self.register_pass(scene, renderlayer, \"RGB\", 3, \"RGB\", \"COLOR\")\n if aovs.rgba:\n self.register_pass(scene, renderlayer, \"RGBA\", 4, \"RGBA\", \"COLOR\")\n if aovs.alpha:\n self.register_pass(scene, renderlayer, \"ALPHA\", 1, \"A\", \"VALUE\")\n if aovs.depth:\n # In the compositor we need to register the Depth pass\n self.register_pass(scene, renderlayer, \"Depth\", 1, \"Z\", \"VALUE\")\n if aovs.albedo:\n self.register_pass(scene, renderlayer, \"ALBEDO\", 3, \"RGB\", \"COLOR\")\n if aovs.material_id:\n self.register_pass(scene, renderlayer, \"MATERIAL_ID\", 1, \"X\", \"VALUE\")\n if aovs.material_id_color:\n self.register_pass(scene, renderlayer, \"MATERIAL_ID_COLOR\", 3, \"RGB\", \"COLOR\")\n if aovs.object_id:\n self.register_pass(scene, renderlayer, \"OBJECT_ID\", 1, \"X\", \"VALUE\")\n if aovs.emission:\n self.register_pass(scene, renderlayer, \"EMISSION\", 3, \"RGB\", \"COLOR\")\n if aovs.direct_diffuse:\n self.register_pass(scene, renderlayer, \"DIRECT_DIFFUSE\", 3, \"RGB\", \"COLOR\")\n if aovs.direct_glossy:\n self.register_pass(scene, renderlayer, \"DIRECT_GLOSSY\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_diffuse:\n self.register_pass(scene, renderlayer, \"INDIRECT_DIFFUSE\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_glossy:\n self.register_pass(scene, renderlayer, \"INDIRECT_GLOSSY\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_specular:\n self.register_pass(scene, renderlayer, \"INDIRECT_SPECULAR\", 3, \"RGB\", \"COLOR\")\n if aovs.position:\n self.register_pass(scene, renderlayer, \"POSITION\", 3, \"XYZ\", \"VECTOR\")\n if aovs.shading_normal:\n self.register_pass(scene, renderlayer, \"SHADING_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.avg_shading_normal:\n self.register_pass(scene, renderlayer, \"AVG_SHADING_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.geometry_normal:\n self.register_pass(scene, renderlayer, \"GEOMETRY_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.uv:\n # We need to pad the UV pass to 3 elements (Blender can't handle 2 elements)\n self.register_pass(scene, renderlayer, \"UV\", 3, \"UVA\", \"VECTOR\")\n if aovs.direct_shadow_mask:\n self.register_pass(scene, renderlayer, \"DIRECT_SHADOW_MASK\", 1, \"X\", \"VALUE\")\n if aovs.indirect_shadow_mask:\n self.register_pass(scene, renderlayer, \"INDIRECT_SHADOW_MASK\", 1, \"X\", \"VALUE\")\n if aovs.raycount:\n self.register_pass(scene, renderlayer, \"RAYCOUNT\", 1, \"X\", \"VALUE\")\n if aovs.samplecount:\n self.register_pass(scene, renderlayer, \"SAMPLECOUNT\", 1, \"X\", \"VALUE\")\n if aovs.convergence:\n self.register_pass(scene, renderlayer, \"CONVERGENCE\", 1, \"X\", \"VALUE\")\n if aovs.noise:\n self.register_pass(scene, renderlayer, \"NOISE\", 1, \"X\", \"VALUE\")\n if aovs.irradiance:\n self.register_pass(scene, renderlayer, \"IRRADIANCE\", 3, \"RGB\", \"COLOR\")\n\n # Light groups\n lightgroups = scene.luxcore.lightgroups\n lightgroup_pass_names = lightgroups.get_pass_names()\n default_group_name = lightgroups.get_lightgroup_pass_name(is_default_group=True)\n # If only the default group is in the list, it doesn't make sense to show lightgroups\n # Note: this behaviour has to be the same as in the _add_passes() function in the engine/final.py file\n if lightgroup_pass_names != [default_group_name]:\n for name in lightgroup_pass_names:\n self.register_pass(scene, renderlayer, name, 3, \"RGB\", \"COLOR\")", "def set_shader_range_ui(*args):\n 
shader_range = pm.floatField(\"f_shaderRange\", value=True, query=True)\n set_shader_range(shader_range)", "def main(context, event):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for reg in area.regions:\n if reg.type == 'WINDOW':\n region = reg\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n if hasattr(space, 'region_3d'):\n rv3d = space.region_3d\n \n user32 = windll.user32\n screensize = user32.GetSystemMetrics(78), user32.GetSystemMetrics(79)\n \n X= region.x\n Y= region.y\n top = screensize[1]\n\n win_x = bpy.context.window_manager.windows[0].x\n win_y = bpy.context.window_manager.windows[0].y\n\n flipped = top - (event['y'] + Y + win_y)\n \n coord = (event['x'] - win_x - X, flipped)\n\n view3d_utils.region_2d_to_location_3d\n \n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\n ray_target = ray_origin + view_vector\n \n guide = create_giude()\n\n def visible_objects_and_duplis():\n \"\"\"Loop over (object, matrix) pairs (mesh only)\"\"\"\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())\n\n def obj_ray_cast(obj, matrix):\n \"\"\"Wrapper for ray casting that moves the ray into object space\"\"\"\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None\n\n # cast rays and find the closest object\n best_length_squared = -1.0\n best_obj = None\n for obj, matrix in visible_objects_and_duplis():\n hit, normal, face_index = obj_ray_cast(obj, matrix)\n if hit is not None:\n hit_world = matrix * hit\n vidx = [v for v in obj.data.polygons[face_index].vertices]\n verts = np.array([matrix * obj.data.shape_keys.key_blocks['modeling cloth key'].data[v].co for v in obj.data.polygons[face_index].vertices])\n vecs = verts - np.array(hit_world)\n closest = vidx[np.argmin(np.einsum('ij,ij->i', vecs, vecs))]\n length_squared = (hit_world - ray_origin).length_squared\n if best_obj is None or length_squared < best_length_squared:\n best_length_squared = length_squared\n best_obj = obj\n guide.location = matrix * obj.data.shape_keys.key_blocks['modeling cloth key'].data[closest].co\n extra_data['latest_hit'] = matrix * obj.data.shape_keys.key_blocks['modeling cloth key'].data[closest].co\n extra_data['name'] = obj.name\n extra_data['obj'] = obj\n extra_data['closest'] = closest\n \n if extra_data['just_clicked']:\n extra_data['just_clicked'] = False\n best_length_squared = length_squared\n best_obj = obj", "def addCacheableShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n 
opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def __init__(self):\n super(Clashtest, self).__init__(\"clashtest\")\n # load clashtest shader\n self.shader = Shader(\"clashtest\")\n\n size = self.ix * self.iy * 3\n self.img = (ctypes.c_char * size)()", "def render(self, mode='human'):\n\n if self.RENDER_ENV_ONLY:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=np.array([120, 120, 120])/255.0)\n bezel = 10\n \n self._env_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n self._agent_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n if (self.RENDER_INDIV_MEMORY == True and self.INDIV_MEMORY == \"fog\") or (self.RENDER_TEAM_MEMORY == True and self.TEAM_MEMORY == \"fog\"):\n SCREEN_W = 1200\n SCREEN_H = 600\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n \n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n\n self._env_render(self._static_map,\n [7, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [7+1.49*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_red_render,\n [7+1.49*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [7, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # ind blue agent memory rendering\n for num_blue, blue_agent in enumerate(self._team_blue):\n if num_blue < 2:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+num_blue*SCREEN_H//4, 7], [SCREEN_H//4-10, SCREEN_H//4-10])\n else:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+(num_blue-2)*SCREEN_H//4, 7+SCREEN_H//4], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n # ind red agent memory rendering\n for num_red, red_agent in enumerate(self._team_red):\n if num_red < 2:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+num_red*SCREEN_H//4, 7+1.49*SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n \n else:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+(num_red-2)*SCREEN_H//4, 7+SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n if self.TEAM_MEMORY == \"fog\" and self.RENDER_TEAM_MEMORY == True:\n # blue team memory rendering\n blue_visited = np.copy(self._static_map)\n blue_visited[self.blue_memory] = UNKNOWN\n self._env_render(blue_visited,\n [7+2.98*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # red team memory rendering \n red_visited = np.copy(self._static_map)\n red_visited[self.red_memory] = UNKNOWN\n 
self._env_render(red_visited,\n [7+2.98*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n else:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n \n self._env_render(self._static_map,\n [5, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10], self._team_blue)\n self._env_render(self.get_obs_red_render,\n [5+SCREEN_W//2, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n\n if self.SILENCE_RENDER:\n return self.viewer.get_array()\n else:\n return self.viewer.render(return_rgb_array = mode=='rgb_array')", "def add_vertex_main(self, *args, **kwargs):\n kwargs['shader'] = 'vertex'\n self.add_main(*args, **kwargs)", "def addShader(self, QOpenGLShader): # real signature unknown; restored from __doc__\n return False" ]
[ "0.71154165", "0.64277923", "0.6419814", "0.6234535", "0.6087237", "0.60823435", "0.586072", "0.58073187", "0.5745648", "0.57310456", "0.5686501", "0.565925", "0.56536305", "0.5627507", "0.56140345", "0.5604499", "0.5579304", "0.55418664", "0.5501335", "0.54909116", "0.5490467", "0.54724336", "0.5449743", "0.54343605", "0.5432718", "0.53947717", "0.53750706", "0.5354524", "0.53540784", "0.53460133", "0.53206795", "0.5315652", "0.5308532", "0.52690476", "0.52533436", "0.52354354", "0.52166575", "0.5212128", "0.5191561", "0.5188683", "0.51856095", "0.51625395", "0.51510245", "0.5142714", "0.51154155", "0.51029396", "0.51001793", "0.50967634", "0.5086856", "0.5081024", "0.50719106", "0.50590813", "0.5056625", "0.5040019", "0.50205374", "0.5014788", "0.50146824", "0.49883118", "0.4987431", "0.4979996", "0.4971683", "0.49680606", "0.49585736", "0.4951885", "0.49501896", "0.49470872", "0.49131036", "0.4909547", "0.48921067", "0.48915395", "0.48867035", "0.4880734", "0.48791564", "0.48729542", "0.48711175", "0.48709643", "0.48638237", "0.48607355", "0.4859449", "0.485813", "0.48548982", "0.48530892", "0.48508108", "0.48470312", "0.48464707", "0.48322114", "0.48220116", "0.48207885", "0.48176736", "0.48145604", "0.48133025", "0.48121792", "0.48087335", "0.47974658", "0.47972482", "0.47969595", "0.4796757", "0.47959387", "0.47849244", "0.4783667", "0.4782497" ]
0.0
-1
called every time any file under the gl directory changes
def recompile(self): self.vaos = [] try: self.program, uniforms = self.build_prog(self.gl) self.u_time, self.u_width, self.u_height = uniforms vao = GLUtil.screen_vao(self.gl, self.program) self.vaos.append(vao) self.compute, uniforms, buffers = self.build_cs(self.gl) self.u_cstime, self.u_cswidth, self.u_csheight = uniforms self.buf_in, self.buf_out = buffers self.set_gpu_wh(width, height) self.gx, self.gy = int(width / 8), int(height / 8) self.set_gpu_time() log("[Renderer] shader recompiled.") except Exception as e: log(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_file_changed(self, path):\n\t\tpass", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.update_file(file_id, path=parent)\n self.gapy.logger.info(\"The file {} was modified, the content was updated\".format(file_name, parent))\n print(f\"\\nThe file {file_name} was modified and synchronized\")", "def on_modified(self, event):\n path = Path(event.src_path)\n if path.is_file() and path.suffix == '.json':\n self.load_configuration(path)\n self.hook(self.configuration)", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def refresh(self):\n self.dir = dirs['app']\n ssBase = GPath(mwIniFile.getSetting('General','Screen Shot Base Name','ScreenShot'))\n if ssBase.head:\n self.dir = self.dir.join(ssBase.head)\n newData = {}\n reImageExt = re.compile(r'\\.(bmp|jpg)$',re.I)\n #--Loop over files in directory\n for fileName in self.dir.list():\n filePath = self.dir.join(fileName)\n maImageExt = reImageExt.search(fileName.s)\n if maImageExt and filePath.isfile(): \n newData[fileName] = (maImageExt.group(1).lower(),filePath.mtime)\n changed = (self.data != newData)\n self.data = newData\n return changed", "def _post_update_paths(self, **kwargs):\n\n files_updated = kwargs.get('files_updated', list())\n if not files_updated:\n return\n\n maya_utils.reload_textures(files_updated)\n\n # Dependencies are already reloaded during update paths process\n # maya_utils.reload_dependencies(files_updated)", "def on_dir_change(self, event):\r\n\r\n if self.dir_change_callback is not None:\r\n self.dir_change_callback(event)\r\n event.Skip()", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def check_line_edits_and_refresh_filestate(self):\r\n\t\t# line edit changes (other places where filestate is updated: browse button clicks, ok click)\r\n\t\tif self.source_img_entry.isModified():\r\n\t\t\tself.filestate.set_source_img_filename(self.source_img_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.existing_case and self.source_db_entry.isModified():\r\n\t\t\tself.filestate.set_source_db_filename(self.source_db_entry.text().replace(\"\\\\\", \"/\"))\r\n\t\tif self.sink_dir_entry.isModified():\r\n\t\t\tself.filestate.set_sink_dir_name(self.sink_dir_entry.text().replace(\"\\\\\", \"/\"))", "def statusupdate(filepath):\n pass", "def edited_file_locations(self):", "def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()", "def _watchFolder(self):\n wm = pyinotify.WatchManager()\n wm.add_watch(self.gdocs_folder, pyinotify.IN_MODIFY, rec=True)\n \n handler = EventHandler(self)\n notifier = pyinotify.Notifier(wm, handler)\n \n print 'waiting for changes . . 
.'\n notifier.loop()", "def touch_files_dependent_on_changes(kymera_path, dirs, suffixes, changes):\n for dir in dirs:\n if dir[0] != '/':\n # This is a relative path to kymera root\n dir = kymera_path + dir\n if not os.path.exists(dir):\n print \"Directory %s included in ALL_SRCDIRS, ALL_INCDIRS or CFG_LIBS doesn't exist, continuing...\" % dir\n else:\n for file_name in os.listdir(dir):\n full_file_path= os.path.join(dir, file_name)\n # Filter a list of filenames down to those with one of the given suffixes\"\n if matching_file(suffixes, full_file_path):\n # Find all the files from a set with one of a list of suffices\n # containing one of the changed definitions\n if grep_words(changes, full_file_path):\n print \"Mark file for rebuild:\", full_file_path\n touch_file(full_file_path)", "def reloadfile(self, ):\n self.loadfile()", "def touched_files(self, parent):", "def on_dir_changed(self, event):\r\n\r\n if not self.searchin_update:\r\n pth = event.directory\r\n if pth is not None and exists(pth):\r\n self.searchin_update = True\r\n self.m_searchin_text.safe_set_value(pth)\r\n self.searchin_update = False\r\n event.Skip()", "def syncfolder():", "def watch_for_file_changes(self, root_dir, callback):\n # type: (str, Callable[[], None]) -> None\n raise NotImplementedError(\"watch_for_file_changes\")", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def background_import_dir_and_watch(bv):\n background_import_dir(bv, watch=True)", "def on_modified(self, event):\n super(myEventHandler,self).on_modified(event)\n if event.is_directory:\n try:\n source = event.src_path\n dest = event.src_dest\n pathtoonedir = self.onedir.getonedirrectory()\n source = source.replace(pathtoonedir ,\"\")\n dest = dest.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(source, dest)\n except Exception as e:\n print e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n if file.startswith('.'):\n return\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def change_dir(filename):", "def __CB_ProjectChanged(self, files):\r\n \r\n for fn in files:\r\n if fn.endswith('settings.py') or fn.endswith('manage.py'):\r\n self.ScheduleUpdate()\r\n return", "def on_any_event(self, event):\n\n logging.info(f\"New event - {event.event_type} - {'directory' if event.is_directory else 'file'} - {event.src_path}\")", "def onUpdated(self):", "def on_change(self, event):\n event_path = event.src_path\n observed_paths = []\n\n for watchdog_path, child_observed_paths in self._watch_dog_observed_paths.items():\n if event_path.startswith(watchdog_path):\n observed_paths += child_observed_paths\n\n if not observed_paths:\n return\n\n changed_paths = []\n for path in observed_paths:\n path_obj = Path(path)\n # The path got deleted\n if not path_obj.exists():\n self._observed_paths.pop(path, None)\n changed_paths += [path]\n else:\n new_checksum = calculate_checksum(path)\n if new_checksum != self._observed_paths.get(path, None):\n changed_paths += [path]\n self._observed_paths[path] = new_checksum\n if changed_paths:\n self._input_on_change(changed_paths)", "def _on_path(self, change):\n if change.new:\n self._git = G.Repo.init(change.new)\n ignore = Path(change.new) / \".gitignore\"\n if not ignore.exists():\n ignore.write_text(\".ipynb_checkpoints/\")\n self.commit(\"initial commit\")\n\n self._initialize_watcher()\n self._update_head_history()", "def update_source_files(source_directory_list, source_extension_list):\n # get source files in the directory list\n source_total = 0\n for unused, source_directory in enumerate(source_directory_list):\n source_files_list = []\n get_requested_files(source_directory, source_extension_list, source_files_list)\n # update the files with shared object references\n for unused, source_file in enumerate(source_files_list):\n updated_file = []\n file_changed = modify_input_file(source_file, updated_file)\n if file_changed:\n filepath = get_printble_filepath(source_file)\n print(filepath)\n source_total += 1\n if __file_update:\n write_output_file(updated_file, source_file)\n print(\"Total Files\", source_total)\n print()", "def __renderCallback(self, filesRendered=[]):\n # caching issues workaround\n for f in filesRendered:\n self.fileServiceLocal.refreshCache(f)\n log('__renderCallback:: Reloading image %s' %self.repath.localize(f))\n self.addFeedback(\"reloadImages\", filesRendered)", "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def dolibupdate(root, subdir):\n\n global fileCount, grooveCount, gdDate, grooveDB, processedFiles, mkGrooveList\n\n db = grooveDB[0][1]\n\n if subdir == '.':\n print \"Skipping: '.'\"\n return\n\n if subdir:\n print \" Processing library directory '%s'.\" % subdir\n\n\n \"\"\" Get a list of the files in this directory. If the list\n includes a file called 'MMAIGNORE' the entire directory\n (and subdirs) is ignored. 
Otherwise, each file in the\n directory ending in 'mma' is parsed for groove defs.\n \"\"\"\n\n p = os.path.join(root,subdir)\n dirfiles = os.listdir(p)\n\n if \"MMAIGNORE\" in dirfiles:\n print \"Skipping: %s\" % p\n return\n\n for fn in sorted(dirfiles):\n\n # Ignore hidden files and emacs auto-save and dead.\n\n if fn.startswith('.') or fn.startswith('#'):\n continue\n\n f=os.path.join(root, subdir, fn) # Create full path name\n\n if os.path.isdir(f):\n dolibupdate(root, os.path.join(subdir,fn)) # recursive!\n\n elif f.endswith(gbl.ext):\n ename = os.path.join(subdir, fn)\n\n processedFiles.append(ename)\n \n if gdDate and ename in db and os.path.getmtime(f) < gdDate:\n print \" Existing: %s\" % f\n grooveCount += len(db[ename])\n continue\n\n if ename in db:\n print \" Updating: %s\" % f\n else:\n print \" Creating: %s\" % f\n mkGrooveList = []\n MMA.grooves.grooveClear([])\n gbl.mtrks = {}\n MMA.swing.mode = 0\n for c in gbl.midiAssigns.keys():\n gbl.midiAssigns[c]=[]\n for a,v in enumerate(gbl.midiAvail):\n gbl.midiAvail[a]=0\n gbl.mtrks[0]=MMA.midi.Mtrk(0)\n\n gbl.tnames = {}\n\n MMA.parse.parseFile(f) # read current file, grab grooves\n\n fileCount += 1 # just so we can report to user\n grooveCount += len(mkGrooveList)\n db[ename]=mkGrooveList\n\n else:\n if not f.endswith(mmadir):\n print \" Ignoring: %s\" % f", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def refresh(self):\n \n ffm = FlagFileManager(basedir=self.basedir)\n flagfiles = ffm.search(projectname=self.projectname)\n if flagfiles:\n self.tag = flagfiles[0].tag # we assume only 1 flagfile per project\n self.filename = '%s.%s.%s' %(self.projectname, self.timestamp, self.tag)", "def process_IN_ISDIR(self, event):", "def _notebook_dir_changed(self, name, old, new):\n\t\tself.notebook_dir = new", "def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')", "def _LoadNewPaths(self):\n paths = sorted(path\n for path in io_wrapper.ListDirectoryAbsolute(self._directory)\n if self._path_filter(path))\n for path in paths:\n if path not in self._paths:\n logger.info('New path detected: %s.' 
% path)\n self._paths[path] = _EventPathLoader(path, self._loader_factory)", "def file_changed(self):\n if not self.lst_file_item:\n return\n state = self.txt_state.text()\n new_filename = self.txt_file.text()\n self.lst_file_item.setText(new_filename)\n self.symbols[state] = new_filename\n\n error, self.preview_file = self.check_image(new_filename)\n if not error:\n self.lbl_image.setText(\"\")\n self.preview = True\n else:\n self.lbl_image.setText(error)\n self.update()", "def hook_file_opened(self):", "def modified(self, eventsrc):\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()", "def svn_fs_paths_changed(*args):\r\n return _fs.svn_fs_paths_changed(*args)", "def onApply(self, event):\n\n # Rename all of the files based on the substitution.\n for (old, new) in zip(self.m_diskNames, self.m_newNames):\n if old != new:\n old = os.path.join(self.m_curPath, old)\n new = os.path.join(self.m_curPath, new)\n try:\n os.rename(old, new)\n except OSError:\n pass\n\n # Now we out the lists so that what the user sees after this\n # reflects what's on disk.\n self.m_diskNames[:] = []\n self.m_newNames[:] = []\n\n # Update.\n self.updateDiskFileList()", "def update(src):", "def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)", "def process_IN_MODIFY(s, event):\n s.doReload(event)", "def cb_func(event):\n global GLOB_SIGNAL\n global GLOB_RC\n GLOB_RC = event.event.ev_error\n GLOB_SIGNAL.set()", "def test_change_mtime(self):\n with pike.Graph('g') as graph:\n pike.glob('.', '*') | pike.ChangeListenerNode(fingerprint='mtime')\n self.make_files(foo='a', bar='b')\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo', 'bar'])\n new_mtime = time.time() + 1\n os.utime('foo', (new_mtime, new_mtime))\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo'])", "def last_file_updated(self):\n query = '*.xml'\n keymap_files = glob.glob(query)\n\n sorted_files = sorted(keymap_files, key=self.mtime, reverse=1)\n last_modified_file = sorted_files[0]\n second_last_modified_file = sorted_files[1]\n\n t1 = self.mtime(last_modified_file)\n t2 = self.mtime(second_last_modified_file)\n\n logger.debug('Last modified time: {0}'.format(t1))\n logger.debug('Second Last modified time: {0}'.format(t2))\n\n last_modified_time = self.mtime(last_modified_file)\n last_access_time = self.atime(last_modified_file)\n\n if sys.platform == \"win32\":\n logger.info('Detected Windows environment')\n # self.regenerate_osx(last_access_time, last_modified_time)\n elif sys.platform == 'darwin':\n logger.info('Detected OSX environment')\n # self.regenerate_windows(last_access_time, last_modified_time)\n else:\n logger.error('Unhandled platform: {0}'.format(sys.platform))\n pass", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = 
pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def on_modified(self, event):\n self.log.debug(f\"ManifestFileChangeHandler: file '{event.src_path}' has been modified.\")\n manifest = self.component_cache._load_manifest(filename=event.src_path)\n if manifest: # only update the manifest if there is work to do\n for catalog, action in manifest.items():\n self.log.debug(f\"ManifestFileChangeHandler: inserting ({catalog},{action}) into update queue...\")\n if action == \"delete\":\n # The metadata instance has already been deleted, so we must\n # fabricate an instance that only consists of a catalog name\n catalog_instance = ComponentCatalogMetadata(name=catalog)\n\n else: # cache_action == 'modify':\n # Fetch the catalog instance associated with this action\n catalog_instance = MetadataManager(\n schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID\n ).get(name=catalog)\n\n self.component_cache.update(catalog=catalog_instance, action=action)\n self.component_cache.update_manifest(filename=event.src_path) # clear the manifest", "def handleFileChanged(self, dir, filename=None):\n if not filename:\n # TODO: find first rst file if index.rst doesn't exist.\n filename = \"index.rst\"\n self.file_path = Path(dir, filename)\n file_stem = str(self.file_path.stem)\n #html_str = \"build/html/{0}.html\".format(file_stem)\n\n #self.output_html_path = Path(dir, html_str).absolute()\n \n # Load the directory containing the file into the tree.\n self.tree.load_from_dir(dir)\n \n if not self.file_path.endswith('.rst'):\n try:\n html_path = os.path.dirname(os.path.relpath(self.tree.get_current_item_path(), dir + '/source'))\n self.output_html_path = \"{0}/build/html/{1}/{2}\".format(dir, html_path, filename)\n print(self.output_html_path)\n self.preview.load_html(self.output_html_path)\n except:\n print(traceback.format_exc())\n return\n \n # Load the file into the editor\n self.editor.open_file(self.tree.get_current_item_path())\n try:\n html_path = os.path.dirname(os.path.relpath(self.tree.get_current_item_path(), dir + '/source'))\n self.output_html_path = \"{0}/build/html/{1}/{2}.html\".format(dir, html_path, file_stem)\n except:\n pass\n #print(self.tree.get_current_item_path())\n \n # Load corresponding HTML file from pre-built Sphinx docs\n self.preview.load_html(self.output_html_path)", "def reload(self):", "def reload(self):", "def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name", "def handleReload(self, confInfo=None):", "def onReload(self,moduleName=\"NeedleFinder\"):\n if profiling : profbox()\n #framework\n globals()[moduleName] = slicer.util.reloadScriptedModule(moduleName)", "def readdata(self, filepaths):\n pass", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "async def start_watching_roots(self):\n db.clear_visits(self.db_conn)\n for root in self.config.roots:\n await self.watch_tree(root)\n\n for path in db.get_unvisited_files(self.db_conn):\n print(path)\n await self.process_change(path, None)", "def reload(self, subdirs: list):\n self.__cogs = 
[f'cogs.{cog.replace(\".py\",\"\")}'\n for cog in listdir(self.__COG_PATH) if self.__is_cog(cog)]\n\n for sub in subdirs:\n if not sub:\n continue\n sub_path = path.join(self.__COG_PATH, sub)\n if path.isdir(sub_path):\n self.__cogs += [f'cogs.{sub_path}.{cog.replace(\".py\",\"\")}'\n for cog in listdir(sub_path) if self.__is_cog(cog)]", "def libUpdate():\n\n global fileCount, gdDate, grooveDB, processedfiles\n dupMessage = []\n\n print \"Creating MMA groove directory database(s). Standby...\"\n\n \"\"\" gbl.libPath points to one main directory tree. We create a separate\n .mmaDB file for each directory found in the main tree. IE, if we have \n the directories stdlib and bvstuff we end up with stdlib/.mmaDB and\n bvstuff/.mmaDB.\n \"\"\"\n\n for dir in os.listdir(gbl.libPath):\n libpath = os.path.join(gbl.libPath, dir)\n\n if not os.path.isdir(libpath): # skip files, just process directories\n continue\n\n gdDate = None\n grooveDB = [[dir, {}]]\n\n # load up our database with this directory's DB file, skip if -G\n\n if gbl.makeGrvDefs == 1:\n g=loadDB(dir)\n if g:\n grooveDB=[[dir, g]]\n gdDate = os.path.getmtime(os.path.join(gbl.libPath, dir, mmadir))\n\n dolibupdate(libpath, '') # update all files in this dir\n\n # Strip out defs of deleted (not found) files.\n\n db = grooveDB[0][1]\n\n for f in db.keys():\n if f not in processedFiles:\n print \" Deleting: %s\" % f\n del g[f]\n\n try:\n outpath = file(os.path.join(libpath, mmadir), 'wb')\n except:\n error(\"Error creating lib-database file '%s'. \" \\\n \"Do you need to be root?\" % libpath)\n\n outpath.write(\"### mmaDB ... AUTOGENERATED BINARY DATA. \"\n \"DO NOT EDIT!!!\\n\")\n pickle.dump(db, outpath, pickle.HIGHEST_PROTOCOL )\n outpath.close()\n\n # check the database we just saved for duplicate entries.\n \n dprinted = None\n for f in db:\n for g in db[f]:\n for ff in db:\n if f == ff:\n continue\n if g in db[ff]:\n if not dprinted:\n dupMessage.append(\" Lib %s: %s & %s have dups.\" % \\\n (libpath, f, ff))\n dprinted=1\n if dprinted:\n break\n \n print\n print \"Database update complete.\"\n print \" Files processed: %s\" % fileCount\n print \" Total number of grooves: %s\" % grooveCount\n print\n\n if dupMessage:\n print \"Warning: Duplicate groove definitions found.\"\n for a in dupMessage:\n print a\n\n sys.exit(0)", "def update_path():\n #TODO update path information\n pass", "def DirEV():\n\n target.BoundarySync()", "def source_changed(source, cache):\n return os.path.getmtime(source)>os.path.getmtime(cache)", "def file_stat(self, file_path):", "def run_file_change(op_list_file):\n if os.path.exists(\"flag_change_file.txt\"):\n print(\n \"-----maybe op_file has changed, so don't need to change again------\"\n )\n else:\n run_multi_thread(op_list_file)", "def refresh_source(self):\n pass", "def update( ):\r\n pass", "def on_files(files, config):\n courses = list_courses()\n config['nav'] = [course.make_nav() for course in courses]", "def updateCodeFiles(self):\n # if this annoying slow, could probably drop to bash or soemthing\n # for a search/replace\n for filename, filetype in self._get_code_files():\n lines = open(filename).readlines()\n found_version_line = False\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename)\n elif filetype is 'PyRex':\n lines, write_out = self._update_pyrex_file(lines, filename)\n elif filetype is 'C':\n lines, write_out = self._update_c_file(lines, filename)\n else:\n raise TypeError, \"Unknown code file 
type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)", "def watch_file(self, path: Path) -> None:\n directory = path.parent\n logger.debug(\"Starting watch: %s\", path)\n with self.lock:\n if directory in self.directories:\n self.directories[directory].increment(path.name)\n return\n\n watch = self.observer.schedule(self.handler, str(directory))\n self.directories[directory] = self.AssetWatch(\n Counter({path.name: 1}), watch\n )", "def _load(self, directory):\n pass", "def linkdir_callback(self):\n pass", "def refresh(self):\n self.dir = dirs['app']\n #-# Since there is only one utils file, its name is hardcoded.\n utilsFile = \"utils.dcg\"\n newData = {}\n if os.path.isfile(utilsFile) and os.access(utilsFile, os.R_OK):\n f = open(utilsFile, \"r\")\n lines = f.readlines()\n f.close()\n for line in lines:\n line = line.strip()\n if line.startswith(\";\") == False and line != \"\":\n name, commandLine, arguments, description = line.split(\";\")\n newData[name] = (commandLine.strip(), arguments, description.strip())\n changed = (self.data != newData)\n self.data = newData\n return changed", "def update():", "def update():", "def save_old_change_status_files():\n mastcontrol=dirutil.get_mast_control_path()\n if not os.path.exists(os.path.join(mastcontrol,\"changestatusfiles\")):\n os.mkdir(os.path.join(mastcontrol,\"changestatusfiles\"))\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(mastscratch)\n for recipedir in recipedirs:\n ingreddirs=dirutil.immediate_subdirs(os.path.join(mastscratch,recipedir))\n for ingreddir in ingreddirs:\n csfile = os.path.join(mastscratch,recipedir,ingreddir,\"change_status.txt\")\n if os.path.isfile(csfile):\n changestatusfile = MASTFile(csfile)\n trydir = os.path.join(mastcontrol,\"changestatusfiles\",recipedir)\n if not os.path.exists(trydir):\n os.mkdir(trydir)\n trydir2 = os.path.join(trydir, ingreddir)\n if not os.path.exists(trydir2):\n os.mkdir(trydir2)\n changestatusfile.to_file(\"%s/change_status.txt\" % trydir2)\n return True", "def GetModifiedDirectory():\n return os.path.join(GetDataDir(), \"modified\")", "def refresh(self):\n hasChanged = self.hasChanged()\n if hasChanged: self.loadIni()\n if len(self.loadFiles) > 255:\n del self.loadFiles[255:]\n self.safeSave()\n return hasChanged", "def on_created(self, event):\n \n file_name = os.path.basename(event.src_path)\n parent = os.path.dirname(event.src_path)\n parents_id = self.filesystem[parent][\"id\"]\n\n if event.is_directory:\n if file_name not in self.ignore_dirs:\n file_id = self.gapy.create_file(file_name, path=parent, parents_id=[parents_id], isFolder=True)\n self.filesystem[file_name.rstrip(\"/\")] = file_id \n self.gapy.logger.info(\"The directory {} was created with id {}\".format(file_name, file_id))\n else:\n if file_name not in self.ignore_files:\n with open(event.src_path, \"w\") as empty_file:\n empty_file.write(\"\\t\")\n file_id = self.gapy.create_file(file_name, path=parent, parents_id=[parents_id])\n self.filesystem[parent.rstrip(\"/\")][\"files\"].append({\"name\": file_name, \"id\": file_id})\n self.gapy.logger.info(\"The file {} was created with id {}\".format(file_name, file_id))\n print(f\"\\nFile created: {file_name} at {datetime.now()}\")\n\n self.update_fs()", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = 
os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported", "def check_folder_state(self):\n while self:\n diff = self.get_diff()\n print(diff or 'No changes detected')\n if diff:\n self.parent.send_diff_data(diff)\n time.sleep(1)", "def svn_fs_contents_changed(*args):\r\n return _fs.svn_fs_contents_changed(*args)", "def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created", "def update_freq_dist(filename):\r\n pass", "def changed(self, filename='.md5', glob=None):\n if glob is not None:\n filename += '.glob-' + ''.join(ch.lower()\n for ch in glob if ch.isalpha())\n return changed(self, filename, glob=glob)", "def compile_levels():\n \n for ogmo_filename in [x for x in os.listdir(MAP_SRC_DIR) if x.endswith('.oel')]:\n ogmo_path = os.path.join(MAP_SRC_DIR, ogmo_filename)\n ogmo_flattened_path = os.path.join(MAP_COMPILED_DIR, ogmo_filename)\n\n if os.path.exists(ogmo_flattened_path):\n if os.path.getmtime(ogmo_flattened_path) > os.path.getmtime(ogmo_path):\n sys.stdout.write(\"--%s up to date\\n\" % ogmo_flattened_path)\n continue\n \n flatten_ogmo_tilemaps(ogmo_path, ogmo_flattened_path)", "def test_watch_graph_changes(self):\n self.make_files(foo='foo', bar='bar')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n watcher = pike.watch_graph(graph)\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'bar'])\n self.make_files(foo='foo', bar='foo')\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'foo'])", "def update_reports():\n return os.listdir('./reports')", "def refresh_UI(self):\r\n\t\tself.source_img_entry.setText(self.filestate.get_source_img_filename())\r\n\t\tself.sink_dir_entry.setText(self.filestate.get_sink_dir_name())\r\n\t\tself.sink_db_name_entry.setText(self.filestate.get_sink_db_filename())\r\n\t\tif self.existing_case: self.source_db_entry.setText(self.filestate.get_source_db_filename())", "def load_files_to_compare(self):\n self.first_source_data = load_path(self.path1)\n self.second_source_data = load_path(self.path2)", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = 
os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def handle_reload_toolbox(self):", "def initialize_directory_filenames():\n\n\n Gb.ha_config_directory = Gb.hass.config.path()\n Gb.ha_storage_directory = Gb.hass.config.path(STORAGE_DIR)\n Gb.ha_storage_icloud3 = Gb.hass.config.path(STORAGE_DIR, 'icloud3')\n Gb.icloud3_config_filename = Gb.hass.config.path(STORAGE_DIR, 'icloud3', 'configuration')\n Gb.icloud3_restore_state_filename = Gb.hass.config.path(STORAGE_DIR, 'icloud3', 'restore_state')\n Gb.wazehist_database_filename = Gb.hass.config.path(STORAGE_DIR, 'icloud3', 'waze_location_history.db')\n Gb.icloud3_directory = Gb.hass.config.path('custom_components', 'icloud3')\n Gb.entity_registry_file = Gb.hass.config.path(STORAGE_DIR, STORAGE_KEY_ENTITY_REGISTRY)\n\n # Note: The Event Log directory & filename are initialized in config_file.py\n # after the configuration file has been read\n\n #Set up pyicloud cookies directory & file names\n Gb.icloud_cookies_dir = Gb.hass.config.path(STORAGE_DIR, 'icloud')\n Gb.icloud_cookies_file = \"\".join([c for c in Gb.username if match(r\"\\w\", c)])\n if not os.path.exists(Gb.icloud_cookies_dir):\n os.makedirs(Gb.icloud_cookies_dir)", "def register(self, observer):\r\n dirnames = set()\r\n for filename in sh(coffeescript_files(), capture=True).splitlines():\r\n dirnames.add(path(filename).dirname())\r\n for dirname in dirnames:\r\n observer.schedule(self, dirname)", "def fileCounter(directory):", "def reload(self,folder_id, file_id):\n if folder_id != self.current_folder or file_id != self.current_file:\n self.current_folder = folder_id\n self.current_file = file_id\n self.X, self.Y = self.load_a_couple(self.load_a_path(self.current_folder, self.current_file))", "def inotify_code_changed():\n wm = pyinotify.WatchManager()\n notifier = pyinotify.Notifier(wm, EventHandler())\n\n def update_watch(sender=None, **kwargs):\n if sender and getattr(sender, 'handles_files', False):\n # No need to update watches when request serves files.\n # (sender is supposed to be a django.core.handlers.BaseHandler subclass)\n return\n\n mask = (\n pyinotify.IN_MODIFY |\n pyinotify.IN_DELETE |\n pyinotify.IN_ATTRIB |\n pyinotify.IN_MOVED_FROM |\n pyinotify.IN_MOVED_TO |\n pyinotify.IN_CREATE |\n 
pyinotify.IN_DELETE_SELF |\n pyinotify.IN_MOVE_SELF\n )\n\n wm.add_watch('/home/matthew/Projects/mattbot', mask)\n\n # Block until an event happens.\n update_watch()\n notifier.check_events(timeout=None)\n notifier.read_events()\n notifier.process_events()\n notifier.stop()\n\n # If we are here the code must have changed.\n return EventHandler.modified_code", "def changed(self):\n\t\tpass" ]
[ "0.7321618", "0.67455846", "0.63352966", "0.6173503", "0.6163231", "0.6076398", "0.60359526", "0.6032375", "0.5989207", "0.5915403", "0.58932906", "0.58539456", "0.5828015", "0.5816045", "0.58157974", "0.58147573", "0.5802325", "0.5790458", "0.5786011", "0.57448286", "0.56976694", "0.5695481", "0.5679762", "0.5676018", "0.5674896", "0.5623578", "0.5617278", "0.5590083", "0.5574177", "0.5563217", "0.556102", "0.55516744", "0.55302376", "0.55292255", "0.5511875", "0.5493456", "0.5483159", "0.5483012", "0.54787624", "0.5472446", "0.5453197", "0.5446942", "0.5427388", "0.5417043", "0.5407798", "0.5375361", "0.536847", "0.5350876", "0.5346905", "0.53435624", "0.5343113", "0.53137463", "0.53030235", "0.53030235", "0.5298654", "0.5292881", "0.5291224", "0.5288526", "0.5286807", "0.5282381", "0.5281426", "0.5280824", "0.52776927", "0.52721345", "0.5269999", "0.5257994", "0.525507", "0.5253583", "0.5240227", "0.5233216", "0.52321714", "0.5229366", "0.52258694", "0.52075434", "0.5204697", "0.5202511", "0.5202511", "0.5193881", "0.5192498", "0.51912683", "0.51896006", "0.5188576", "0.518578", "0.5179726", "0.517806", "0.51775783", "0.5175596", "0.51609653", "0.5159693", "0.5152971", "0.515187", "0.51466244", "0.5146412", "0.5143073", "0.51407397", "0.5135532", "0.51302284", "0.51278687", "0.51250124", "0.51243913", "0.5123672" ]
0.0
-1
called only once at start
def initializeGL(self):
    self.gl = mg.create_context()
    self.recompile()

    self.to_capture = False
    self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4")
    capture_framebuffer = self.gl.framebuffer([self.capture_texture])
    self.capture_scope = self.gl.scope(capture_framebuffer)

    self.to_record = False
    self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4")
    record_framebuffer = self.gl.framebuffer([self.record_texture])
    self.record_scope = self.gl.scope(record_framebuffer)

    self.recording = None

    self.to_capture_buffer_in = False
    self.to_capture_buffer_out = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_start(self):", "def started(self):", "def _start(self):", "def on_start(self):", "def on_start(self):", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def start(self):\n ...", "def on_run(self):\r\n\r\n\t\tpass", "def start(self) -> None:", "def start(self) -> None:", "def _start(self):\n pass", "def Start(self) :\n\t\t...", "def start(self):\r\n pass", "def start (self):\n pass", "def start (self):\n pass", "def start():", "def start():", "def start():", "def start():", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def on_run(self):\n pass", "def run(self): \r\n return", "def on_start(self, ctx):\n pass", "def startup(self) -> None:", "def sync_start(self):", "def post_init(self):\n\t\tpass", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def run(self):\r\n pass", "def postRun(self):\n pass", "def _post_init(self):\n pass", "def on_start(self):\n self.init()", "def run(self):\n\t\t\n\t\tpass", "def afterInit(self):", "def on_startup(self) -> None:\n ...", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def startup(self):\n pass", "def running(self):\n pass", "def pre_start(self) -> None:\n pass", "def on(self):", "def pre_stop(self):", "def _afterInit(self):\n pass", "def onInit(self):\n pass", "def _post_init(self) -> None:\n return", "def on_load(self):", "def on(self) -> None:", "def run(self):\n \n pass", "async def on_start(self):", "def pre_execute(self):", "def start_processing(self):", "def start_of_game(self):\n pass", "def _onStart(self, name):\n logging.debug(\"onStart...\")", "def on_load(self):\n pass", "def on_load(self):\n pass", "def on_start(self, session):\n pass", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def run(self):\n self.started()", "def launch(self):", "def use(self):", "def post_execute(self):", "def beforeUpdate(self):" ]
[ "0.83904356", "0.8242063", "0.8004934", "0.79939187", "0.79939187", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7951618", "0.7815382", "0.78118557", "0.7695339", "0.7695339", "0.7693047", "0.7675579", "0.7652083", "0.75488406", "0.75488406", "0.7539093", "0.7539093", "0.7539093", "0.7539093", "0.75342566", "0.75342566", "0.75342566", "0.75342566", "0.75342566", "0.75342566", "0.75342566", "0.75342566", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.7490488", "0.74624103", "0.74246204", "0.7389291", "0.7388984", "0.7362261", "0.735571", "0.73479253", "0.73479253", "0.73057324", "0.73010737", "0.72549176", "0.7217327", "0.7211995", "0.72021055", "0.71714437", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7155708", "0.7137951", "0.7133355", "0.71147764", "0.7089143", "0.7088224", "0.7079762", "0.7057016", "0.7055391", "0.70496833", "0.7039869", "0.7037296", "0.7026645", "0.7012545", "0.7006692", "0.69955575", "0.6987018", "0.69740516", "0.69740516", "0.69619685", "0.69404715", "0.69404715", "0.69404715", "0.6939228", "0.6922849", "0.68944836", "0.6866422", "0.6861251" ]
0.0
-1
get and store tweets based on a given function
def getter(collection, modulename=None, filename="AlltweetsNoOp.json"):
    count = 0
    print "total number of tweets in this database is ", collection.find().count()
    # open a new file ###
    outfile = open(filename, "w")
    # according to the json list format
    outfile.write("[")
    if modulename == None:
        option = NoOp
    else:
        module = imp.load_source('module.name', modulename)
        option = module.check
    for tweet in collection.find():
        count += 1
        if count % 5000 == 0:
            print count
        if option(tweet):
            tweet.pop(u'_id', None)
            json.dump(tweet, outfile, indent = 4)
            outfile.write(",")
    # close all files
    outfile.seek(-1, 1)
    outfile.write("]")
    outfile.close()
    print "finish writing to the file"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def collect_tweets(ticker):\n\n # Authenticate Tweepy credentials\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY)\n auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY)\n api = tweepy.API(auth)\n\n stock = Stock.objects.get(ticker=ticker)\n\n # Search for recent Tweets with the specific ticker\n collected_tweets = api.search(q=ticker, result_type='recent', count=100)\n\n # Iterate over the collected Tweets and save them\n for tweet in collected_tweets:\n try:\n Tweet.objects.create(\n text=tweet.text,\n created_at=tweet.created_at,\n user_id=tweet.user.id,\n user_screen_name=tweet.user.screen_name,\n verified=tweet.user.verified,\n followers_count=tweet.user.followers_count,\n friends_count=tweet.user.friends_count,\n favourites_count=tweet.user.favourites_count,\n retweet_count=tweet.retweet_count,\n stock=stock,\n )\n except IntegrityError:\n pass", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for 
tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def recoverTweets(authors=[], words=[], removeRetweets=False, sortBy='newest',**kwargs):\n authors = mapToValid(authors)\n words = mapToValid(words)\n\n def getTopNTweets(retrievedTweets, numberOfTweets):\n \"\"\"Sort the retrievedTweets by sortBy specified and returns the top-N Tweets\"\"\"\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]\n\n def getTweetsByUser(username, maxTweets=1000):\n \"\"\"Returns a list of (json) objects representing the tweets for a specified Twitter username.\n If any words is queried, it will filter out every tweet that doesn't contain any of those words.\"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)\n\n def searchTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for a specified query.\n It doesn't work if any authors is specified.\n Then, startingDate and endingDate cannot be older than one week ago because of Twitter restrictions for standardAPI\n :reference: https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets\n \"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n 
myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)\n\n\n def getTwitterscraperTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for the specified inputs.\n It's very useful to avoid restrictions such as number of requests or dates not older than 7 days ago for twitterAPI (and tweepy).\n It will call the recoverTweets.sh script to properly query the API by twitterscraper.\n :reference: https://github.com/taspinar/twitterscraper\n \"\"\"\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. 
Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets\n\n\n if \"maxTweets\" in kwargs:\n maxTweets=kwargs['maxTweets']\n else:\n maxTweets=1000\n\n if len(authors)==0 and len(words)==0:\n return(\"qua\") ###call sample function with maxTweets and (if any) dates\n if 'startingDate' in kwargs or 'endingDate' in kwargs:\n return getTwitterscraperTweets()\n\n if len(authors)!=0:\n tweets, splits, i = [], splitIntegerIntoIntegers(maxTweets,len(authors)), 0\n for author in authors:\n tweets.extend(getTweetsByUser(username=author, maxTweets=splits[i]))\n i+=1\n return tweets\n return getTweets()", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')", "def on_tweet(self, tweet):\n pass", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def process_tweet(tweet):\n global start_date\n global end_date\n global geo_enabled_tweets\n global retweets\n\n # Check for filters before processing any further\n if args.filter and tweet.source:\n if not args.filter.lower() in tweet.source.lower():\n return\n\n tw_date = tweet.created_at\n\n # Updating most recent tweet\n end_date = end_date or tw_date\n start_date = tw_date\n\n # Handling retweets\n try:\n # We use id to get unique accounts (screen_name can be changed)\n rt_id_user = tweet.retweeted_status.user.id_str\n retweeted_users[rt_id_user] += 1\n\n if tweet.retweeted_status.user.screen_name not in id_screen_names:\n id_screen_names[rt_id_user] = \"@%s\" % tweet.retweeted_status.user.screen_name\n\n retweets += 1\n except:\n pass\n\n # Adding timezone from profile 
offset to set to local hours\n if tweet.user.utc_offset and not args.no_timezone:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=tweet.user.utc_offset))\n\n if args.utc_offset:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=args.utc_offset))\n\n # Updating our activity datasets (distribution maps)\n activity_hourly[\"%s:00\" % str(tw_date.hour).zfill(2)] += 1\n activity_weekly[str(tw_date.weekday())] += 1\n\n # Updating langs\n detected_langs[tweet.lang] += 1\n\n # Updating sources\n detected_sources[tweet.source] += 1\n\n # Detecting geolocation\n if tweet.place:\n geo_enabled_tweets += 1\n tweet.place.name = tweet.place.name\n detected_places[tweet.place.name] += 1\n\n # Updating hashtags list\n if tweet.entities['hashtags']:\n for ht in tweet.entities['hashtags']:\n ht['text'] = \"#%s\" % ht['text']\n detected_hashtags[ht['text']] += 1\n\n # Updating domains list\n if tweet.entities['urls']:\n for url in tweet.entities['urls']:\n domain = urlparse(url['expanded_url']).netloc\n if domain != \"twitter.com\": # removing twitter.com from domains (not very relevant)\n detected_domains[domain] += 1\n\n # Updating mentioned users list\n if tweet.entities['user_mentions']:\n for ht in tweet.entities['user_mentions']:\n mentioned_users[ht['id_str']] += 1\n if not ht['screen_name'] in id_screen_names:\n id_screen_names[ht['id_str']] = \"@%s\" % ht['screen_name']", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in 
authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def collect_tweets(search_id, search_term, number_of_tweets):\n\n tweets = []\n for tweet in api_collector.collect(search_term, number_of_tweets):\n tweets.append((tweet.id_str, tweet.created_at, tweet.full_text))\n if len(tweets) == 0:\n search = Search.objects.get(pk=search_id)\n search.empty = True\n search.save()\n notify_searchers.delay(search_id)\n else:\n classify_tweets.delay(search_id, tweets)", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! 
Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. \\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. 
Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_tweets(self):\r\n return self.tweets", "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def pushTweets(tweets,user,cacheKey=False):\n \n tweetDump = filterTweets(tweets) # Extract mentions, URLs, replies hashtags etc...\n\n pushRenderedTweets2Neo.delay(user,tweetDump) \n pushRenderedTweets2Cass.delay(user,tweetDump)\n pushRenderedTweets2Solr.delay(tweetDump['tweets']+tweetDump['retweets'])\n\n if cacheKey: # These are the last Tweets, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH TWEETS ***' \n \n #return True", "def tweet_processor(self, tweets):\n with Timer() as timer:\n detection_count = self.tweet_processor_fct(tweets) or 0\n # Increment the total number of detections.\n self.redis.hincrby(self.metadata_cache_key, 'detection',\n detection_count)\n\n log.debug(\"Processed {} tweets in {:2.3f} secs.\".format(\n 
len(tweets), timer.interval))", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets 
downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def handler(event,context):\n send_tweet(random.choice(potential_tweets))", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def get_tweets(api):\n return api.user_timeline()", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n 
tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def get_tweets():\n\n return Tweet.query.all()", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n 
#\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def get_tweets(self, start_date, end_date):\r\n pass", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def get_tweet(self, id):\r\n return self.tweets[id]", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n 
output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n 
print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def TweetsRealTime(dbname, user, password, table_name, APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, loop_gathering = False, search_terms = [\"Happy\"]):\n try:\n \"\"\"Be careful with the following global variables. They are necessary to make this script run from the main function\n This is because Twython streamer does not allow other inputs.\n If you run this script stand-alone you can safely remove the globals and it will still work.\"\"\"\n global con \n con = psycopg2.connect(\"dbname = {} user = {} password = {}\".format(dbname,user,password))\n global cur\n cur = con.cursor()\n global tablename\n tablename = table_name\n print \"Connected\"\n except:\n print \"Database connection error\" \n \n try:\n stream = MyStreamer(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n print 'Connecting to twitter: will take a minute'\n except ValueError:\n con.close()\n cur.close()\n print 'Something went wrong while making connection with Twitter: '+str(ValueError)\n\n try:\n stream.statuses.filter(track = search_terms) \n except:\n # Shortcut to restarting the script - if the connection cancels then it gracefully terminates the db lock and establishes a new connection\n cur.close\n con.close \n print \"########### Stream terminated ###########\"\n if loop_gathering != False:\n TweetsRealTime(dbname = dbname,\n user = user,\n password = password,\n table_name = table_name,\n search_terms = search_terms,\n APP_KEY = APP_KEY,\n APP_SECRET = APP_SECRET,\n OAUTH_TOKEN = OAUTH_TOKEN,\n OAUTH_TOKEN_SECRET = OAUTH_TOKEN_SECRET,\n loop_gathering = loop_gathering)", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n 
logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 
'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def handle_new_tweet(tweet_data):\n\n assert tweet_data.get('id'), \"Tweet Must have ID\"\n assert tweet_data.get('search_string'), \"Tweet must have search string\"\n\n # check for this tweet already being tracked\n set_key = keys.tweet_search_set(tweet_data.get('search_string'))\n tweet_id = tweet_data.get('id')\n found = rc.zrank(set_key, tweet_id)\n print 'set key: %s' % set_key\n print 'found: %s' % found\n\n if not found:\n\n # set main hash\n key = keys.tweet_data(tweet_data.get('id'))\n rc.hmset(key, tweet_data)\n\n # add to our weighted set\n # keep the value as the id and the weight\n print 'adding: %s' % tweet_id\n rc.zadd(set_key, tweet_id, tweet_id)\n\n # fire event that tweet was added to db\n revent.fire('new_tweet_saved', tweet_data)\n\n return True\n\n return False", "def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets", "def runner(args):\n apikey, users = args\n api = collect.mk_api(apikey)\n for user in users:\n db_connection = db.mk_connection()\n collect.collect_user_tweets(api, user, collect.mk_sql_insert_handler(db_connection))\n db.close_connection(db_connection)", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image 
== None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. 
Change status type through configure_tweet method.\")\n\n return", "def tweet(self, message: str) -> None:\n\n # YOUR CODE HERE\n tweet = Tweet(self.userid, date.today(), message)\n self.tweets.append(tweet)", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def run_twittercrawler(type, *params):\n tweets = None\n if type == 'timeline': # Call specific crawl function based on type\n tweets = by_timeline(params[0])\n elif type == 'search':\n tweets = by_search(params[0])\n elif type == 'streaming':\n print('Streaming functionality not yet implemented')\n return None\n\n return [tweet._json for tweet in tweets]", "def getTweetsFromPheme(self):\n self.helper.buildDict4Tweets(self.folderpath)", "def __update_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'w')\n f_tweeted = open(f'{TWEETED}', 'w')\n try:\n f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))\n f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))\n finally:\n f_tweets.close()\n f_tweeted.close()", "async def make_tweet(tweet: str = Query(...),\n # attachment_url: Optional[str] = Query(None, alias=\"link of tweet to quote\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n # in_reply_to: Optional[int] = Query(None, alias=\"link of tweet to reply to\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"), \n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n # if in_reply_to:\n # regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n # status_id = regex.group(\"id\")\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=tweet,\n # attachment_url=attachment_url,\n # in_reply_to_status_id=status_id,\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def user_tweets(username, TTL=30):\n # profile_user = query_db('select * from user where username = ?',\n # [username], one=True)\n profile_user = userdetails_API_query(username)\n print \"profile \", profile_user\n if profile_user is None:\n abort(404)\n followed = False\n\n if g.user:\n followed = mongo.db.users.find_one(\n {'_id': g.user[0]}, {'follows': profile_user['_id']}) is not None\n # followed = query_db('''select 1 from follower where\n # follower.who_id = ? 
and follower.whom_id = ?''',\n # [g.user[0], profile_user['user_id']],\n # one=True) is not None\n # Create a hash key\n user_profile = \"\"\n hash = hashlib.sha224(user_profile).hexdigest()\n key = \"user_timeline_key\" + hash\n # print \"Created Key\\t : %s\" % key\n\n############### REDIS SESSION CODE #####################\n\n # Check if data is in cache.\n if (R_SERVER.get(key)):\n print \"** Messages returned from Redis Cache **\"\n return cPickle.loads(R_SERVER.get(key))\n\n else:\n print \"** Messages returned from MongoDB **\"\n messages = user_query(profile_user)\n data = []\n # print messages\n for row in messages:\n data.append({'user': row['username'], 'message': row['text'],\n 'pub_date': format_datetime(row['pub_date'])})\n # print data\n user_profile = jsonify(messages=data, Status_code=status.HTTP_200_OK)\n\n R_SERVER.set(key, cPickle.dumps(user_profile))\n R_SERVER.expire(key, TTL)\n return user_profile", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def stream_tweets(api_token: str, api_secret: str, access_token: str, access_secret: str, saver,\n keywords: list = None, users: list = None, locations: list = None, stall_warning: bool = False):\n\n auth = OAuthHandler(api_token, api_secret)\n auth.set_access_token(access_token, access_secret)\n api = API(auth)\n listener = TwitterListener(manager=saver, api=api)\n stream = Stream(auth=auth, listener=listener)\n log.write_log('Streaming started', 'execution')\n stream.filter(track=keywords, follow=users, locations=locations, stall_warnings=stall_warning)", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def fetch_tweets(event):\r\n return pd.read_csv(\"data/tweets/%s.csv\" % event, \r\n dtype={\r\n 'tweet_id': str,\r\n 'in_reply_tweet': str,\r\n 'thread': str,\r\n 'user_id': str,\r\n 'in_reply_user': str\r\n },\r\n engine=\"python\")", "def tweets_features(tweet):\n tweet = remove_stop_words(tweet)\n return {'TWEET': tweet}", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n 
timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def save(self):\n return getattr(self, \"_tweets\", None)", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def accepted(eachtweet):\n import general_functions\n from authenticator import oauth\n try:\n ##\n tweet = '{}: {} #{}'.format(eachtweet[0], eachtweet[1], eachtweet[3].upper())\n r = oauth.request('statuses/update', {'status': tweet})\n\n replace(\"clients.csv\",\"ReadyForAck.csv\")\n replace2(\"ReadyForAck.csv\",\"clients.csv\")\n except:\n print('ietsgaatfout')", "def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break", "def __init__(self, topics=None, tweet_file=None, mode='batch'):\n self.topics = topics\n # (The twitter API will only return a max of 100 count)\n self.GEN_MAX_TWEET = 100 # the max number of tweets to generate\n self.tweet_file = tweet_file\n self.mode = mode\n self.tweets = []\n\n if topics and tweet_file:\n print(\"WARNING! you input both topics and the tweet file, only one is expected\")\n exit(-1)\n\n if not topics and not tweet_file:\n print(\"WARNING! 
you input either topics or tweet file, one is expected\")\n exit(-1)\n\n # If file argument is given, it will not connect to twitter server\n # It will just save tweets in self.tweets\n if tweet_file:\n with open(tweet_file, 'r') as infile:\n for line in infile:\n self.tweets.append(json.loads(line))\n\n else:\n consumer_key = 'bbqKfXEU2VJNoWlYJvbdtptOE'\n consumer_secret = 'afPk2JuMMMD6IhP5Xijo60ni4FUK39PDzhU7ylgT9FgNZX9ngh'\n access_token = '434708489-DTeHfK4OYKRuIXlfoWnNgzzwpEZTPCEpSMv8C0ll'\n access_token_secret = 'SjWFYfX2k3q4RJKQXcP1LP9ikhRfckPKOEcrb2cpQ0A0n'\n\n # Attempt authentication\n try:\n # create OAuthHandler object\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n except:\n print(\"Error: Authentication Failed\")\n exit(-1)", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n print(e.message)\n else:\n log(\"Tweeted: \" + text)\n print(\"Tweeted: \" + text)", "def run(self):\n new_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"congress\", \"session\", \"date\", \"vote\"],\n dtype=str,\n )\n for item in self.senate_data[\"vote_summary\"][\"votes\"][\"vote\"]:\n query = (\n \"congress == @self.congress \"\n \"and session == @self.session \"\n \"and date == @item['vote_date'] \"\n \"and vote == @item['vote_number']\"\n )\n\n # If the current vote isn't already processed, then process it\n if self.tweets.query(query).empty:\n try:\n text, party_data, vote_data = self.senate_obj.process_vote(\n item\n )\n status = self.twitter_api.update_status(text)\n # Keep track of new tweets to be reconciled with old\n # tweets later\n new_tweets = new_tweets.append(\n {\n \"tweet_id\": status.id_str,\n \"congress\": self.congress,\n \"session\": self.session,\n \"date\": item[\"vote_date\"],\n \"vote\": item[\"vote_number\"],\n **party_data,\n **vote_data,\n },\n ignore_index=True,\n )\n except Exception as e:\n # Tweet failed for some reason\n logging.error(\"Tweet failed\")\n logging.error(item)\n logging.error(e)\n\n # Only process a limited number of tweets in a single run\n if len(new_tweets) == self.MAX_TWEETS:\n break\n\n if not new_tweets.empty:\n logging.info(f\"Tweeted {len(new_tweets)} new votes\")\n self.__save(self.tweets.append(new_tweets))\n # Function needs to return something to work as a Cloud Function\n return new_tweets[\"tweet_id\"].to_json()\n else:\n return \"{}\" # Empty JSON object", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n else:\n log(\"Tweeted: \" + text)", "def like_tweets(pos_tweets):\n\n for tweet in pos_tweets:\n twitter.CreateFavorite(status_id=tweet.id)\n\n return", "def send_fixtures_tweets(tweet1, tweet2, tweet3):\n\n # Authorises Twitter API connection\n api = twitter_auth()\n\n # Checks if tweet has already been made today\n get_tweet = api.user_timeline(count=1,tweet_mode=\"extended\")\n 
last_tweet = get_tweet[0].full_text\n tweet = tweet1[:-1]\n if last_tweet == tweet:\n return print('Tweet already sent')\n \n # Sends tweets to timeline, depending on how many tweets created\n # Multiple tweets sent as a thread by responding to previous tweet\n if tweet3:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n second_tweet = api.update_status(tweet2, first_id)\n second_id = second_tweet.id\n api.update_status(tweet3, second_id)\n return print('Successfully sent tweet(s)')\n elif tweet2:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n api.update_status(tweet2, first_id)\n return print('Successfully sent tweet(s)')\n else:\n api.update_status(tweet1)\n return print('Successfully sent tweet(s)')", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def query_tweets_once(query, limit=None, num_tweets=0):\n logging.info(\"Querying {}\".format(query))\n query = query.replace(' ', '%20').replace(\"#\", \"%23\").replace(\":\", \"%3A\")\n pos = None\n tweets = []\n try:\n while True:\n new_tweets, pos = query_single_page(\n INIT_URL.format(q=query) if pos is None\n else RELOAD_URL.format(q=query, pos=pos),\n pos is None\n )\n if len(new_tweets) == 0:\n logging.info(\"Got {} tweets for {}.\".format(\n len(tweets), query))\n return tweets\n\n logging.info(\"Got {} tweets ({} new).\".format(\n len(tweets) + num_tweets, len(new_tweets)))\n\n tweets += new_tweets\n\n if limit is not None and len(tweets) + num_tweets >= limit:\n return tweets\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning tweets gathered \"\n \"so far...\")\n except BaseException:\n logging.exception(\"An unknown error occurred! 
Returning tweets \"\n \"gathered so far.\")\n\n return tweets", "def extract_tweets(secret: str, query: str, outfile: str, count: int = 0, wait: int = 300) -> None:\n logger = logging.getLogger(\"extracter\")\n logger.info(\"Authenticating with Tweepy\")\n\n logger.info(\"Reading secrets file %s\", secret)\n token_fp = open(secret, \"r\")\n auth = tweepy.OAuthHandler(token_fp.readline().strip(), token_fp.readline().strip())\n auth.set_access_token(token_fp.readline().strip(), token_fp.readline().strip())\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n token_fp.close()\n\n logger.info(\"Attempting to authenticate\")\n api.verify_credentials()\n\n logger.info(\"Authenticated! Examining outfile.\")\n if not os.path.exists(outfile):\n logger.info(\"%s doesn't exist - it will be created.\", outfile)\n file_p = open(outfile, \"w\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n tweet_writer.writerow(\n [\n \"full_text\",\n \"created_at\",\n \"source\",\n \"id\",\n \"retweet_count\",\n \"favorite_count\",\n \"user_name\",\n \"user_id_str\",\n \"user_handle\",\n \"user_location\",\n \"user_desc\",\n \"user_protected\",\n \"user_followers\",\n \"user_created\",\n \"user_verified\",\n \"user_tweet_count\",\n ]\n )\n else:\n logger.info(\"%s exists - will append.\", outfile)\n file_p = open(outfile, \"a\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n\n logger.info(\"Starting Tweet extraction for query '%s'\", query)\n\n if not count:\n logger.info(\"(executing forever)\")\n else:\n logger.info(\"(executing %s times)\", count)\n\n i = 1\n bookmark = \"1\"\n\n while True:\n # Our search query.\n #\n # q - search query. We use the -filter:retweets\n # specifier in order to prune any retweets.\n # Otherwise we'd have to prune Tweets that\n # are prefaced with 'RT'\n #\n # lang - English Tweets only\n #\n # count - 100 is the max as per the Twitter API\n #\n # tweet_mode - we use extended tweet mode in\n # order to access Tweets that are greater\n # than 140 char. 
in length this is to keep\n # legacy Twitter API applications intact\n #\n # result_type - we use recent so as to create\n # a chronological record of Tweets\n #\n # since_id - we keep track of the last Tweet\n # saved and use it as a bookmark in order\n # to only get the Tweets coming after it\n #\n for tweet in api.search(\n q=f\"{query} -filter:retweets\",\n lang=\"en\",\n count=100,\n tweet_mode=\"extended\",\n result_type=\"recent\",\n max_id=bookmark,\n ):\n # These are the features we write\n tweet_writer.writerow(\n [\n tweet.full_text,\n tweet.created_at,\n tweet.source,\n tweet.id_str,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.user.name,\n tweet.user.id_str,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.user.description,\n tweet.user.protected,\n tweet.user.followers_count,\n tweet.user.created_at,\n tweet.user.verified,\n tweet.user.statuses_count,\n ]\n )\n\n # Flush the stream every time just in case\n file_p.flush()\n\n # Set the most recent Tweet as a bookmark\n bookmark = tweet.id_str\n\n # Transparency/monitoring\n limits = api.rate_limit_status()\n rem = limits[\"resources\"][\"application\"][\"/application/rate_limit_status\"][\"remaining\"]\n logger.info(\"Tweets written to %s (%s hourly API accesses left)\", outfile, rem)\n\n # Do not loop if demo\n if i == count:\n break\n i += 1\n\n # Respect API\n time.sleep(wait)", "def index_new_tweet(self, id_str, tweet_tokens: list):\n self.tweet_count += 1\n unique_words = set(tweet_tokens)\n unique_word_pairs = set()\n for i in unique_words:\n for j in unique_words - {i}:\n # To us [a, b] = [b, a], and sorting gives us a distinct representation.\n unique_word_pairs.add(tuple(sorted([i, j])))\n for w in unique_words | unique_word_pairs:\n self.index[self.epoch][w] = id_str\n current_freq = self.frequency_map.get(w, 0)\n self.frequency_map[w] = current_freq + 1\n # Get word statistics from hash table\n statistics_present = w in self.stats_map\n if not statistics_present:\n (mu, sigma) = (math.inf, math.inf)\n for h in self.hash_functions:\n c = get_hash(h(), repr(w)) % 2 ** self.bit_count\n if self.buckets[c][\"ewma\"] < mu:\n mu = self.buckets[c][\"ewma\"]\n sigma = self.buckets[c][\"ewmvar\"]\n self.stats_map[w] = (mu, sigma)\n (mu, sigma) = self.stats_map[w]\n # Test for significance threshold\n x = self.frequency_map[w]\n if self._is_frequency_significant(mu, sigma, x):\n self.refinement.append((w, self._get_significance(mu, sigma, x)))\n # if self.refinement:\n # r = self.refinement\n # self.refinement = []\n # return r", "def original_three_tweets():\n test_tweets = [\n \"is #bigdata finally the answer to end poverty? \\\n @lavanyarathnam http://ow.ly/o8gt3 #analytics\",\n \"interview: xia wang, astrazeneca on #bigdata and the promise of effective \\\n healthcare #kdn http://ow.ly/ot2uj\",\n \"big data is not just for big business. 
on how #bigdata is being deployed for \\\n small businesses: http://bddy.me/1bzukb3 @cxotodayalerts #smb\"\n ]\n return test_tweets", "def analyse_tweets(nb_tweets, classifier, Resource, threshold, language='en'):\n return [(bytes(line, 'utf-8'), _minimal_analysis(bytes(line, 'utf-8'), classifier, Resource, threshold, language))\n for line in\n collect_tweet(nb_tweets)]", "def get_random_tweets(sqlite_db, twt_tbl, auth_tbl, auth_id, num_req, rnd_seed):\n conn = sqlite3.connect(sqlite_db)\n c = conn.cursor()\n # get the number of tweets available for a given author and select threshold + 1 for experiments\n # get number of tweets\n num_twts = get_num_tweets(sqlite_db, auth_tbl, auth_id)\n # print(num_twts)\n # random seed for reproducing experimental results\n random.seed(rnd_seed)\n # list of message id's to use in testing\n message_list = random.sample(range(1, num_twts), num_req)\n print(message_list)\n # build the sql statement\n param = '?'\n params = ','.join(param*len(message_list))\n sql = \"SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID='{a_id}' AND MESSAGE_NUM IN ({prms})\".\\\n format(tn=twt_tbl, a_id=auth_id, prms=params)\n print(sql)\n # c.execute('SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID=\"{a_id}\" AND MESSAGE_NUM IN \"{m_lst}\"'. \\\n # format(tn=twt_tbl, a_id=auth_id), m_lst=','.join(['?']*len(message_list)))\n c.execute(sql,message_list)\n conn.commit()\n twts = c.fetchall()\n # printing the tweets to validate selection\n # for tweet_tup in twts:\n # for tweet in tweet_tup:\n # print(tweet.rstrip())\n conn.close()\n return(twts)", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')" ]
[ "0.67840785", "0.64763457", "0.6372614", "0.63208246", "0.62932944", "0.6282456", "0.6270488", "0.62480295", "0.6231377", "0.62282085", "0.6203771", "0.6201833", "0.616992", "0.6169758", "0.61481005", "0.6143227", "0.61307067", "0.6097203", "0.6073542", "0.6066333", "0.60474765", "0.60374886", "0.6021078", "0.60194284", "0.60106206", "0.599761", "0.59878075", "0.5982232", "0.5969999", "0.59626144", "0.59618616", "0.5956147", "0.5919791", "0.5915945", "0.5899752", "0.58994037", "0.5896632", "0.58937883", "0.58880615", "0.58572584", "0.5848763", "0.58382124", "0.5838197", "0.5838197", "0.5834473", "0.58170253", "0.5810614", "0.58000255", "0.5799905", "0.57959515", "0.57873034", "0.57835895", "0.57457805", "0.5745715", "0.5742698", "0.57379025", "0.5730973", "0.57303154", "0.57134825", "0.57060474", "0.5701654", "0.5698016", "0.5691716", "0.56902933", "0.56887054", "0.56839496", "0.5654729", "0.56493336", "0.56487995", "0.5642043", "0.563876", "0.5636076", "0.563482", "0.56345695", "0.5633794", "0.5627325", "0.56252086", "0.56221", "0.56082326", "0.56019706", "0.55979997", "0.5590786", "0.5569293", "0.5568202", "0.556575", "0.5555229", "0.55498856", "0.5545484", "0.55429673", "0.55411166", "0.5531181", "0.5528354", "0.55261153", "0.5526085", "0.5525172", "0.5518891", "0.5509091", "0.55014473", "0.5497773", "0.54967123", "0.54858464" ]
0.0
-1
get and store tweets based on a given function
def randomprint(collection, modulename=None, num = 300): count = 0 print "total number of tweets in this database is ", collection.find().count() if modulename == None: option = NoOp else: module = imp.load_source('module.name', modulename) option = module.check # probability that one tweet will be printed out total = 28360 # number of tweets printed accu = 0 for tweet in collection.find(): count += 1 # if count % 5000 == 0: # print count if option(tweet): prob = random.randint(1, 2) if prob <= total - num: accu += 1 print " --------------------------- Tweet ", accu, " ---------------------------" print tweet[u'text'].encode("utf-8", "ignore") print tweet[u'created_at'] print tweet[u'user'][u'screen_name'].encode("utf-8", "ignore"), " | ", tweet[u'user'][u'name'].encode("utf-8", "ignore"), " | ", tweet[u'user'][u'description'].encode("utf-8", "ignore") print print "finish searching all tweets"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def collect_tweets(ticker):\n\n # Authenticate Tweepy credentials\n auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_SECRET_CONSUMER_KEY)\n auth.set_access_token(settings.TWITTER_TOKEN_KEY, settings.TWITTER_SECRET_TOKEN_KEY)\n api = tweepy.API(auth)\n\n stock = Stock.objects.get(ticker=ticker)\n\n # Search for recent Tweets with the specific ticker\n collected_tweets = api.search(q=ticker, result_type='recent', count=100)\n\n # Iterate over the collected Tweets and save them\n for tweet in collected_tweets:\n try:\n Tweet.objects.create(\n text=tweet.text,\n created_at=tweet.created_at,\n user_id=tweet.user.id,\n user_screen_name=tweet.user.screen_name,\n verified=tweet.user.verified,\n followers_count=tweet.user.followers_count,\n friends_count=tweet.user.friends_count,\n favourites_count=tweet.user.favourites_count,\n retweet_count=tweet.retweet_count,\n stock=stock,\n )\n except IntegrityError:\n pass", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for 
tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def recoverTweets(authors=[], words=[], removeRetweets=False, sortBy='newest',**kwargs):\n authors = mapToValid(authors)\n words = mapToValid(words)\n\n def getTopNTweets(retrievedTweets, numberOfTweets):\n \"\"\"Sort the retrievedTweets by sortBy specified and returns the top-N Tweets\"\"\"\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]\n\n def getTweetsByUser(username, maxTweets=1000):\n \"\"\"Returns a list of (json) objects representing the tweets for a specified Twitter username.\n If any words is queried, it will filter out every tweet that doesn't contain any of those words.\"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)\n\n def searchTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for a specified query.\n It doesn't work if any authors is specified.\n Then, startingDate and endingDate cannot be older than one week ago because of Twitter restrictions for standardAPI\n :reference: https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets\n \"\"\"\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n 
myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)\n\n\n def getTwitterscraperTweets():\n \"\"\" returns a list of (json) objects representing the tweets retrieved for the specified inputs.\n It's very useful to avoid restrictions such as number of requests or dates not older than 7 days ago for twitterAPI (and tweepy).\n It will call the recoverTweets.sh script to properly query the API by twitterscraper.\n :reference: https://github.com/taspinar/twitterscraper\n \"\"\"\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. 
Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets\n\n\n if \"maxTweets\" in kwargs:\n maxTweets=kwargs['maxTweets']\n else:\n maxTweets=1000\n\n if len(authors)==0 and len(words)==0:\n return(\"qua\") ###call sample function with maxTweets and (if any) dates\n if 'startingDate' in kwargs or 'endingDate' in kwargs:\n return getTwitterscraperTweets()\n\n if len(authors)!=0:\n tweets, splits, i = [], splitIntegerIntoIntegers(maxTweets,len(authors)), 0\n for author in authors:\n tweets.extend(getTweetsByUser(username=author, maxTweets=splits[i]))\n i+=1\n return tweets\n return getTweets()", "def get_tweets():\n if not Tweet.objects.all():\n # If the db is empty, don't get max_id.\n tweets = api.search(\n q='#python',\n count=100\n )\n else:\n # If the db is not empty, get max_id.\n subtask(clean_tweetdb)\n max_id = min([tweet.tweet_id for tweet in Tweet.objects.all()])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n\n # Store the tweet data in lists.\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n # Iterate over these lists and add data to db.\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n # Check that they are valid.\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')", "def on_tweet(self, tweet):\n pass", "def get_live_tweets_from_twitter_stream(auth, terms, num_tweets):\n listener = TwitterListener()\n listener._max_tweets = num_tweets\n twitter_stream = Stream(auth, listener)\n twitter_stream.filter(track=terms, languages=['en'])\n listener.store_live_tweets()", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def process_tweet(tweet):\n global start_date\n global end_date\n global geo_enabled_tweets\n global retweets\n\n # Check for filters before processing any further\n if args.filter and tweet.source:\n if not args.filter.lower() in tweet.source.lower():\n return\n\n tw_date = tweet.created_at\n\n # Updating most recent tweet\n end_date = end_date or tw_date\n start_date = tw_date\n\n # Handling retweets\n try:\n # We use id to get unique accounts (screen_name can be changed)\n rt_id_user = tweet.retweeted_status.user.id_str\n retweeted_users[rt_id_user] += 1\n\n if tweet.retweeted_status.user.screen_name not in id_screen_names:\n id_screen_names[rt_id_user] = \"@%s\" % tweet.retweeted_status.user.screen_name\n\n retweets += 1\n except:\n pass\n\n # Adding timezone from profile 
offset to set to local hours\n if tweet.user.utc_offset and not args.no_timezone:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=tweet.user.utc_offset))\n\n if args.utc_offset:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=args.utc_offset))\n\n # Updating our activity datasets (distribution maps)\n activity_hourly[\"%s:00\" % str(tw_date.hour).zfill(2)] += 1\n activity_weekly[str(tw_date.weekday())] += 1\n\n # Updating langs\n detected_langs[tweet.lang] += 1\n\n # Updating sources\n detected_sources[tweet.source] += 1\n\n # Detecting geolocation\n if tweet.place:\n geo_enabled_tweets += 1\n tweet.place.name = tweet.place.name\n detected_places[tweet.place.name] += 1\n\n # Updating hashtags list\n if tweet.entities['hashtags']:\n for ht in tweet.entities['hashtags']:\n ht['text'] = \"#%s\" % ht['text']\n detected_hashtags[ht['text']] += 1\n\n # Updating domains list\n if tweet.entities['urls']:\n for url in tweet.entities['urls']:\n domain = urlparse(url['expanded_url']).netloc\n if domain != \"twitter.com\": # removing twitter.com from domains (not very relevant)\n detected_domains[domain] += 1\n\n # Updating mentioned users list\n if tweet.entities['user_mentions']:\n for ht in tweet.entities['user_mentions']:\n mentioned_users[ht['id_str']] += 1\n if not ht['screen_name'] in id_screen_names:\n id_screen_names[ht['id_str']] = \"@%s\" % ht['screen_name']", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in 
authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def get_tweets(api, username, fh, limit):\n if args.json is False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def collect_tweets(search_id, search_term, number_of_tweets):\n\n tweets = []\n for tweet in api_collector.collect(search_term, number_of_tweets):\n tweets.append((tweet.id_str, tweet.created_at, tweet.full_text))\n if len(tweets) == 0:\n search = Search.objects.get(pk=search_id)\n search.empty = True\n search.save()\n notify_searchers.delay(search_id)\n else:\n classify_tweets.delay(search_id, tweets)", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! 
Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. \\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def streamTweets(words = [], authors = [], timeLimit=120, removeRetweets=False, **kwargs):\n if 'stream' not in globals():\n global stream\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n listener = StdOutListener(removeRetweets=removeRetweets)\n auth = api.auth\n stream = tweepy.Stream(auth, listener, tweet_mode='extended')\n else:\n stream.listener.setRemoveRetweets(removeRetweets)\n stream.listener.resetTweets()\n\n words = mapToValid(words)\n authors = mapToValid(authors)\n if not words and not authors:\n words=[\"the\", \"i\", \"to\", \"a\", \"and\", \"'s\", \"is\", \"in\", \"it\", \"you\", \"of\", \"for\", \"on\", \"my\", \"that\", \"e\", \"with\", \"me\", \"do\", \"have\", \"ciao\", \"o\", \"u\", \"cool\", \"good\", \"nice\", \"#\", \"*\", \":\", \";\", \",\", \".\", \"?\", \"-\", \"%\", \"$\", \"€\", \"!\", \"(\", \")\", \"=\", \"'\"]\n\n #myQuery = ' OR '.join(kwargs[\"words\"])\n if authors:\n kwargs[\"follow\"]=[user.id_str for user in list(map(api.get_user,authors))]\n else:\n kwargs[\"track\"]=words\n #if removeRetweets:\n # myQuery += \" -filter:retweets\"\n\n #myQuery += ' from:'\n #myQuery += ' OR from:'.join(kwargs[\"authors\"])\n #print(myQuery)\n import signal\n # Register the signal function handler\n signal.signal(signal.SIGALRM, __streamHandler__)\n # Define a timeout for your function\n signal.alarm(timeLimit)\n try:\n __stream__(stream,**kwargs)\n except Exception:\n print(\"Streaming over after time period of\", timeLimit, \"seconds... Retrieved\", len(stream.listener.getTweets()), \"tweets.\")\n stream.disconnect()\n if authors and words:\n print(\"Filtering out tweets that don't contain the specified words...\")\n myTweets=[]\n for tweet in stream.listener.getTweets():\n if 'full_text' in tweet:\n tweet['text'] = tweet['full_text']\n del (tweet['full_text'])\n if any(containsWord(tweet['text'],word) for word in words):\n myTweets.append(tweet)\n print(\"Done. 
Retrieved\", len(myTweets), \"tweets written by the authors specified and containing (any of) the words specified.\")\n return myTweets\n return stream.listener.getTweets()", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_tweets(self):\r\n return self.tweets", "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "async def add_tweet(self, tid=None): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n #print(\"Coordinates: {}\".format(data[\"coordinates\"]))\n if \"place\" in data:\n print(\"Place: {}\".format(data[\"place\"]))\n\n #print(\"User location: {}\".format(data[\"user\"][\"location\"]))\n #print(\"User lang: {}\".format(data[\"user\"][\"lang\"]))\n t=Tweet()\n t.tweet_id = tid\n t = self.fill_tweet(t, data)\n tweet_cache.append(t.to_dict())\n if \"retweeted_status\" in data:\n t.retweeted_status=data[\"retweeted_status\"]\n # \n # save the tweet\n #\n t.upsert()\n #\n # now handle the retweet\n #\n if \"retweeted_status\" in data:\n # this is a retweet so\n # do it once more for the original tweet\n tr=Tweet()\n tr.tweet_id = data[\"retweeted_status\"][\"id_str\"]\n tr = self.fill_tweet(tr, data[\"retweeted_status\"])\n tweet_cache.append(tr.to_dict())\n #tr.upsert()\n #r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #await self.fire_callbacks(r.json())\n #print(t.to_json(),file=ofile)\n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def pushTweets(tweets,user,cacheKey=False):\n \n tweetDump = filterTweets(tweets) # Extract mentions, URLs, replies hashtags etc...\n\n pushRenderedTweets2Neo.delay(user,tweetDump) \n pushRenderedTweets2Cass.delay(user,tweetDump)\n pushRenderedTweets2Solr.delay(tweetDump['tweets']+tweetDump['retweets'])\n\n if cacheKey: # These are the last Tweets, tell the scaper we're done.\n cache.set(cacheKey,'done')\n print '*** '+user+': DONE WITH TWEETS ***' \n \n #return True", "def tweet_processor(self, tweets):\n with Timer() as timer:\n detection_count = self.tweet_processor_fct(tweets) or 0\n # Increment the total number of detections.\n self.redis.hincrby(self.metadata_cache_key, 'detection',\n detection_count)\n\n log.debug(\"Processed {} tweets in {:2.3f} secs.\".format(\n 
len(tweets), timer.interval))", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def fetch_tweets(self, screen_name, count):\n return {}", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets 
downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def store_tweet(tweet, keyword):\n\tglobal _docs_to_store\n\tdoc = {'tweet': tweet, 'keyword': keyword, 'timestamp': int(time.time())}\n\t_docs_to_store.append(doc)\n\tif len(_docs_to_store) == UPDATE_CHUNK:\n\t\tcloudant.update(_docs_to_store)\n\t\t_docs_to_store = []", "def handler(event,context):\n send_tweet(random.choice(potential_tweets))", "def get_tweets(api):\n return api.user_timeline()", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def getTweets(user,maxTweets=3000,count=0,tweetId=0,cacheKey=False,credentials=False):\n api = ratedTwitter(credentials=credentials)\n limit = api.get_user_timeline_limited()\n if limit:\n print '*** TWITTER RATE-LIMITED: statuses.user_timeline:'+user+':'+str(count)+' ***'\n raise getTweets.retry(countdown = limit)\n else:\n args = {'screen_name':user,'exclude_replies':False,'include_rts':True,'trim_user':False,'count':200}\n if tweetId:\n args['max_id'] = tweetId\n \n okay, result = api.get_user_timeline(**args)\n \n if okay:\n print '*** TWITTER USER_TIMELINE: '+user+':'+str(tweetId)+' ***'\n if result:\n newCount = count + len(result)\n if maxTweets:\n if newCount > maxTweets: # No need for the task to call itself again.\n pushTweets.delay(result,user,cacheKey=cacheKey) # Give pushTweets the cache-key to end the job.\n return\n else:\n pushTweets.delay(result,user)\n\n newTweetId = min([t['id'] for t in result]) - 1 \n # Not done yet, the task calls itself with an updated count and tweetId.\n getTweets.delay(user,maxTweets=maxTweets,count=newCount,tweetId=newTweetId,cacheKey=cacheKey,credentials=credentials)\n else:\n pushTweets.delay([],user,cacheKey=cacheKey) # Nothing more found, so tell pushTweets the job is done.\n else:\n if result == '404':\n setUserDefunct(user)\n cache.set('scrape_tweets','done')\n if result == 'limited':\n raise getTweets.retry(countdown = api.get_user_timeline_limited())", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n 
tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def filter_tweets(tweets):\n # We keep only tweets by chrisalbon with pictures\n search_tweets = [tw for tw in tweets if tw['username'] == '@chrisalbon' and len(tw['images']) > 0]\n # He made multiple tweets on the same topic, we keep only the most recent tweets\n # We use the indexes of the reversed tweet list and dictionnaries to keep only key \n unique_search_index = sorted(list({t['text'].lower():i for i,t in list(enumerate(search_tweets))[::-1]}.values()))\n unique_search_tweets = [search_tweets[i] for i in unique_search_index]\n\n # Keep non-downloaded tweets\n most_recent_file = sorted([datetime.datetime.fromtimestamp(os.path.getmtime(path)) \n for path in glob.glob(\"./downloaded_pics/*.jpg\")], reverse=True)[0]\n recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > most_recent_file]\n\n # Uncomment for testing new tweets\n # recent_seach_tweets = [tw for tw in unique_search_tweets if tw['date'] > datetime.datetime(2017, 7, 6, 13, 41, 48)]\n return recent_seach_tweets", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def get_tweets():\n\n return Tweet.query.all()", "async def tweet_feeder(self): \n try:\n data=json.loads(self.request.body.decode('utf-8'))\n except: \n print(\"No data body!\")\n\n t=Tweet()\n t.tweet_id = data[\"tweet_id\"]\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n \n #\n # update the user cache\n 
#\n try:\n user_id = \"@\" + data[\"user_screenname\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n\n try:\n t.user_screenname=data[\"user_screenname\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = data[\"timestamp\"]\n except:\n t.timestamp = datetime.datetime.utcnow()\n tweet_cache.append(t.to_dict())\n \n #\n # get the embed html from twitter oembed API\n #\n r=requests.get(\"https://publish.twitter.com/oembed?url=https://twitter.com/Interior/status/\"+ t.tweet_id )\n #print(r.json())\n \n #print(self.__class__.callbacks)\n await self.fire_callbacks(r.json())\n #self.success(message=\"Added tweet id: {} \".format(str(id)), data=t.to_json(), format=\"json\", pure=True)", "def get_tweets(self, start_date, end_date):\r\n pass", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def get_tweet(self, id):\r\n return self.tweets[id]", "def process_tweets(tweets_response, keep_all=False, debug=False):\n tweets = tweets_response\n\n #print(json.dumps(tweets, indent=4, ensure_ascii=False))\n\n output_tweets = []\n for tweet in tweets:\n # loop through every tweet\n output_tweet = {}\n output_tweet['likes'] = 0\n for k, v in tweet.items():\n if k == \"favorite_count\" or k == \"retweeted_status\":\n # print('checking favorite_count at {}'.format(k))\n # print(v)\n if k == \"favorite_count\" and v:\n output_tweet['likes'] = v\n elif k == \"retweeted_status\" and v:\n # print(\"rt:\", v)\n try:\n output_tweet['likes'] = v['favorite_count']\n except:\n print('favorites not found')\n print(v)\n pass\n\n elif k == \"media\" and v:\n # turn media dict into img url\n output_tweet[k] = []\n for m in v:\n output_tweet[k].append(m['media_url_https'])\n\n elif k == \"id\" and v:\n # make url from id and dispose id\n output_tweet['url'] = \"https://twitter.com/anyuser/status/\" + str(v)\n\n elif k == \"retweet_count\":\n if v:\n if debug: print(' picking this: ', k, v)\n 
output_tweet[k] = v\n else:\n if debug: print(' skipping this: ', k, v)\n # not keeping those with 0 RT\n output_tweet[k] = 0\n\n elif k == \"created_at\":\n tweet_creation_time = str_2_datetime(v, input_format=time_format_twitter_created_at)\n tweet_checked_time = datetime.datetime.now(tz=pytz.utc)\n\n output_tweet['timestamp'] = {\n \"created\": datetime_2_str(tweet_creation_time, output_format=time_format_full_with_timezone),\n \"last_checked\": datetime_2_str(tweet_checked_time, output_format=time_format_full_with_timezone)\n }\n\n else:\n # keep k:v same\n if debug: print('keeping this: ', k, repr(v))\n output_tweet[k] = v\n\n print('num of likes: ', output_tweet['likes'])\n\n output_tweets.append(output_tweet)\n\n output = []\n if not keep_all:\n for o in output_tweets:\n if o['likes'] > 0 and o['retweet_count'] > 0:\n output.append(o)\n else:\n output = output_tweets\n\n return output", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": \"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def get_tweets(self, kafka_obj):\n\n try:\n\n # call twitter api to fetch tweets\n # for tweet in api.search('#machinelearning', count=5):\n\n for tweet in tweepy.Cursor(api.search, q='#machinelearning', since='2019-06-25', until='2019-07-07').items():\n\n # empty dictionary to store required params of a tweet\n parsed_tweet = dict()\n parsed_tweet['text'] = tweet.text\n parsed_tweet['date'] = str(tweet.created_at)\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n parsed_tweet['tweet_id'] = tweet.id_str\n parsed_tweet['location'] = tweet.user.location\n parsed_tweet['user'] = tweet.user.screen_name\n parsed_tweet['retweet_count'] = tweet.retweet_count\n\n if tweet.entities.get('hashtags'):\n parsed_tweet['hashtags'] = ', '.join([i['text'] for i in tweet.entities.get('hashtags')])\n else:\n parsed_tweet['hashtags'] = ''\n \n print('Search API', parsed_tweet)\n\n #Pushing all the tweets to the Kafka Topic\n\n kafka_producer = kafka_obj.producer_instance()\n kafka_obj.publish_urls(kafka_producer, 'twitter', 'tweet', json.dumps(parsed_tweet))\n\n except Exception as e:\n print(e)", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n 
print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def TweetsRealTime(dbname, user, password, table_name, APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET, loop_gathering = False, search_terms = [\"Happy\"]):\n try:\n \"\"\"Be careful with the following global variables. They are necessary to make this script run from the main function\n This is because Twython streamer does not allow other inputs.\n If you run this script stand-alone you can safely remove the globals and it will still work.\"\"\"\n global con \n con = psycopg2.connect(\"dbname = {} user = {} password = {}\".format(dbname,user,password))\n global cur\n cur = con.cursor()\n global tablename\n tablename = table_name\n print \"Connected\"\n except:\n print \"Database connection error\" \n \n try:\n stream = MyStreamer(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n print 'Connecting to twitter: will take a minute'\n except ValueError:\n con.close()\n cur.close()\n print 'Something went wrong while making connection with Twitter: '+str(ValueError)\n\n try:\n stream.statuses.filter(track = search_terms) \n except:\n # Shortcut to restarting the script - if the connection cancels then it gracefully terminates the db lock and establishes a new connection\n cur.close\n con.close \n print \"########### Stream terminated ###########\"\n if loop_gathering != False:\n TweetsRealTime(dbname = dbname,\n user = user,\n password = password,\n table_name = table_name,\n search_terms = search_terms,\n APP_KEY = APP_KEY,\n APP_SECRET = APP_SECRET,\n OAUTH_TOKEN = OAUTH_TOKEN,\n OAUTH_TOKEN_SECRET = OAUTH_TOKEN_SECRET,\n loop_gathering = loop_gathering)", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n 
logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def get_posts(username):\r\n\r\n # Authenticate to Twitter\r\n auth = tweepy.OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)\r\n auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)\r\n\r\n api = tweepy.API(auth)\r\n\r\n try:\r\n api.verify_credentials()\r\n print(\"Authentication OK\")\r\n except:\r\n print(\"Error during authentication\")\r\n\r\n alltweets=[]\r\n\r\n new_tweets = api.user_timeline(screen_name = username,count=200,tweet_mode='extended')\r\n status = new_tweets[0]\r\n json_str = json.dumps(status._json)\r\n\r\n #convert to string\r\n json_str = json.dumps(status._json)\r\n #deserialise string into python object\r\n parsed = json.loads(json_str)\r\n print(json.dumps(parsed, indent=4, sort_keys=True))\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # save the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n # keep grabbing tweets until there are no tweets left to grab\r\n while len(new_tweets) > 0:\r\n print(f\"getting tweets before {oldest}\")\r\n\r\n # all subsiquent requests use the max_id param to prevent duplicates\r\n new_tweets = api.user_timeline(screen_name=username, count=200, max_id=oldest,tweet_mode='extended')\r\n\r\n # save most recent tweets\r\n alltweets.extend(new_tweets)\r\n\r\n # update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n\r\n print(f\"...{len(alltweets)} tweets downloaded so far\")\r\n\r\n\r\n outtweets=[]\r\n\r\n\r\n for item in alltweets:\r\n\r\n mined = {\r\n 'tweet_id': item.id,\r\n 'name': item.user.name,\r\n 'screen_name': item.user.screen_name,\r\n 'retweet_count': item.retweet_count,\r\n 'lang' : item.lang,\r\n 'text': item.full_text,\r\n 'mined_at': datetime.datetime.now(),\r\n 'created_at': item.created_at,\r\n 'favourite_count': item.favorite_count,\r\n 'hashtags': item.entities['hashtags'],\r\n 'status_count': item.user.statuses_count,\r\n 'location': item.place,\r\n 'source_device': item.source\r\n }\r\n\r\n try:\r\n mined['retweet_text'] = item.retweeted_status.full_text # In case the tweet is a RT, there is a need to\r\n # retrieve the retweet_text field which contains the full comment (up to 280 char) accompanying the retweet\r\n except:\r\n mined['retweet_text'] = ''\r\n\r\n outtweets.extend([mined])\r\n\r\n return outtweets", "def fill_tweet(self, t, data):\n t.text=data[\"text\"]\n #\n # update the hashtags cache\n #\n try:\n t.hashtags=data[\"entities\"][\"hashtags\"] \n for htag in t.hashtags:\n #print(\"adding to hashtags: {} to cache:\".format(htag[\"text\"], ))\n if htag[\"text\"] in hash_cache:\n hash_cache[htag[\"text\"]] += 1\n else:\n hash_cache[htag[\"text\"]] = 1\n except:\n t.hashtags=[]\n #\n # update the country cache\n #\n try:\n # see: https://bitbucket.org/richardpenman/reverse_geocode/src/default/\n #country = reverse_geocode.search(data[\"coordinates\"][\"coordinates\"][0])[\"country\"]\n country = data[\"place\"][\"country_code\"]\n if country in country_cache:\n country_cache[country] += 1\n else:\n country_cache[country] = 1\n except:\n print(\" .... 
Could not identify county by coordinates\")\n \n #\n # update the user cache\n #\n try:\n user_id = \"@\" + data[\"user\"][\"screen_name\"]\n if user_id in user_cache:\n user_cache[user_id] += 1\n else:\n user_cache[user_id] = 1\n except:\n print(\" ERR No User: should never happen\")\n #\n # update the tweets per minute cache\n # \n\n #tweets_descending = OrderedDict(sorted(self.application.tweet_cache.items(), key=lambda kv: kv[1], reverse=True))\n #hash_descending = OrderedDict(sorted(hash_cache.items(), key=lambda kv: kv[1], reverse=True))\n #for counter, elem in enumerate(hash_descending):\n # if counter < 9:\n # print(\"hash top #{} : {} : {}\".format(counter, elem, str(hash_descending[elem])))\n # else:\n # break\n try:\n t.user_screenname=data[\"user\"][\"screen_name\"]\n except:\n t.user_screenname=\"\"\n try:\n t.profile_image_url_https = data[\"user\"][\"profile_image_url_https\"]\n except:\n t.profile_image_url_https = \"\"\n #\n # update the tweets cache\n #\n try:\n t.timestamp = dateutil.parser.parse(data[\"created_at\"])\n except:\n t.timestamp = datetime.datetime.utcnow()\n return t", "def handle_new_tweet(tweet_data):\n\n assert tweet_data.get('id'), \"Tweet Must have ID\"\n assert tweet_data.get('search_string'), \"Tweet must have search string\"\n\n # check for this tweet already being tracked\n set_key = keys.tweet_search_set(tweet_data.get('search_string'))\n tweet_id = tweet_data.get('id')\n found = rc.zrank(set_key, tweet_id)\n print 'set key: %s' % set_key\n print 'found: %s' % found\n\n if not found:\n\n # set main hash\n key = keys.tweet_data(tweet_data.get('id'))\n rc.hmset(key, tweet_data)\n\n # add to our weighted set\n # keep the value as the id and the weight\n print 'adding: %s' % tweet_id\n rc.zadd(set_key, tweet_id, tweet_id)\n\n # fire event that tweet was added to db\n revent.fire('new_tweet_saved', tweet_data)\n\n return True\n\n return False", "def grab_tweets():\n\n tweets = []\n long_tweets = []\n\n for each in lists:\n tweets = tweets + twitter.GetListTimeline(list_id=each.id,\n count=count,\n include_rts=True)\n for tweet in tweets:\n if len(tweet.text) >= min_tweet_len:\n long_tweets.append(tweet)\n shuffle(long_tweets)\n\n if len(long_tweets) >= num_tweets:\n return long_tweets[:num_tweets]\n else:\n return long_tweets", "def runner(args):\n apikey, users = args\n api = collect.mk_api(apikey)\n for user in users:\n db_connection = db.mk_connection()\n collect.collect_user_tweets(api, user, collect.mk_sql_insert_handler(db_connection))\n db.close_connection(db_connection)", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n 
tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. 
Change status type through configure_tweet method.\")\n\n return", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def tweet(self, message: str) -> None:\n\n # YOUR CODE HERE\n tweet = Tweet(self.userid, date.today(), message)\n self.tweets.append(tweet)", "def run_twittercrawler(type, *params):\n tweets = None\n if type == 'timeline': # Call specific crawl function based on type\n tweets = by_timeline(params[0])\n elif type == 'search':\n tweets = by_search(params[0])\n elif type == 'streaming':\n print('Streaming functionality not yet implemented')\n return None\n\n return [tweet._json for tweet in tweets]", "def getTweetsFromPheme(self):\n self.helper.buildDict4Tweets(self.folderpath)", "def __update_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'w')\n f_tweeted = open(f'{TWEETED}', 'w')\n try:\n f_tweets.write(json.dumps(self.tweets, sort_keys=True, indent=4))\n f_tweeted.write(json.dumps(self.tweeted, sort_keys=True, indent=4))\n finally:\n f_tweets.close()\n f_tweeted.close()", "async def make_tweet(tweet: str = Query(...),\n # attachment_url: Optional[str] = Query(None, alias=\"link of tweet to quote\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"),\n # in_reply_to: Optional[int] = Query(None, alias=\"link of tweet to reply to\", regex=\"https://twitter.com/([\\w_]+)/status/([\\d]+)\"), \n user: User = Depends(get_current_user),\n session: Session = Depends(get_db)\n )-> TweetSchema:\n if not user.active:\n raise HTTPException(401, detail=\"Your account seems to be inactive, please login with twitter to make tweets\")\n # if in_reply_to:\n # regex = re.match(\"https://twitter.com/(?P<username>[\\w]+)/status/(?P<id>[\\d]+)\", in_reply_to)\n # status_id = regex.group(\"id\")\n url = \"https://api.twitter.com/1.1/statuses/update.json\"\n params = dict(status=tweet,\n # attachment_url=attachment_url,\n # in_reply_to_status_id=status_id,\n )\n auth = user.get_oauth1_token()\n\n r = requests.post(url, params=params, auth=auth)\n if not r.ok:\n raise HTTPException(400, detail={\"message\":\"Something went wrong with Twitter, please try again or contact me @redDevv\",\n \"error from twitter\": r.text})\n tweet = r.json()\n\n new_tweet = Tweet(**tweet)\n user.tweets.append(new_tweet)\n user.requests_made += 1\n\n session.commit()\n return tweet", "def user_tweets(username, TTL=30):\n # profile_user = query_db('select * from user where username = ?',\n # [username], one=True)\n profile_user = userdetails_API_query(username)\n print \"profile \", profile_user\n if profile_user is None:\n abort(404)\n followed = False\n\n if g.user:\n followed = mongo.db.users.find_one(\n {'_id': g.user[0]}, {'follows': profile_user['_id']}) is not None\n # followed = query_db('''select 1 from follower where\n # follower.who_id = ? 
and follower.whom_id = ?''',\n # [g.user[0], profile_user['user_id']],\n # one=True) is not None\n # Create a hash key\n user_profile = \"\"\n hash = hashlib.sha224(user_profile).hexdigest()\n key = \"user_timeline_key\" + hash\n # print \"Created Key\\t : %s\" % key\n\n############### REDIS SESSION CODE #####################\n\n # Check if data is in cache.\n if (R_SERVER.get(key)):\n print \"** Messages returned from Redis Cache **\"\n return cPickle.loads(R_SERVER.get(key))\n\n else:\n print \"** Messages returned from MongoDB **\"\n messages = user_query(profile_user)\n data = []\n # print messages\n for row in messages:\n data.append({'user': row['username'], 'message': row['text'],\n 'pub_date': format_datetime(row['pub_date'])})\n # print data\n user_profile = jsonify(messages=data, Status_code=status.HTTP_200_OK)\n\n R_SERVER.set(key, cPickle.dumps(user_profile))\n R_SERVER.expire(key, TTL)\n return user_profile", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df", "def stream_tweets(api_token: str, api_secret: str, access_token: str, access_secret: str, saver,\n keywords: list = None, users: list = None, locations: list = None, stall_warning: bool = False):\n\n auth = OAuthHandler(api_token, api_secret)\n auth.set_access_token(access_token, access_secret)\n api = API(auth)\n listener = TwitterListener(manager=saver, api=api)\n stream = Stream(auth=auth, listener=listener)\n log.write_log('Streaming started', 'execution')\n stream.filter(track=keywords, follow=users, locations=locations, stall_warnings=stall_warning)", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def get_tweets(keyword, max_tweets=200):\n\n # API keys.\n consumer_key = \"kNOG1klRMMUYbsjMuY5TKl4lE\"\n consumer_secret = \"ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v\"\n access_key = \"3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz\"\n access_secret = \"9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn\"\n\n # Initialize tweepy API object and authorize using API key.\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n \"\"\" Get tweets.\"\"\"\n\n alltweets = []\n for status in tweepy.Cursor(\n api.search,\n q=keyword + \" -RT\", # the -RT flag excludes retweets.\n count=1000,\n result_type=\"recent\",\n include_entities=True,\n monitor_rate_limit=True,\n wait_on_rate_limit=True,\n lang=\"en\",\n ).items():\n\n # get text of the tweet, encoding as utf-8.\n text = str(status.text.encode(\"utf-8\"))\n\n # add to the data structure, alltweets, holding the tweets.\n alltweets.append(text)\n\n # if we've reached max_tweets, break.\n if len(alltweets) >= max_tweets:\n break\n\n return alltweets", "def fetch_tweets(event):\r\n return pd.read_csv(\"data/tweets/%s.csv\" % event, \r\n dtype={\r\n 'tweet_id': str,\r\n 'in_reply_tweet': str,\r\n 'thread': str,\r\n 'user_id': str,\r\n 'in_reply_user': str\r\n },\r\n engine=\"python\")", "def tweets_features(tweet):\n tweet = remove_stop_words(tweet)\n return {'TWEET': tweet}", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n 
timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def save(self):\n return getattr(self, \"_tweets\", None)", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def accepted(eachtweet):\n import general_functions\n from authenticator import oauth\n try:\n ##\n tweet = '{}: {} #{}'.format(eachtweet[0], eachtweet[1], eachtweet[3].upper())\n r = oauth.request('statuses/update', {'status': tweet})\n\n replace(\"clients.csv\",\"ReadyForAck.csv\")\n replace2(\"ReadyForAck.csv\",\"clients.csv\")\n except:\n print('ietsgaatfout')", "def populate_twitter_acct_tweets(retrieve_until_dt=datetime.now(tz=timezone.utc) - timedelta(days=60)):\n spinner = itertools.cycle(['|', '/', '-', '\\\\'])\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n twitter_accts = CredibleUSTwitterAccount.objects.all()\n\n while 1:\n for acct in twitter_accts:\n # acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct).first()\n acct_oldest_tweet = USTwitterNewsFeed.objects.filter(posted_by=acct, created_datetime__gte=date(2018, 2, 7)).first()\n\n max_id = None\n if acct_oldest_tweet is not None:\n max_id = acct_oldest_tweet.feedid - 1\n\n # do api call 15 for each account times due to twitter rate limit\n for _ in range(15):\n feed_created_dt = None\n try:\n statuses = api.GetUserTimeline(screen_name=acct.screen_name, include_rts=False, max_id=max_id)\n for s in statuses:\n write_and_restart_line(next(spinner))\n created_feed = USTwitterNewsFeed.objects.create(posted_by=acct,\n created_datetime=datetime.strptime(s.created_at, '%a %b %d %X %z %Y'),\n text=s.text,\n feedid=s.id)\n max_id = created_feed.feedid - 1\n feed_created_dt = created_feed.created_datetime\n except TwitterError as e:\n print(e.message)\n except IntegrityError as e:\n print('integrity error')\n break\n\n # only retrieve until last status created datetime earlier than retrieve until\n # if (feed_created_dt is None) or (feed_created_dt < retrieve_until_dt):\n # break", "def __init__(self, topics=None, tweet_file=None, mode='batch'):\n self.topics = topics\n # (The twitter API will only return a max of 100 count)\n self.GEN_MAX_TWEET = 100 # the max number of tweets to generate\n self.tweet_file = tweet_file\n self.mode = mode\n self.tweets = []\n\n if topics and tweet_file:\n print(\"WARNING! you input both topics and the tweet file, only one is expected\")\n exit(-1)\n\n if not topics and not tweet_file:\n print(\"WARNING! 
you input either topics or tweet file, one is expected\")\n exit(-1)\n\n # If file argument is given, it will not connect to twitter server\n # It will just save tweets in self.tweets\n if tweet_file:\n with open(tweet_file, 'r') as infile:\n for line in infile:\n self.tweets.append(json.loads(line))\n\n else:\n consumer_key = 'bbqKfXEU2VJNoWlYJvbdtptOE'\n consumer_secret = 'afPk2JuMMMD6IhP5Xijo60ni4FUK39PDzhU7ylgT9FgNZX9ngh'\n access_token = '434708489-DTeHfK4OYKRuIXlfoWnNgzzwpEZTPCEpSMv8C0ll'\n access_token_secret = 'SjWFYfX2k3q4RJKQXcP1LP9ikhRfckPKOEcrb2cpQ0A0n'\n\n # Attempt authentication\n try:\n # create OAuthHandler object\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n except:\n print(\"Error: Authentication Failed\")\n exit(-1)", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n print(e.message)\n else:\n log(\"Tweeted: \" + text)\n print(\"Tweeted: \" + text)", "def run(self):\n new_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"congress\", \"session\", \"date\", \"vote\"],\n dtype=str,\n )\n for item in self.senate_data[\"vote_summary\"][\"votes\"][\"vote\"]:\n query = (\n \"congress == @self.congress \"\n \"and session == @self.session \"\n \"and date == @item['vote_date'] \"\n \"and vote == @item['vote_number']\"\n )\n\n # If the current vote isn't already processed, then process it\n if self.tweets.query(query).empty:\n try:\n text, party_data, vote_data = self.senate_obj.process_vote(\n item\n )\n status = self.twitter_api.update_status(text)\n # Keep track of new tweets to be reconciled with old\n # tweets later\n new_tweets = new_tweets.append(\n {\n \"tweet_id\": status.id_str,\n \"congress\": self.congress,\n \"session\": self.session,\n \"date\": item[\"vote_date\"],\n \"vote\": item[\"vote_number\"],\n **party_data,\n **vote_data,\n },\n ignore_index=True,\n )\n except Exception as e:\n # Tweet failed for some reason\n logging.error(\"Tweet failed\")\n logging.error(item)\n logging.error(e)\n\n # Only process a limited number of tweets in a single run\n if len(new_tweets) == self.MAX_TWEETS:\n break\n\n if not new_tweets.empty:\n logging.info(f\"Tweeted {len(new_tweets)} new votes\")\n self.__save(self.tweets.append(new_tweets))\n # Function needs to return something to work as a Cloud Function\n return new_tweets[\"tweet_id\"].to_json()\n else:\n return \"{}\" # Empty JSON object", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n else:\n log(\"Tweeted: \" + text)", "def like_tweets(pos_tweets):\n\n for tweet in pos_tweets:\n twitter.CreateFavorite(status_id=tweet.id)\n\n return", "def send_fixtures_tweets(tweet1, tweet2, tweet3):\n\n # Authorises Twitter API connection\n api = twitter_auth()\n\n # Checks if tweet has already been made today\n get_tweet = api.user_timeline(count=1,tweet_mode=\"extended\")\n 
last_tweet = get_tweet[0].full_text\n tweet = tweet1[:-1]\n if last_tweet == tweet:\n return print('Tweet already sent')\n \n # Sends tweets to timeline, depending on how many tweets created\n # Multiple tweets sent as a thread by responding to previous tweet\n if tweet3:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n second_tweet = api.update_status(tweet2, first_id)\n second_id = second_tweet.id\n api.update_status(tweet3, second_id)\n return print('Successfully sent tweet(s)')\n elif tweet2:\n first_tweet = api.update_status(tweet1)\n first_id = first_tweet.id\n api.update_status(tweet2, first_id)\n return print('Successfully sent tweet(s)')\n else:\n api.update_status(tweet1)\n return print('Successfully sent tweet(s)')", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def __refresh_local_tweets(self):\n f_tweets = open(f'{TWEETS}', 'r')\n f_tweeted = open(f'{TWEETED}', 'r')\n\n try:\n self.tweets = json.load(f_tweets)\n self.tweeted = json.load(f_tweeted)\n finally:\n f_tweets.close()\n f_tweeted.close()", "def read_tweets(self)-> None:\n self.no_of_tweets = len(self.list_of_files)\n for i in range(0, self.no_of_tweets):\n # for i in range(0,10): # running a small loop for testing purpose\n try:\n with open(self.list_of_files[i]) as json_file:\n file = json.load(json_file)\n tweet = {'id': file['id']}\n try:\n tweet['created_time'] = file['retweeted_status']['created_at']\n tweet['text'] = file['retweeted_status']['full_text']\n except:\n tweet['created_time'] = file['created_at']\n tweet['text'] = file['full_text']\n self.tweets.append(tweet)\n except:\n print(\"Error for \",self.list_of_files[i])\n if i%1000 == 0:\n print(str(round(i/self.no_of_tweets,2)*100),\"% read\")\n print(\"All Tweets read into memory\")", "def post_to_twitter(tweet):\n auth = tweepy.OAuthHandler(\n os.environ['BLADAMADUR_CONSUMER_KEY'],\n os.environ['BLADAMADUR_CONSUMER_SECRET'])\n auth.set_access_token(\n os.environ['BLADAMADUR_ACCESS_TOKEN'],\n os.environ['BLADAMADUR_ACCESS_TOKEN_SECRET'])\n api = tweepy.API(auth)\n\n api.update_status(tweet)", "def query_tweets_once(query, limit=None, num_tweets=0):\n logging.info(\"Querying {}\".format(query))\n query = query.replace(' ', '%20').replace(\"#\", \"%23\").replace(\":\", \"%3A\")\n pos = None\n tweets = []\n try:\n while True:\n new_tweets, pos = query_single_page(\n INIT_URL.format(q=query) if pos is None\n else RELOAD_URL.format(q=query, pos=pos),\n pos is None\n )\n if len(new_tweets) == 0:\n logging.info(\"Got {} tweets for {}.\".format(\n len(tweets), query))\n return tweets\n\n logging.info(\"Got {} tweets ({} new).\".format(\n len(tweets) + num_tweets, len(new_tweets)))\n\n tweets += new_tweets\n\n if limit is not None and len(tweets) + num_tweets >= limit:\n return tweets\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. Returning tweets gathered \"\n \"so far...\")\n except BaseException:\n logging.exception(\"An unknown error occurred! 
Returning tweets \"\n \"gathered so far.\")\n\n return tweets", "def extract_tweets(secret: str, query: str, outfile: str, count: int = 0, wait: int = 300) -> None:\n logger = logging.getLogger(\"extracter\")\n logger.info(\"Authenticating with Tweepy\")\n\n logger.info(\"Reading secrets file %s\", secret)\n token_fp = open(secret, \"r\")\n auth = tweepy.OAuthHandler(token_fp.readline().strip(), token_fp.readline().strip())\n auth.set_access_token(token_fp.readline().strip(), token_fp.readline().strip())\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n token_fp.close()\n\n logger.info(\"Attempting to authenticate\")\n api.verify_credentials()\n\n logger.info(\"Authenticated! Examining outfile.\")\n if not os.path.exists(outfile):\n logger.info(\"%s doesn't exist - it will be created.\", outfile)\n file_p = open(outfile, \"w\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n tweet_writer.writerow(\n [\n \"full_text\",\n \"created_at\",\n \"source\",\n \"id\",\n \"retweet_count\",\n \"favorite_count\",\n \"user_name\",\n \"user_id_str\",\n \"user_handle\",\n \"user_location\",\n \"user_desc\",\n \"user_protected\",\n \"user_followers\",\n \"user_created\",\n \"user_verified\",\n \"user_tweet_count\",\n ]\n )\n else:\n logger.info(\"%s exists - will append.\", outfile)\n file_p = open(outfile, \"a\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n\n logger.info(\"Starting Tweet extraction for query '%s'\", query)\n\n if not count:\n logger.info(\"(executing forever)\")\n else:\n logger.info(\"(executing %s times)\", count)\n\n i = 1\n bookmark = \"1\"\n\n while True:\n # Our search query.\n #\n # q - search query. We use the -filter:retweets\n # specifier in order to prune any retweets.\n # Otherwise we'd have to prune Tweets that\n # are prefaced with 'RT'\n #\n # lang - English Tweets only\n #\n # count - 100 is the max as per the Twitter API\n #\n # tweet_mode - we use extended tweet mode in\n # order to access Tweets that are greater\n # than 140 char. 
in length this is to keep\n # legacy Twitter API applications intact\n #\n # result_type - we use recent so as to create\n # a chronological record of Tweets\n #\n # since_id - we keep track of the last Tweet\n # saved and use it as a bookmark in order\n # to only get the Tweets coming after it\n #\n for tweet in api.search(\n q=f\"{query} -filter:retweets\",\n lang=\"en\",\n count=100,\n tweet_mode=\"extended\",\n result_type=\"recent\",\n max_id=bookmark,\n ):\n # These are the features we write\n tweet_writer.writerow(\n [\n tweet.full_text,\n tweet.created_at,\n tweet.source,\n tweet.id_str,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.user.name,\n tweet.user.id_str,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.user.description,\n tweet.user.protected,\n tweet.user.followers_count,\n tweet.user.created_at,\n tweet.user.verified,\n tweet.user.statuses_count,\n ]\n )\n\n # Flush the stream every time just in case\n file_p.flush()\n\n # Set the most recent Tweet as a bookmark\n bookmark = tweet.id_str\n\n # Transparency/monitoring\n limits = api.rate_limit_status()\n rem = limits[\"resources\"][\"application\"][\"/application/rate_limit_status\"][\"remaining\"]\n logger.info(\"Tweets written to %s (%s hourly API accesses left)\", outfile, rem)\n\n # Do not loop if demo\n if i == count:\n break\n i += 1\n\n # Respect API\n time.sleep(wait)", "def index_new_tweet(self, id_str, tweet_tokens: list):\n self.tweet_count += 1\n unique_words = set(tweet_tokens)\n unique_word_pairs = set()\n for i in unique_words:\n for j in unique_words - {i}:\n # To us [a, b] = [b, a], and sorting gives us a distinct representation.\n unique_word_pairs.add(tuple(sorted([i, j])))\n for w in unique_words | unique_word_pairs:\n self.index[self.epoch][w] = id_str\n current_freq = self.frequency_map.get(w, 0)\n self.frequency_map[w] = current_freq + 1\n # Get word statistics from hash table\n statistics_present = w in self.stats_map\n if not statistics_present:\n (mu, sigma) = (math.inf, math.inf)\n for h in self.hash_functions:\n c = get_hash(h(), repr(w)) % 2 ** self.bit_count\n if self.buckets[c][\"ewma\"] < mu:\n mu = self.buckets[c][\"ewma\"]\n sigma = self.buckets[c][\"ewmvar\"]\n self.stats_map[w] = (mu, sigma)\n (mu, sigma) = self.stats_map[w]\n # Test for significance threshold\n x = self.frequency_map[w]\n if self._is_frequency_significant(mu, sigma, x):\n self.refinement.append((w, self._get_significance(mu, sigma, x)))\n # if self.refinement:\n # r = self.refinement\n # self.refinement = []\n # return r", "def original_three_tweets():\n test_tweets = [\n \"is #bigdata finally the answer to end poverty? \\\n @lavanyarathnam http://ow.ly/o8gt3 #analytics\",\n \"interview: xia wang, astrazeneca on #bigdata and the promise of effective \\\n healthcare #kdn http://ow.ly/ot2uj\",\n \"big data is not just for big business. 
on how #bigdata is being deployed for \\\n small businesses: http://bddy.me/1bzukb3 @cxotodayalerts #smb\"\n ]\n return test_tweets", "def analyse_tweets(nb_tweets, classifier, Resource, threshold, language='en'):\n return [(bytes(line, 'utf-8'), _minimal_analysis(bytes(line, 'utf-8'), classifier, Resource, threshold, language))\n for line in\n collect_tweet(nb_tweets)]", "def get_random_tweets(sqlite_db, twt_tbl, auth_tbl, auth_id, num_req, rnd_seed):\n conn = sqlite3.connect(sqlite_db)\n c = conn.cursor()\n # get the number of tweets available for a given author and select threshold + 1 for experiments\n # get number of tweets\n num_twts = get_num_tweets(sqlite_db, auth_tbl, auth_id)\n # print(num_twts)\n # random seed for reproducing experimental results\n random.seed(rnd_seed)\n # list of message id's to use in testing\n message_list = random.sample(range(1, num_twts), num_req)\n print(message_list)\n # build the sql statement\n param = '?'\n params = ','.join(param*len(message_list))\n sql = \"SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID='{a_id}' AND MESSAGE_NUM IN ({prms})\".\\\n format(tn=twt_tbl, a_id=auth_id, prms=params)\n print(sql)\n # c.execute('SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID=\"{a_id}\" AND MESSAGE_NUM IN \"{m_lst}\"'. \\\n # format(tn=twt_tbl, a_id=auth_id), m_lst=','.join(['?']*len(message_list)))\n c.execute(sql,message_list)\n conn.commit()\n twts = c.fetchall()\n # printing the tweets to validate selection\n # for tweet_tup in twts:\n # for tweet in tweet_tup:\n # print(tweet.rstrip())\n conn.close()\n return(twts)", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')" ]
[ "0.6786159", "0.6478341", "0.63744974", "0.6324953", "0.6295513", "0.6283256", "0.6272268", "0.6249374", "0.6233402", "0.62311983", "0.62054044", "0.62021184", "0.61722195", "0.6171884", "0.61485344", "0.6144559", "0.6132322", "0.6097602", "0.6076344", "0.6068674", "0.60495096", "0.60401356", "0.60234183", "0.60230106", "0.6011575", "0.59985614", "0.5987293", "0.5982847", "0.59719974", "0.59659165", "0.5964758", "0.595911", "0.5918486", "0.5916425", "0.59032756", "0.5900309", "0.5897789", "0.58953565", "0.5890274", "0.58597225", "0.58497494", "0.58398193", "0.58386075", "0.58386075", "0.583747", "0.5820427", "0.58129054", "0.58042717", "0.5801191", "0.5798338", "0.5788625", "0.57851076", "0.5748587", "0.57479286", "0.57456297", "0.57398534", "0.57334906", "0.57326525", "0.57120776", "0.5709347", "0.57038283", "0.5699209", "0.56930256", "0.5691635", "0.5690148", "0.5683958", "0.56575245", "0.565142", "0.5648889", "0.5644229", "0.56413007", "0.56382173", "0.56376576", "0.5636985", "0.56359464", "0.5630247", "0.5626902", "0.562573", "0.5607297", "0.5602899", "0.5600771", "0.5591176", "0.55722284", "0.5569886", "0.5567001", "0.5556636", "0.554933", "0.55467206", "0.554361", "0.55425733", "0.553402", "0.553129", "0.5528805", "0.5526336", "0.5526017", "0.55196685", "0.5509095", "0.55032396", "0.54996616", "0.5498354", "0.5486796" ]
0.0
-1
Create a new database from attributes
def create_database(self, instance, **attrs): instance = self._get_resource(_instance.Instance, instance) return self._create( _database.Database, instance_id=instance.id, **attrs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db(self):", "def create():\n\tcreate_db()", "def generate_database_object(**kwargs):\n return app.database.Database(\"test.db\", **kwargs)", "def make_db():\n\n db.create_all()", "def create_database():\n create_db(app)", "def create():\n\n from slicr.extensions import db\n\n click.echo('creating database...')\n\n db.create_all()", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "def init_new_db(args):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n session = Session()\n session.add(Environment(name='normal', slickurl='http://slicker.homestead-corp.com/slickij', buildurl='?', filename='hs-tcrunij.tar.gz', tcrunijsubdir='hs-tcrunij/tcrunij'))\n session.add(Environment(name='dev', slickurl='http://octomom.homestead-corp.com/slickij', buildurl='?', filename='tcrunij.tar.gz', tcrunijsubdir='tcrunij/tcrunij'))\n session.commit()", "def create(self):\n db.create_all()", "def create_db(self):\n return None", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()", "def create_db():\n _init_db()\n db.create_all()", "def create_db():\n db.create_all()\n print(\"DB Created\")", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def create_database():\n Base.metadata.create_all(bind=engine)", "def create_db():\n db.create_all()\n click.echo(\"DB criado com sucesso!\")", "def createdb():\n db.create_all()", "def create_db():\n db.create_all()\n click.echo(\"Banco de dados criado\")", "def _create_db(self):\n self.db = easydms.dbcore.Database(\":memory:\")\n self.db.create_db()", "def create_DB (name):\n engine = create_engine('sqlite:///%s' % name)\n Base.metadata.create_all(engine)", "def create_db():\n init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])", "def create():\n upgrade()\n populate()", "def test_database_object_can_be_created(self):\n database = generate_database_object()", "def database_setup():\n Base.metadata.create_all(bind=engine)\n db = LocalSession()\n try:\n populate_from_json(db, Vector, str(VECTORS))\n populate_from_json(db, Gender, str(GENDERS))\n populate_from_json(db, Tag, str(TAGS))\n finally:\n db.close()", "def create(name):\n\t\treturn \"CREATE DATABASE {0};\".format(name)", "def create_database(self, instance, name, character_set=None,\n collate=None):\n return instance.create_database(name, character_set=character_set,\n collate=collate)", "def create_db():\n db.create_all()\n print ('Intialized....!')", "def new_db(path: str):\r\n conn = sqlite3.connect(path)\r\n cursor = conn.cursor()\r\n cursor.executescript(\"\"\"\r\n CREATE TABLE \"Ads\" (\r\n\t\"id\"\tINTEGER PRIMARY KEY AUTOINCREMENT 
UNIQUE,\r\n\t\"Price\"\tREAL,\r\n\t\"Size\"\tREAL,\r\n\t\"DistrictId\"\tINTEGER,\r\n\t\"SeriesId\"\tINTEGER,\r\n\t\"StreetId\"\tINTEGER,\r\n\t\"StrNum\"\tINTEGER,\r\n\t\"Link\"\tTEXT,\r\n\t\"ImportDate\"\tTEXT,\r\n\t\"TypeOfDealId\"\tINTEGER,\r\n\t\"AmenitiesId\"\tINTEGER,\r\n\t\"UploadDate\"\tTEXT,\r\n\t\"Floor\"\tINTEGER,\r\n\t\"BuildingId\"\tINTEGER\r\n);\r\nCREATE TABLE \"Amenities\" (\r\n\t\"AmenitiesId\"\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT NOT NULL UNIQUE\r\n);\r\nCREATE TABLE \"Buildings\" (\r\n\t\"BuildingId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Districts\" (\r\n\t\"DistrictId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Series\" (\r\n\t\"SeriesId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Streets\" (\r\n\t\"StreetId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"TypeOfDeal\" (\r\n\t\"TypeOfDealId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n)\r\n \"\"\")\r\n conn.commit()\r\n conn.close()", "def db_createall():\n db.create_all()", "def create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def db_create(option):\r\n from flask_migrate import upgrade, migrate\r\n #from app.models import Role, User, Forecast\r\n \r\n print(\"option:\", option)\r\n\r\n engine=db.get_engine(app)\r\n \r\n \r\n if option == \"help\":\r\n print(\"db_create [User|Role|Froecast]\")\r\n return True\r\n if option == \"User\":\r\n print(\"db_create User()\")\r\n User.__table__.create(engine)\r\n print(\"User.__table__.create()\")\r\n \r\n if option == \"Role\":\r\n print(\"db_create Role()\")\r\n Role.__table__.create(engine)\r\n print(\"Role.__table__.create()\")\r\n\r\n if option == \"Forecast\":\r\n print(\"db_create Forecast()\")\r\n Forecast.__table__.create(engine)\r\n print(\"Forecast.__table__.create()\")\t\t\r\n \t\r\n if option == \"all\":\r\n print(\"db_create all()\")\r\n db.create_all()\r\n print(\"db.create_all()\")\t\r\n \r\n upgrade()\r\n return True", "def __init__(self, *args, **kwargs):\n self.database = args[0] if len(args) else kwargs.get('database', 'jping.db')\n is_new = not os.path.exists(self.database)\n self._connection = sqlite3.connect(self.database)\n self._connection.row_factory = sqlite3.Row\n if is_new:\n self.create_schema()", "def create(self) -> SQLAlchemy:\n self.db.create_all()\n self.db.session.commit()\n return self.db", "def db():\n db = peewee.SqliteDatabase(\":memory:\")\n models.Dog.bind(db)\n models.Dog.create_table()\n return db", "def createDb():\n db.drop_all()\n db.create_all()", "def create_db_structure(self):\n logger.info(\"Creating CRH database structure.\")\n CrhDbModel.metadata.create_all(bind=self.engine)", "def create(self):\n if self.filename:\n self.db = dbm.open(self.filename, \"n\") #raises anydbm.error\n self.db[\"--Reserved--type\"] = self.type\n self.db.sync()\n else:\n self.db = {}", "def db_create(pth, verbose=False):\n for pragma in PRAGMAS:\n db_execute_general(pragma, pth, verbose=verbose)\n\n # Get json files\n try:\n import importlib.resources as pkg_resources\n except ImportError:\n # Try backported to PY<37 `importlib_resources`.\n import importlib_resources as pkg_resources\n\n # Get and upload adsorbate property types\n ads_props_json = pkg_resources.read_text(\n 'pygaps.data', 'adsorbate_props.json'\n )\n ads_props = 
json.loads(ads_props_json)\n for ap_type in ads_props:\n pgsqlite.adsorbate_property_type_to_db(\n ap_type, db_path=pth, verbose=verbose\n )\n\n # Get and upload adsorbates\n ads_json = pkg_resources.read_text('pygaps.data', 'adsorbates.json')\n adsorbates = json.loads(ads_json)\n for ads in adsorbates:\n pgsqlite.adsorbate_to_db(\n pygaps.Adsorbate(**ads), db_path=pth, verbose=verbose\n )\n\n # Upload standard isotherm types\n pgsqlite.isotherm_type_to_db({'type': 'isotherm'}, db_path=pth)\n pgsqlite.isotherm_type_to_db({'type': 'pointisotherm'}, db_path=pth)\n pgsqlite.isotherm_type_to_db({'type': 'modelisotherm'}, db_path=pth)", "def CreateDB(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateClassTable'])\r\n\t\t\tfor ii,classname in enumerate(self.SQLCMDs['ClassesList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertClass'],(ii,classname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSetTable'])\r\n\t\t\tfor ii,setname in enumerate(self.SQLCMDs['SetList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertSet'],(ii,setname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSampleTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictListTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictBuildTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateWordLists'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateFeatureTable'])\r\n\t\t\tself.DB_Connect.commit()\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to create the database: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn", "def create_db(self):\n\t\tcur, conn = self.open_connection()\n\n\t\t# this creates the meta table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS main;\n\t\tCREATE TABLE meta (\n\t\t\tid INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tUT_date TEXT,\n\t\t\ttime_of_obs TEXT,\n\t\t\tobject_name TEXT,\n\t\t\tintegration_time FLOAT,\n\t\t\tgrating INTEGER,\n\t\t\tcentral_wavelength FLOAT,\n\t\t\tslit_width INTEGER,\n\t\t\tphase_angle FLOAT,\n\t\t\tcomments TEXT\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\t# this creates the spectra table\n\t\tcommand = \"\"\" DROP TABLE IF EXISTS spectrum;\n\t\tCREATE TABLE spectra (\n\t\t\tspec_id INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\t\tid INTEGER,\n\t\t\twave array,\n\t\t\tspectrum array,\n\t\t\tFOREIGN KEY(id) REFERENCES meta(id)\n\t\t\t);\"\"\"\n\n\t\tcur.executescript(command)\n\n\t\tconn.commit()\n\t\tconn.close()", "def newDb(options, dbName, adminPswd, userPswd, viewerPswd):\n if not re.match(\"^[A-Za-z][A-Za-z0-9_]*$\", dbName):\n errorPrint(\"'%s' is not a valid database name\" % dbName)\n return\n\n adminName = dbName + \"_admin\"\n userName = dbName + \"_user\"\n viewerName = dbName + \"_viewer\"\n\n setupDictionaryDatabases(options, {\n 'databases': {\n dbName: {\n 'ownerRole': adminName,\n 'roles': {\n adminName: {\n 'password': adminPswd,\n 'role': 'admin'\n },\n userName: {\n 'password': userPswd,\n 'role': 'writer'\n },\n viewerName: {\n 'password': viewerPswd,\n 'role': 'reader'\n }\n }\n }\n }\n })", "def create_db(num_users=5):\n db.create_all()", "def db_create():\n db.drop_all()\n db.create_all()\n db.session.commit()", "def create_database(database):\n # open an existing postgres database\n with Database(database=\"postgres\") as connection:\n # set isolation level (dunno why tbqh)\n connection.db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n connection.query(\"CREATE DATABASE \" + database)", "def init_database(db: 
sa.engine.Connectable):\n\n # setup the Postgres extensions and schema\n db.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\" WITH SCHEMA public;\n \"\"\")\n db.execute(\n ';\\n'.join(\n 'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()\n )\n )\n\n # create the schema from the models\n METADATA.create_all(bind=db)", "def create():", "def create():", "def dbinit( *args, **kwargs ):", "def database(name, **kwargs):\n if not database_exists(name, **kwargs):\n create_database(name, **kwargs)", "def create_databases():\n db_connection = connect_to_db()\n\n # Create database tables.\n create_tables(db_connection)\n\n # Populate water tables.\n populate_water_tables(db_connection)\n\n # station_data = get_station_data()\n # station = station_data.query('ŠIFRA == 30301')\n # print(station)\n # index = station.index[0]\n # lat = station.at[index, 'LAT']\n # lng = station.at[index, 'LON']\n # name = f\"{station.at[index, 'VODOMERNA POSTAJA']} ({station.at[index, 'VODOTOK']})\"\n # print(index, lat, lng, name)\n\n # Populate location tables\n # populate_locations(db_connection)\n\n # Populate weather tables\n populate_weather(db_connection)\n\n db_connection.commit()\n db_connection.close()", "def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)", "def create_all():\n db.create_all()", "def create_db():\n db_url = engine.url\n if not database_exists(db_url):\n create_database(db_url)\n base.metadata.create_all()", "def create_db():\n\n require('environment', provided_by=env.environments)\n sudo('createdb -O %(database_user)s -T %(template_db)s %(database_name)s' % env, user='postgres')", "def create_db(args, engine=None):\n if engine is None:\n if args.RDS:\n engine_string = get_engine_string()\n else:\n engine_string = args.local_URI\n logger.info(\"RDS:%s\" % args.RDS)\n engine = sql.create_engine(engine_string)\n\n Base.metadata.create_all(engine)\n logging.info(\"database created\")\n\n return engine", "def init_db():\n import cerbereapp.models\n Base.metadata.create_all(bind=engine)", "def create_db():\n app = create_app(dotenv.get('FLASK_CONFIG'))\n with app.app_context():\n db.create_all()", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def create_pre_db():\n db.drop_all()\n db.create_all()\n admin = Admin(\n email=app.config['BLOG_ADMIN_EMAIL'],\n password=\"123456\",\n )\n work_exp = [WorkExperience(\n work_title=u\"Flask Blog-%d\" % i,\n work_type=u\"Personal Project\",\n pos_in_work=u\"Total\",\n work_desc=u\"Use Flask implement a blog application\",\n start_time=datetime.date(2016, 2, 5),\n owner=admin\n ) for i in range(3)]\n edu_exp = [EducationExperience(\n institution=u\"TongJi University-%d\" % i,\n learn_what=u\"Information Security\",\n gpa=3.89,\n start_time=datetime.date(2016, 2, 5),\n owner=admin\n ) for i in range(3)]\n skills = 
[Skill(\n skill_name=u\"Python-%d\" % i,\n master_degree=4,\n owner=admin\n ) for i in range(3)]\n tags = [Tag(name=u\"tag-%d\" % i) for i in range(10)]\n db.session.add_all([admin]+work_exp+edu_exp+skills+tags)\n db.session.commit()\n Post.generate_fake_posts(12)\n Comment.generate_fake_comments(5)", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def create_db():\n database.db.create_all()\n get_ulm()\n for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):\n fixtures = JSONLoader().load(fixture_file)\n load_fixtures(database.db, fixtures)\n MigrationManager().stamp_db()", "def config_create_db(name):\n config = settings.load_config()\n create_database(name=name, config=config)", "def create_prod_db():\n _create_database(is_production=True)", "def create_db(self, dbname, **params):\n return self.get_db(dbname, create=True, **params)", "def init_database(self):\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()", "def create_database_and_add_model(models,database):\n conn =sqlite3.connect(database)\n for model in models:\n mod_tab_name = model.replace('-','_')\n sql_create_projects_table = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n case_var text NOT NULL PRIMARY KEY,\n model_case text NOT NULL,\n var text NOT NULL,\n case_name text NOT NULL,\n original_var_name text,\n units text ,\n units_original text,\n lev_is_dim integer NOT NULL,\n is_computed_var integer,\n path_computed_data text,\n pressure_coordinate_path text\n ); \"\"\"%(mod_tab_name)\n\n # create a database connection\n if conn is not None:\n # create projects table\n create_table(conn, sql_create_projects_table)\n else:\n print(\"Error! 
cannot create the database connection.\")\n conn.close()\n return", "def _create_database_sql_new(database_name, owner='dcc_owner'):\n tmpl = \"create database {0} with owner = {1} template = template0 \" \\\n \"encoding = 'UTF8' lc_collate = 'en_US.UTF-8' lc_ctype = 'en_US.UTF-8'\"\n return tmpl.format(database_name, owner),", "def create(self, fields_list):\n if fields_list == \"\" or fields_list == \"()\":\n fields_list = \"(id INTEGER PRIMARY KEY AUTOINCREMENT)\"\n #fields_list = \"\"\n # if fields and len(fields)>0:\n #fields_list = \"(\"\n # for key in fields:\n # if fields_list != \"(\":\n #fields_list +=\", \"\n #fields_list += str(key)\n #fields_list += \")\"\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS \\'%s\\'%s\" % (self.name, fields_list))\n self.restore_structure(True)", "def create_database(cursor: Cursor, owner: Owner, name: str=None) -> Result[str]:\n role = pgsql.get_role(cursor, owner_name(owner))\n name = name or role[0]\n result = pgsql.create_database(cursor, name, role)\n result.value = name\n return result", "def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)", "def create_db_from_scratch():\n if os.path.isfile('data.db'):\n os.remove('data.db')\n Base.metadata.create_all(engine)", "def create(self):\n dbcase = models.create(models.TestCase, **self._data)\n _dbsession.add(dbcase)\n for key, value in self._many2many.items():\n setattr(dbcase, key, value)\n _dbsession.commit()\n return dbcase", "def init_db() -> None: \n \n Base.metadata.create_all(bind=engine)", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def create_new_db():\n global data_base, table\n data_base = asksaveasfilename(title=\"Select file\", filetypes=((\"DATA BASE\", \"*.db\"), (\"all files\", \"*.*\")),\n defaultextension='.db')\n\n if Path(data_base).suffix == '.db':\n create_win_create_table()\n else:\n mistake_db_file()", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def main(db_path, schema_json):\n create_db(db_path, schema_json)", "def createdb():\n print \"here\"\n db.create_all()", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def create_sqlitedb(name, type, path):\n arcpy.gp.CreateSQLiteDatabase(os.path.join(path, name), type)", "def create_student_db(connection):\r\n with connection:\r\n connection.execute(CREATE_TABLE_STUDENTS_DATA)", "def _initialize_database(self, version, maintainer_name = \"\", maintainer_email = \"\"):\n odb_version = self.graph.client.version\n\n create = {\n \"NeuroArch_version\": na_version.__version__,\n \"min_NeuroArch_version_supported\": \"0.4.1\",\n \"OrientDB_version\": \"{}.{}.{}\".format(\n odb_version.major,\n odb_version.minor,\n odb_version.build),\n \"created_date\": datetime.now().isoformat()\n }\n maintainer = {\n \"name\": maintainer_name,\n \"email\": maintainer_email\n }\n self.graph.MetaDatas.create(version = version,\n created_by = create,\n maintainer = maintainer)", "def create_database(self, name):\n osmO5M = os.path.join(self.OSMDIR, \"%s-latest.o5m\" % name)\n osmPBF = os.path.join(self.OSMDIR, 
\"%s-latest.pbf\" % name)\n if self.args.ask_before_creating_a_new_db:\n answer = raw_input(\"\\n- Creo database %s tramite osmosis?[Y/n]\" % name)\n if answer in (\"n\", \"N\"):\n return False\n\n print \"\\n- converti file filtrato\"\n print \" %s\\n -->\\n %s\" % (osmO5M, osmPBF)\n call(\"osmconvert %s -o=%s\" % (osmO5M, osmPBF), shell=True)\n\n print \"\\n0- cancella eventuale database preesistente\"\n call(\"echo 'DROP DATABASE %s;'| psql\" % name, shell=True)\n\n print \"\\n1- crea database\"\n call(\"createdb %s\" % name, shell=True)\n\n print \"\\n2- aggiungi estensioni postgis al database:\"\n call(\"echo 'CREATE EXTENSION postgis;'| psql -U %s -d %s\" % (self.user, name), shell=True)\n call(\"echo 'CREATE EXTENSION hstore;'| psql -U %s -d %s\" % (self.user, name), shell=True)\n #Non si può più utilizzare con gli id in formato bigint\n #call(\"echo 'CREATE EXTENSION intarray;'| psql -U %s -d %s\" % (self.user, name), shell=True)\n\n print \"\\n3- importa schemi:\"\n call(\"psql -U %s -d %s -f /usr/share/doc/osmosis/examples/pgsnapshot_schema_0.6.sql\" % (self.user, name), shell=True)\n call(\"psql -U %s -d %s -f /usr/share/doc/osmosis/examples/pgsnapshot_schema_0.6_linestring.sql\" % (self.user, name), shell=True)\n\n print \"\\n4- importa dati OSM:\"\n #If you want to specify a temporary directory for osmosis use this prefix:\n # JAVACMD_OPTIONS='-Djava.io.tmpdir=/path/to/a/temp/dir/' osmosis...\n call(\"osmosis --rb %s --wp database=%s user=%s password=%s\" % (osmPBF, name, self.user, self.password), shell=True)\n\n #Alternative command using --write-pgsql-dump\n #pgdir = os.path.join(SCRIPTDIR, \"pgdump\")\n #call(\"osmosis --rb %s --write-pgsql-dump directory=%s enableLinestringBuilder=yes enableBboxBuilder=no\" % (osmPBF, pgdir), shell=True)\n #os.chdir(pgdir)\n #call(\"psql -U %s -d %s -f pgsnapshot_load_0.6.sql\" % (self.user, name), shell=True)\n return True", "def create(self):\r\n if self.filename:\r\n self.db = anydbm.open(self.filename, \"n\") #raises anydbm.error\r\n self.db[\"--Reserved--type\"] = self.type\r\n self.db.sync()\r\n else:\r\n self.db = {}", "def register_db():\n models = (Storage,\n AccessInfo\n )\n engine = create_engine(CONF.database.connection, echo=False)\n for model in models:\n model.metadata.create_all(engine)", "def create_database_stock_master():\n sql = \"\"\"\n CREATE DATABASE stock_master;\n \"\"\"\n excute_sql(sql,None)", "def create_db(db_name: str = DB_NAME) -> DBConnection:\n connection = open_db(db_name)\n connection.execute(\"\"\"\n CREATE TABLE docs\n (did INTEGER PRIMARY KEY, \n title TEXT NOT NULL, \n url TEXT NOT NULL)\n \"\"\")\n connection.execute(\"\"\"\n CREATE TABLE tfs \n (did INTEGER,\n term TEXT NOT NULL,\n tf INTEGER)\n \"\"\")\n connection.execute(\"\"\"\n CREATE TABLE boost\n (did INTEGER,\n date INTEGER,\n page INTEGER\n )\"\"\")\n print(f\"[+] Created db {DB_NAME}\")\n return connection", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))" ]
[ "0.769547", "0.74889106", "0.73719215", "0.7142584", "0.70426226", "0.7019294", "0.7007661", "0.69997644", "0.6994348", "0.6984608", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.6952927", "0.69121516", "0.6864701", "0.68602884", "0.68362546", "0.67990345", "0.6773872", "0.67702353", "0.67548937", "0.6745499", "0.67354316", "0.6680828", "0.66723514", "0.6658163", "0.665122", "0.66508335", "0.66387975", "0.6624905", "0.65997076", "0.6558209", "0.6536789", "0.6529951", "0.65276855", "0.64963984", "0.64936495", "0.64911264", "0.64777994", "0.64725333", "0.64657533", "0.64617294", "0.64586073", "0.6433326", "0.6425205", "0.64180666", "0.64112407", "0.6393497", "0.63867176", "0.63867176", "0.63714415", "0.63695335", "0.63657886", "0.6364858", "0.6351729", "0.6351655", "0.63326675", "0.63230294", "0.6317169", "0.6314203", "0.6287201", "0.62842053", "0.62838775", "0.6282374", "0.6281623", "0.6281379", "0.62800956", "0.6278206", "0.6264653", "0.62641764", "0.6257256", "0.62572265", "0.6256713", "0.62307096", "0.62274", "0.6223866", "0.6221739", "0.621945", "0.62162846", "0.6207986", "0.6205822", "0.61982995", "0.6197508", "0.6197074", "0.6193369", "0.6191197", "0.6187761", "0.61856633", "0.61784244", "0.61775255", "0.6175863", "0.61730266" ]
0.7182269
3
Find a single database
def find_database(self, name_or_id, instance, ignore_missing=True): instance = self._get_resource(_instance.Instance, instance) return self._find( _database.Database, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def searchDatabase(self, name: str) -> Database:\n for db in self._typeCheckerList:\n if db.name.lower() == name.lower():\n return db\n return None", "def get_database(self, database, instance=None):\n return self._get(_database.Database, database)", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]", "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))", "def get_database(self, dbid: str, account: str) -> Optional[dict]:\n self._check_connection(check_db=False)\n db_ids = []\n all_dbs = []\n for this_db in self.get_databases():\n if this_db[\"system:resource_name\"][\"@value\"] == dbid:\n db_ids.append(this_db[\"@id\"])\n all_dbs.append(this_db)\n\n resources_ids = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if (\n scope[\"@type\"] == \"system:Organization\"\n and scope[\"system:organization_name\"][\"@value\"] == account\n ):\n if type(scope[\"system:resource_includes\"]) is list:\n for resource in scope[\"system:resource_includes\"]:\n resources_ids.append(resource[\"@id\"])\n\n target_db = None\n for target in set(db_ids).intersection(set(resources_ids)):\n target_db = target\n\n for this_db in all_dbs:\n if this_db[\"@id\"] == target_db:\n return this_db", "def database():\n return conf().database", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_db(db_label):\n defaults = get_defaults()\n db_name = defaults[db_label]\n m = re.match('(\\w+)://.*?/([\\w.]+)', db_name)\n if m is None:\n logger.error(\"Poorly formed db name: %s\" % db_name)\n return\n sqltype = m.groups()[0]\n return DatabaseManager(db_name, sqltype=sqltype, label=db_label)", "def get_database(self):\n return self.database", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db", "def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db", "def get_database(self, database=None):\n\t\tdatabase = database if database !=None 
else self.database\n\t\t\n\t\tif self._database is None:\n\t\t\tconn = self.get_connection()\n\t\t\tdb = conn[database]\n\t\t\tself._database = db\n\t\t\n\t\treturn self._database", "def get_database (name, parent=None):\n if \".\" in name:\n parent, name = name.split(\".\")\n\n if parent is not None:\n if not isinstance(parent, DatabaseFolder):\n parent = globals().get(parent, None)\n\n if parent is None or not isinstance(parent, DatabaseFolder):\n return None\n\n return parent.get(name, None)\n\n return globals().get(name, None)", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def get_db(db=None):\n if db is None:\n db = ideagenstest\n return get_mongodb(db['url'],\n db['port'],\n db['dbName'],\n db['user'],\n db['pswd'])", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def isDatabase(self, dbName):\n url = '%s/_database/%s' % (self.uri, dbName)\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data", "def get_db(self, typename):\n return self._dbs[typename]", "def get_db(database):\n db = getattr(g, '_database', None)\n if db is None:\n intents_db = IntentsDatabaseEngine()\n expressions_db = ExpressionsDatabaseEngine()\n database_dict = {'intents': intents_db,\n 'expressions': expressions_db}\n g._database = db = database_dict\n return db[database]", "def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def getDb(self):\n return self.db", "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db", "def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object", "def find_db_by_name(message, db):\n user_list = []\n for filename in os.listdir(\"userdata/\"):\n\n with open(\"userdata/{}\".format(filename)) as data_file:\n userdata = json.load(data_file)\n\n for i in range(len(userdata[\"access\"])):\n if 
userdata[\"access\"][i][\"db\"] == db:\n user_list.append(userdata[\"name\"])\n break\n\n with open(\"data/databases.json\") as data_file:\n data = json.load(data_file)\n\n correct_server = \"\"\n for server in data:\n if db in data[server]:\n correct_server = server\n\n if correct_server == \"\":\n message.reply(\"No database found!\")\n return\n\n user_access = \"\"\n if user_list:\n user_access = \"The following users currently have access: {}\".format(\", \".join(user_list))\n\n message.reply(\"The database \\\"{}\\\" is located on server \\\"{}\\\". {}\".format(db,\n correct_server + config.SERVER_SUFFIX,\n user_access))", "def db(cls):\n return getattr(db, cls.__name__)", "def get_db(name, complete=False):\n cursor = flask.g.syscnx.cursor()\n sql = (\n \"SELECT owner, title, description, public, readonly,\"\n \" created, modified FROM dbs WHERE name=?\"\n )\n cursor.execute(sql, (name,))\n rows = cursor.fetchall()\n if len(rows) != 1:\n return None # 'rowcount' does not work?!\n db = {\"name\": name}\n db.update(rows[0])\n db[\"public\"] = bool(db[\"public\"])\n db[\"readonly\"] = bool(db[\"readonly\"])\n db[\"size\"] = os.path.getsize(utils.get_dbpath(name))\n db[\"hashes\"] = {}\n sql = \"SELECT hashname, hashvalue FROM dbs_hashes WHERE name=?\"\n cursor.execute(sql, (name,))\n for row in cursor:\n db[\"hashes\"][row[0]] = row[1]\n if complete:\n cursor = get_cnx(name).cursor()\n sql = \"SELECT name, schema FROM %s\" % constants.TABLES\n cursor.execute(sql)\n db[\"tables\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n sql = \"SELECT name, schema FROM %s\" % constants.INDEXES\n cursor.execute(sql)\n db[\"indexes\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n sql = \"SELECT name, schema FROM %s\" % constants.VIEWS\n cursor.execute(sql)\n db[\"views\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n return db", "def read_db():\n # read config file\n config = configparser.ConfigParser()\n config.read_file(open(\"options.cfg\"))\n\n return config['DEFAULT']['DatabaseFilename']", "def get_database() -> StandardDatabase:\n client = get_arango_client()\n return client.db(DB_NAME, username=ARANGO_USER, password=ARANGO_PASSWORD)", "def get_primary_db(force_new=False):\n defaults = get_defaults()\n if 'primary' in defaults.keys():\n primary_host = defaults['primary']\n else:\n raise IndraDatabaseError(\"No primary host available in defaults file.\")\n\n global __PRIMARY_DB\n if __PRIMARY_DB is None or force_new:\n __PRIMARY_DB = DatabaseManager(primary_host, label='primary')\n __PRIMARY_DB.grab_session()\n return __PRIMARY_DB", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def database_exists(self, db_name):\n conn = self.__get_open_connection(self.sys_conn_hash)\n conn.autocommit(True)\n sql = \"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '{0}'\".format(db_name)\n # print sql\n r = self.exec_sql_get_records(conn, sql)\n return (len(r) == 1)", "def database(db):\n if type(db) is str:\n # Database name\n if db.endswith('.py'):\n # Python source, exec it\n globals = {}\n exec(compile(open(db).read(), db, 'exec'), globals)\n if 'DB' in globals:\n db = globals['DB']\n else:\n storage = globals['Storage']\n from ZODB.DB import DB\n db = DB(storage, cache_size=4000)\n elif db.endswith(\".fs\"):\n from ZODB.DB import DB\n from ZODB.FileStorage import FileStorage\n storage = FileStorage(db)\n db = DB(storage, cache_size=4000)\n\n # The following will fail unless the application has been 
configured.\n from zope.event import notify\n notify(zope.processlifetime.DatabaseOpened(db))\n\n return db", "def database(dburl=None, **params):\n if not dburl and not params:\n dburl = os.environ['DATABASE_URL']\n if dburl:\n params = dburl2dict(dburl)\n dbn = params.pop('dbn')\n if dbn in _databases:\n return _databases[dbn](**params)\n else:\n raise UnknownDB, dbn", "def get_db():\n\tpath = get_path_db()\n\tif path is None:\n\t\tprint(\"\\n=> Info - Cannot fetch database yet because it has not been configured.\\n\")\n\telse:\n\t\tdb = SqliteExtDatabase(path)\n\t\treturn db", "def database(self):\n try:\n return self._database\n except:\n database = self.application.connection[self.database_name]\n self._database = database\n return database", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def db_for_read(self, model, **hints):\n if is_recon_model(model):\n return settings.RECON_NG_DATABASE_NAME\n\n return None", "def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)", "def db(self):\n return self.application.db", "def db(self):\n return self.application.db", "def db_for_read(self, model, **hints):\n\n return self.db_name", "def database_exists (name, parent=None):\n return get_database(name, parent) is not None", "def select_database(db_name, db_type, db_url, user, password, repo_conn=None):\n try:\n if repo_conn is None:\n repo_conn = _repo_connect()\n bmg = _base_meta_gen()\n rm = _repo_manager(repo_conn)\n db_obj = bmg.generate_database_meta(db_name, db_type, db_url,\n user, password)\n rm.save(db_obj)\n return (\"Database Meta Object: '%s' has been selected successfully!\" % (db_obj))\n except Exception as err:\n return \"Database Meta Object selection failed => %s\" % (err)", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db", "def get_dbinfo(error=True):\n try:\n return sesh.query(DbInfo).one()\n except NoResultFound:\n if error:\n logger.warning(\"No entry _dbinfo table, database appears to be blank\")\n raise\n else:\n return None", "def get_db():\n # this is a bit of a hack, since it assumes all the models talk to the same\n # db. 
that said a lot of our code relies on that assumption.\n # this import is here because of annoying dependencies\n return Database(settings.COUCH_DATABASE)", "def database():\n return sqlite3.connect(DATABASE)", "def db(self) -> Database:\n return self.impl.db", "def resolve_db(database) -> tuple:\n db = ':memory:'\n if (database is not None) and (database != ':memory:'):\n database = database.lower()\n db = database if database.endswith('.db') else f'{database}.db'\n db = os.path.join(settings.DB_PATH, Path(db).name)\n\n return db, Path(db).stem.replace(':', '')", "def get_db(self):\n return self._db", "def db_for_read(self, model, **hints):\n if model._meta.app_label == self.app_label:\n return self.db_name\n return None", "def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(str(current_app.config['DATABASE']))\n return g.db", "def db(self):\n return self._project.db", "def db_for_read(self, model, **hints):\r\n if model._meta.app_label == self.APP_LABEL:\r\n return self.DB_NAME\r\n return None", "def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db", "def get_db_name(self):\n\t\treturn conf.db_name", "def _get_db_instance(db: Union[str, Database]) -> MongoClient:\n if isinstance(db, str):\n db_name = parse_uri(db).get('database')\n if db_name is None:\n # TODO: Improve validation message\n raise ValueError(\"Invalid db: Could not extract database from uri: %s\", db)\n db = MongoClient(db)[db_name]\n return db", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db", "def mysql_database():\n return DATABASE", "def get (self, database, default=None):\n if hasattr(self, database):\n return getattr(self, database)\n else:\n return default", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(current_app.config['DB_NAME'])\n return db", "def database_status(self, database_name=None):\n if database_name is not None:\n databases = self.list_databases()\n for d in databases:\n if d['name'] == database_name:\n database_id = d['id']\n break\n else:\n raise ClientError('Could not find database, does not exist.')\n end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n return resp.json()\n else:\n end_point = '/'.join([self.host, 'api', 'databases', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n return resp.json()", "def db_for_read(self, model, **hints):\n state_db = self._db_name(model)\n if state_db in settings.DATABASES:\n name = state_db\n else:\n name = 'default'\n logger.debug('db_for_read({}): {}'.format(state_db, name))\n return name", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def get_db():\n global _cached\n if not _cached:\n _cached = MongoClient(config.DB_URI).get_database()\n return _cached", "def check_db():\n try:\n conn = sqlite3.connect(DB_PATH)\n cursor = conn.cursor()\n cursor.execute(\"SELECT id FROM query LIMIT 1;\")\n conn.close()\n except:\n init_db()", "def get_database(database: Optional[str] = None,\n instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:\n __args__ = dict()\n 
__args__['database'] = database\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1beta4:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value\n\n return AwaitableGetDatabaseResult(\n charset=pulumi.get(__ret__, 'charset'),\n collation=pulumi.get(__ret__, 'collation'),\n etag=pulumi.get(__ret__, 'etag'),\n instance=pulumi.get(__ret__, 'instance'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n project=pulumi.get(__ret__, 'project'),\n self_link=pulumi.get(__ret__, 'self_link'),\n sqlserver_database_details=pulumi.get(__ret__, 'sqlserver_database_details'))", "def lookup(name, db):\n database = load(db)\n matches = [ key for key in database if name in key ]\n if len(matches):\n for name in matches:\n print(\"%s (%s)\" % (name, database[name]))\n else:\n print(\"0 results found\")", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def read_db_one(id, tablename = None):\n\n # Set the default tablename\n if tablename is None:\n tablename = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"USE %s\"%(config['db']))\n cur.execute(\"SELECT * FROM %s WHERE id = %d;\"%(tablename,id))\n conn.commit()\n result = cur.fetchone()\n if len(result) == 0:\n result = None\n\n except Exception as e:\n print(\"read_data_list failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def get_db(request, name=None):\n\n dbname = name\n registry = request.registry\n\n if name is None:\n dbname = registry.settings.get(DBNAME)\n\n if dbname is None:\n raise ConfigurationError('There is no defined database name')\n\n mongodbs = getattr(request, '_mongo_dbs', dict())\n\n db = mongodbs.get(dbname)\n\n if db is None:\n conn = getattr(registry, '_mongo_conn', None)\n\n if conn is None:\n raise ConfigurationError(\n 'There is no database connection available')\n\n db = conn[dbname]\n\n mongodbs[dbname] = db\n request._mongo_dbs = mongodbs\n\n username = registry.settings.get(USERNAME + '.' + dbname)\n password = registry.settings.get(PASSWORD + '.' 
+ dbname)\n\n if not username is None and not password is None:\n db.authenticate(username, password)\n\n def end_request(request):\n db.logout()\n db.connection.end_request() \n\n request.add_finished_callback(end_request)\n\n return db", "def db_for_read(self, model, **hints):\n if model._meta.app_label == 'compras':\n return 'db2'\n return None", "def get_db():\r\n con = sqlite3.connect(\"whinge.db\")\r\n con.row_factory = sqlite3.Row # return dict-like rows\r\n return con", "def get_database(self, name='presentations.db'):\r\n if name not in self._databases:\r\n self._databases[name] = QtDBConnector(self.get_filepath(name), PluginManager(self))\r\n return self._databases[name]", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def get_db():\n top = flask._app_ctx_stack.top\n if not hasattr(top, 'shelve'):\n top.shelve = MODEL\n\n return top.shelve", "def db(self):\n return self._db or router.db_for_read(self.model, **self._hints)", "def GetDatabase(self):\r\n\r\n if self.database:\r\n return self.database\r\n \r\n if not os.path.exists(self.GetDataDir()):\r\n # Create the data folder, it still doesn't exist\r\n os.makedirs(self.GetDataDir())\r\n\r\n self.database = os.path.join(self.GetDataDir(), \"NDT_Database.db\")\r\n return self.database", "def get_database():\n if not REPO:\n site = pwb.Site(\"wikidata\", \"wikidata\")\n repo = site.data_repository()\n return repo\n return REPO", "def db(self) -> str:\n return self._db" ]
[ "0.7311903", "0.6849391", "0.6839373", "0.6820903", "0.6808621", "0.67496127", "0.67408377", "0.6706965", "0.6702317", "0.6701318", "0.66828966", "0.6620947", "0.65982574", "0.6543755", "0.6513072", "0.6468301", "0.64516336", "0.6444562", "0.640927", "0.6397873", "0.63197", "0.63197", "0.6319367", "0.627529", "0.62604314", "0.6252022", "0.6238203", "0.62273437", "0.6213253", "0.6198132", "0.619752", "0.61938363", "0.6191147", "0.61879766", "0.61871815", "0.6184237", "0.6169425", "0.61687297", "0.6156536", "0.6148677", "0.6131957", "0.61228824", "0.6114334", "0.611239", "0.6107182", "0.60943526", "0.608886", "0.60768723", "0.60487086", "0.60487086", "0.60487086", "0.60487086", "0.60298765", "0.60147077", "0.60143566", "0.6014264", "0.600273", "0.600273", "0.5998745", "0.59934515", "0.5985879", "0.59792876", "0.59782666", "0.59728503", "0.59715", "0.59682035", "0.5965989", "0.5964436", "0.5963656", "0.59617734", "0.59448093", "0.59423006", "0.5939045", "0.5937682", "0.5937576", "0.592271", "0.59040904", "0.5900612", "0.5897785", "0.58968884", "0.58866733", "0.58802146", "0.58782554", "0.5876759", "0.58754086", "0.5853751", "0.5853231", "0.5849001", "0.5847779", "0.58425915", "0.58335555", "0.58250886", "0.5819347", "0.5809049", "0.58043075", "0.58010364", "0.57846045", "0.57811487", "0.5775216", "0.5773143" ]
0.7636754
0
Return a generator of databases
def databases(self, instance, **query): instance = self._get_resource(_instance.Instance, instance) return self._list(_database.Database, instance_id=instance.id, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_databases ():\n return _dbobjects[:]", "def database():\n db = Database()\n yield db\n db.close()", "def get_databases(self):\n pass", "def get_db() -> Generator:\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()", "def get_db() -> Iterator[Session]:\n yield from _get_fastapi_sessionmaker().get_db()", "def __get_available_databases(self, root):\n\t\tfor i in walk_tree(root):\n\t\t\tif '.sqlite3' in i:\n\t\t\t\tyield os.path.abspath(i)", "def testing_get_db() -> Generator:\n db = TestSessionLocal()\n try:\n yield db\n finally:\n db.close()", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def produce_all_database(is_debug):\n\tproduce_database([\"apnea-ecg\", \"train\"], is_debug)\n\tproduce_database([\"apnea-ecg\", \"test\"], is_debug)", "def generate_database_object(**kwargs):\n return app.database.Database(\"test.db\", **kwargs)", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def unit_database() -> Iterator[units.UnitDatabase]:\n yield units.UnitDatabase.PushSingleton()\n units.UnitDatabase.PopSingleton()", "def get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()", "def mock_db() -> Iterable[sqlite3.Connection]:\n with sqlite3.connect(\":memory:\") as conn:\n initialize_database(conn)\n yield conn", "def get_db():\n db = None\n try:\n db = SessionLocal()\n yield db\n finally:\n db.close()", "def multi_database(database_factories):\n databases = {}\n result = []\n for factory in database_factories:\n name = factory.name or ''\n if name in databases:\n raise ValueError(\"Duplicate database name: %r\" % name)\n db = factory.open()\n db.databases = databases\n db.database_name = name\n databases[name] = db\n # Grrr bug in ZODB. 
Database doesn't declare that it implements\n # IDatabase.\n if not ZODB.interfaces.IDatabase.providedBy(db):\n zope.interface.directlyProvides(db, ZODB.interfaces.IDatabase)\n zope.component.provideUtility(db, ZODB.interfaces.IDatabase, name)\n db.setActivityMonitor(ZODB.ActivityMonitor.ActivityMonitor())\n result.append(db)\n\n return result, databases", "def yield_database(self, database_name: str) -> Iterable[CreateDatabaseRequest]:\n\n yield CreateDatabaseRequest(\n name=database_name,\n service=EntityReference(\n id=self.context.database_service.id,\n type=\"databaseService\",\n ),\n )", "def get_db():\n try:\n db = SessionLocal()\n yield db\n finally:\n db.close()", "def get_databases(self):\n query = mssqlqueries.get_databases()\n logger.info(u'Databases query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def get_db():\n WhatismybrowserUseragentModel.metadata.create_all(engine)\n try:\n db = TestingSessionLocal()\n import_db_dump(db) # import database from json dump\n yield db\n finally:\n db.close()\n WhatismybrowserUseragentModel.__table__.drop(engine)", "async def database(self, *_):\n config = {'dsn': os.getenv('DATABASE_URL')}\n self.db = await aiopg.sa.create_engine(**config)\n yield\n self.db.close()\n await self.db.wait_closed()", "def databases(database_container):\n database_container.setupall()\n return database_container", "def db_schema_32():\n with old_db_schema(\"32\"):\n yield", "def list_dbs(self):\n return self.get('_all_dbs').json()", "def make_db():\n cwd = os.getcwd()\n name = \"regolith_fake\"\n repo = os.path.join(tempfile.gettempdir(), name)\n if os.path.exists(repo):\n rmtree(repo)\n subprocess.run([\"git\", \"init\", repo])\n os.chdir(repo)\n with open(\"README\", \"w\") as f:\n f.write(\"testing \" + name)\n with open(\"regolithrc.json\", \"w\") as f:\n json.dump(\n {\n \"groupname\": \"ERGS\",\n \"databases\": [\n {\n \"name\": \"test\",\n \"url\": repo,\n \"public\": True,\n \"path\": \"db\",\n \"local\": True,\n }\n ],\n \"stores\": [\n {\n \"name\": \"store\",\n \"url\": repo,\n \"path\": repo,\n \"public\": True,\n }\n ],\n \"force\": False,\n },\n f,\n )\n os.mkdir(\"db\")\n # Write collection docs\n for coll, example in deepcopy(EXEMPLARS).items():\n if isinstance(example, list):\n d = {dd[\"_id\"]: dd for dd in example}\n else:\n d = {example[\"_id\"]: example}\n dump_yaml(\"db/{}.yaml\".format(coll), d)\n subprocess.run([\"git\", \"add\", \".\"])\n subprocess.run([\"git\", \"commit\", \"-am\", \"Initial readme\"])\n yield repo\n os.chdir(cwd)\n rmtree(repo)", "def databases(self):\n return self._databases", "def databases(self) -> Session:\n uri = f\"{self.uri}/databases\"\n return self.request(uri=uri, method=\"GET\").json()", "def generation_hardcoded_db():\n print(\"[root-get] DEBUG: Genearating hc DB for modules\")\n database = Db4pkg()\n db_manifest = database.hardcoded_db()\n if not db_manifest:\n print(\"[root-get] Failed to generate DB for modules (test)\")\n return False\n return db_manifest", "def override_get_db():\n try:\n db = TestingSessionLocal()\n yield db\n finally:\n db.close()", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]", "def _db():\n db_fd, db_path = tempfile.mkstemp()\n app = create_app()\n app.config['TESTING'] = True\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['DATABASE'] = db_path\n app.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite://\"\n\n with 
app.app_context():\n db.init_app(app)\n db.create_all()\n\n yield db\n\n os.close(db_fd)\n os.unlink(db_path)", "def all_dbs(self):\n return self.cloudant_client.all_dbs()", "def unit_database_posc() -> Iterator[units.UnitDatabase]:\n unit_database = units.UnitDatabase()\n unit_database.FillUnitDatabaseWithPosc(unit_database)\n units.UnitDatabase.PushSingleton(unit_database)\n\n yield unit_database\n\n units.UnitDatabase.PopSingleton()", "def generation_db_modules():\n print(\"[root-get] DEBUG: Genearating DB for modules\")\n database = Db4pkg()\n db_manifest = database.generated_manifest()\n if not db_manifest:\n print(\"[root-get] Failed to generate DB for modules only\")\n return False\n return db_manifest", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def get_all(self):\n return self.db", "def setupDatabases(con, options, dbList):\n currentDatabases = dbGetFirstColumnAsMap(con, \"select datname from pg_database where datistemplate = false\")\n currentRolenames = dbGetFirstColumnAsMap(con, \"select rolname from pg_roles\")\n trace(\"currentDatabases = \" + str(currentDatabases))\n for dbName in dbList:\n trace(\"dbName='%s'\" % str(dbName))\n setupDatabase(con, options, currentDatabases, currentRolenames, dbName, dbList[dbName])", "def __call__(self, chunk_size: int = None, limit: int = None) -> Union[Iterator[Database], Iterator[DatabaseList]]:\n return self._list_generator(\n list_cls=DatabaseList, resource_cls=Database, chunk_size=chunk_size, method=\"GET\", limit=limit\n )", "def get_dbs_obj(self):\n dbs_xml = self.get_DatabaseAndServer_XML()\n return self.get_DatabaseAndServer_obj(dbs_xml)", "def prep_database(sqla):\n create_multiple_people(sqla, random.randint(5, 15))\n create_multiple_accounts(sqla)\n return [account.id for account in sqla.query(Account.id).all()]", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, 
\"databases\")", "async def prepare_databases(self):", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def _connect_db():\n try:\n conn = connect()\n cur = conn.cursor() \n yield conn, cur\n\n finally:\n cur.close()\n conn.close()", "def get_sqla_makers():\n registry = dict()\n for schema in omix_schemas.get_schemas().values():\n sqla_maker = TablelikeSqlaMaker(schema)\n tbl_name = sqla_maker.get_table_name() \n registry[tbl_name] = sqla_maker\n \n users_nm = nest_users.COLLECTION_NAME\n users_sqlam = core_db.get_nest_users_sqla_maker()\n registry[users_nm] = users_sqlam\n\n return registry", "def _get_db(self):\n gt_db = ...\n return gt_db", "def list_databases(self, limit=None, marker=None):\n return self._database_manager.list(limit=limit, marker=marker)", "def init_db(configuration):\n db = ZODB.config.databaseFromString(configuration)\n for init in IDBInitializer.subscription(db):\n init(db)\n return db", "def get_available_databases() -> List[str]:\r\n\tcur = psycopg2.connect(dbname='postgres').cursor()\r\n\tcur.execute(\"SELECT datname FROM pg_database WHERE datistemplate=FALSE;\")\r\n\treturn [row[0][:-6] for row in cur if row[0].endswith('wikidb')]", "def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }", "def db(request):\n url, container = run_docker_container()\n try:\n time.sleep(100)\n db = dbrequests.Database(url, sql_dir=sql_dir)\n yield db # providing fixture value for a test case\n # tear_down\n db.close()\n kill_remove_docker_container(container)\n except Exception as e:\n kill_remove_docker_container(container)\n raise(e)", "def read_db_list(tablename = None):\n\n # Set the default tablename\n if tablename is None:\n tablename = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"USE %s\"%(config['db']))\n cur.execute(\"SELECT * FROM %s;\"%(tablename,))\n conn.commit()\n result = cur.fetchall()\n\n except Exception as e:\n print(\"read_data_list failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def getDatabases(self):\n query = 'SELECT name FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n return df", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def db(app):\n _db.app = app\n with app.app_context():\n _db.create_all()\n\n yield _db\n\n # Explicitly close DB connection.\n _db.session.close()\n _db.drop_all()", "def get_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_dbs = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if scope[\"@type\"] == \"system:Database\":\n all_dbs.append(scope)\n return all_dbs", "def db(app):\n _db.app = app\n with app.app_context():\n _db.create_all()\n\n yield _db\n\n # Explicitly close DB connection\n _db.session.close()\n _db.drop_all()", "def db(app):\n _db.app = app\n with app.app_context():\n _db.create_all()\n\n yield _db\n\n # Explicitly close DB connection\n _db.session.close()\n _db.drop_all()", "def sql_engine(tmpdir: Path) -> 
Generator[SQLEngine, None, None]:\n db_file_path = tmpdir / \"test.db\"\n sql_engine = create_sql_engine(db_file_path)\n yield sql_engine\n sql_engine.dispose()", "def get_db():\n\n def dict_factory(cursor, row):\n \"\"\"\n Creates dict from row.\n\n Args:\n cursor: DB cursor.\n row: Row.\n\n Returns:\n dict: Dict of results.\n \"\"\"\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n if '_database' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = dict_factory\n return g.db", "def sqlite_db():\n smm.init_db(\"test.db\")\n yield smm.DATABASE\n os.remove(\"test.db\")", "def _database(self):\n ...", "def test_db():\n db.create_all() # setup\n yield # testing happens here\n db.drop_all() # teardown", "async def _engine() -> AsyncGenerator[AsyncEngine, None]:\n await create_database()\n\n engine = create_async_engine(str(settings.db_url))\n async with engine.begin() as conn:\n await conn.run_sync(Base.metadata.create_all)\n\n try:\n yield engine\n finally:\n await engine.dispose()\n await drop_database()", "def open_database(dbpath=DB_PATH):\n\n # __enter__ (Establish the connection)\n db = sqlite3.connect(dbpath)\n cursor = db.cursor()\n\n yield BlockChainDB(cursor)\n\n # __exit__ (Commit and close the connection)\n db.commit()\n db.close()", "async def database():\n db = await Database.connect_pool()\n return db", "def empty_graph_db(request) -> graph_tuple_database.Database:\n yield from testing_databases.YieldDatabase(\n graph_tuple_database.Database, request.param\n )", "def database():\n return conf().database", "def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def list_databases(self) -> List[Dict]:\n 
self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def get_database(self, instance, name):\n return instance.get_database(name)", "def list_databases(self):\n r = self.__get_response(settings.LST_DBS)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def configuration():\n db = SystemDB(filename=\"file:seamm_db?mode=memory&cache=shared\")\n system = db.create_system(name=\"default\")\n configuration = system.create_configuration(name=\"default\")\n\n yield configuration\n\n db.close()\n try:\n del db\n except: # noqa: E722\n print(\"Caught error deleting the database\")", "def _do_build ():\n if os.path.exists(\"./database\"):\n data_path = \"./database/\"\n elif os.path.exists(\"../database\"):\n data_path = \"../database/\"\n elif os.path.exists(\"../../database\"):\n data_path = \"../../database/\"\n else:\n data_path = \".\"\n\n dir_specs = {}\n databases = []\n\n # first pass over the databases to create complete tree:\n for dirpath, dirnames, filenames in os.walk(data_path):\n # all databases are stored\n for name in filenames:\n if name.endswith(\".db\"):\n databases.append(os.path.join(dirpath, name).replace(data_path, \"\"))\n # but we need to store specs here otherwise things could get a bit confusing\n elif name.endswith(\".spec\"):\n possible_dir = os.path.join(dirpath, name[:-5]+\".db\")\n if os.path.exists(possible_dir) and os.path.isdir(possible_dir):\n spec_name = possible_dir.replace(data_path, \"\")\n dir_specs[spec_name] = parse_spec(os.path.join(dirpath, name))\n\n # and we create DatabaseFolders for each subfolder\n for name in dirnames:\n if name.endswith(\".db\"):\n # dump the extension here too\n obj_name = name[:-3]\n this_folder = DatabaseFolder(obj_name)\n\n if dir_specs.has_key(name):\n this_folder.spec = dir_specs.pop(name)\n\n if dirpath != data_path:\n search = dirpath.replace(data_path, \"\").split(PATH_DELIM)\n try:\n top_folder = globals()[search[0]]\n except KeyError:\n raise DatabaseError, \"Subdirectory of a db folder without a DatabaseFolder?\"\n for p in search[1:]:\n if p == name:\n break\n try:\n top_folder = getattr(top_folder, p)\n except AttributeError:\n raise DatabaseError, \"Subdirectory of a db subfolder without a DatabaseFolder subfolder!\"\n top_folder.append(this_folder)\n else:\n globals()[obj_name] = this_folder\n\n for database in databases:\n build_from_file_name(database, data_path)", "def database_session_wide(app) -> SQLAlchemy:\n if os.path.exists(get_test_database_path()):\n os.unlink(get_test_database_path())\n\n from pipwatch_api.datastore.models import DATABASE\n DATABASE.init_app(app=app)\n DATABASE.create_all()\n yield DATABASE\n\n DATABASE.drop_all()\n DATABASE.session.close()\n\n os.unlink(get_test_database_path())", "def inventory_db():\r\n \r\n config.parse_config()\r\n \r\n fake = Faker()\r\n dbname= '_'.join(['fakedb',\r\n fake.word(),\r\n fake.word(),\r\n fake.word(),\r\n ])\r\n \r\n config.cc.inventory.name = dbname\r\n \r\n # Create the database\r\n db= io_sql.sql_database()\r\n db.create_database(dbname)\r\n assert db.database_exists(dbname)\r\n del(db)\r\n \r\n print('Inventroy_db: ', dbname)\r\n \r\n # Pass the database to the test functions\r\n yield\r\n \r\n print('Done with inventory_db: ', dbname)\r\n \r\n # Delete the database after use\r\n db= io_sql.sql_database()\r\n db.delete_database(dbname)\r\n assert not 
db.database_exists(dbname)", "def get_database_session() -> Generator[sessionmaker, None, None]:\n s = None\n\n try:\n s = SessionLocal()\n yield s\n except Exception as e:\n raise e\n finally:\n if s:\n s.close()", "def make_db():\n\n db.create_all()", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def database_manager() -> DatabaseManager:\r\n filename = \"test_bookmarks.db\"\r\n dbm = DatabaseManager(filename)\r\n yield dbm\r\n dbm.__del__() # release the database manager\r\n os.remove(filename)", "def setup(self):\n return self.setupDatabases()", "def yield_db_cursor(connect_params=DB_PARAMS, cursor_type=DictCursor):\n\n with psycopg2.connect(**connect_params) as con:\n with con.cursor(cursor_factory=cursor_type) as cur:\n yield cur", "def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)", "def getSequencesFullDatabase(self):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sequences_for_fasta_fulldb', [results])\n for row in results:\n # sequence_name, sequence_string, md5_checksum\n yield [row[0], row[1], row[2]]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)", "def createDatabase( db, \n filenames,\n force = False,\n synonyms = None,\n compression = None,\n random_access_points = None,\n regex_identifier = None):\n\n if compression:\n if compression == \"lzo\":\n import lzo\n def lzo_mangler( s ): return lzo.compress(s, 9)\n mangler = lzo_mangler\n db_name = db + \".lzo\"\n write_chunks = True\n elif compression == \"zlib\":\n def zlib_mangler( s ): return zlib.compress( s, 9)\n mangler = zlib_mangler\n db_name = db + \".zlib\"\n write_chunks = True \n elif compression == \"gzip\":\n mangler = gzip_mangler\n db_name = db + \".gz\"\n write_chunks = True \n elif compression == \"dictzip\":\n import dictzip\n mangler = lambda x: x\n db_name = db + \".dz\"\n write_chunks = False\n elif compression == \"debug\":\n mangler = lambda x: x\n db_name = db + \".debug\"\n write_chunks = True\n else:\n raise \"unknown compression library: %s\" % compression\n \n else:\n mangler = lambda x: x\n db_name = db + \".fasta\"\n write_chunks = False\n \n index_name = db + \".idx\"\n \n if db in filenames:\n raise ValueError( \"database (%s) is part of input set.\" % db_name)\n\n if os.path.exists( db_name ) and not force:\n raise ValueError( \"database %s already exists.\" % db_name )\n\n if os.path.exists( index_name ) and not force:\n raise ValueError( \"database index %s already exists.\" % index_name )\n \n outfile_index = open( index_name, \"w\" )\n if compression == \"dictzip\":\n import dictzip\n if random_access_points == None or random_access_points <= 0:\n raise ValueError(\"specify dictzip chunksize in --random-access-points\")\n outfile_fasta = dictzip.open( db_name, \"wb\", buffersize=1000000, chunksize=random_access_points )\n compression = None\n else:\n outfile_fasta = open( db_name, \"wb\" )\n\n if type(filenames) == types.StringType:\n filenames = [filenames]\n\n identifiers = {}\n lsequence = 0\n identifier_pos, sequence_pos = 0, 0\n\n translation = string.maketrans(\"xX\", \"nN\")\n \n for filename in filenames:\n\n if filename == \"-\": \n infile = sys.stdin\n elif filename[-3:] == \".gz\":\n infile = gzip.open( filename, \"r\" )\n else:\n infile = open( 
filename, \"r\")\n\n fragments = []\n lfragment = 0\n first = True\n \n for line in infile:\n\n if line[0] == \"#\": continue\n \n if line[0] == \">\" :\n \n if not first:\n \n if write_chunks:\n writeFragments( outfile_fasta, outfile_index, fragments, mangler,\n random_access_points, True )\n \n fragments = []\n lfragment = 0\n else:\n outfile_fasta.write( \"\\n\" )\n \n outfile_index.write(\"\\t%i\\n\" % lsequence)\n\n first = False\n \n if regex_identifier:\n try:\n identifier = re.search(regex_identifier, line[1:-1]).groups()[0]\n except AttributeError:\n raise \"could not parse identifer from line %s\" % line[1:-1]\n else:\n identifier = re.split(\"\\s\", line[1:-1])[0]\n \n ## check for duplicate identifiers\n if identifier in identifiers:\n raise ValueError, \"%s occurs more than once in %s and %s: line=%s\" %\\\n (identifier, identifiers[identifier], filename, line[1:-1])\n identifiers[identifier] = filename\n \n # write identifier, the identifier includes a new-line\n identifier_pos = outfile_fasta.tell()\n outfile_fasta.write( \"%s\" % mangler(line) )\n sequence_pos = outfile_fasta.tell()\n \n outfile_index.write( \"%s\\t%i\" % (identifier,\n identifier_pos ) )\n if write_chunks:\n outfile_index.write( \"\\t%i\" % random_access_points )\n else:\n outfile_index.write( \"\\t%i\" % sequence_pos )\n \n lsequence = 0\n \n else:\n \n s = re.sub( \"\\s\", \"\", line.strip() )\n\n if options.clean_sequence:\n s = s.translate( translation )\n \n lsequence += len(s)\n \n if write_chunks:\n fragments.append(s)\n lfragment += len(s)\n if lfragment > random_access_points:\n rest = writeFragments( outfile_fasta, outfile_index,\n fragments, mangler, random_access_points,\n False)\n fragments = [rest]\n lfragment = len(rest)\n else:\n outfile_fasta.write( mangler(s) )\n \n if write_chunks:\n writeFragments( outfile_fasta, outfile_index, fragments, mangler, random_access_points, True )\n else:\n outfile_fasta.write( \"\\n\" )\n \n outfile_index.write(\"\\t%i\\n\" % lsequence )\n\n # add synonyms for the table\n if synonyms:\n for key, vals in synonyms.items():\n for val in vals:\n outfile_index.write( \"%s\\t%s\\n\" % (key, val) )", "def connect(db_name, host='localhost', port=27017, **kwargs):\n m_client = pymongo.MongoClient(host, port, **kwargs)\n try:\n db_instance = m_client.get_database(db_name)\n yield db_instance\n finally:\n m_client.close()", "def __init__(self):\n self.databases = []", "def _db(_app):\n return db", "def get_db(name, complete=False):\n cursor = flask.g.syscnx.cursor()\n sql = (\n \"SELECT owner, title, description, public, readonly,\"\n \" created, modified FROM dbs WHERE name=?\"\n )\n cursor.execute(sql, (name,))\n rows = cursor.fetchall()\n if len(rows) != 1:\n return None # 'rowcount' does not work?!\n db = {\"name\": name}\n db.update(rows[0])\n db[\"public\"] = bool(db[\"public\"])\n db[\"readonly\"] = bool(db[\"readonly\"])\n db[\"size\"] = os.path.getsize(utils.get_dbpath(name))\n db[\"hashes\"] = {}\n sql = \"SELECT hashname, hashvalue FROM dbs_hashes WHERE name=?\"\n cursor.execute(sql, (name,))\n for row in cursor:\n db[\"hashes\"][row[0]] = row[1]\n if complete:\n cursor = get_cnx(name).cursor()\n sql = \"SELECT name, schema FROM %s\" % constants.TABLES\n cursor.execute(sql)\n db[\"tables\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n sql = \"SELECT name, schema FROM %s\" % constants.INDEXES\n cursor.execute(sql)\n db[\"indexes\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n sql = \"SELECT name, schema FROM %s\" % 
constants.VIEWS\n cursor.execute(sql)\n db[\"views\"] = dict([(row[0], json.loads(row[1])) for row in cursor])\n return db", "def create_db(self):", "def createDatabaseInstances(self, geneticInstances):\n self.createImages(geneticInstances)\n\n databaseInstances = []\n for instance in geneticInstances:\n genome = instance.toGenomeRepresentation()\n dbi = DatabaseInstance(idinst=None, # This will be updated after the upload is done\n genome = genome,\n generation=self.params.currentGeneration)\n databaseInstances.append(dbi)\n return databaseInstances", "def app():\n # create a temporary file to isolate the database for each test\n # create the app with common test config\n app = create_app({\"TESTING\": True, \"DATABASE_NAME\": \"AAPI_DB_Test\"})\n\n # create the database and load test data\n with app.app_context():\n init_db()\n\n yield app", "def makeDatabaseNamesList(n, ):", "def map_database(connection):\n eng = create_engine(connection)\n metadata = MetaData()\n metadata.reflect(eng)\n base = automap_base(metadata=metadata)\n base.prepare()\n return base.classes, eng", "def list_databases(self):\n end_point = '/'.join([self.host, 'api', 'databases', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n if resp.status_code != 200:\n raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))\n return resp.json()", "def create_databases():\n db_connection = connect_to_db()\n\n # Create database tables.\n create_tables(db_connection)\n\n # Populate water tables.\n populate_water_tables(db_connection)\n\n # station_data = get_station_data()\n # station = station_data.query('ŠIFRA == 30301')\n # print(station)\n # index = station.index[0]\n # lat = station.at[index, 'LAT']\n # lng = station.at[index, 'LON']\n # name = f\"{station.at[index, 'VODOMERNA POSTAJA']} ({station.at[index, 'VODOTOK']})\"\n # print(index, lat, lng, name)\n\n # Populate location tables\n # populate_locations(db_connection)\n\n # Populate weather tables\n populate_weather(db_connection)\n\n db_connection.commit()\n db_connection.close()" ]
[ "0.7408082", "0.72182983", "0.71949655", "0.7083334", "0.69460434", "0.6883696", "0.68634665", "0.6770623", "0.6742591", "0.6707848", "0.6675422", "0.66511965", "0.65341586", "0.6520894", "0.6501018", "0.64990884", "0.64912605", "0.6466524", "0.6464015", "0.6418823", "0.63651603", "0.63646597", "0.63511217", "0.6344924", "0.6335589", "0.6300313", "0.62859327", "0.62602854", "0.6232509", "0.6218924", "0.62043977", "0.6175014", "0.6147683", "0.6138643", "0.61264586", "0.6121394", "0.6100734", "0.6100597", "0.6095098", "0.6090436", "0.60851383", "0.60628355", "0.6050726", "0.6050003", "0.60298985", "0.6026787", "0.6024187", "0.6007802", "0.6007167", "0.6006188", "0.599438", "0.599346", "0.5989306", "0.5981271", "0.59602785", "0.5932265", "0.5926926", "0.5924556", "0.5924556", "0.59229255", "0.59216934", "0.5908948", "0.5905398", "0.5905367", "0.5900186", "0.5882407", "0.58630794", "0.5860487", "0.5855596", "0.5852387", "0.5848807", "0.5842233", "0.5833198", "0.58306193", "0.58297676", "0.58278936", "0.58214605", "0.58190006", "0.58120054", "0.58089906", "0.5805173", "0.58001554", "0.58001554", "0.5797252", "0.5792061", "0.5791523", "0.57891047", "0.57821816", "0.57820755", "0.57800937", "0.5772474", "0.5757928", "0.5756314", "0.5744314", "0.5741295", "0.571843", "0.57129157", "0.56997484", "0.5699277", "0.5686696" ]
0.6240589
28
Get a single database
def get_database(self, database, instance=None): return self._get(_database.Database, database)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def get_database(self):\n return self.database", "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_database(self, database=None):\n\t\tdatabase = database if database !=None else self.database\n\t\t\n\t\tif self._database is None:\n\t\t\tconn = self.get_connection()\n\t\t\tdb = conn[database]\n\t\t\tself._database = db\n\t\t\n\t\treturn self._database", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]", "def get_db(db_label):\n defaults = get_defaults()\n db_name = defaults[db_label]\n m = re.match('(\\w+)://.*?/([\\w.]+)', db_name)\n if m is None:\n logger.error(\"Poorly formed db name: %s\" % db_name)\n return\n sqltype = m.groups()[0]\n return DatabaseManager(db_name, sqltype=sqltype, label=db_label)", "def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db", "def get_database() -> StandardDatabase:\n client = get_arango_client()\n return client.db(DB_NAME, username=ARANGO_USER, password=ARANGO_PASSWORD)", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def database():\n return conf().database", "def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(current_app.config['DB_NAME'])\n return db", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(str(current_app.config['DATABASE']))\n return g.db", "def get_db():\n\tpath = get_path_db()\n\tif path is None:\n\t\tprint(\"\\n=> Info - Cannot fetch database yet because it has not been configured.\\n\")\n\telse:\n\t\tdb = SqliteExtDatabase(path)\n\t\treturn db", "def getDb(self):\n return self.db", "def get_database(database: Optional[str] = None,\n instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:\n __args__ = dict()\n __args__['database'] = database\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = 
pulumi.runtime.invoke('google-native:sqladmin/v1beta4:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value\n\n return AwaitableGetDatabaseResult(\n charset=pulumi.get(__ret__, 'charset'),\n collation=pulumi.get(__ret__, 'collation'),\n etag=pulumi.get(__ret__, 'etag'),\n instance=pulumi.get(__ret__, 'instance'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n project=pulumi.get(__ret__, 'project'),\n self_link=pulumi.get(__ret__, 'self_link'),\n sqlserver_database_details=pulumi.get(__ret__, 'sqlserver_database_details'))", "def get_db(db=None):\n if db is None:\n db = ideagenstest\n return get_mongodb(db['url'],\n db['port'],\n db['dbName'],\n db['user'],\n db['pswd'])", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db", "def get_db():\n if not hasattr(g, \"sql_db\"):\n g.sql_db = connect_db()\n return g.sql_db", "def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def database(dburl=None, **params):\n if not dburl and not params:\n dburl = os.environ['DATABASE_URL']\n if dburl:\n params = dburl2dict(dburl)\n dbn = params.pop('dbn')\n if dbn in _databases:\n return _databases[dbn](**params)\n else:\n raise UnknownDB, dbn", "def get_db(database):\n db = getattr(g, '_database', None)\n if db is None:\n intents_db = IntentsDatabaseEngine()\n expressions_db = ExpressionsDatabaseEngine()\n database_dict = {'intents': intents_db,\n 'expressions': expressions_db}\n g._database = db = database_dict\n return db[database]", "def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db", "def get_db():\n global _cached\n if not _cached:\n _cached = MongoClient(config.DB_URI).get_database()\n return _cached", "def get_db():\n db = getattr(g, 'database', None)\n if db is None:\n db = sqlite3.connect(app.config['DATABASE'])\n db.row_factory = sqlite3.Row\n g.database = db\n return db", "def get_db():\n if \"db\" not in g:\n g.db = sqlite3.connect(current_app.config[\"DATABASE\"], detect_types=sqlite3.PARSE_DECLTYPES)\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def get_db():\n if not hasattr(g, 'postgres_db'):\n g.postgres_db = connect_db()\n return g.postgres_db", "def get_db():\n # this is a bit of a hack, since it assumes all the models talk to the same\n # db. 
that said a lot of our code relies on that assumption.\n # this import is here because of annoying dependencies\n return Database(settings.COUCH_DATABASE)", "def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab", "def database():\n return sqlite3.connect(DATABASE)", "def database(self):\n try:\n return self._database\n except:\n database = self.application.connection[self.database_name]\n self._database = database\n return database", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.row_factory = sqlite3.Row\n return db", "def get_db():\n if not hasattr(g, 'mysql_db'):\n g.mysql_db = connect_db()\n return g.mysql_db", "def getdb():\n if 'db' not in g:\n g.db = sqlite3.connect(\n config.DATABASE,\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n return g.db", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def mysql_database():\n return DATABASE", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def get_db():\n\tif not hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db = connect_db()\n\treturn g.sqlite_db", "def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection", "def get_db(self):\n return self._db", "def get_db():\n if not hasattr(g, \"sqlite_db\"):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db", "def get_db(request, name=None):\n\n dbname = name\n registry = request.registry\n\n if name is None:\n dbname = 
registry.settings.get(DBNAME)\n\n if dbname is None:\n raise ConfigurationError('There is no defined database name')\n\n mongodbs = getattr(request, '_mongo_dbs', dict())\n\n db = mongodbs.get(dbname)\n\n if db is None:\n conn = getattr(registry, '_mongo_conn', None)\n\n if conn is None:\n raise ConfigurationError(\n 'There is no database connection available')\n\n db = conn[dbname]\n\n mongodbs[dbname] = db\n request._mongo_dbs = mongodbs\n\n username = registry.settings.get(USERNAME + '.' + dbname)\n password = registry.settings.get(PASSWORD + '.' + dbname)\n\n if not username is None and not password is None:\n db.authenticate(username, password)\n\n def end_request(request):\n db.logout()\n db.connection.end_request() \n\n request.add_finished_callback(end_request)\n\n return db", "def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()", "def find_database(self, name_or_id, instance, ignore_missing=True):\n instance = self._get_resource(_instance.Instance, instance)\n return self._find(\n _database.Database,\n name_or_id,\n instance_id=instance.id,\n ignore_missing=ignore_missing,\n )", "def get_db():\r\n if not hasattr(g, 'sqlite_db'):\r\n g.sqlite_db = connect_db()\r\n return g.sqlite_db", "def get_db():\n if not hasattr(g, 'pg_db'):\n g.pg_db = connect_db()\n return g.pg_db", "def get_db():\n if not hasattr(g, 'pg_db'):\n g.pg_db = connect_db()\n return g.pg_db", "def get_database(self, dbid: str, account: str) -> Optional[dict]:\n self._check_connection(check_db=False)\n db_ids = []\n all_dbs = []\n for this_db in self.get_databases():\n if this_db[\"system:resource_name\"][\"@value\"] == dbid:\n db_ids.append(this_db[\"@id\"])\n all_dbs.append(this_db)\n\n resources_ids = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if (\n scope[\"@type\"] == \"system:Organization\"\n and scope[\"system:organization_name\"][\"@value\"] == account\n ):\n if type(scope[\"system:resource_includes\"]) is list:\n for resource in scope[\"system:resource_includes\"]:\n resources_ids.append(resource[\"@id\"])\n\n target_db = None\n for target in set(db_ids).intersection(set(resources_ids)):\n target_db = target\n\n for this_db in all_dbs:\n if this_db[\"@id\"] == target_db:\n return this_db", "def get_db():\n if not hasattr(g, \"site_db\"):\n connection = pg.connect(\n dbname=\"dollsite\",\n user=\"dollsite\",\n password=app.config[\"DS_DB_PASSW\"]\n )\n g.site_db = connection\n return g.site_db", "def GetDatabase(self):\r\n\r\n if self.database:\r\n return self.database\r\n \r\n if not os.path.exists(self.GetDataDir()):\r\n # Create the data folder, it still doesn't exist\r\n os.makedirs(self.GetDataDir())\r\n\r\n self.database = os.path.join(self.GetDataDir(), \"NDT_Database.db\")\r\n return self.database", "def get_primary_db(force_new=False):\n defaults = get_defaults()\n if 'primary' in defaults.keys():\n primary_host = defaults['primary']\n else:\n raise IndraDatabaseError(\"No primary host available in defaults file.\")\n\n global __PRIMARY_DB\n if __PRIMARY_DB is None or force_new:\n __PRIMARY_DB = DatabaseManager(primary_host, label='primary')\n __PRIMARY_DB.grab_session()\n return __PRIMARY_DB", "def get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(\n current_app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db", "def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n 
\"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object", "def get_database (name, parent=None):\n if \".\" in name:\n parent, name = name.split(\".\")\n\n if parent is not None:\n if not isinstance(parent, DatabaseFolder):\n parent = globals().get(parent, None)\n\n if parent is None or not isinstance(parent, DatabaseFolder):\n return None\n\n return parent.get(name, None)\n\n return globals().get(name, None)", "def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db", "def get_db(self, typename):\n return self._dbs[typename]", "def get_db():\n conn = g.get('sqlite_db', None)\n if conn is None:\n conn = g.sqlite_db = connect_db()\n return conn", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def get_db():\n config = {\n 'host': 'localhost',\n 'port': 3306,\n 'user': 'root',\n 'database':'edb',\n 'passwd': 'root',\n 'charset': 'utf8',\n 'cursorclass': pymysql.cursors.DictCursor\n\n }\n\n if 'db' not in g:\n g.db = pymysql.connect(**config)\n\n return g.db", "def db(self) -> Database:\n return self.impl.db", "def db(self):\n return self.application.db", "def db(self):\n return self.application.db", "def get_db():\r\n con = sqlite3.connect(\"whinge.db\")\r\n con.row_factory = sqlite3.Row # return dict-like rows\r\n return con", "def get_db(server_id):\n DATABASE = \"DATABASE\" + str(server_id)\n print(DATABASE)\n tracktop = _app_ctx_stack.top\n if not hasattr(tracktop, 'track_db0') and server_id == 0:\n tracktop.track_db0 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db0.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db1') and server_id == 1:\n tracktop.track_db1 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db1.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db2') and server_id == 2:\n tracktop.track_db2 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db2.row_factory = sqlite3.Row\n\n if server_id == 0:\n return tracktop.track_db0\n elif server_id == 1:\n return tracktop.track_db1\n else:\n return tracktop.track_db2", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n db.execute('PRAGMA foreign_keys = ON')\n return db", "def _get_db(self):\n gt_db = ...\n return gt_db", "def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db", "def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test 
database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")" ]
[ "0.8189091", "0.7899076", "0.7844062", "0.7762001", "0.7726166", "0.772395", "0.7723282", "0.7685286", "0.765148", "0.762424", "0.7616151", "0.76047456", "0.7597356", "0.7583495", "0.7570153", "0.75534976", "0.74211323", "0.74180853", "0.7410544", "0.7392949", "0.7318623", "0.7315056", "0.73138237", "0.731299", "0.7289476", "0.7260866", "0.7227758", "0.72201204", "0.7200716", "0.71671736", "0.71408194", "0.713998", "0.71147037", "0.7099291", "0.70912874", "0.7085623", "0.7067797", "0.7065756", "0.70635957", "0.7058203", "0.70434386", "0.7040552", "0.7039763", "0.7030233", "0.7030233", "0.7027138", "0.70271105", "0.7023133", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020396", "0.7020071", "0.70121825", "0.70090824", "0.70070297", "0.698581", "0.698581", "0.69778085", "0.69747233", "0.69731534", "0.6957302", "0.6942034", "0.6939812", "0.69286585", "0.6908308", "0.69078314", "0.6907469", "0.6900892", "0.6900892", "0.6900892", "0.6900892", "0.6896793", "0.68933916", "0.689332", "0.689332", "0.6878542", "0.68765163", "0.6862447", "0.68581253", "0.6844691", "0.6838404", "0.6833862", "0.6820225", "0.67981315", "0.67981315" ]
0.7978113
1
Find a single flavor
def find_flavor(self, name_or_id, ignore_missing=True):
    return self._find(
        _flavor.Flavor, name_or_id, ignore_missing=ignore_missing
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def find_flavor(self, name_or_id, ignore_missing=False):\n return self._find(_flavor.Flavor, name_or_id,\n ignore_missing=ignore_missing)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)", "def _existing_flavor(self):\n return instance_types.get_all_types().keys()[0]", "def find_flavor_id(flavor_name: str):\n for flavor in get_flavors()[\"flavors\"]:\n if flavor_name == flavor[\"name\"]:\n return flavor[\"id\"]\n\n raise AttributeError(f\"No flavor '{flavor_name}' found\")", "def get_flavor_by_uuid(cls, flavor_uuid):\n return cls.dbdriver.get_flavor_by_uuid(flavor_uuid)", "def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def flavor(self):\n return self._flavor", "def flavor(self, name=None):\n raise NotImplementedError", "def test_aws_service_api_flavor_get(self):\n pass", "def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (self.catalog['compute'], flavor_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavor']\n else:\n LOG.error('Get flavor failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_get_flavor(self):\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertEqual(self.flavor_ref, flavor.id)", "def get_flavor_id(self, flavor_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \\\n \"/flavors/detail\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while getting flavors.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get flavor ID Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n\n for flavors in output['flavors']:\n if flavors['name'].lower() == flavor_name.lower():\n LOG_OBJ.debug(\"Flavor Name: %s, ID: %s\" % (flavor_name,\n flavors['id']))\n return flavors['id']\n\n LOG_OBJ.error(\"Flavor:%s is NOT found\" % flavor_name)", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor", "def get_flavor(flavor_id, include_deleted=False):\n\n try:\n flavor_id = int(flavor_id)\n if include_deleted:\n return 
Flavor.objects.get(id=flavor_id)\n else:\n return Flavor.objects.get(id=flavor_id, deleted=include_deleted)\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid flavor ID '%s'\" % flavor_id)\n except Flavor.DoesNotExist:\n raise faults.ItemNotFound('Flavor not found.')", "def get_flavor(self, request, tenant_id, flavor_id):\n response_data = get_flavor(flavor_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])", "def _get_flavor_ref(self, flavor):\n flavor_obj = None\n if isinstance(flavor, CloudDatabaseFlavor):\n flavor_obj = flavor\n elif isinstance(flavor, int):\n # They passed an ID or a size\n try:\n flavor_obj = self.get_flavor(flavor)\n except exc.NotFound:\n # Must be either a size or bad ID, which will\n # be handled below\n pass\n if flavor_obj is None:\n # Try flavor name\n flavors = self.list_flavors()\n try:\n flavor_obj = [flav for flav in flavors\n if flav.name == flavor][0]\n except IndexError:\n # No such name; try matching RAM\n try:\n flavor_obj = [flav for flav in flavors\n if flav.ram == flavor][0]\n except IndexError:\n raise exc.FlavorNotFound(\"Could not determine flavor from \"\n \"'%s'.\" % flavor)\n # OK, we have a Flavor object. Get the href\n href = [link[\"href\"] for link in flavor_obj.links\n if link[\"rel\"] == \"self\"][0]\n return href", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if \"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result", "def _get_flavor_name(self, flavor_id):\n for name, f_id in FLAVOR_ID.items():\n if f_id == flavor_id:\n return name", "def flavors(self, **kwargs):\n raise NotImplementedError", "def flavors(self, **query):\n return self._list(_flavor.Flavor, **query)", "def _generate_flavorid(self):\n nonexistent_flavor = 2700\n flavor_ids = [value[\"id\"] for key, value in\n instance_types.get_all_types().iteritems()]\n while nonexistent_flavor in flavor_ids:\n nonexistent_flavor += 1\n else:\n return nonexistent_flavor", "def show_flavors():\n return get_flavors()", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def flavor_id(self):\n return self._flavor_id", "def flavor_id(self):\n return self._flavor_id", "def test_get_non_existent_flavor(self):\n try:\n self.flavors_client.get_flavor_details(999)\n self.fail('No exception thrown for a non-existent flavor id')\n except ItemNotFound:\n pass", "def test_list_flavors_with_detail(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertIn(flavor, flavors)", "def display_flavor(self):\n print(\"\\nWe currently count with the next flavors:\")\n for flavor in self.flavors:\n print(f\"{flavor}\")", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_flavor_provider(flavor):\n disk_template = flavor.disk_template\n provider = None\n if disk_template.startswith(\"ext\"):\n disk_template, provider = 
disk_template.split(\"_\", 1)\n return disk_template, provider", "def test_aws_service_api_flavors_get(self):\n pass", "def flavor(self, flavor):\n self._flavor = flavor", "def test_will_not_get_instance_by_unknown_flavor_id(self):\n self.assertRaises(exception.FlavorNotFound,\n instance_types.get_instance_type_by_flavor_id,\n 'unknown_flavor')", "def create_flavor(self):\n logger.debug(\"Creating VM Flavor\")\n rc, flavor_id = self.cal.create_flavor(self.account, self.flavor)\n assert rc == RwTypes.RwStatus.SUCCESS\n\n return flavor_id", "def get_azure_flavor(flavor):\n if flavor not in CONF.ec2_flavor_to_size_map:\n raise Exception(\"Could not find mapping for the EC2 image size \"\n \"'%s', please edit 'ec2_flavor_to_size_map' in the \"\n \"configuration.\" % (flavor))\n\n return CONF.ec2_flavor_to_size_map[flavor]", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def HashSelect(versions, flavor):\n return versions[HashKey(flavor)]", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def test_list_flavors(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n flavor_ids = [x.id for x in flavors]\n self.assertIn(flavor.id, flavor_ids,\n \"The expected flavor: %s was not found in \"\n \"the flavor list\" % flavor.id)", "def _create_flavor(self, context, flavor):\n flavor_dict = flavor.__dict__\n name = self.prefix + flavor.name\n flavorid = self.prefix + flavor.id\n memory = flavor.ram\n vcpus = flavor.vcpus\n root_gb = flavor.disk\n ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0)\n u_swap = flavor_dict.get('swap', 0)\n rxtx_factor = flavor_dict.get('rxtx_factor', 1.0)\n is_public = flavor_dict.get('os-flavor-access:is_public', True)\n if u_swap == \"\":\n swap = 0\n else:\n swap = int(u_swap)\n\n try:\n return flavors.create(name, memory, vcpus, root_gb,\n ephemeral_gb=ephemeral_gb,\n flavorid=flavorid, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n except exception.InstanceExists as err:\n raise err", "def test_list_flavors_detailed_min_ram_larger_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def get_from_server_domain(cls, server_domain: str) -> \"Flavor\":\n subdomain, base_domain, tld = server_domain.rsplit(\".\", maxsplit=2)\n\n if subdomain == \"api.pro\":\n if base_domain == \"openfoodfacts\":\n return cls.off_pro\n raise ValueError(\"pro platform is only available for Open Food Facts\")\n\n for server_type in cls:\n if base_domain == server_type.get_base_domain():\n return server_type\n\n raise ValueError(f\"no Flavor matched for server_domain {server_domain}\")", "def test_check_add_flavor(self):\n for flavor_id, flavor in OPENSTACK_FLAVOR.items():\n self.cmd._add_flavor(flavor, flavor_id)\n ralph_flavor = CloudFlavor.objects.get(flavor_id=flavor_id)\n self.assertEqual(ralph_flavor.name, flavor['name'])\n self.assertEqual(ralph_flavor.cloudprovider, self.cloud_provider)\n self.assertIn(flavor['tag'], ralph_flavor.tags.names())\n self.assertEqual(flavor['cores'], ralph_flavor.cores)\n self.assertEqual(flavor['memory'], ralph_flavor.memory)\n self.assertEqual(flavor['disk'], 
ralph_flavor.disk)", "def find_family(self, needle):\n return self.__make_api_call('find/family/{}'.format(needle))", "def check_flan_flavor(flavor):\n if not flavor:\n flan_flavor = \"plain old boring\"\n else:\n flan_flavor = flavor\n return (flan_flavor + \" flavored flan\")", "def VersionSelect(versions, flavor):\n\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return ','.join(ids)\n if toolchainbinaries.IsPnaclFlavor(flavor):\n return versions['PNACL_VERSION']\n if toolchainbinaries.IsX86Flavor(flavor):\n if toolchainbinaries.IsNotNaClNewlibFlavor(flavor):\n return versions['GLIBC_VERSION']\n else:\n return versions['NEWLIB_VERSION']\n if toolchainbinaries.IsArmTrustedFlavor(flavor):\n return versions['ARM_TRUSTED_VERSION']\n raise Exception('Unknown flavor \"%s\"' % flavor)", "def setUpClass(cls):\n super(FlavorsTest, cls).setUpClass()\n flavors = cls.flavors_client.list_flavors_with_detail().entity\n\n # Find the flavor that provides the most RAM\n flavors.sort(key=lambda k: k.ram)\n cls.max_ram = flavors[-1].ram\n\n # Find the flavor that provides the most disk\n flavors.sort(key=lambda k: k.disk)\n cls.max_disk = flavors[-1].disk", "def test_list_flavors_detailed_using_marker(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors_with_detail(\n marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def list_flavors(cls):\n return cls.dbdriver.list_flavors()", "def values_from_flavor(flavor):\n try:\n flavor = Network.FLAVORS[flavor]\n except KeyError:\n raise faults.BadRequest(\"Unknown network flavor\")\n\n mode = flavor.get(\"mode\")\n\n link = flavor.get(\"link\")\n if link == \"pool\":\n link = allocate_resource(\"bridge\")\n\n mac_prefix = flavor.get(\"mac_prefix\")\n if mac_prefix == \"pool\":\n mac_prefix = allocate_resource(\"mac_prefix\")\n\n tags = flavor.get(\"tags\")\n\n return mode, link, mac_prefix, tags", "def _check_for_extraspecs(self, flavor):\n flavor_extraspecs = self.driver.get_flavor_extraspecs(flavor)\n if flavor_extraspecs:\n scg_key = constants.SCG_KEY\n if scg_key in flavor_extraspecs:\n if not self.scg_id_list:\n return None\n if not flavor_extraspecs[scg_key] in self.scg_id_list:\n return None\n return (True, flavor_extraspecs)", "def get_variation(self, name, version):\n if not self.variations:\n return\n\n # Attempt to find the variation\n for variation in self.variations:\n if variation.name == name and variation.version == version:\n return variation", "def test_list_flavors_detailed_filter_by_min_ram(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by RAM in ascending order\n flavors.sort(key=lambda k: int(k.ram))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest RAM size\n filter_criteria = lambda x: int(x.ram) >= int(flavors[1].ram)\n expected_flavors = filter(filter_criteria, flavors)\n\n response = self.flavors_client.list_flavors_with_detail(\n min_ram=flavors[1].ram)\n actual_flavors = response.entity\n actual_flavors.sort(key=lambda k: k.id)\n expected_flavors.sort(key=lambda k: k.id)\n self.assertEqual(actual_flavors, expected_flavors)", "def 
test_list_flavors_detailed_min_disk_larger_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_disk='99999')\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def getImageFinder(self, name: str) -> Optional[str]:\n trace = 'themes' in g.app.debug\n exists = g.os_path_exists\n getString = g.app.config.getString\n\n def dump(var: str, val: str) -> None:\n print(f\"{var:20}: {val}\")\n\n join = g.os_path_join\n #\n # \"Just works\" for --theme and theme .leo files *provided* that\n # theme .leo files actually contain these settings!\n #\n theme_name1 = getString('color-theme')\n theme_name2 = getString('theme-name')\n roots = [\n g.os_path_join(g.computeHomeDir(), '.leo'),\n g.computeLeoDir(),\n ]\n theme_subs = [\n \"themes/{theme}/Icons\",\n \"themes/{theme}\",\n \"Icons/{theme}\",\n ]\n # \".\" for icons referred to as Icons/blah/blah.png\n bare_subs = [\"Icons\", \".\"]\n paths = []\n for theme_name in (theme_name1, theme_name2):\n for root in roots:\n for sub in theme_subs:\n paths.append(join(root, sub.format(theme=theme_name)))\n for root in roots:\n for sub in bare_subs:\n paths.append(join(root, sub))\n table = [z for z in paths if exists(z)]\n for base_dir in table:\n path = join(base_dir, name)\n if exists(path):\n if trace:\n g.trace(f\"Found {name} in {base_dir}\")\n return path\n # if trace: g.trace(name, 'not in', base_dir)\n if trace:\n g.trace('not found:', name)\n return None", "def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")", "def test_create_flavor_existing(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n flavor_creator_2 = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor2 = flavor_creator_2.create()\n\n self.assertEqual(flavor.id, flavor2.id)", "def test_list_flavors_min_disk_greater_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors(min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def _generate_name(self):\n nonexistent_flavor = str(int(time.time()))\n flavors = instance_types.get_all_types()\n while nonexistent_flavor in flavors:\n nonexistent_flavor += \"z\"\n else:\n return nonexistent_flavor", "def find(ctx, name):\n conf = settings.devices.get(name, dict())\n if conf.get('type') == 'command':\n return conf, name, name\n\n uuids = ctx.obj['uuids']\n context = Context()\n for dev in iter(context.list_devices()):\n if 'ID_FS_TYPE' in dev:\n if name == uuids.get(dev.get('ID_FS_UUID')):\n return (settings.devices[name], dev['DEVNAME'],\n settings.devices[name].get('label',\n dev.get('ID_FS_LABEL')))\n\n print('Device \"%s\" not found.' 
% name)\n sys.exit(1)", "def get_server_flavor(self, postfix):\n (response, content) = self.successResultOf(json_request(\n self, self.root, b\"GET\", self.uri + postfix))\n self.assertEqual(200, response.code)\n return content", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def search_food(cls, name):\n obj = cls.objects(name=name).first()\n return obj", "def get(self, request, flavor_id):\n conn = get_sdk_connection(request)\n flavor = conn.load_balancer.find_flavor(flavor_id)\n return _get_sdk_object_dict(flavor)", "def delete_flavor(self, flavor='del_flvr'):\n try:\n self.novaclient.flavors.delete(\n self.get_flavor_id(flavor))\n except Exception as e:\n print \"Flavor %s failed to delete: %s\" % (flavor, repr(e))", "def getSpecie(name):\n for spec in Species:\n if spec.name == name:\n return spec\n return None", "def test_list_flavors_using_marker(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors(marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def test_list_flavors_detailed_filter_by_min_disk(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by disk size in ascending order\n flavors.sort(key=lambda k: int(k.disk))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest disk size\n filter_criteria = lambda x: int(x.disk) >= int(flavors[1].disk)\n expected_flavors = filter(filter_criteria, flavors)\n\n response = self.flavors_client.list_flavors_with_detail(\n min_disk=flavors[1].disk)\n actual_flavors = response.entity\n actual_flavors.sort(key=lambda k: k.id)\n expected_flavors.sort(key=lambda k: k.id)\n self.assertEqual(actual_flavors, expected_flavors)", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def test_list_flavors_filter_by_min_ram(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by RAM in ascending order\n flavors.sort(key=lambda k: int(k.ram))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest RAM value\n filter_criteria = lambda x: int(x.ram) >= int(flavors[1].ram)\n expected_flavors = filter(filter_criteria, flavors)\n response = self.flavors_client.list_flavors(min_ram=flavors[1].ram)\n actual_flavors = response.entity\n\n actual_flavor_ids = set([flavor.id for flavor in actual_flavors])\n expected_flavor_ids = set([flavor.id for flavor in expected_flavors])\n self.assertEqual(actual_flavor_ids, expected_flavor_ids)", "def get_preset_by_id(preset_id: Text):\n presets = get_presets()[\"presets\"]\n for preset in presets:\n if preset_id == preset[\"id\"]:\n return preset", "def get_preset_by_id(preset_id: Text):\n presets = 
get_presets()[\"presets\"]\n for preset in presets:\n if preset_id == preset[\"id\"]:\n return preset", "def specific_surface(self):\n srf = BRepAdaptor_Surface(self.topods_shape())\n surf_type = self.surface_type()\n if surf_type == \"plane\":\n return srf.Plane()\n if surf_type == \"cylinder\":\n return srf.Cylinder()\n if surf_type == \"cone\":\n return srf.Cone()\n if surf_type == \"sphere\":\n return srf.Sphere()\n if surf_type == \"torus\":\n return srf.Torus()\n if surf_type == \"bezier\":\n return srf.Bezier()\n if surf_type == \"bspline\":\n return srf.BSpline()\n raise ValueError(\"Unknown surface type: \", surf_type)", "def test_list_flavors_detailed_limit_results(self):\n response = self.flavors_client.list_flavors_with_detail(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def find_stick():\n out = subprocess.check_output(\n \"gdbus introspect --system --dest org.freedesktop.UDisks \"\n \"--object-path /org/freedesktop/UDisks/devices --recurse \"\n \"--only-properties\".split())\n devs = zip(*((re.match(r\".* = '?(.*?)'?;\", x).group(1)\n for x in out.splitlines()\n if \"DriveConnectionInterface =\" in x\n or \"DeviceIsPartition =\" in x\n or \"DeviceFile = \" in x),)*3)\n try:\n return next(dev[2] for dev in devs if dev[0] == 'usb'\n and dev[1] == 'true')\n except StopIteration:\n return None", "def list_flavors(self, limit=None, marker=None):\n return self._flavor_manager.list(limit=limit, marker=marker)", "def test_list_flavors_min_disk_greater_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors(min_disk=self.max_disk+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def test_list_flavors_filter_by_min_disk(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by disk size in ascending order\n flavors.sort(key=lambda k: int(k.disk))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest disk size\n filter_criteria = lambda x: int(x.disk) >= int(flavors[1].disk)\n expected_flavors = filter(filter_criteria, flavors)\n response = self.flavors_client.list_flavors(min_disk=flavors[1].disk)\n actual_flavors = response.entity\n\n actual_flavor_ids = set([flavor.id for flavor in actual_flavors])\n expected_flavor_ids = set([flavor.id for flavor in expected_flavors])\n self.assertEqual(actual_flavor_ids, expected_flavor_ids)", "def _get_faction(text):\n for faction in _FACTIONS:\n if faction in text:\n return faction\n return None", "def get_fortune():\n data_file = get_data_file()\n fortunes=get_fortunes(data_file)\n return random.choice(fortunes)", "def relevant_food(self, animal):\n species = animal.__class__.__name__\n return self.remaining_food[species]", "def which_service(self, service, instance=None, resource=False, management=False, shared=False, override=None): # pylint:disable=r0911, r0912, r0913, r0915\n if service not in self.service_discovery:\n raise InvalidService()\n if resource and management and shared:\n raise InvalidServiceSpec()\n\n if override:\n if \"vault\" in override:\n if self.details[\"datacenter\"] == \"jng\": #pylint: disable=no-else-return\n return \"jng4vault1.skytap-dev.net\"\n elif self.details[\"env\"] == \"5\":\n return \"tuk5vaultvip1.qa.skytap.com\"\n elif self.details[\"env\"] in [\"6\", \"8\"]:\n return \"{}6vaultvip{}.qa.skytap.com\".format(self.details[\"datacenter\"], instance)\n elif self.details[\"env\"] == \"9\" and self.details[\"datacenter\"] == \"sea\":\n 
return \"tuk6vaultvip1.qa.skytap.com\"\n elif \"integ\" in override and (self.details[\"env\"] == \"5\" or self.details[\"platform\"] == \"integ\"):\n return override[\"integ\"]\n elif \"corp\" in override and self.details[\"env\"] == \"9\":\n return override[\"corp\"]\n elif \"qa\" in override and (self.details[\"env\"] in [\"5\", \"6\", \"7\", \"8\"] or self.details[\"platform\"] in [\"qa\", \"integ\", \"test\"]):\n return override[\"qa\"]\n elif self.details[\"env\"] == \"9\" and self.details[\"datacenter\"] == \"sea\":\n return override[\"qa\"]\n elif \"prod\" in override and self.details[\"env\"] == \"9\" and self.details[\"datacenter\"] == \"tuk\":\n return override[\"prod\"]\n elif \"prod\" in override and (self.details[\"env\"] == \"1\" or self.details[\"platform\"] == \"prod\"):\n return override[\"prod\"]\n\n service_details = self.details.copy()\n service_details[\"details\"] = self.service_discovery[service]\n\n if service_details[\"datacenter\"] is not None and \"ash\" in service_details[\"datacenter\"]:\n service_details[\"datacenter\"] = \"wdb\"\n if service_details[\"datacenter\"] is not None and \"dls\" in service_details[\"datacenter\"]:\n service_details[\"datacenter\"] = \"dal\"\n\n if shared and service_details[\"env\"] in [\"5\", \"6\", \"7\", \"8\"]:\n service_details[\"env\"] = \"8\"\n service_details[\"subplatform\"] = \"\"\n service_details[\"region\"] = \"\"\n service_details[\"platform\"] = \"qa\"\n elif shared and service_details[\"env\"] == \"9\" and service_details[\"datacenter\"] == \"tuk\":\n service_details[\"env\"] = \"1\"\n service_details[\"platform\"] = \"prod\"\n service_details[\"region\"] = \"\"\n service_details[\"subplatform\"] = \"\"\n elif shared:\n service_details[\"subplatform\"] = \"\"\n service_details[\"region\"] = \"\"\n elif not shared and service_details[\"env\"] == \"8\":\n service_details[\"env\"] = \"6\"\n service_details[\"platform\"] = \"test\"\n\n if not shared and service_details[\"subplatform\"] is None:\n service_details[\"subplatform\"] = \"mgt\"\n\n if service_details[\"subplatform\"]:\n service_details[\"subplatform\"] = \".\" + service_details[\"subplatform\"]\n else:\n service_details[\"subplatform\"] = \"\"\n\n if management and resource:\n # corp is always the special case here\n if service_details[\"env\"] == \"9\" and service_details[\"datacenter\"] == \"tuk\":\n service_details[\"env\"] = \"1\"\n service_details[\"region\"] = \"m1\"\n service_details[\"platform\"] = \"prod\"\n elif service_details[\"env\"] == \"9\" and service_details[\"datacenter\"] == \"sea\":\n service_details[\"datacenter\"] = \"tuk\"\n service_details[\"env\"] = \"6\"\n service_details[\"region\"] = \"m1\"\n service_details[\"platform\"] = \"qa\"\n\n # Service exists in management and resource region\n if service_details[\"region\"] is None:\n if self.details[\"datacenter\"] == \"tuk\":\n service_details[\"region\"] = \"m1\"\n elif self.details[\"datacenter\"]:\n service_details[\"region\"] = \"r1\"\n else:\n service_details[\"region\"] = \"\"\n # in case of x1\n if service_details[\"region\"] == \"x1\":\n service_details[\"region\"] = \"r1\"\n\n if service_details[\"datacenter\"] is None:\n service_details[\"datacenter\"] = \"\"\n if service_details[\"env\"] is None:\n service_details[\"env\"] = \"\"\n\n if instance is not None:\n if isinstance(instance, list):\n return [\"{datacenter}{env}{region}{service}{service_instance}{subplatform}.{platform}.skytap.com\".format(\n service=service, service_instance=node, **service_details\n ) for node in 
instance]\n return \"{datacenter}{env}{region}{service}{service_instance}{subplatform}.{platform}.skytap.com\".format(\n service=service, service_instance=instance, **service_details\n )\n\n raise InvalidServiceBadInstance()", "def get_changed_flavors(changed_files, flavors):\n changed_flavors = []\n for f in changed_files:\n pattern = r\"^(mlflow|tests)/(.+?)(_autolog(ging)?)?(\\.py|/)\"\n # ~~~~~\n # # This group captures a flavor name\n match = re.search(pattern, f)\n\n if (match is not None) and (match.group(2) in flavors):\n changed_flavors.append(match.group(2))\n\n return changed_flavors", "def find_descriptor(self, uuid):\n for desc in self.list_descriptors():\n if desc.uuid == uuid:\n return desc\n return None", "def get_default_variant(variants):\n for variant in variants:\n if variant.default:\n return variant", "def delete_flavor(cls, flavor_uuid):\n cls.dbdriver.delete_flavor(flavor_uuid)", "def test_list_flavors_limit_results(self):\n response = self.flavors_client.list_flavors(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def get_size(vm_):\n vm_size = config.get_cloud_config_value(\"size\", vm_, __opts__)\n sizes = avail_sizes()\n\n if not vm_size:\n return sizes[\"Small Instance\"]\n\n for size in sizes:\n combinations = (str(sizes[size][\"id\"]), str(size))\n if vm_size and str(vm_size) in combinations:\n return sizes[size]\n raise SaltCloudNotFound(\n \"The specified size, '{}', could not be found.\".format(vm_size)\n )", "def flavors(request): # pylint: disable=unused-argument\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)", "def get_service_name(service, rem):\n flavor = rem.os.package_type\n try:\n return _SERVICE_MAP[service][flavor]\n except KeyError:\n return None", "def findHotspot( self, name ):\n for hotspot in self._hotspots:\n if ( hotspot.name() == name ):\n return hotspot\n return None", "def find_one_bywhereclause(cls, whereclause):\n return cls.dbm().modelclass_find_one_bywhereclause(cls, whereclause)", "def detect():\n id = None\n\n if lsb_release:\n id = lsb_release.get_distro_information()['ID']\n else:\n try:\n lsb_cmd = subprocess.Popen(['lsb_release', '--id', '-s'],\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n output = lsb_cmd.communicate()[0]\n if not lsb_cmd.returncode:\n id = output.decode().split('\\n')[0].strip()\n except OSError:\n # id is None in this case\n pass\n\n if id == whatmaps.debiandistro.DebianDistro.id:\n return whatmaps.debiandistro.DebianDistro\n elif id == whatmaps.redhatdistro.FedoraDistro.id:\n return whatmaps.redhatdistro.FedoraDistro\n else:\n if os.path.exists('/usr/bin/dpkg'):\n logging.warning(\"Unknown distro but dpkg found, assuming Debian\")\n return whatmaps.debiandistro.DebianDistro\n elif os.path.exists('/bin/rpm'):\n logging.warning(\"Unknown distro but rpm found, assuming Fedora\")\n return whatmaps.debiandistro.FedoraDistro\n else:\n return None" ]
[ "0.8060121", "0.78982663", "0.73436314", "0.7281482", "0.7281482", "0.7015246", "0.7004508", "0.68786645", "0.67651623", "0.67304444", "0.66880435", "0.66849285", "0.65951335", "0.6554591", "0.64654094", "0.6302968", "0.6265922", "0.62596685", "0.62154466", "0.6201769", "0.6198083", "0.61453795", "0.61352086", "0.60199374", "0.5965522", "0.59557176", "0.59351337", "0.59273946", "0.59021956", "0.59021956", "0.57842183", "0.574143", "0.5730473", "0.57124364", "0.5649323", "0.56089616", "0.55838954", "0.5552055", "0.5518379", "0.55071694", "0.55046403", "0.55008715", "0.5497056", "0.54393065", "0.5416245", "0.54120934", "0.54032737", "0.5363675", "0.5361448", "0.5347096", "0.5332777", "0.5321551", "0.53101206", "0.53061175", "0.53036034", "0.5257709", "0.5257431", "0.52532107", "0.52437603", "0.5243738", "0.52353853", "0.52204704", "0.52102554", "0.5210094", "0.5206234", "0.52035314", "0.5199048", "0.5194353", "0.51766056", "0.5167106", "0.5145117", "0.5139075", "0.51369494", "0.5116492", "0.5116492", "0.5116492", "0.51136667", "0.51086825", "0.51086825", "0.5106636", "0.510395", "0.50965965", "0.5085115", "0.5083749", "0.50819963", "0.5077715", "0.50654805", "0.5032777", "0.50152034", "0.5004279", "0.50007886", "0.49919873", "0.49816304", "0.49747875", "0.4969055", "0.49639338", "0.49610096", "0.49599555", "0.49571952", "0.4951802" ]
0.7353765
2
Get a single flavor
def get_flavor(self, flavor):
    return self._get(_flavor.Flavor, flavor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_flavor(self, flavor_id):\n return self._flavor_manager.get(flavor_id)", "def get_flavor(self, flavor_id):\n url = '%s/flavors/%s' % (self.catalog['compute'], flavor_id)\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavor']\n else:\n LOG.error('Get flavor failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavor(self):\n return self._flavor", "def get_flavor_by_uuid(cls, flavor_uuid):\n return cls.dbdriver.get_flavor_by_uuid(flavor_uuid)", "def get_flavor(self, request, tenant_id, flavor_id):\n response_data = get_flavor(flavor_id)\n request.setResponseCode(response_data[1])\n return json.dumps(response_data[0])", "def get_flavor(flavor_id, include_deleted=False):\n\n try:\n flavor_id = int(flavor_id)\n if include_deleted:\n return Flavor.objects.get(id=flavor_id)\n else:\n return Flavor.objects.get(id=flavor_id, deleted=include_deleted)\n except (ValueError, TypeError):\n raise faults.BadRequest(\"Invalid flavor ID '%s'\" % flavor_id)\n except Flavor.DoesNotExist:\n raise faults.ItemNotFound('Flavor not found.')", "def flavor(self, name=None):\n raise NotImplementedError", "def test_get_flavor(self):\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertEqual(self.flavor_ref, flavor.id)", "def _existing_flavor(self):\n return instance_types.get_all_types().keys()[0]", "def test_aws_service_api_flavor_get(self):\n pass", "def find_flavor(self, name_or_id, ignore_missing=True):\n return self._find(\n _flavor.Flavor, name_or_id, ignore_missing=ignore_missing\n )", "def find_flavor(self, name_or_id, ignore_missing=False):\n return self._find(_flavor.Flavor, name_or_id,\n ignore_missing=ignore_missing)", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def flavor_id(self):\n return self._flavor_id", "def flavor_id(self):\n return self._flavor_id", "def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor", "def get_flavor_id(self, flavor_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \\\n \"/flavors/detail\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while getting flavors.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get flavor ID Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n\n for flavors in output['flavors']:\n if flavors['name'].lower() == flavor_name.lower():\n LOG_OBJ.debug(\"Flavor Name: %s, ID: %s\" % (flavor_name,\n flavors['id']))\n return flavors['id']\n\n LOG_OBJ.error(\"Flavor:%s is NOT found\" % flavor_name)", "def get(self, request, flavor_id):\n conn = get_sdk_connection(request)\n flavor = conn.load_balancer.find_flavor(flavor_id)\n return _get_sdk_object_dict(flavor)", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if 
\"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result", "def _get_flavor_ref(self, flavor):\n flavor_obj = None\n if isinstance(flavor, CloudDatabaseFlavor):\n flavor_obj = flavor\n elif isinstance(flavor, int):\n # They passed an ID or a size\n try:\n flavor_obj = self.get_flavor(flavor)\n except exc.NotFound:\n # Must be either a size or bad ID, which will\n # be handled below\n pass\n if flavor_obj is None:\n # Try flavor name\n flavors = self.list_flavors()\n try:\n flavor_obj = [flav for flav in flavors\n if flav.name == flavor][0]\n except IndexError:\n # No such name; try matching RAM\n try:\n flavor_obj = [flav for flav in flavors\n if flav.ram == flavor][0]\n except IndexError:\n raise exc.FlavorNotFound(\"Could not determine flavor from \"\n \"'%s'.\" % flavor)\n # OK, we have a Flavor object. Get the href\n href = [link[\"href\"] for link in flavor_obj.links\n if link[\"rel\"] == \"self\"][0]\n return href", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def show_flavors():\n return get_flavors()", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def find_flavor_id(flavor_name: str):\n for flavor in get_flavors()[\"flavors\"]:\n if flavor_name == flavor[\"name\"]:\n return flavor[\"id\"]\n\n raise AttributeError(f\"No flavor '{flavor_name}' found\")", "def flavor(self, flavor):\n self._flavor = flavor", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def flavors(self, **query):\n return self._list(_flavor.Flavor, **query)", "def get_server_flavor(self, postfix):\n (response, content) = self.successResultOf(json_request(\n self, self.root, b\"GET\", self.uri + postfix))\n self.assertEqual(200, response.code)\n return content", "def _get_flavor_name(self, flavor_id):\n for name, f_id in FLAVOR_ID.items():\n if f_id == flavor_id:\n return name", "def get(self, request, flavor_profile_id):\n conn = get_sdk_connection(request)\n flavor_profile = conn.load_balancer.find_flavor_profile(\n flavor_profile_id)\n return _get_sdk_object_dict(flavor_profile)", "def flavors(self, **kwargs):\n raise NotImplementedError", "def get_azure_flavor(flavor):\n if flavor not in CONF.ec2_flavor_to_size_map:\n raise Exception(\"Could not find mapping for the EC2 image size \"\n \"'%s', please edit 'ec2_flavor_to_size_map' in the \"\n \"configuration.\" % (flavor))\n\n return CONF.ec2_flavor_to_size_map[flavor]", "def _create_flavor(self, context, flavor):\n 
flavor_dict = flavor.__dict__\n name = self.prefix + flavor.name\n flavorid = self.prefix + flavor.id\n memory = flavor.ram\n vcpus = flavor.vcpus\n root_gb = flavor.disk\n ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0)\n u_swap = flavor_dict.get('swap', 0)\n rxtx_factor = flavor_dict.get('rxtx_factor', 1.0)\n is_public = flavor_dict.get('os-flavor-access:is_public', True)\n if u_swap == \"\":\n swap = 0\n else:\n swap = int(u_swap)\n\n try:\n return flavors.create(name, memory, vcpus, root_gb,\n ephemeral_gb=ephemeral_gb,\n flavorid=flavorid, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n except exception.InstanceExists as err:\n raise err", "def flavors(request): # pylint: disable=unused-argument\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)", "def values_from_flavor(flavor):\n try:\n flavor = Network.FLAVORS[flavor]\n except KeyError:\n raise faults.BadRequest(\"Unknown network flavor\")\n\n mode = flavor.get(\"mode\")\n\n link = flavor.get(\"link\")\n if link == \"pool\":\n link = allocate_resource(\"bridge\")\n\n mac_prefix = flavor.get(\"mac_prefix\")\n if mac_prefix == \"pool\":\n mac_prefix = allocate_resource(\"mac_prefix\")\n\n tags = flavor.get(\"tags\")\n\n return mode, link, mac_prefix, tags", "def test_aws_service_api_flavors_get(self):\n pass", "def test_list_flavors_with_detail(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertIn(flavor, flavors)", "def delete_flavor(self, flavor='del_flvr'):\n try:\n self.novaclient.flavors.delete(\n self.get_flavor_id(flavor))\n except Exception as e:\n print \"Flavor %s failed to delete: %s\" % (flavor, repr(e))", "def create_flavor(self):\n logger.debug(\"Creating VM Flavor\")\n rc, flavor_id = self.cal.create_flavor(self.account, self.flavor)\n assert rc == RwTypes.RwStatus.SUCCESS\n\n return flavor_id", "def test_get_non_existent_flavor(self):\n try:\n self.flavors_client.get_flavor_details(999)\n self.fail('No exception thrown for a non-existent flavor id')\n except ItemNotFound:\n pass", "def display_flavor(self):\n print(\"\\nWe currently count with the next flavors:\")\n for flavor in self.flavors:\n print(f\"{flavor}\")", "def get_flavor_provider(flavor):\n disk_template = flavor.disk_template\n provider = None\n if disk_template.startswith(\"ext\"):\n disk_template, provider = disk_template.split(\"_\", 1)\n return disk_template, provider", "def post(self, request):\n kwargs = {\n 'flavor': request.DATA.get('flavor')\n }\n return create_flavor(request, **kwargs)", "def list_flavors(cls):\n return cls.dbdriver.list_flavors()", "def get_from_server_domain(cls, server_domain: str) -> \"Flavor\":\n subdomain, base_domain, tld = server_domain.rsplit(\".\", maxsplit=2)\n\n if subdomain == \"api.pro\":\n if base_domain == \"openfoodfacts\":\n return cls.off_pro\n raise ValueError(\"pro platform is only available for Open Food Facts\")\n\n for server_type in cls:\n if base_domain == server_type.get_base_domain():\n return server_type\n\n raise ValueError(f\"no Flavor matched for server_domain {server_domain}\")", "def get(self, request):\n conn = get_sdk_connection(request)\n flavor_list = _sdk_object_to_list(\n conn.load_balancer.flavors()\n )\n\n return {'items': flavor_list}", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in 
self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def HashSelect(versions, flavor):\n return versions[HashKey(flavor)]", "def VersionSelect(versions, flavor):\n\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return ','.join(ids)\n if toolchainbinaries.IsPnaclFlavor(flavor):\n return versions['PNACL_VERSION']\n if toolchainbinaries.IsX86Flavor(flavor):\n if toolchainbinaries.IsNotNaClNewlibFlavor(flavor):\n return versions['GLIBC_VERSION']\n else:\n return versions['NEWLIB_VERSION']\n if toolchainbinaries.IsArmTrustedFlavor(flavor):\n return versions['ARM_TRUSTED_VERSION']\n raise Exception('Unknown flavor \"%s\"' % flavor)", "def create_flavor(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n flavor = conn.load_balancer.create_flavor(\n name=data['flavor']['name'],\n flavor_profile_id=data['flavor']['flavor_profile_id'],\n description=data['flavor'].get('description'),\n enabled=data['flavor'].get('enabled'),\n )\n\n return _get_sdk_object_dict(flavor)", "def resize(self, flavor):\n # ram size of the instance\n if type(flavor) == Flavor:\n flavor = flavor.bookmark_link\n elif type(flavor) == dict:\n flavor = self.parent.flavors().find(**flavor)\n elif type(flavor) in (int, str, unicode):\n flavor = str(flavor)\n else:\n # TODO : proper error\n raise Exception()\n\n self.client.post(self.path+'/action', { 'resize': {'flavorRef': flavor} })\n return True", "def list_flavors(self, limit=None, marker=None):\n return self._flavor_manager.list(limit=limit, marker=marker)", "def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")", "def test_list_flavors(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n flavor_ids = [x.id for x in flavors]\n self.assertIn(flavor.id, flavor_ids,\n \"The expected flavor: %s was not found in \"\n \"the flavor list\" % flavor.id)", "def resize(self, instance, flavor):\n return instance.resize(flavor)", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def _generate_flavorid(self):\n nonexistent_flavor = 2700\n flavor_ids = [value[\"id\"] for key, value in\n instance_types.get_all_types().iteritems()]\n while nonexistent_flavor in flavor_ids:\n nonexistent_flavor += 1\n else:\n return nonexistent_flavor", "def delete_flavor(cls, flavor_uuid):\n cls.dbdriver.delete_flavor(flavor_uuid)", "def get_variation(self, name, version):\n if not self.variations:\n return\n\n # Attempt to find the variation\n for variation in self.variations:\n if variation.name == name and variation.version == version:\n return variation", "def test_list_flavors_detailed_min_ram_larger_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def specific_surface(self):\n srf = BRepAdaptor_Surface(self.topods_shape())\n surf_type = self.surface_type()\n if surf_type == \"plane\":\n return srf.Plane()\n if surf_type == \"cylinder\":\n return srf.Cylinder()\n if surf_type == \"cone\":\n return srf.Cone()\n if surf_type == \"sphere\":\n return srf.Sphere()\n if surf_type == \"torus\":\n return srf.Torus()\n if surf_type == \"bezier\":\n return srf.Bezier()\n if surf_type == \"bspline\":\n return 
srf.BSpline()\n raise ValueError(\"Unknown surface type: \", surf_type)", "def create_flavor(cls, values):\n return cls.dbdriver.create_flavor(values)", "def get(self, request):\n conn = get_sdk_connection(request)\n flavor_profile_list = _sdk_object_to_list(\n conn.load_balancer.flavor_profiles()\n )\n\n return {'items': flavor_profile_list}", "def test_list_flavors_detailed_using_marker(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors_with_detail(\n marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def get_food(self, game_state):\n if self.red:\n return game_state.get_blue_food()\n else:\n return game_state.get_red_food()", "def _get_fruit(self, _id):\n body = {\n '_id': _id,\n }\n headers = {\n 'content-type': 'application/json',\n }\n response = self.fetch(\n '/fruit/get',\n method='POST',\n headers=tornado.httputil.HTTPHeaders(headers),\n body=json.dumps(body))\n if response.code == httplib.NOT_FOUND:\n return None\n self.assertEqual(response.code, httplib.OK)\n return json.loads(response.body)", "def get_product_by_slug(self, slug):\n return self.get_products({ 'review_url': slug })[0]", "def test_list_flavors_detailed_min_disk_larger_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_disk='99999')\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def test_list_flavors_detailed_limit_results(self):\n response = self.flavors_client.list_flavors_with_detail(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def get_food_with_id(cls, food_id):\n obj = cls.objects(food_id=food_id).first()\n return obj", "def _get_theme_selected():\n\n try:\n theme_selected = Configuration.objects.filter(group='theme', key='selected')[0]\n theme_name = theme_selected.value\n except:\n theme_name = 'default'\n\n return theme_name", "def check_flan_flavor(flavor):\n if not flavor:\n flan_flavor = \"plain old boring\"\n else:\n flan_flavor = flavor\n return (flan_flavor + \" flavored flan\")", "def test_list_flavors_limit_results(self):\n response = self.flavors_client.list_flavors(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def _get_new_size(self, instance, flavor):\n sizes = self.compute.virtual_machines.list_available_sizes(\n CONF.azure.resource_group, instance.uuid)\n try:\n vm_size = self._get_size_from_flavor(flavor)\n except exception.FlavorAzureMappingNotFound:\n return None\n else:\n for i in sizes:\n if vm_size == i.name:\n LOG.debug('Resize Instance, get new size %s',\n vm_size)\n return i.name\n LOG.error(_LE('Resize Instance, size %s invalid in Azure'),\n vm_size)\n return None", "def test_create_flavor_existing(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n flavor_creator_2 = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor2 = flavor_creator_2.create()\n\n self.assertEqual(flavor.id, flavor2.id)", "def setUpClass(cls):\n super(FlavorsTest, cls).setUpClass()\n flavors = 
cls.flavors_client.list_flavors_with_detail().entity\n\n # Find the flavor that provides the most RAM\n flavors.sort(key=lambda k: k.ram)\n cls.max_ram = flavors[-1].ram\n\n # Find the flavor that provides the most disk\n flavors.sort(key=lambda k: k.disk)\n cls.max_disk = flavors[-1].disk", "def get(self, id):\n return Scenario.query.filter(Scenario.id == id).one()", "def set_flavor(self, oid, flavor):\n data = {\n \"resize\": {\n \"flavorRef\": flavor\n }\n }\n path = '/servers/%s/action' % oid\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Pause openstack server: %s' % truncate(res))\n return res[0]", "def update_flavor(cls, flavor_uuid, values):\n return cls.dbdriver.update_flavor(flavor_uuid, values)", "def test_list_flavors_min_disk_greater_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors(min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def variant(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"variant\")", "def test_create_flavor(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))", "def test_will_not_get_instance_by_unknown_flavor_id(self):\n self.assertRaises(exception.FlavorNotFound,\n instance_types.get_instance_type_by_flavor_id,\n 'unknown_flavor')", "def test_list_flavors_using_marker(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors(marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def get_fortune():\n data_file = get_data_file()\n fortunes=get_fortunes(data_file)\n return random.choice(fortunes)", "def test_read_deleted_false_converting_flavorid(self):\n instance_types.create(\"instance_type1\", 256, 1, 120, 100, \"test1\")\n instance_types.destroy(\"instance_type1\")\n instance_types.create(\"instance_type1_redo\", 256, 1, 120, 100, \"test1\")\n\n instance_type = instance_types.get_instance_type_by_flavor_id(\n \"test1\", read_deleted=\"no\")\n self.assertEqual(\"instance_type1_redo\", instance_type[\"name\"])", "def _sync_flavor(self, ctx, flavor, extra_specs):\n flavor_in_local_db = None\n flavor_name = self.prefix + flavor.name\n try:\n flavor_in_local_db = db.flavor_get_by_name(ctx, flavor_name)\n except exception.FlavorNotFoundByName:\n self._insert_pvc_flavor_extraspecs(ctx, flavor, extra_specs)\n\n # Update the extra_speces of the flavor\n if flavor_in_local_db is not None:\n flavor_id = flavor_in_local_db.get('flavorid', '')\n if (flavor_id is not ''\n and extra_specs):\n self._update_flavor_extraspecs(ctx,\n flavor_id,\n extra_specs)", "def test_create_clean_flavor(self):\n # Create Flavor\n flavor_settings = FlavorConfig(\n name=self.flavor_name, ram=1, disk=1, vcpus=1)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n # Clean Flavor\n self.flavor_creator.clean()\n\n 
self.assertIsNone(self.flavor_creator.get_flavor())\n self.assertIsNone(\n nova_utils.get_flavor_by_name(self.nova, flavor_settings.name))", "def test_list_flavors_min_disk_greater_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors(min_disk=self.max_disk+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def fishs_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=FISH_TYPE_URI,\n rdf_type_name=FISH_TYPE_NAME, \n kls=Fish)", "def get(self, name):\n try:\n return(self._d_features[name])\n except:\n log.error(\"Can't get feature '%s'\" % name)\n return", "def getFood(self):\n return self.data.food", "def brand(self) -> object:\n return self._brand", "def delete(self, flavor_id):\n\n args = {\n 'flavor_id': flavor_id\n }\n self.session.execute(CQL_DELETE, args)", "def test_can_read_deleted_types_using_flavor_id(self):\n inst_type_name = \"test\"\n inst_type_flavor_id = \"test1\"\n\n inst_type = instance_types.create(inst_type_name, 256, 1, 120, 100,\n inst_type_flavor_id)\n self.assertEqual(inst_type_name, inst_type[\"name\"])\n\n # NOTE(jk0): The deleted flavor will show up here because the context\n # in get_instance_type_by_flavor_id() is set to use read_deleted by\n # default.\n instance_types.destroy(inst_type[\"name\"])\n deleted_inst_type = instance_types.get_instance_type_by_flavor_id(\n inst_type_flavor_id)\n self.assertEqual(inst_type_name, deleted_inst_type[\"name\"])", "def get_value(value, key, client):\n if client is None:\n return value.__dict__[key]\n elif \"glance\" in str(client):\n return value[key]\n elif \"cinder\" in str(client):\n return value.__dict__[key]\n elif \"nova\" in str(client):\n return value.__dict__[key]" ]
[ "0.82900196", "0.8277593", "0.8069578", "0.789696", "0.7692458", "0.7663924", "0.75685847", "0.7417431", "0.7335429", "0.72927356", "0.71877533", "0.71117985", "0.69786406", "0.6909632", "0.6736771", "0.67233574", "0.67233574", "0.66672426", "0.66635484", "0.6631124", "0.66134924", "0.6592379", "0.6573417", "0.6547451", "0.63565516", "0.63455194", "0.63276637", "0.63066673", "0.6249983", "0.6176625", "0.61713696", "0.6170494", "0.61584324", "0.61080796", "0.60582364", "0.60122836", "0.60054827", "0.59856325", "0.5968913", "0.5931084", "0.59006727", "0.5875327", "0.5854975", "0.58316576", "0.5804619", "0.5797968", "0.5739031", "0.5693318", "0.56177187", "0.56037784", "0.5576863", "0.5571394", "0.5471623", "0.54604965", "0.54591596", "0.5444412", "0.54384273", "0.5436817", "0.54147524", "0.5399936", "0.53732", "0.53643537", "0.5337509", "0.53303957", "0.5310848", "0.53038234", "0.5249158", "0.52315235", "0.52081925", "0.52052224", "0.5196651", "0.5192142", "0.5164173", "0.5133707", "0.512692", "0.51265186", "0.5124875", "0.5107356", "0.50816256", "0.50769514", "0.50758696", "0.50718683", "0.5062101", "0.5059436", "0.5052181", "0.5045556", "0.50451726", "0.5044791", "0.50041354", "0.5001313", "0.49987367", "0.49953812", "0.4991254", "0.49682146", "0.4964831", "0.49409717", "0.4924059", "0.4920831", "0.49172106" ]
0.86241716
1
Return a generator of flavors
def flavors(self, **query): return self._list(_flavor.Flavor, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flavors(self, **kwargs):\n raise NotImplementedError", "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if \"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result", "def show_flavors():\n return get_flavors()", "def list_flavors(cls):\n return cls.dbdriver.list_flavors()", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def list_flavors(self, limit=None, marker=None):\n return self._flavor_manager.list(limit=limit, marker=marker)", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_changed_flavors(changed_files, flavors):\n changed_flavors = []\n for f in changed_files:\n pattern = r\"^(mlflow|tests)/(.+?)(_autolog(ging)?)?(\\.py|/)\"\n # ~~~~~\n # # This group captures a flavor name\n match = re.search(pattern, f)\n\n if (match is not None) and (match.group(2) in flavors):\n changed_flavors.append(match.group(2))\n\n return changed_flavors", "def flavors(request): # pylint: disable=unused-argument\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)", "def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")", "def FlavorHashes(versions, flavor):\n if isinstance(flavor, tuple):\n return [HashSelect(versions, i) for i in flavor[1:]]\n else:\n return [HashSelect(versions, flavor)]", "def _generate_benchmark_variants(benchmark_spec):\n variants = []\n # Cold start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (cold start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args',\n []) + _COLD_START_SHELL_ARGS})\n # Warm start.\n variants.append({\n 'name': benchmark_spec['name'] + ' (warm start)',\n 'app': benchmark_spec['app'],\n 'duration': benchmark_spec['duration'],\n 'measurements': benchmark_spec['measurements'],\n 'shell-args': benchmark_spec.get('shell-args', [])})\n return variants", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def _variants_gen(self, test):\n return self._get_variants_gen(test).gen(test)", "def flavor(self, name=None):\n raise NotImplementedError", "def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor", "def gen_flavor_parameters(self, servers):\n\n # get all the flavors\n flavors = self.novaclient.flavors.list()\n server_flavors = set([x.flavor[\"id\"] for x in servers])\n self.set_of_flavors = set(filter(lambda flavor: flavor.id in server_flavors, flavors))\n flavor_idx = \"\"\n for idx, flavor in enumerate(self.set_of_flavors):\n 
data = {\"type\": \"string\",\n \"description\": \"Flavor to use for servers\",\n \"default\": flavor.name}\n self.compute_data[\"parameters\"][\"flavor%s\" % flavor_idx] = data\n if len(self.set_of_flavors) >= 1:\n flavor_idx = str(1+idx)", "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def get_initial_spectra(self, t, E, flavors=Flavor):\n pass", "def test_aws_service_api_flavors_get(self):\n pass", "def display_flavor(self):\n print(\"\\nWe currently count with the next flavors:\")\n for flavor in self.flavors:\n print(f\"{flavor}\")", "def getTransferDataFlavors(self) -> List[java.awt.datatransfer.DataFlavor]:\n ...", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def create_flavor(cls, values):\n return cls.dbdriver.create_flavor(values)", "async def flavors():\n berry = []\n apple = []\n honey = []\n mango = []\n earthy = []\n mint = []\n blueberry = []\n ammonia = []\n coffee = []\n vanilla = []\n rose = []\n pine = []\n citrus = []\n sweet = []\n pineapple = []\n skunk = []\n orange = []\n strawberry = []\n lemon = []\n grape = []\n lime = []\n pepper = []\n lavender = []\n\n for i in list(range(len(strain))):\n if 'Coffee' in strain['flavor'][i]:\n coffee.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pepper' in strain['flavor'][i]:\n pepper.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lavender' in strain['flavor'][i]:\n lavender.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Mango' in strain['flavor'][i]:\n mango.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Earthy' in strain['flavor'][i]:\n earthy.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Citrus' in strain['flavor'][i]:\n citrus.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lemon' in strain['flavor'][i]:\n lemon.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Strawberry' in strain['flavor'][i]:\n strawberry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pine' in strain['flavor'][i]:\n pine.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Vanilla' in strain['flavor'][i]:\n vanilla.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Honey' in strain['flavor'][i]:\n honey.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Pineapple' in strain['flavor'][i]:\n pineapple.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Blueberry' in strain['flavor'][i]:\n blueberry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Orange' in strain['flavor'][i]:\n orange.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Skunk' in strain['flavor'][i]:\n skunk.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Grape' in strain['flavor'][i]:\n grape.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Berry' in strain['flavor'][i]:\n 
berry.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Lime' in strain['flavor'][i]:\n lime.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Rose' in strain['flavor'][i]:\n rose.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Sweet' in strain['flavor'][i]:\n sweet.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Apple' in strain['flavor'][i]:\n apple.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Mint' in strain['flavor'][i]:\n mint.append(strain['name'][i])\n for i in list(range(len(strain))):\n if 'Ammonia' in strain['flavor'][i]:\n ammonia.append(strain['name'][i])\n\n json_berry = json.dumps(berry)\n json_apple = json.dumps(apple)\n json_honey = json.dumps(honey)\n json_mango = json.dumps(mango)\n json_earthy = json.dumps(earthy)\n json_mint = json.dumps(mint)\n json_bluberry = json.dumps(blueberry)\n json_ammonia = json.dumps(ammonia)\n json_coffee = json.dumps(coffee)\n json_vanilla = json.dumps(vanilla)\n json_rose = json.dumps(rose)\n json_pine = json.dumps(pine)\n json_citrus = json.dumps(citrus)\n json_sweet = json.dumps(sweet)\n json_pineapple = json.dumps(pineapple)\n json_skunk = json.dumps(skunk)\n json_orange = json.dumps(orange)\n json_strawberry = json.dumps(strawberry)\n json_lemon = json.dumps(lemon)\n json_grape = json.dumps(grape)\n json_lime = json.dumps(lime)\n json_pepper = json.dumps(pepper)\n json_lavender = json.dumps(lavender)\n\n return 'Berry', json_berry, 'Apple', json_apple, 'Honey', json_honey,\\\n 'Mango', json_mango, 'Earthy', json_earthy, 'Mint', json_mint,\\\n 'Blueberry', json_bluberry, 'Ammonia', json_ammonia, 'Coffee', json_coffee,\\\n 'Vanilla', json_vanilla, 'Rose', json_rose, 'Pine', json_pine,\\\n 'Citrus', json_citrus, 'Sweet', json_sweet, 'Pineapple', json_pineapple,\\\n 'Skunk', json_skunk, 'Orange', json_orange, 'Strawberry', json_strawberry,\\\n 'Lemon', json_lemon, 'Grape', json_grape, 'Lime', json_lime,\\\n 'Pepper', json_pepper, 'Lavender', json_lavender", "def get_flavors_black_list(self):\n return self._sanitize(CONF.powervc.flavor_black_list)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)", "def get_flavor(self, flavor):\n return self._get(_flavor.Flavor, flavor)", "def test_aws_service_api_flavor_get(self):\n pass", "def resolve_variants(self):\n\n def evaluate_clause(clause):\n if 'or' in clause or 'and' in clause:\n raise Exception(\"Reserved keyword 'and || or' used.\")\n v = dict_contains(self.traits, clause)\n return v\n \n def process_effects(variant_name, variant_details):\n \"\"\"\n This nested function handles the effects of a \n given clause.\n \n Right now, the only relevant effect is 'replace',\n which causes a variant to replace an existing variant\n \n \"\"\"\n if 'replaces' in variant_details:\n enabled_variants.remove(variant_details['replaces'])\n enabled_variants.add(variant_name)\n\n if 'cflags' in variant_details:\n if type(variant_details['cflags']) == dict:\n self.config['cflags'] += variant_details['cflags']['gcc']\n else:\n self.config['cflags'] += \" \" + variant_details['cflags']\n # Beginning of main function\n if 'filtered_variants' in self.__dict__:\n return self.filtered_variants\n \n enabled_variants = set(['src'])\n variants = self.get_variants()\n \n for variant in variants:\n assert len(variant) == 1\n for name, details in variant.items():\n if 'when' in details:\n enabled = evaluate_clause(details['when'])\n if enabled:\n process_effects(name, details)\n 
self.variant_dirs = {}\n for variant_name in enabled_variants:\n self.variant_dirs[variant_name] = join(self.path, variant_name)\n\n self.filtered_variants = [a for a in self.get_variants() if list(a.keys())[0] in enabled_variants]\n return self.filtered_variants", "def values_from_flavor(flavor):\n try:\n flavor = Network.FLAVORS[flavor]\n except KeyError:\n raise faults.BadRequest(\"Unknown network flavor\")\n\n mode = flavor.get(\"mode\")\n\n link = flavor.get(\"link\")\n if link == \"pool\":\n link = allocate_resource(\"bridge\")\n\n mac_prefix = flavor.get(\"mac_prefix\")\n if mac_prefix == \"pool\":\n mac_prefix = allocate_resource(\"mac_prefix\")\n\n tags = flavor.get(\"tags\")\n\n return mode, link, mac_prefix, tags", "def FlavorUrls(options, versions, flavor):\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return [toolchainbinaries.EncodeToolchainUrl(\n options.base_once_url, i, 'new') for i in ids]\n else:\n return [toolchainbinaries.EncodeToolchainUrl(\n options.base_url, VersionSelect(versions, flavor), flavor)]", "def generate_fizzlers(vmf: VMF) -> None:\n has_fizz_border = 'fizz_border' in texturing.SPECIAL\n conf_tile_blacken = options.get_itemconf(('VALVE_FIZZLER', 'BlackenTiles'), False)\n\n for fizz in FIZZLERS.values():\n if fizz.base_inst not in vmf.entities:\n continue # The fizzler was removed from the map.\n\n fizz_name = fizz.base_inst['targetname']\n fizz_type = fizz.fizz_type\n\n # Static versions are only used for fizzlers which start on.\n # Permanently-off fizzlers are kinda useless, so we don't need\n # to bother optimising for it.\n # TODO: This needs to use connections to correctly check this.\n is_static = bool(\n fizz.base_inst.fixup.int('$connectioncount', 0) == 0\n and fizz.base_inst.fixup.bool('$start_enabled', True)\n )\n tile_blacken = conf_tile_blacken and fizz.fizz_type.blocks_portals\n\n pack_list = (\n fizz.fizz_type.pack_lists_static\n if is_static else\n fizz.fizz_type.pack_lists\n )\n for pack in pack_list:\n packing.pack_list(vmf, pack)\n\n if fizz_type.inst[FizzInst.BASE, is_static]:\n rng = rand.seed(b'fizz_base', fizz_name)\n fizz.base_inst['file'] = base_file = rng.choice(fizz_type.inst[FizzInst.BASE, is_static])\n conditions.ALL_INST.add(base_file.casefold())\n\n if not fizz.emitters:\n LOGGER.warning('No emitters for fizzler \"{}\"!', fizz_name)\n continue\n\n # Brush index -> entity for ones that need to merge.\n # template_brush is used for the templated one.\n single_brushes: dict[FizzlerBrush, Entity] = {}\n\n if fizz_type.temp_max or fizz_type.temp_min:\n template_brush_ent = vmf.create_ent(\n classname='func_brush',\n origin=fizz.base_inst['origin'],\n )\n conditions.set_ent_keys(\n template_brush_ent,\n fizz.base_inst,\n fizz_type.temp_brush_keys,\n )\n else:\n template_brush_ent = None\n\n up_dir = fizz.up_axis\n forward = (fizz.emitters[0][1] - fizz.emitters[0][0]).norm()\n\n min_orient = Matrix.from_basis(z=forward, y=up_dir)\n max_orient = Matrix.from_basis(z=-forward, y=up_dir)\n\n model_min = (\n fizz_type.inst[FizzInst.PAIR_MIN, is_static]\n or fizz_type.inst[FizzInst.ALL, is_static]\n )\n model_max = (\n fizz_type.inst[FizzInst.PAIR_MAX, is_static]\n or fizz_type.inst[FizzInst.ALL, is_static]\n )\n\n if not model_min or not model_max:\n raise user_errors.UserError(\n user_errors.TOK_FIZZLER_NO_MODEL_SIDE.format(id=fizz_type.id),\n voxels=[pos for minmax in fizz.emitters for pos in minmax],\n )\n\n # Define a function to do the model names.\n model_index = 0\n if fizz_type.model_naming is 
ModelName.SAME:\n def get_model_name(ind: int) -> str:\n \"\"\"Give every emitter the base's name.\"\"\"\n return fizz_name\n elif fizz_type.model_naming is ModelName.LOCAL:\n def get_model_name(ind: int) -> str:\n \"\"\"Give every emitter a name local to the base.\"\"\"\n return f'{fizz_name}-{fizz_type.model_name}'\n elif fizz_type.model_naming is ModelName.PAIRED:\n def get_model_name(ind: int) -> str:\n \"\"\"Give each pair of emitters the same unique name.\"\"\"\n return f'{fizz_name}-{fizz_type.model_name}{ind:02}'\n elif fizz_type.model_naming is ModelName.UNIQUE:\n def get_model_name(ind: int) -> str:\n \"\"\"Give every model a unique name.\"\"\"\n nonlocal model_index\n model_index += 1\n return f'{fizz_name}-{fizz_type.model_name}{model_index:02}'\n else:\n raise AssertionError(f'No model name {fizz_type.model_name!r}')\n\n # Generate env_beam pairs.\n for beam in fizz_type.beams:\n beam_template = Entity(vmf)\n conditions.set_ent_keys(beam_template, fizz.base_inst, beam.keys)\n beam_template['classname'] = 'env_beam'\n del beam_template['LightningEnd'] # Don't allow users to set end pos.\n name = beam_template['targetname'] + '_'\n\n counter = 1\n for seg_min, seg_max in fizz.emitters:\n for offset in beam.offset:\n min_off = offset.copy()\n max_off = offset.copy()\n min_off.localise(seg_min, min_orient)\n max_off.localise(seg_max, max_orient)\n beam_ent = beam_template.copy()\n vmf.add_ent(beam_ent)\n\n # Allow randomising speed and direction.\n if 0 < beam.speed_min < beam.speed_max:\n rng = rand.seed(b'fizz_beam', min_off, max_off)\n beam_ent['TextureScroll'] = rng.randint(beam.speed_min, beam.speed_max)\n if rng.choice((False, True)):\n # Flip to reverse direction.\n min_off, max_off = max_off, min_off\n\n beam_ent['origin'] = min_off\n beam_ent['LightningStart'] = beam_ent['targetname'] = (\n name + str(counter)\n )\n counter += 1\n beam_ent['targetpoint'] = max_off\n\n # Prepare to copy over instance traits for the emitters.\n fizz_traits = instance_traits.get(fizz.base_inst).copy()\n # Special case, mark emitters that have a custom position for Clean\n # models.\n if fizz.has_cust_position:\n fizz_traits.add('cust_shape')\n\n mat_mod_tex: dict[FizzlerBrush, set[str]] = {}\n for brush_type in fizz_type.brushes:\n if brush_type.mat_mod_var is not None:\n mat_mod_tex[brush_type] = set()\n\n # Record the data for trigger hurts so flinch triggers can match them.\n trigger_hurt_name = ''\n trigger_hurt_start_disabled = '0'\n\n for seg_ind, (seg_min, seg_max) in enumerate(fizz.emitters, start=1):\n length = (seg_max - seg_min).mag()\n rng = rand.seed(b'fizz_seg', seg_min, seg_max)\n if length == 128 and fizz_type.inst[FizzInst.PAIR_SINGLE, is_static]:\n # Assign to 'min' var so we can share some code.\n min_inst = conditions.add_inst(\n vmf,\n targetname=get_model_name(seg_ind),\n file=rng.choice(fizz_type.inst[FizzInst.PAIR_SINGLE, is_static]),\n origin=(seg_min + seg_max)/2,\n angles=min_orient,\n )\n else:\n # Both side models.\n min_inst = conditions.add_inst(\n vmf,\n targetname=get_model_name(seg_ind),\n file=rng.choice(model_min),\n origin=seg_min,\n angles=min_orient,\n )\n max_inst = conditions.add_inst(\n vmf,\n targetname=get_model_name(seg_ind),\n file=rng.choice(model_max),\n origin=seg_max,\n angles=max_orient,\n )\n max_inst.fixup.update(fizz.base_inst.fixup)\n instance_traits.get(max_inst).update(fizz_traits)\n min_inst.fixup.update(fizz.base_inst.fixup)\n instance_traits.get(min_inst).update(fizz_traits)\n\n if has_fizz_border or tile_blacken:\n # noinspection 
PyProtectedMember\n fizz._edit_border_tiles(vmf, seg_min, seg_max, has_fizz_border, tile_blacken)\n\n if fizz.embedded:\n fizz.set_tiles_behind_models(seg_min, forward, fizz_type.nodraw_behind)\n fizz.set_tiles_behind_models(seg_max, -forward, fizz_type.nodraw_behind)\n\n if fizz_type.inst[FizzInst.GRID, is_static]:\n # Generate one instance for each position.\n\n # Go 64 from each side, and always have at least 1 section\n # A 128 gap will have length = 0\n rng = rand.seed(b'fizz_mid', seg_min, seg_max)\n for dist in range(64, round(length) - 63, 128):\n mid_pos = seg_min + forward * dist\n mid_inst = conditions.add_inst(\n vmf,\n targetname=fizz_name,\n angles=min_orient.to_angle(),\n file=rng.choice(fizz_type.inst[FizzInst.GRID, is_static]),\n origin=mid_pos,\n )\n mid_inst.fixup.update(fizz.base_inst.fixup)\n instance_traits.get(mid_inst).update(fizz_traits)\n\n if template_brush_ent is not None:\n if length == 128 and fizz_type.temp_single:\n temp = template_brush.import_template(\n vmf,\n fizz_type.temp_single,\n (seg_min + seg_max) / 2,\n min_orient,\n force_type=template_brush.TEMP_TYPES.world,\n add_to_map=False,\n )\n template_brush_ent.solids.extend(temp.world)\n else:\n if fizz_type.temp_min:\n temp = template_brush.import_template(\n vmf,\n fizz_type.temp_min,\n seg_min,\n min_orient,\n force_type=template_brush.TEMP_TYPES.world,\n add_to_map=False,\n )\n template_brush_ent.solids.extend(temp.world)\n if fizz_type.temp_max:\n temp = template_brush.import_template(\n vmf,\n fizz_type.temp_max,\n seg_max,\n max_orient,\n force_type=template_brush.TEMP_TYPES.world,\n add_to_map=False,\n )\n template_brush_ent.solids.extend(temp.world)\n\n # Generate the brushes.\n for brush_type in fizz_type.brushes:\n brush_ent = None\n # If singular, we reuse the same brush ent for all the segments.\n if brush_type.singular:\n brush_ent = single_brushes.get(brush_type, None)\n\n # Non-singular or not generated yet - make the entity.\n if brush_ent is None:\n brush_ent = vmf.create_ent(classname='func_brush')\n\n for key_name, key_value in brush_type.keys.items():\n brush_ent[key_name] = fizz.base_inst.fixup.substitute(key_value, allow_invert=True)\n\n for key_name, key_value in brush_type.local_keys.items():\n brush_ent[key_name] = conditions.local_name(\n fizz.base_inst,\n fizz.base_inst.fixup.substitute(key_value, allow_invert=True),\n )\n\n brush_ent['targetname'] = conditions.local_name(\n fizz.base_inst, brush_type.name,\n )\n # Set this to the center, to make sure it's not going to leak.\n brush_ent['origin'] = (seg_min + seg_max)/2\n\n # For fizzlers flat on the floor/ceiling, scanlines look\n # useless. Turn them off.\n if 'usescanline' in brush_ent and fizz.normal().z:\n brush_ent['UseScanline'] = 0\n\n if brush_ent['classname'] == 'trigger_hurt':\n trigger_hurt_name = brush_ent['targetname']\n trigger_hurt_start_disabled = brush_ent['startdisabled']\n\n if brush_type.set_axis_var:\n brush_ent['vscript_init_code'] = (\n 'axis <- `{}`;'.format(\n fizz.normal().axis(),\n )\n )\n\n for out in brush_type.outputs:\n new_out = out.copy()\n new_out.target = conditions.local_name(\n fizz.base_inst,\n new_out.target,\n )\n brush_ent.add_out(new_out)\n\n if brush_type.singular:\n # Record for the next iteration.\n single_brushes[brush_type] = brush_ent\n\n # If we have a material_modify_control to generate,\n # we need to parent it to ourselves to restrict it to us\n # only. 
We also need one for each material, so provide a\n # function to the generator which adds to a set.\n if brush_type.mat_mod_var is not None:\n used_tex_func = mat_mod_tex[brush_type].add\n else:\n def used_tex_func(val):\n \"\"\"If not, ignore those calls.\"\"\"\n return None\n\n # Generate the brushes and texture them.\n brush_ent.solids.extend(\n brush_type.generate(\n vmf,\n fizz,\n seg_min,\n seg_max,\n used_tex_func,\n )\n )\n\n # We have a trigger_hurt in this fizzler, potentially generate\n # the flinching logic.\n if trigger_hurt_name:\n fizz.gen_flinch_trigs(\n vmf,\n trigger_hurt_name,\n trigger_hurt_start_disabled,\n )\n\n # If we have the config, but no templates used anywhere in this brush,\n # remove the empty brush entity.\n if template_brush_ent is not None and not template_brush_ent.solids:\n template_brush_ent.remove()\n\n # Generate the material modify controls.\n # One is needed for each texture used on the brush, unfortunately.\n for brush_type, used_tex in mat_mod_tex.items():\n brush_name = conditions.local_name(fizz.base_inst, brush_type.name)\n mat_mod_name = conditions.local_name(fizz.base_inst, brush_type.mat_mod_name)\n for off, tex in zip(itertools.cycle(MATMOD_OFFSETS), sorted(used_tex)):\n pos = off @ min_orient\n pos += Vec.from_str(fizz.base_inst['origin'])\n vmf.create_ent(\n classname='material_modify_control',\n origin=pos,\n targetname=mat_mod_name,\n materialName='materials/' + tex + '.vmt',\n materialVar=brush_type.mat_mod_var,\n parentname=brush_name,\n )", "def flavor(self):\n return self._flavor", "def _existing_flavor(self):\n return instance_types.get_all_types().keys()[0]", "def gen_extractors():\n return [klass() for klass in gen_extractor_classes()]", "def _filter_m1_flavors(self, results):\n new_results = []\n for flavor in results:\n if flavor['name'].startswith(\"m1.\"):\n new_results.append(flavor)\n return new_results", "def test_list_flavors(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n flavor_ids = [x.id for x in flavors]\n self.assertIn(flavor.id, flavor_ids,\n \"The expected flavor: %s was not found in \"\n \"the flavor list\" % flavor.id)", "def test_list_flavors_detailed_min_ram_larger_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def _generate_name(self):\n nonexistent_flavor = str(int(time.time()))\n flavors = instance_types.get_all_types()\n while nonexistent_flavor in flavors:\n nonexistent_flavor += \"z\"\n else:\n return nonexistent_flavor", "def test_list_flavors_using_marker(self):\n response = self.flavors_client.list_flavors()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors(marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def VersionSelect(versions, flavor):\n\n if isinstance(flavor, tuple):\n ids = [versions[i] for i in flavor[1:]]\n return ','.join(ids)\n if toolchainbinaries.IsPnaclFlavor(flavor):\n return versions['PNACL_VERSION']\n if toolchainbinaries.IsX86Flavor(flavor):\n if toolchainbinaries.IsNotNaClNewlibFlavor(flavor):\n return 
versions['GLIBC_VERSION']\n else:\n return versions['NEWLIB_VERSION']\n if toolchainbinaries.IsArmTrustedFlavor(flavor):\n return versions['ARM_TRUSTED_VERSION']\n raise Exception('Unknown flavor \"%s\"' % flavor)", "def test_list_flavors_min_disk_greater_than_max_flavor_ram(self):\n response = self.flavors_client.list_flavors(min_ram=self.max_ram+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def get_flavors_white_list(self):\n return self._sanitize(CONF.powervc.flavor_white_list)", "def _iter_configurations() -> Iterable[pathlib.Path]:\n for ext in CONFIGURATION_FILE_FORMATS:\n yield from HERE.rglob(f\"*{ext}\")", "def _create_flavor(self, context, flavor):\n flavor_dict = flavor.__dict__\n name = self.prefix + flavor.name\n flavorid = self.prefix + flavor.id\n memory = flavor.ram\n vcpus = flavor.vcpus\n root_gb = flavor.disk\n ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0)\n u_swap = flavor_dict.get('swap', 0)\n rxtx_factor = flavor_dict.get('rxtx_factor', 1.0)\n is_public = flavor_dict.get('os-flavor-access:is_public', True)\n if u_swap == \"\":\n swap = 0\n else:\n swap = int(u_swap)\n\n try:\n return flavors.create(name, memory, vcpus, root_gb,\n ephemeral_gb=ephemeral_gb,\n flavorid=flavorid, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n except exception.InstanceExists as err:\n raise err", "def setUpClass(cls):\n super(FlavorsTest, cls).setUpClass()\n flavors = cls.flavors_client.list_flavors_with_detail().entity\n\n # Find the flavor that provides the most RAM\n flavors.sort(key=lambda k: k.ram)\n cls.max_ram = flavors[-1].ram\n\n # Find the flavor that provides the most disk\n flavors.sort(key=lambda k: k.disk)\n cls.max_disk = flavors[-1].disk", "def item3():\n from random import randint\n\n random_bits = 0\n for i in range(64):\n if randint(0, 1):\n random_bits |= 1 << i\n print(bin(random_bits))\n\n flavor_list = ['vanilla', 'chocolate', 'pecan', 'strawberry']\n for flavor in flavor_list:\n print(\"%s is delicious\" % flavor)\n\n for i in range(len(flavor_list)):\n flavor = flavor_list[i]\n print(\"%d: %s is delicious\" % (i + 1, flavor))\n\n print(list(enumerate(flavor_list))) # list exhausts generator\n print(enumerate(flavor_list)) # Returns enumerate object\n\n for i, flavor in enumerate(flavor_list):\n print('%d: %s' % (i + 1, flavor))\n\n for i, flavor in enumerate(flavor_list, 1): # Can start the enumeration with number 1\n print('%d: %s' % (i, flavor))", "def test_list_flavors_min_disk_greater_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors(min_disk=self.max_disk+1)\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(GeobotBeaterStrategy))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def get_flavor_provider(flavor):\n disk_template = flavor.disk_template\n provider = None\n if disk_template.startswith(\"ext\"):\n disk_template, provider = disk_template.split(\"_\", 1)\n return disk_template, provider", "def generate_animals(animal_choice, count):\n animals = []\n for _ in range(count):\n # uses the animal choice to map to appropriate function pointer and create Animal object\n animals.append(mappings[animal_choice]())\n\n return animals # list of Animal objects", "def get_feature_generator(feed, recipe):\n return get_instance(feed, **recipe)", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], 
inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def synchronize_flavors(self, ctx):\n LOG.info(_(\"Flavors synchronization starts.\"))\n # Get all public flavors. By default, detail and public is set.\n pvcFlavors = self.driver.list_flavors()\n # Sync flavors in list\n for flavor in pvcFlavors:\n LOG.info(_(\"Flavor:%s\") % str(flavor))\n greenthread.sleep(0)\n # This check is added to eliminate sync of private flavors\n # Can be removed once PowerVC fixes to return only public flavors\n # by default.\n if not(flavor.__dict__.get(constants.IS_PUBLIC)):\n continue\n\n if (self._check_for_sync(flavor.name)):\n response = self._check_for_extraspecs(flavor)\n if response is not None:\n self._sync_flavor(ctx, flavor, response[1])\n LOG.info(_(\"Flavors synchronization ends.\"))", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def test_list_flavors_limit_results(self):\n response = self.flavors_client.list_flavors(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def _yield_result_files(self, tpl, **kwargs):\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"ONP\", \"PacBio\"):\n suffix = \"_long\"\n else:\n suffix = \"\"\n yield from expand(\n tpl,\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n **kwargs\n )", "def _generate_flavorid(self):\n nonexistent_flavor = 2700\n flavor_ids = [value[\"id\"] for key, value in\n instance_types.get_all_types().iteritems()]\n while nonexistent_flavor in flavor_ids:\n nonexistent_flavor += 1\n else:\n return nonexistent_flavor", "def _create_resize_down_flavors(self):\n output = self.nova('flavor-create',\n params='%s auto 128 0 1' % self.name_generate())\n larger_id = self._get_column_value_from_single_row_table(output, \"ID\")\n self.addCleanup(self.nova, 'flavor-delete', params=larger_id)\n\n output = self.nova('flavor-create',\n params='%s auto 64 0 1' % self.name_generate())\n smaller_id = self._get_column_value_from_single_row_table(output, \"ID\")\n self.addCleanup(self.nova, 'flavor-delete', params=smaller_id)\n\n return larger_id, smaller_id", "def gen_input_permutation():\n return [(arch, src, dst) for arch in architecture.ARCH_ACCEPTED for src in PRODUCT_TYPE for dst in PRODUCT_TYPE]", "def find_weather_presets():\n rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n def name(x): return ' '.join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]", "def test_list_flavors_detailed_using_marker(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertGreater(len(flavors), 0, 'Flavors list is empty')\n 
flavor_marker = flavors[0]\n\n response = self.flavors_client.list_flavors_with_detail(\n marker=flavor_marker.id)\n filtered_flavors = response.entity\n self.assertNotIn(flavor_marker, filtered_flavors,\n msg='Filtered flavor was incorrectly '\n 'included in the list of returned flavors')", "def gen_versioned_files(self, wave=1):\n if self.file:\n yield self.file\n else:\n py_files = ['setup.cfg', 'setup.py', '*/__init__.py']\n js_files = ['bower.json', 'package.json', 'component.json']\n php_files = ['composer.json']\n misc_files = ['*.spec', '*.php', '*.py', '*.xml', '*.json']\n wave_one = py_files + js_files + php_files\n switch = {1: wave_one, 2: misc_files}\n\n for git_file in self.files:\n if any(fnmatch(git_file, file_) for file_ in switch[wave]):\n yield git_file", "def generate_schema_list():\n src = os.path.join(os.path.dirname(__file__), '../schemas')\n for root, dirs, files in os.walk(src):\n for fname in files:\n if not fname.endswith('.yaml'):\n continue\n if os.path.splitext(fname)[0] in (\n 'draft-01', 'asdf-schema-1.0.0'):\n continue\n yield os.path.join(root, fname)", "def test_create_flavor_all_settings(self):\n # Create Flavor\n if self.flavor_metadata:\n self.flavor_metadata.update(create_flavor.MEM_PAGE_SIZE_ANY)\n flavor_settings = openstack_tests.get_flavor_config(\n name=self.flavor_name, ram=1, disk=1, vcpus=1, ephemeral=2, swap=3,\n rxtx_factor=2.2, is_public=False,\n metadata=self.flavor_metadata)\n self.flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)\n flavor = self.flavor_creator.create()\n self.assertTrue(validate_flavor(self.nova, flavor_settings, flavor))\n\n # Delete Flavor\n nova_utils.delete_flavor(self.nova, flavor)\n self.assertIsNone(\n nova_utils.get_flavor_by_name(self.nova, flavor_settings.name))\n\n # Attempt to cleanup\n self.flavor_creator.clean()\n\n self.assertIsNone(self.flavor_creator.get_flavor())", "def HashSelect(versions, flavor):\n return versions[HashKey(flavor)]", "def variations():", "def test_list_flavors_with_detail(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n self.assertTrue(len(flavors) > 0)\n response = self.flavors_client.get_flavor_details(self.flavor_ref)\n flavor = response.entity\n self.assertIn(flavor, flavors)", "def get_recommendation_genre_seeds(client = None):\n\n return client.recommendation_genre_seeds()['genres']", "def gen(self):\n for path, bg_idx, bbox in zip(self.img_paths, self.bgs, self.bbox):\n img = cv2.imread(self.background[bg_idx])\n for alpha, obj, box in zip(self.alphas, self.objects, bbox):\n img, mask = self.alpha_blend(img, obj, box, alpha)\n yield path, img, mask", "def build(self, context):\r\n return ['-Z', context.config.preset]", "def gen_vars(input_fqs):\n k_sizes = range(*CONFIG['abyss_bloom']['k_mer_sizes'])\n sr = re.search(PATH_RE, input_fqs[0])\n sr2 = re.search(PATH_RE, input_fqs[1])\n # should be of conventional directory hierarchy\n try:\n assert sr.groups() == sr2.groups()\n except AssertionError:\n print '{0} != {1}'.format(sr.groups(), sr2.groups())\n raise\n\n bfs, bf_flags, fas, fa_flags = [], [], [], []\n for k_size in k_sizes:\n # for abyss_bloom\n # bn: basename\n bf_bn = '{0}_k{1}.bf.gz'.format(sr.group('celltype'), k_size)\n bf_flag_bn = '{0}.SUCCESS'.format(bf_bn)\n bf_dir = os.path.join(sr.group('prefix'), 'kon', sr.group('chr'), 'bf')\n bf = os.path.join(bf_dir, bf_bn)\n bf_flag = os.path.join(bf_dir, bf_flag_bn)\n bfs.append(bf)\n bf_flags.append(bf_flag)\n\n # for konnector\n fa_all_bn = 
'{0}_k{1}_allpaths.fa.gz'.format(sr.group('celltype'), k_size)\n fa_mer_bn = '{0}_k{1}_merged.fa.gz'.format(sr.group('celltype'), k_size)\n fa_flag_bn = '{0}_k{1}.SUCCESS'.format(sr.group('celltype'), k_size)\n fa_dir = os.path.join(sr.group('prefix'), 'kon', sr.group('chr'), 'fafq')\n fa_all = os.path.join(fa_dir, fa_all_bn)\n fa_mer = os.path.join(fa_dir, fa_mer_bn)\n fa_flag = os.path.join(fa_dir, fa_flag_bn)\n fas.extend([fa_all, fa_mer])\n fa_flags.append(fa_flag)\n\n return k_sizes, bfs, bf_flags, fas, fa_flags", "def test_list_flavors_filter_by_min_ram(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by RAM in ascending order\n flavors.sort(key=lambda k: int(k.ram))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest RAM value\n filter_criteria = lambda x: int(x.ram) >= int(flavors[1].ram)\n expected_flavors = filter(filter_criteria, flavors)\n response = self.flavors_client.list_flavors(min_ram=flavors[1].ram)\n actual_flavors = response.entity\n\n actual_flavor_ids = set([flavor.id for flavor in actual_flavors])\n expected_flavor_ids = set([flavor.id for flavor in expected_flavors])\n self.assertEqual(actual_flavor_ids, expected_flavor_ids)", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def gencastshapes():\n for n in range(32):\n yield [n]\n ndim = randrange(4, 6)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]\n ndim = randrange(2, 4)\n minshape = 1 if randrange(100) > 80 else 2\n yield [randrange(minshape, 5) for _ in range(ndim)]", "def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes", "def option_registrations_iter(self):\n\n def normalize_kwargs(orig_args, orig_kwargs):\n nkwargs = copy.copy(orig_kwargs)\n dest = self.parse_dest(*orig_args, **nkwargs)\n nkwargs[\"dest\"] = dest\n if not (\"default\" in nkwargs and isinstance(nkwargs[\"default\"], RankedValue)):\n type_arg = nkwargs.get(\"type\", str)\n member_type = nkwargs.get(\"member_type\", str)\n default_val = self.to_value_type(nkwargs.get(\"default\"), type_arg, member_type)\n if isinstance(default_val, (ListValueComponent, DictValueComponent)):\n default_val = default_val.val\n nkwargs[\"default\"] = RankedValue(Rank.HARDCODED, default_val)\n return nkwargs\n\n # Yield our directly-registered options.\n for args, kwargs in self._option_registrations:\n normalized_kwargs = normalize_kwargs(args, kwargs)\n yield args, normalized_kwargs", "def test_list_flavors_detailed_limit_results(self):\n response = self.flavors_client.list_flavors_with_detail(limit=1)\n flavors = response.entity\n self.assertEqual(1, len(flavors))", "def test_list_flavors_detailed_min_disk_larger_than_max_flavor_disk(self):\n response = self.flavors_client.list_flavors_with_detail(\n min_disk='99999')\n flavors = response.entity\n self.assertEqual(len(flavors), 0)", "def gen_opener(filenames):\n for filename in filenames:\n if str(filename).endswith('.gz'):\n f = gzip.open(filename, 'rt')\n elif str(filename).endswith('.bz2'):\n f = bz2.open(filename, 'rt')\n else:\n 
f = open(filename, 'rt')\n yield f\n f.close()", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy,\n alpha=0.1))\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations", "def gen(self):\n c = self.channels\n for path, bg in zip(self.img_paths, self.bgs):\n plain = np.ones((self.h, self.w, c), dtype=np.uint8)\n yield path, (plain * bg).astype(np.uint8), None", "def generators(self, algorithm=\"farey\"):\n if self.level() == 1:\n # we return a fixed set of generators for SL2Z, for historical\n # reasons, which aren't the ones the Farey symbol code gives\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n\n elif algorithm==\"farey\":\n return self.farey_symbol().generators()\n\n elif algorithm==\"todd-coxeter\":\n from sage.modular.modsym.p1list import P1List\n from .congroup import generators_helper\n level = self.level()\n if level == 1: # P1List isn't very happy working mod 1\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n gen_list = generators_helper(P1List(level), level)\n return [self(g, check=False) for g in gen_list]\n\n else:\n raise ValueError(\"Unknown algorithm '%s' (should be either 'farey' or 'todd-coxeter')\" % algorithm)", "def populate_variants(self, inventory=None):\n self.variants = list()\n\n option_combos = self.generate_option_combos()\n\n for combo in option_combos:\n self.variants.append(Variant(\n self.style_number,\n option_combo=combo,\n inventory=inventory))", "def img_gender_gen(gen_img, gen_gender): \n \n while True:\n X1i = gen_img.next()\n X2i = gen_gender.next()\n yield [X1i[0], X2i[1]], X1i[1]", "def _build_functions_list():\n return {\"ec2-sg\": _build_ec2_mapping_from_sg,\n \"ec2-resources\": _build_ec2_mapping_from_resources,\n \"rds-sg\": _build_rds_mapping_from_sg,\n \"rds-resources\": _build_rds_mapping_from_resources,\n \"elbv2-sg\": _build_elbv2_mapping_from_sg,\n \"elbv2-resources\": _build_elbv2_mapping_from_resources}", "def generate():\n strategies = []\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy, mirroring=False,\n alpha=0.5))\n do_rotations = [False for _ in strategies]\n assert len(strategies) == 1\n return strategies, do_rotations", "def configs(self):\n yield \"singleimage\", build_config.BuildConfig()", "def variants ( self ) :\n vars = []\n items = [ 'distrib' , 'default' ]\n items += [ 'stat_%s' % d for d in range ( 10 ) ]\n items += [ 'syst_%s' % d for d in range ( 10 ) ]\n \n from ostap.core.core import rootError \n from ostap.logger.logger import logFatal\n \n for item in items :\n if self.__variant == item : continue \n path = os.path.join ( self.__config_run.eosrootdir ,\n self.__config ,\n \"%s_%s.root\" % ( self.__dataset, item ) )\n with logFatal() , rootError () : \n rf = ROOT.TFile.Open ( path , 'READ' , exception = False )\n if rf and rf.IsOpen ( ) :\n vars.append ( item )\n rf.Close() \n \n return tuple ( vars )", "def full_image_list(self):\n for architecture in self.database.architectures:\n yield md.item(md.link(_format_architecture(architecture)))\n\n for architecture in self.database.architectures:\n yield \"\"\n yield md.header(_format_architecture(architecture), 3)\n yield \"\"\n yield \"Supported platforms:\"\n yield \"\"\n\n for platform in self.database.platforms:\n releases = self._release_list(architecture, platform)\n if releases:\n yield md.item(releases)\n\n for platform in self.database.platforms:\n for release in self.database.releases(platform):\n if not self.database.has(\n architecture=architecture, 
platform=platform, release=release\n ):\n continue\n\n yield \"\"\n yield md.header(\n _format_platform(platform, release, architecture), 4\n )\n yield \"\"\n\n for version in self.database.versions:\n image = self.database.get(\n version=version,\n architecture=architecture,\n platform=platform,\n release=release,\n )\n if not image:\n continue\n\n tags = [\n tag\n for tag in self.database.tags(image)\n if len(tag.version) < 4\n ]\n\n yield _format_image(image, tags)", "def _generators(self):\n return self.free_group.generators", "def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations", "def _check_for_extraspecs(self, flavor):\n flavor_extraspecs = self.driver.get_flavor_extraspecs(flavor)\n if flavor_extraspecs:\n scg_key = constants.SCG_KEY\n if scg_key in flavor_extraspecs:\n if not self.scg_id_list:\n return None\n if not flavor_extraspecs[scg_key] in self.scg_id_list:\n return None\n return (True, flavor_extraspecs)", "def test_list_flavors_filter_by_min_disk(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by disk size in ascending order\n flavors.sort(key=lambda k: int(k.disk))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest disk size\n filter_criteria = lambda x: int(x.disk) >= int(flavors[1].disk)\n expected_flavors = filter(filter_criteria, flavors)\n response = self.flavors_client.list_flavors(min_disk=flavors[1].disk)\n actual_flavors = response.entity\n\n actual_flavor_ids = set([flavor.id for flavor in actual_flavors])\n expected_flavor_ids = set([flavor.id for flavor in expected_flavors])\n self.assertEqual(actual_flavor_ids, expected_flavor_ids)", "def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)", "def test_list_flavors_detailed_filter_by_min_ram(self):\n response = self.flavors_client.list_flavors_with_detail()\n flavors = response.entity\n\n # Sort the flavors by RAM in ascending order\n flavors.sort(key=lambda k: int(k.ram))\n\n # Remove any flavors from the list that are smaller than the\n # flavor with the second smallest RAM size\n filter_criteria = lambda x: int(x.ram) >= int(flavors[1].ram)\n expected_flavors = filter(filter_criteria, flavors)\n\n response = self.flavors_client.list_flavors_with_detail(\n min_ram=flavors[1].ram)\n actual_flavors = response.entity\n actual_flavors.sort(key=lambda k: k.id)\n expected_flavors.sort(key=lambda k: k.id)\n self.assertEqual(actual_flavors, expected_flavors)" ]
[ "0.74028367", "0.6779597", "0.6729681", "0.64055777", "0.6195069", "0.59106874", "0.59053063", "0.58540213", "0.5842893", "0.5784032", "0.57393897", "0.5738477", "0.5722725", "0.5588289", "0.5528176", "0.54953", "0.5488104", "0.54655933", "0.54139316", "0.5388655", "0.53481954", "0.53343064", "0.5302553", "0.5294203", "0.5262708", "0.5231103", "0.5220609", "0.5194343", "0.5077146", "0.5077146", "0.5072977", "0.50684214", "0.5067332", "0.50424075", "0.50412965", "0.50324154", "0.50322795", "0.5029527", "0.5019752", "0.49851", "0.49829808", "0.49539012", "0.49401873", "0.49333522", "0.49275154", "0.49185854", "0.49089503", "0.49029884", "0.4884771", "0.4873547", "0.48609725", "0.48529634", "0.48502386", "0.48444974", "0.4832472", "0.48309934", "0.48223242", "0.48047197", "0.47757", "0.47695538", "0.4762819", "0.4758504", "0.47538486", "0.4741443", "0.47293586", "0.4727523", "0.4726433", "0.4716785", "0.47033352", "0.46991605", "0.469275", "0.4685932", "0.46802923", "0.46719983", "0.46601266", "0.46424556", "0.46317118", "0.46307814", "0.46307814", "0.46261832", "0.4604091", "0.46037328", "0.45889917", "0.45849684", "0.45794514", "0.45777687", "0.45760566", "0.45745343", "0.45518243", "0.45511484", "0.45445204", "0.45354214", "0.45299113", "0.4529055", "0.45277584", "0.4523208", "0.4519602", "0.4519044", "0.4513377", "0.45118758" ]
0.679078
1
Create a new instance from attributes
def create_instance(self, **attrs): return self._create(_instance.Instance, **attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def __init__(self, **attributes):\n self.set(**attributes)", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def build(cls, **kwargs):\n new_object = cls()\n fields = get_fields(cls)\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(new_object, name, value)\n \n return new_object", "def create(self, class_name, attrs, session):", "def __init__(self, **attrs):\n \n # set given attributes\n for name, value in attrs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\"Attribute not found! --> %s\" % name)", "def __init__(self,\n *,\n attributes: List['Attribute'] = None) -> None:\n self.attributes = attributes", "def new(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def __init__(self, new=None, session_id=None, attributes=None,\n application=None, user=None):\n default_attr = dict(new=bool(),\n session_id=str(),\n attributes=dict(),\n application=Application(),\n user=User())\n self.new = new\n self.session_id = session_id\n self.attributes = attributes\n self.application = application\n self.user = user\n self._set_default_attr(default_attr)", "def _create(self, **attributes: Dict[str, object]) -> str:\n pass", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance", "def __init__(self, name: str, attributes: List[Attribute], description: str = \"\"):\n self.name: str = name\n self.attributes = sorted(\n attributes, key=lambda x: x.name\n ) # type: List[Attribute]\n self._check_validity()\n self.attributes_by_name = {a.name: a for a in self.attributes}\n self.description = description", "def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key != \"__class__\":\n if key == \"created_at\":\n self.created_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"updated_at\":\n self.updated_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"id\":\n self.id = value\n else:\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()", "def create(cls, _):\n return cls", "def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))", "def __init__(self, *args, **kwargs):\n for key, value in 
kwargs.items():\n if key == \"created_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"updated_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"__class__\":\n continue\n else:\n setattr(self, key, value)\n\n if len(kwargs) == 0:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)", "def test_init(attributes):\n instance = Participant(**attributes)\n for attr, value in attributes.items():\n assert getattr(instance, attr) == value", "def create(self, **attributes):\n return self.save(self.model(**attributes))", "def __init__(self):\n\n try:\n # read attributes from attributes file\n with open(const.Storage.ATTRIBUTES) as attributes_file:\n # read the file and parse it to JSON data\n json_data = attributes_file.read()\n attributes = json.loads(json_data)\n\n # set attributes\n self.id = str(attributes[\"id\"])\n self.length = float(attributes[\"length\"])\n self.width = float(attributes[\"width\"])\n except OSError:\n raise OSError(\"The attributes file could not be opened.\")", "def create(cls, **dictionary):\n dummy_obj = cls(1, 1)\n dummy_obj.update(**dictionary)\n return dummy_obj", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self, name, age):\r\n self.name = name\r\n self.age = age", "def __init__(self,\r\n username=None,\r\n first_name=None,\r\n last_name=None,\r\n application_id=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.username = username\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.application_id = application_id\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def create(self, **kargs):\n return self(**kargs)", "def __init__(self, name, attr=None):\n self.name = name\n self.propertiesstr = attr", "def create(cls, **_params):\n cls_inst = cls()\n cls_inst = cls_inst.set(**_params)\n cls_inst.save()\n return cls_inst", "def __init__(self,\r\n name=None,\r\n given_name=None,\r\n middle_name=None,\r\n family_name=None,\r\n address=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.name = name\r\n self.given_name = given_name\r\n self.middle_name = middle_name\r\n self.family_name = family_name\r\n self.address = address\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def create(cls, **dictionary):\n new_inst = cls.__new__(cls)\n if cls.__name__ == \"Rectangle\":\n new_inst.__init__(42, 98)\n elif cls.__name__ == \"Square\":\n new_inst.__init__(42)\n new_inst.update(**dictionary)\n return new_inst", "def __init__(self, attribs):\n self.__instanced = False\n self.__initAccessor(attribs)\n self.__setValues(attribs)\n self.__instanced = True", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def make_instance(cls):\r\n def get_value(name):\r\n if name in attributes:\r\n return attributes[name]\r\n else:\r\n value = cls['get'](name)\r\n return bind_method(value, 
instance)\r\n\r\n def set_value(name, value):\r\n attributes[name] = value\r\n\r\n attributes = {}\r\n instance = {'get': get_value, 'set': set_value}\r\n return instance", "def make(cls, **kwargs):\r\n return cls().fill(**kwargs)", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key in ('created_at', 'updated_at'):\n date = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')\n setattr(self, key, date)\n elif key != '__class__':\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n models.storage.new(self)", "def build(cls, **kwargs):\n return cls(kwargs) #pylint: disable=no-value-for-parameter", "def __init__(self, name, color, age):\n self.name = name\n self.color = color\n self.age = age\n self.breed = \"something\"", "def __init__(self, attributes: List[AttributeName], g1: G1Element, Y1: Dict[str, G1Element], g2: G2Element, X2: G2Element, Y2: Dict[AttributeName, G2Element]):\n self.attributes = attributes\n self.g1 = g1\n self.Y1 = Y1\n self.g2 = g2\n self.X2 = X2\n self.Y2 = Y2", "def __init__(self, *args, **kwargs):\r\n if kwargs:\r\n for key, value in kwargs.items():\r\n\r\n if key == \"created_at\" or key == \"updated_at\":\r\n setattr(self, key, datetime.strptime(value,\r\n \"%Y-%m-%dT%H:%M:%S.%f\"))\r\n\r\n elif key != \"__class__\":\r\n setattr(self, key, value)\r\n\r\n else:\r\n self.id = str(uuid.uuid4())\r\n self.created_at = datetime.now()\r\n self.updated_at = datetime.now()\r\n models.storage.new(self)", "def attr(*args, **kwargs):\n return Attr(*args, **kwargs)", "def _construct_instance(cls, names, values):\r\n field_dict = dict((cls._db_map.get(k, k), v) for k, v in zip(names, values))\r\n if cls._is_polymorphic:\r\n poly_key = field_dict.get(cls._polymorphic_column_name)\r\n\r\n if poly_key is None:\r\n raise PolyMorphicModelException('polymorphic key was not found in values')\r\n\r\n poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base\r\n\r\n klass = poly_base._get_model_by_polymorphic_key(poly_key)\r\n if klass is None:\r\n poly_base._discover_polymorphic_submodels()\r\n klass = 
poly_base._get_model_by_polymorphic_key(poly_key)\r\n if klass is None:\r\n raise PolyMorphicModelException(\r\n 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__)\r\n )\r\n\r\n if not issubclass(klass, cls):\r\n raise PolyMorphicModelException(\r\n '{} is not a subclass of {}'.format(klass.__name__, cls.__name__)\r\n )\r\n\r\n field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()}\r\n\r\n else:\r\n klass = cls\r\n\r\n instance = klass(**field_dict)\r\n instance._is_persisted = True\r\n return instance", "def __init__(self, **kwargs):\n default_values = {\n 'name': 'Organization Name',\n 'ubi': 'Unified Business Identifier',\n 'address_line_1': '',\n 'address_line_2': '',\n 'city': '',\n 'state': '',\n 'zipcode': '',\n 'county': '',\n 'phone': '',\n 'license_id': '',\n 'license_type': '',\n 'license_status': '',\n 'license_creation_date': ''\n }\n\n # Set instance properties from keyword arguments or default values\n for (attr, default) in default_values.items():\n setattr(self, attr, kwargs.get(attr, default))", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n\n for (key, value) in kwargs.iteritems():\n # use setattr so that validation is triggered\n setattr(self, key, value)", "def create(cls, **data):\n user = cls()\n for attribute in data:\n if hasattr(user, attribute):\n setattr(user, attribute, data[attribute])\n user.password = data[\"password\"]\n db.session.add(user)\n return user", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'catalog_id': 'str',\n 'uri': 'str',\n 'job_type': 'str',\n 'lifecycle_state': 'str',\n 'is_sample_data_extracted': 'bool',\n 'time_created': 'datetime'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'catalog_id': 'catalogId',\n 'uri': 'uri',\n 'job_type': 'jobType',\n 'lifecycle_state': 'lifecycleState',\n 'is_sample_data_extracted': 'isSampleDataExtracted',\n 'time_created': 'timeCreated'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._catalog_id = None\n self._uri = None\n self._job_type = None\n self._lifecycle_state = None\n self._is_sample_data_extracted = None\n self._time_created = None", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"", "def create(self, validated_data):\n\n # Create the Attribute instance\n attribute = Attribute.objects.create(\n name=validated_data['name']\n )\n\n # Create each AttributeValue instance\n for item in validated_data.get('values', []):\n AttributeValue.objects.create(\n name=item['name'],\n value=item['value'],\n attribute=attribute)\n\n return attribute", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n instance.save()\n return instance", "def create(cls, *args):\n c = cls({})\n c.apply(*args)\n return c", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'assign_public_ip': 'bool',\n 'defined_tags': 'dict(str, 
dict(str, object))',\n 'display_name': 'str',\n 'freeform_tags': 'dict(str, str)',\n 'hostname_label': 'str',\n 'nsg_ids': 'list[str]',\n 'private_ip': 'str',\n 'skip_source_dest_check': 'bool',\n 'subnet_id': 'str'\n }\n\n self.attribute_map = {\n 'assign_public_ip': 'assignPublicIp',\n 'defined_tags': 'definedTags',\n 'display_name': 'displayName',\n 'freeform_tags': 'freeformTags',\n 'hostname_label': 'hostnameLabel',\n 'nsg_ids': 'nsgIds',\n 'private_ip': 'privateIp',\n 'skip_source_dest_check': 'skipSourceDestCheck',\n 'subnet_id': 'subnetId'\n }\n\n self._assign_public_ip = None\n self._defined_tags = None\n self._display_name = None\n self._freeform_tags = None\n self._hostname_label = None\n self._nsg_ids = None\n self._private_ip = None\n self._skip_source_dest_check = None\n self._subnet_id = None", "def __init__(self, *args, **kwargs):\n if kwargs or len(kwargs) != 0:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n elif key == \"created_at\" or key == \"updated_at\":\n self.__dict__[key] = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n self.__dict__[key] = value\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n models.storage.new(self)", "def __init__(self,\r\n primary_language=None,\r\n secondary_language=None,\r\n xml_signature=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.primary_language = primary_language\r\n self.secondary_language = secondary_language\r\n self.xml_signature = xml_signature\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def __init__(self, tag=None, attributes=(), header=None, column_number=None):\n if tag:\n tag = tag.lower()\n self.tag = tag\n self.header = header\n self.column_number = column_number\n self.attributes = set([a.lower() for a in attributes])\n self.attribute_list = [a.lower() for a in attributes] # to preserve order", "def from_data(cls, data):\n self = object.__new__(cls)\n self.id = parse_id(data)\n self._set_icon(data)\n self.bot = parse_bot(data)\n self.description = parse_description(data)\n self.name = parse_name(data)\n return self", "def __init__(self, *args, **kwargs):\n self.id = str(uuid4())\n self.created_at = datetime.today()\n self.updated_at = datetime.today()\n\n format = \"%Y-%m-%dT%H:%M:%S.%f\"\n if len(kwargs) != 0:\n \"\"\"Conditionals for kwargs\"\"\"\n for ky, val in kwargs.items():\n if ky == \"created_at\" or ky == \"updated_at\":\n self.__dict__[ky] = datetime.strptime(val, format)\n else:\n self.__dict__[ky] = val\n else:\n models.storage.new(self)", "def __init__(self, **kwargs: Any):\n for name, value in kwargs.items():\n setattr(self, name, value)", "def __init__(self, attr=None):\r\n self.attr = attr", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'channel_id': 'str',\n 'channel_secret': 'str',\n 'switcher_secret': 'str',\n 'service_code': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'channel_id': 'channelId',\n 'channel_secret': 'channelSecret',\n 'switcher_secret': 'switcherSecret',\n 'service_code': 'serviceCode',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._channel_id = None\n self._channel_secret = None\n self._switcher_secret = None\n self._service_code = 
None\n self._self_uri = None", "def __init__(self, **kwds):\n self.system=self.username=self.password=\"\"\n if kwds.has_key(\"system\"):\n self.system=kwds[\"system\"]\n if kwds.has_key(\"username\"):\n self.username=kwds[\"username\"]\n if kwds.has_key(\"password\"):\n self.password=kwds[\"password\"]\n if kwds.has_key(\"element\"):\n self.fromElement(kwds[\"element\"])", "def __init__(self, name, surname, phone_number, creation_date):\n self.name = name\n self.surname = surname\n self.phone_number = phone_number\n self.creation_date = creation_date", "def __init__(self, **kwargs):\n \n default_attr = dict(username='')\n\n allowed_attr = list(default_attr)\n default_attr.update(kwargs)\n\n for key in default_attr:\n if key in allowed_attr:\n self.__dict__[key] = default_attr.get(key)", "def from_dict(cls, data):\n return cls(**data)", "def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n self.email = \"abc\"\n self.age = 20", "def __init__(self, a, b, c):\r\n self.a = a\r\n self.b = b\r\n self.c = c", "def __init__(self, t, a, v):\n\n self.time = t\n self.attribute = a\n self.value = v", "def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)", "def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if 
masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)", "def __init__(self, attribute_name, before, after):\n self.attribute_name = attribute_name\n self.before = before\n self.after = after", "def load(cls, data):\n return cls(**data)", "def create(self, attributes=None, type=None, name=None):\n attributes = attributes or {}\n type = type or attributes.get('type')\n name = name or attributes.get('name')\n request = self.request(operation='CREATE', type=type, name=name, body=attributes)\n return Entity(self, self.call(request, expect=error.CREATED).body)", "def __init__(self, first_name, last_name, age, gender):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.gender = gender", "def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()" ]
[ "0.7391123", "0.71716684", "0.7001039", "0.6807363", "0.6790451", "0.67887485", "0.67748046", "0.67689526", "0.6607367", "0.6607367", "0.660078", "0.65276855", "0.65238714", "0.65238714", "0.64978755", "0.64805484", "0.64737403", "0.64229757", "0.6416804", "0.6346199", "0.63267696", "0.6301284", "0.62800115", "0.6266852", "0.6261111", "0.625898", "0.62559503", "0.6226024", "0.6226024", "0.6226024", "0.6226024", "0.62126696", "0.6212431", "0.6179058", "0.61728686", "0.6169052", "0.61686176", "0.616834", "0.6162826", "0.61499554", "0.61457753", "0.6118426", "0.6117904", "0.6117406", "0.6117406", "0.6117406", "0.6117406", "0.6117406", "0.6117406", "0.6117406", "0.6117406", "0.6115204", "0.61145985", "0.61145985", "0.61145985", "0.6111551", "0.61033165", "0.60992754", "0.60923827", "0.60918665", "0.6089918", "0.6075642", "0.6070811", "0.60702693", "0.6057479", "0.60506356", "0.60504603", "0.6046516", "0.6046516", "0.6046516", "0.6046076", "0.6034892", "0.6019327", "0.60145223", "0.6010246", "0.6007086", "0.60070676", "0.5992742", "0.5991436", "0.59910846", "0.5980032", "0.5975365", "0.5973959", "0.59719867", "0.59719867", "0.5965795", "0.5958236", "0.5957491", "0.59562063", "0.5953508", "0.59532994", "0.59460676", "0.5938876", "0.5935256", "0.5935256", "0.5904976", "0.5902499", "0.5899833", "0.5896616", "0.58853114" ]
0.63399637
20
Find a single instance
def find_instance(self, name_or_id, ignore_missing=True):
    return self._find(
        _instance.Instance, name_or_id, ignore_missing=ignore_missing
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def find(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def find(self, **kwargs):\n rl = self.findall(**kwargs)\n num = len(rl)\n\n if num == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(msg)\n elif num > 1:\n raise exceptions.NoUniqueMatch\n else:\n return self.get(rl[0].id)", "def find(self, **kwargs):\n matches = self.findall(**kwargs)\n num_matches = len(matches)\n if num_matches == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(404, msg)\n elif num_matches > 1:\n raise exceptions.NoUniqueMatch\n else:\n return matches[0]", "def find_one(self, criteria):\n return self.connection.find_one(criteria)", "def first(self, **kwargs):\n return self.find(**kwargs).first()", "def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def find_one(cls, attr):\n result = cls.db().find_one(attr, True)\n\n if result is not None:\n return cls(result)\n\n return None", "def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()", "def find_instance_by_id ( ec2_conn, instance_id ) :\n instance_results = ec2_conn.get_only_instances( instance_ids = [ instance_id ] )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def find(cls, uuid):\n entries = cls.objects.filter(uuid=uuid)\n if not entries:\n return None\n else:\n return entries.first()", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def findWhere(cls, args):\n return cls.search(args)[0][0]", "def find_first(cls, **kwargs):\n return cls.query.filter_by(**kwargs).first()", "def get(self, **search_terms):\n instances = self.filter(**search_terms)\n\n if not instances:\n raise NotFoundError(\"Nothing has been found.\")\n\n if len(instances) > 1:\n raise NotUniqueError(\"Serveral instance have been found.\")\n\n return instances[0]", "def find_instance_by_type ( ec2_conn, base_name, instance_type ) :\n instance_name = get_instance_name( base_name, instance_type )\n instance_results = ec2_conn.get_only_instances( filters = { \"tag:Name\": [ instance_name ] } )\n if len( instance_results ) > 0 :\n return instance_results[ 0 ]\n\n return None", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def get_instance(self, instance):\n return self._get(_instance.Instance, instance)", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def find_one(self, user_id):\n pass", "def 
find_object(self, obj_type, obj_name):\n try:\n # Simply look it up by type and name.\n obj = self.model_map['object'][obj_type][obj_name][1]\n except KeyError:\n # No dice. This object doesn't exist in the model.\n obj = None\n\n return obj", "def find(self, *args, **kwds):\n return self.collection.find(*args, **kwds)", "def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()", "def find_one_bywhereclause(cls, whereclause):\n return cls.dbm().modelclass_find_one_bywhereclause(cls, whereclause)", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def get_fixture_instance(self, id=None, name=None, verify_exists=True):\n query = {}\n if id:\n query['_id'] = id\n if name:\n query['name_lower'] = name\n if not query:\n raise AXIllegalArgumentException(\"No query filters supplied\")\n fix_doc = self.instances.find_one(query)\n if not fix_doc:\n if verify_exists:\n raise AXApiResourceNotFound(\"No instances found matching: {}\".format(id or name))\n return None\n return FixtureInstance.deserialize_mongodoc(fix_doc)", "def get(cls, id_: int):\n query = DBSESSION.query(cls)\n instance = query.get(id_)\n if not instance:\n raise ObjectNotFound(f\"Register of {cls.str_representation} not found for id = {id_}.\")\n return instance", "def find(self):\n raise NotImplementedError", "async def find_one(self, **query):\n\n return await self._expand(await self.db.get_one(**query))", "async def find(self, pk_value: Union[int, str] = None, **kwargs) -> Union[E, None]:\n pass", "def find(cls, key):\r\n return cls.query().get(key)", "def get_instance(self, data):\n filters = {\n key: data[key]\n for key in self.fields.keys() if key in self.lookup_fields}\n\n if None not in filters.values():\n return self.session.query(\n self.opts.model\n ).filter_by(\n **filters\n ).first()\n return None", "def find_one_byprimaryid(cls, primaryid, defaultval = None):\n return cls.dbm().modelclass_find_one_byprimaryid(cls, primaryid, defaultval)", "async def get_one(self, where: t.Mapping[str, t.Any]) -> t.Optional[Model]:\n\n data = await self.collection.find_one(where)\n return self.model_class(**data) if data else None", "def find(self, name):\n return Search(self.request).find(name)", "def get_instance(self, name):\n return self.store.instance.id", "def find_one():\n fmter.tpl._straightline(\"one document\", 100)\n result = users.find_one({})\n print(type(result))\n ppt(result)\n \n fmter.tpl._straightline(\"none result\", 100)\n result = users.find_one({\"_id\": 100})\n print(type(result))\n ppt(result)", "def get_object(self):\n queryset = self.get_queryset()\n\n model = self.get_model()\n obj = queryset.get(get_primary_keys(model, self.kwargs))\n\n if not obj:\n raise Http404('No %s matches the given query.' 
% model.__name__)\n\n return obj", "def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)", "def find_one(self, collection, query):\n obj = getattr(self.db, collection)\n result = obj.find_one(query)\n return result", "async def get_one(self, where):\n\n pass", "def get_instance (self):\n instances = self.data['instances']\n if not len(instances):\n raise Exception, \"ArchivalObject: No Instances found\"\n for instance in instances:\n # print json.dumps(instance, indent=3)\n try:\n instance['sub_container']['top_container']\n return instance\n except:\n pass\n return None", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def _must_find(session, cls, name):\n obj = session.query(cls).filter_by(label=name).first()\n if not obj:\n raise NotFoundError(\"%s %s does not exist.\" % (cls.__name__, name))\n return obj", "def find_first_object(self, ObjectClass, **kwargs):\n\n # Retrieve first object -- case sensitive\n return ObjectClass.objects(**kwargs).first()", "def _get_instance(identifier):\n # noinspection PyBroadException\n try:\n app_label, model, object_pk = identifier.split('.', maxsplit=2)\n # we don't expect to find anything, so don't log\n if object_pk != 'None':\n if object_pk == OBJECT_DOES_NOT_EXIST:\n raise ObjectDoesNotExist()\n content_type = ContentType.objects.get_by_natural_key(app_label, model)\n return content_type.get_object_for_this_type(pk=object_pk)\n except ContentType.DoesNotExist:\n logging.warning(f'Could not find content type for {identifier!r}')\n except ObjectDoesNotExist:\n logging.warning(f'Could not find related object for {identifier!r}')\n except DatabaseError: # don't mask these\n raise\n except Exception:\n logging.exception(f'Could not get related object for {identifier!r}', log_function=logging.error)", "def find(self, objectclass, **kwargs):\n raise NotImplementedError", "def test_find_one(self):\n person1 = self.Person(name=\"User A\", age=20)\n person1.save()\n person2 = self.Person(name=\"User B\", age=30)\n person2.save()\n\n # Retrieve the first person from the database\n person = self.Person.objects.first()\n assert isinstance(person, self.Person)\n assert person.name == \"User A\"\n assert person.age == 20\n\n # Use a query to filter the people found to just person2\n person = self.Person.objects(age=30).first()\n assert person.name == \"User B\"\n\n person = self.Person.objects(age__lt=30).first()\n assert person.name == \"User A\"\n\n # Use array syntax\n person = self.Person.objects[0]\n assert person.name == \"User A\"\n\n person = self.Person.objects[1]\n assert person.name == \"User B\"\n\n with pytest.raises(IndexError):\n self.Person.objects[2]\n\n # Find a document using just the object id\n person = self.Person.objects.with_id(person1.id)\n assert person.name == \"User A\"\n\n with pytest.raises(InvalidQueryError):\n self.Person.objects(name=\"User A\").with_id(person1.id)", "def find(cls, host, user):\n cls.__check_parameters(host=host, user=user)\n if not hasattr(Connection, \"__pool__\"):\n return None\n cid = cls.generate_id(host, user)\n return Connection.__pool__.get(cid) # by default None is returned", "def _get_instance(self, id):\n if id not in self._instances:\n self._instances[id] = self._load_constructor(id)\n\n return self._instances[id]", "def get_instance(self, name):\n return self.website.instance.id", "def find(self, **kwargs):\n url = 
self.build_url(dict_args_in_out=kwargs)\n\n rl = self._list(\n '%(url)s%(query)s' % {\n 'url': url,\n 'query': '?%s' % urllib.urlencode(kwargs) if kwargs else '',\n },\n self.collection_key)\n num = len(rl)\n\n if num == 0:\n return None\n elif num > 1:\n raise NoUniqueMatch\n else:\n return rl[0]", "def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")", "def find_one(cls, *a, **ka):\n try:\n return cls.find(*a, **ka).next()\n except StopIteration:\n raise KeyError", "def find_by_id(cls, username):\n return cls.query.filter_by(username=username).first()", "def find_by_instance_id(self, instance_id: str) -> Optional[StorageObject]:\n return self._store.get(instance_id, None)", "def get_object(id):", "def get_instance(*, db_session, instance_id: int) -> WorkflowInstance:\n return (\n db_session.query(WorkflowInstance).filter(WorkflowInstance.id == instance_id).one_or_none()\n )", "def find_object_by_uuid(remote, uuid):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_FindObjectByUUID(uuid)\n remote.runCommand(cmd)\n result_val = mmapi.any_result()\n bFound = cmd.GetSceneCommandResult_FindObjectByUUID(cmd_key, result_val)\n return (bFound, result_val.i)", "def fetch_one(cls: Type[_T], session: Session, identifier: int) -> _T:\n return Query(cls, session=session).get(identifier)", "def get_instance(self, instance):\n\n title = list(instance.keys())[0]\n instance = instance.get(title)\n return instance", "def _get_object(self, **kwargs):\n results = self.salesforce.salesforce_query(self.object_name, **kwargs)\n if len(results) == 0:\n human_friendly_args = \", \".join(\n [\"{}={}\".format(key, kwargs[key]) for key in kwargs]\n )\n raise Exception(\n \"no {} matches {}\".format(self.object_name, human_friendly_args)\n )\n elif len(results) > 1:\n raise Exception(\"Query returned {} objects\".format(len(results)))\n else:\n return results[0]", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def find(cls, animal_id):\n raise NotImplementedError", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def find_object_by_name(remote, obj_name):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_FindObjectByName(obj_name)\n remote.runCommand(cmd)\n result_val = mmapi.any_result()\n bFound = cmd.GetSceneCommandResult_FindObjectByName(cmd_key, result_val)\n return (bFound, result_val.i)", "def get_instance(self, container, cls, **params):\n if not cls in self.instances:\n self.instances[cls] = self.create_instance(container, cls, **params)\n \n return self.instances[cls]", "def find(self, p):\n pass", "def find(cls, device_name):\n return cls.query(cls.device_name == device_name).fetch(1)", "def find_one(cls, dataset_id):\n return super(cls, cls).find_one({DATASET_ID: dataset_id})", "def get_by_pk(cls, request, pk):\n session = get_session(request)\n\n return session.query(cls).filter(cls.pk == pk).first()", "def _get_instance(self):", "def _get_instance(self):", "def get_one(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).first()\n self.lock.release()\n return result", "def get_object(self, path: str) -> Object:\n objects_found = [item for item in self._objects.values() if item.path == path]\n if len(objects_found) == 0:\n raise 
ClientError(\n \"ObjectNotFoundException\", f\"Object with id={path} not found\"\n )\n return objects_found[0]", "def verify(self, arg, choose):\n if not arg:\n print(\"** class name missing **\")\n return 0\n args = arg.split(\" \")\n if args[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n return 0\n if len(args) == 1:\n print(\"** instance id missing **\")\n return 0\n obj = storage.all()\n k = \"{}.{}\".format(args[0], args[1])\n for key, val in obj.items():\n if key == k:\n if choose == 1:\n return val\n if choose == 2:\n return k\n print(\"** no instance found **\")", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def findone(cls, *lst, **dct):\n query = cls.where(*lst, **dct).select()\n result = query.execute()\n return result.fetchone()", "def find_by_id(cls, object_id):\n try:\n return mongo_db[cls.__collection__].find_one({\"_id\": ObjectId(object_id)})\n except InvalidId:\n # TODO: Log the exception\n print('Invalid bson id: {}'.format(object_id))\n return None", "def first_or_raise(self):\n res = super(CustomQuery, self).first()\n if not res:\n raise NotFoundException\n return res", "def find(cls, session, name_or_id, ignore_missing=False, **params):\n # Try to short-circuit by looking directly for a matching ID.\n\n data = cls.list(session, **params)\n\n result = cls._get_one_match(name_or_id, data)\n if result is not None:\n return result\n\n if ignore_missing:\n return None\n raise exceptions.ResourceNotFound(\n \"No %s found for %s\" % (cls.__name__, name_or_id))", "def lookup_obj(self,):\n return self._lookup_obj", "def _get_object(cls, pk):\n kwargs = {}\n try:\n kwargs['pk'] = int(pk)\n except Exception as e:\n if not cls.search_alternate:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n kwargs[f\"{cls.search_alternate}__iexact\"] = pk\n return get_object_or_404(cls.model().objects.all(), **kwargs)", "def do_show(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n print(object_dict[full_key])\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")", "def FindObject(self, tagged_address):\n raise NotImplementedError", "def get_this_instance(settings, instance_id_ip, ip_given=False):\n instances = get_all_instances(settings)\n for instance in instances:\n if ip_given:\n current_ip = get_instance_ip(instance)\n if current_ip == instance_id_ip:\n return instance\n else:\n if instance.id == instance_id_ip:\n return instance", "def get_instance(self, *args, **kwargs):\n self.pizza = None\n pk = self.kwargs.get('pk', None)\n if pk:\n try:\n self.pizza = Pizza.objects.get(pk=pk)\n except ObjectDoesNotExist:\n raise Http404(\"No %(verbose_name)s found matching the query\" %\n {'verbose_name': Pizza._meta.verbose_name})", "def first(self, **opts):\n try:\n return next(self.find(**opts))\n except StopIteration:\n if 'default' in opts:\n return opts['default']\n else:\n raise KeyError(\"no matching objects\")", "def get_one_by_id(self, object, id):\n self.lock.acquire()\n result = self.__Session.query(object).get(id)\n self.lock.release()\n return result", "def test_find(self):\n 
PromotionFactory(code=\"SAVE30\").save()\n save50 = PromotionFactory(code=\"SAVE50\")\n save50.save()\n\n promotion = Promotion.find(save50.id)\n self.assertIsNot(promotion, None)\n self.assertEqual(promotion.id, save50.id)\n self.assertEqual(promotion.code, save50.code)\n self.assertEqual(promotion.percentage, save50.percentage)", "def GetInstance():\n pass", "def find_by_name(name):\n return repository.find_by_name(name)", "def find_one(cls, query, select=None, as_dict=False):\n record = cls.collection.find_one(query, select)\n\n return record if as_dict else cls(record)", "def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]", "def find(cls, sid):\n cls.logger.info(\"Processing lookup for shopcart id %s ...\", sid)\n return cls.query.get(sid)", "def exists(klass, where=None):\n def _exists(result):\n return result is not None\n return klass.find(where=where, limit=1).addCallback(_exists)", "def get_obj(cls, container, name):\n\n for obj in container:\n if obj.name == name:\n return obj\n\n raise ValueError('%s not found.' % (name))", "def command_find(args):\n _perform_environment_check()\n\n filter_dict = _get_find_filter_dict(args)\n _find_verify_arguments(filter_dict)\n\n session = setup_session()\n expanded_queries = _expand_query_list(\n session, args[\"queries\"], True, args[\"verbose\"])\n query_results = retrieve_object_info(session, expanded_queries, \"unsorted\")\n\n filtered_results = _find_filter_results(query_results, filter_dict)\n\n dedup_results = _replica_results_dedup(filtered_results)\n _find_print_results(dedup_results, args[\"print0\"])", "def _find(self, details: CallableDetails) -> CallableArg:\n if self.name:\n return self._find_by_name(details, self.name)\n else:\n return self._get_first(details)" ]
[ "0.7728593", "0.71363354", "0.7030264", "0.6881119", "0.68568933", "0.68549156", "0.6848733", "0.6823757", "0.6800267", "0.6779677", "0.67150754", "0.6676291", "0.6646991", "0.6642961", "0.65998936", "0.6545709", "0.6520098", "0.6490596", "0.64855844", "0.6475394", "0.6446852", "0.6445242", "0.6426985", "0.6425823", "0.64223117", "0.64114696", "0.6409071", "0.6397761", "0.6392555", "0.6322973", "0.63010377", "0.6300839", "0.6298373", "0.6296453", "0.62888724", "0.6250845", "0.62464386", "0.6245248", "0.6237879", "0.623684", "0.6215244", "0.6214273", "0.6181688", "0.61522615", "0.6149998", "0.6147464", "0.61471444", "0.61396384", "0.61367124", "0.6115208", "0.6114216", "0.6096662", "0.60497975", "0.6046558", "0.6041628", "0.60363406", "0.6031456", "0.60203004", "0.6003842", "0.60026443", "0.5997388", "0.59960127", "0.5976012", "0.5974308", "0.59704137", "0.59575313", "0.5947276", "0.5946208", "0.5945773", "0.59400976", "0.5930304", "0.59241915", "0.59236616", "0.59236616", "0.591617", "0.59135306", "0.59072673", "0.59067655", "0.58997256", "0.58910483", "0.58907825", "0.5874663", "0.5874136", "0.58708227", "0.5858836", "0.5852764", "0.58464205", "0.58400863", "0.58373517", "0.58332074", "0.5831522", "0.5824715", "0.5812948", "0.58114696", "0.5794921", "0.57921606", "0.57888514", "0.5783902", "0.5782014", "0.57743824" ]
0.7567719
1
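The `find_instance` proxy in the row above delegates to a shared `_find` helper that resolves one resource by ID or name and, judging from the `Resource.find` negative in the same row, only raises when `ignore_missing` is false. A minimal, self-contained sketch of that lookup convention, assuming an in-memory resource list and a hypothetical `ResourceNotFound` error rather than the library's real internals:

from dataclasses import dataclass
from typing import Iterable, Optional


class ResourceNotFound(Exception):
    """No resource matched and ignore_missing was False (assumed error type)."""


@dataclass
class Instance:
    id: str
    name: str


def find_one(resources: Iterable[Instance], name_or_id: str,
             ignore_missing: bool = True) -> Optional[Instance]:
    candidates = list(resources)
    # Try an exact ID match first, then fall back to matching on name.
    for inst in candidates:
        if inst.id == name_or_id:
            return inst
    matches = [inst for inst in candidates if inst.name == name_or_id]
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        raise ValueError(f"{name_or_id!r} matches more than one instance")
    if ignore_missing:
        return None
    raise ResourceNotFound(f"no instance found for {name_or_id!r}")


pool = [Instance(id="42", name="db-primary"), Instance(id="43", name="db-replica")]
assert find_one(pool, "db-primary").id == "42"
assert find_one(pool, "missing") is None

Returning None instead of raising on a miss is what makes `ignore_missing=True` convenient for "create it if absent" call sites.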
Get a single instance
def get_instance(self, instance):
    return self._get(_instance.Instance, instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetInstance():\n pass", "def get_instance(self, instance_id):\n return self.instances.get(instance_id)", "def get_instance(cls, *args, **kwargs):\n if cls._instance is not None:\n return cls._instance\n return cls(*args, **kwargs)", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def getInstance(cls):\n cls.locker.acquire()\n try:\n if not cls.instance:\n cls.instance = cls()\n return cls.instance\n finally:\n cls.locker.release()", "def _get_instance(self, id):\n if id not in self._instances:\n self._instances[id] = self._load_constructor(id)\n\n return self._instances[id]", "def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]", "def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance", "def get(cls):\n return cls.instance", "def _get_instance(self):", "def _get_instance(self):", "def instance(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = cls()\n\n return cls._instance", "def instance(cls):\n if not hasattr(cls, '_instance'):\n cls._instance = cls()\n return cls._instance", "def get_instance(self, name):\n return self.store.instance.id", "def instance(self):\n return self.__instance", "def instance(self):\n return self._instance", "def get_instance(self, name):\n return self.website.instance.id", "def _get_instance(cls, configuration, auth_type):\n if configuration in cls._INSTANCES:\n return cls._INSTANCES[configuration]\n return cls._create_instance(configuration, auth_type)", "def get_instance(self, name):\n klass = self.get_class(name)\n return klass()", "def get_instance(self, ix=None, name=None):\n assert ix is None or name is None\n if ix is None:\n instance = [ex for ex in self.instances if ex.name == name]\n assert len(instance) == 1\n return instance[0]\n else:\n return self.instances[ix]", "def get(self, **search_terms):\n instances = self.filter(**search_terms)\n\n if not instances:\n raise NotFoundError(\"Nothing has been found.\")\n\n if len(instances) > 1:\n raise NotUniqueError(\"Serveral instance have been found.\")\n\n return instances[0]", "def getInstance():\n if Car.inst is None: Car.inst = Car()\n return Car.inst", "def get_instance(self, instance):\n\n title = list(instance.keys())[0]\n instance = instance.get(title)\n return instance", "def instance(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance\")", "def instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance\")", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def instance(self) -> str:\n return pulumi.get(self, \"instance\")", "def get_instance(self):\n try:\n return self._instance\n except AttributeError:\n self._instance = self._decorated()\n return self._instance", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def get(cls, id_: int):\n query = DBSESSION.query(cls)\n instance = query.get(id_)\n if not instance:\n raise ObjectNotFound(f\"Register of {cls.str_representation} not found for id = {id_}.\")\n return instance", "def get(self, sid):\n content = self._get(\"/\" + sid)\n\n # Get the instance out of the list\n content = json.loads(content)\n resources = 
content[self.name]\n\n return self._load_instance(resources[0])", "def instance(cls):\r\n if cls._INSTANCE is None:\r\n cls._INSTANCE = cls()\r\n return cls._INSTANCE", "def instance(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance\")", "def get_instance(self, container, cls, **params):\n if not cls in self.instances:\n self.instances[cls] = self.create_instance(container, cls, **params)\n \n return self.instances[cls]", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def get_instance(*, db_session, instance_id: int) -> WorkflowInstance:\n return (\n db_session.query(WorkflowInstance).filter(WorkflowInstance.id == instance_id).one_or_none()\n )", "def get_instance(cls, pid, instance_id=None):\n if not instance_id:\n # Find an available instance.\n for inst in cls._instance_pool:\n if not inst.locked:\n inst._acquire_lock(pid)\n \n\n if hasattr(cls, \"_pyroDaemon\"):\n cls._pyroDaemon.register(inst)\n \n\n return inst\n # Otherwise make a new instance if possible\n if cls.managed:\n if cls.MAXINSTANCES is None or cls.ninstances < cls.MAXINSTANCES:\n instance_id = cls.ninstances if instance_id is None else instance_id\n\n cls.ninstances += 1\n # Make the status directory.\n\n if hasattr(cls, \"_pyroDaemon\"):\n status_dir = os.path.join(cls.STATUS_DIR, 'mc_{}'.format(cls.ninstances))\n if not os.path.exists(status_dir):\n os.makedirs(status_dir)\n else:\n status_dir = None\n\n inst = cls.Instance(cls._get_valid_port(), status_dir=status_dir, instance_id=instance_id)\n cls._instance_pool.append(inst)\n inst._acquire_lock(pid)\n\n if hasattr(cls, \"_pyroDaemon\"):\n cls._pyroDaemon.register(inst)\n\n return inst\n \n else:\n raise RuntimeError(\"No available instances and max instances reached! :O :O\")\n else:\n raise RuntimeError(\"No available instances and managed flag is off\")", "def getinstance() :\n\t\treturn Jikji.instance", "def single(self):\r\n return single.Single(self)", "def find_instance(self, name_or_id, ignore_missing=True):\n return self._find(\n _instance.Instance, name_or_id, ignore_missing=ignore_missing\n )", "def get_cached_instance(cls, id):\n return cls.__dbclass__.__instance_cache__.get(id)", "def get_instance(*args):\n \n if c not in __Instances__:\n __Instances__[c] = c(*args)\n\n return __Instances__[c]", "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def get(cls, _id):\n return DataStore.get_instance(cls, _id)", "def inst(cls):\n if cls.instance is None:\n raise OptionsError(\"No options have been set\")\n return cls.instance", "def get_instance():\n if ModelUpdater._instance_ is None:\n ModelUpdater()\n return ModelUpdater._instance_", "def getInstance(klass):\n klass.locker.acquire()\n try:\n if not klass.instance:\n klass.instance = klass()\n return klass.instance\n finally:\n klass.locker.release()", "def get_object(self):\n queryset = self.get_queryset()\n\n model = self.get_model()\n obj = queryset.get(get_primary_keys(model, self.kwargs))\n\n if not obj:\n raise Http404('No %s matches the given query.' 
% model.__name__)\n\n return obj", "def get_instance(cls):\n\n if not cls._instance:\n cls._instance = Config()\n\n return cls._instance", "def _get_service_instance(self, name, no_cache=False):\n self._assert_space()\n\n if self._service_instance and not no_cache:\n return self._service_instance\n res = self._cc.request(self._space.service_instances_url)\\\n .get_by_name(name)\n self._service_instance = res.resource\n return self._service_instance", "def get_instance (self):\n instances = self.data['instances']\n if not len(instances):\n raise Exception, \"ArchivalObject: No Instances found\"\n for instance in instances:\n # print json.dumps(instance, indent=3)\n try:\n instance['sub_container']['top_container']\n return instance\n except:\n pass\n return None", "def instance(cls):\n if not cls.__singleton_instance:\n with cls.__singleton_lock: # # pylint: disable=E1129\n if not cls.__singleton_instance:\n cls.__singleton_instance = cls()\n return cls.__singleton_instance", "async def get_instance(self, resource_id) -> ApiResource:\n raw = await self.get_resource(resource_id)\n return self._resource_factory(self._get_to_actual_data(raw))", "def get_instance(self, db):\n table = db.metadata.tables['Instances']\n c_instance = table.c['instance']\n c_id = table.c['idInstance']\n # get prefix\n instance_header = db.session.connection().execute(select([func.substring(c_instance, 1, 4)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n data_length = db.session.connection().execute(select([func.length(c_instance)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n if data_length > 32 * 1024 * 1024:\n return \"Instance too large for processing. Please use the EDACC GUI application.\"\n if instance_header == 'LZMA': # compressed instance?\n # get blob without LZMA prefix\n instance_blob = db.session.connection().execute(select([func.substring(c_instance, 5)],\n c_id == self.idInstance).select_from(\n table)).first()[0]\n return utils.lzma_decompress(instance_blob)\n else:\n return self.instance", "def __call__(self, *args, **kwargs):\n if not self.instance:\n self.instance = super().__call__(*args, **kwargs)\n return self.instance", "def get_object(self, *args, **kwargs):\n\t\n #Setting the test_id\n\ttest_id = self.kwargs['test_id']\n try:\n return api.nova.server_get(self.request, test_id)\n except Exception:\n redirect = reverse(\"horizon:rally_dashboard:events:index\")\n msg = _('Unable to retrieve instance details.')\n exceptions.handle(self.request, msg, redirect=redirect)", "def get_instance(self, *args, **kwargs):\n self.pizza = None\n pk = self.kwargs.get('pk', None)\n if pk:\n try:\n self.pizza = Pizza.objects.get(pk=pk)\n except ObjectDoesNotExist:\n raise Http404(\"No %(verbose_name)s found matching the query\" %\n {'verbose_name': Pizza._meta.verbose_name})", "def get(cls, *args, **kwargs) -> object or None:\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n # if objects does not exist, we use None\n return None", "def GetInstance(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s\" %\n (GANETI_RAPI_VERSION, instance)), query, None)", "def _get_cached_instance(self):\n\n try:\n identifier = self._get_identifier()\n except (ValueError, ObjectDoesNotExist) as error:\n if self._fail_silently:\n return None\n raise LazyModelObjectError(exc=error) from error\n\n # Get the cache key, basically just namespacing the identifier\n cache_key = 
model_cache_key(identifier)\n\n cache, timeout = self._cache\n cace: BaseCache\n if cache_key in cache:\n instance = cache.get(cache_key)\n else:\n instance = self._get_instance(identifier)\n cache.set(cache_key, instance, timeout=timeout)\n\n if instance is None and not self._fail_silently:\n raise LazyModelObjectError(f'{identifier} not found.')\n return instance", "def fetch_one(cls: Type[_T], session: Session, identifier: int) -> _T:\n return Query(cls, session=session).get(identifier)", "def load(cls):\n\n try:\n return cls.objects.get()\n except cls.DoesNotExist:\n return cls()", "def Instance (cls, instance = None):\n try:\n with cls.instance_lock:\n if instance is None:\n if cls.instance is None:\n cls.instance = Core ()\n else:\n if instance is cls.instance:\n return instance\n instance, cls.instance = cls.instance, instance\n return cls.instance\n finally:\n if instance:\n instance.Dispose ()", "def return_instance(cls):\n return cls()", "def get_instance(self, data):\n if self.transient:\n return None\n props = get_primary_keys(self.opts.model)\n filters = {prop.key: data.get(prop.key) for prop in props}\n if None not in filters.values():\n return self.session.query(self.opts.model).filter_by(**filters).first()\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"build\"] = None\n __props__.__dict__[\"config\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"state_message\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)", "def get_instance(*args, **kwargs):\n key = (TheClass, args, str(kwargs))\n if key not in class_instances:\n class_instances[key] = TheClass(*args, **kwargs)\n return class_instances[key]", "def getInstance(screen):\r\n if Game.__instance == None:\r\n Game.__instance = Game(screen)\r\n return Game.__instance", "def get_instance(cls):\n return cls.__new__(cls)", "def get_instance(cls):\n return cls.__new__(cls)", "def get_instance(cls):\n return cls.__new__(cls)", "def get_instance():\n if not Cache.__instance__:\n Cache.__instance__ = Cache(config('REDIS_HOST'), config('REDIS_PORT'))\n return Cache.__instance__", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def get_one(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).first()\n self.lock.release()\n return result", "def get_instance(cls, *lstArgs, **dctKwArgs):\n if cls._isInstantiated():\n if (lstArgs or dctKwArgs) and not hasattr(cls, 'ignoreSubsequent'):\n raise SingletonException, 'Singleton already instantiated, but getInstance() called with args.'\n else:\n _createSingletonInstance(cls, lstArgs, dctKwArgs)\n\n return cls.cInstance", "def _get_instance(identifier):\n # noinspection PyBroadException\n try:\n app_label, model, object_pk = identifier.split('.', maxsplit=2)\n # we don't expect to find anything, so don't log\n if object_pk != 'None':\n if object_pk == OBJECT_DOES_NOT_EXIST:\n raise ObjectDoesNotExist()\n content_type = 
ContentType.objects.get_by_natural_key(app_label, model)\n return content_type.get_object_for_this_type(pk=object_pk)\n except ContentType.DoesNotExist:\n logging.warning(f'Could not find content type for {identifier!r}')\n except ObjectDoesNotExist:\n logging.warning(f'Could not find related object for {identifier!r}')\n except DatabaseError: # don't mask these\n raise\n except Exception:\n logging.exception(f'Could not get related object for {identifier!r}', log_function=logging.error)", "def _get_instance_id(self):\n return self.__instance_id", "def get_instance(self, data):\n filters = {\n key: data[key]\n for key in self.fields.keys() if key in self.lookup_fields}\n\n if None not in filters.values():\n return self.session.query(\n self.opts.model\n ).filter_by(\n **filters\n ).first()\n return None", "def get(self, id):\n if id == 'body':\n return document.body\n else:\n return self.instances[id]", "def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_instance(self):\n if DoubleSingleton.is_odd:\n DoubleSingleton.is_odd = False\n return DoubleSingleton.odd_instance\n if DoubleSingleton.is_odd is False:\n DoubleSingleton.is_odd = True\n return DoubleSingleton.even_instance", "def get_instance(self, zone, instance, fields=None):\n assert is_valid_zone(zone), zone\n assert is_valid_instance(instance), instance\n try:\n return self.call_api(\n '/zones/%s/instances/%s' % (zone, instance),\n params={'fields': ','.join(fields)} if fields else None)\n except net.NotFoundError: # pragma: no cover\n return None", "def get_object(id):", "def get_object(self, id, **args):\n return self.request(id, args)", "def get_instance():\n if Classifier.__instance is None:\n Classifier()\n return Classifier.__instance", "async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))", "def test_get_instance(self):\n url = \"http://www.google.com/\"\n http_response = requests.get(url, timeout=2.5)\n router_response = RouterResponse(\n url=url,\n method='get',\n status_code=http_response.status_code,\n port=RouterResponse.get_port_from_url(url),\n headers=RouterResponse.convert_headers(http_response.headers),\n body=http_response.text)\n identifier = Identifier.get_instance(router_response)\n self.assertTrue(identifier.parsing_succeeded())\n self.assertIsNotNone(identifier.title())", "def get_instance(tag):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"tag\" in inst.tags.keys():\n if inst.tags[\"tag\"] == tag and inst.state == \"running\":\n #print \"Found %s\"%tag\n return inst\n print \"Couldn't find instance\"\n return None", "def instance():\n global inst\n try:\n inst\n except:\n inst = BNVMAPI(None)\n return inst", "def singleton(cls):\n instances = {}\n\n def getinstance():\n \"\"\" Creates a single object and use instances dict as cache \"\"\"\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]\n return getinstance", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get_instance(c: Config) -> NotionDAO:\n if c.notion_official_configured:\n result = APIv2(c)\n else:\n result = APIv1(c)\n return result", "async def get_one(self, where: 
t.Mapping[str, t.Any]) -> t.Optional[Model]:\n\n data = await self.collection.find_one(where)\n return self.model_class(**data) if data else None", "def getInstance():\n return net()", "def get_instance(app: Sanic = None):\n if Configs.__instance is None:\n Configs(app)\n return Configs.__instance", "def get(cls, pk):\n return DBSession().query(cls).get(pk)", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args,\n **kwargs)\n return cls._instances[cls]", "async def get_one(self, where):\n\n pass", "def get_instance(cls, name=None) -> 'Stamp':\n if name:\n try:\n return cls._stamps_cache[name]\n except KeyError:\n raise NoStampException\n else:\n stamp_names = list(cls._stamps_cache.keys())\n if len(stamp_names) == 1:\n return cls._stamps_cache[stamp_names[0]]" ]
[ "0.7694641", "0.75701267", "0.75683355", "0.7447133", "0.7252449", "0.7242679", "0.7239021", "0.7208093", "0.71733356", "0.71482056", "0.71482056", "0.7138967", "0.70699036", "0.7006299", "0.69852996", "0.69710827", "0.69649774", "0.69454527", "0.69406265", "0.6925822", "0.6866705", "0.6852299", "0.6851537", "0.68156976", "0.6790521", "0.6790394", "0.6790394", "0.6788033", "0.6723616", "0.6703117", "0.66981727", "0.6697095", "0.6685646", "0.6671392", "0.66145635", "0.65606475", "0.65595376", "0.6555584", "0.6532364", "0.6505068", "0.6477435", "0.6473033", "0.643556", "0.64344066", "0.64295167", "0.6427933", "0.64131504", "0.64080393", "0.64065486", "0.6404631", "0.6393497", "0.63672644", "0.63640726", "0.63639545", "0.63586867", "0.63508236", "0.63212645", "0.63108957", "0.6306451", "0.6304596", "0.62900996", "0.6272382", "0.625919", "0.62418085", "0.6236926", "0.6235515", "0.6202094", "0.6200196", "0.61950314", "0.61950314", "0.61950314", "0.6194233", "0.61908436", "0.61811626", "0.6178459", "0.61773384", "0.6168644", "0.61644596", "0.61617476", "0.61587894", "0.61570483", "0.6129443", "0.6115892", "0.61156726", "0.6113926", "0.6110403", "0.6107381", "0.61065847", "0.61022246", "0.610161", "0.60907674", "0.60901064", "0.60899603", "0.60844505", "0.6083697", "0.6075465", "0.6071525", "0.606376", "0.6060523", "0.6034275" ]
0.7944661
0
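The `get_instance` proxy in the row above is a thin wrapper over a generic `_get` that fetches exactly one record. A minimal sketch of that get-one pattern, assuming a plain dict as the backing store, a hypothetical `NotFoundError`, and that the proxy accepts either a bare ID or a resource object (all assumptions for illustration, not the SDK's internals):

class NotFoundError(KeyError):
    """The requested instance does not exist (assumed error type)."""


class InstanceStore:
    def __init__(self):
        self._by_id = {}

    def add(self, instance_id, record):
        self._by_id[instance_id] = record

    def get_instance(self, instance):
        # Accept either a bare ID or an object carrying an `.id` attribute;
        # the proxy above is assumed to allow both forms.
        instance_id = getattr(instance, "id", instance)
        try:
            return self._by_id[instance_id]
        except KeyError:
            raise NotFoundError(f"no instance with id {instance_id!r}") from None


store = InstanceStore()
store.add("42", {"id": "42", "name": "db-primary"})
assert store.get_instance("42")["name"] == "db-primary"

Unlike the find-style lookup, a get is expected to fail loudly when the ID is unknown rather than quietly return None.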
Return a generator of instances
def instances(self, **query):
    return self._list(_instance.Instance, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead", "def __iter__(self):\n return self.new_generator()", "def instances(self):\n for d in os.listdir(self.directory):\n yield self.instance(self.directory, d)", "def __iter__(self):\n yield from self.gen", "def __iter__(self):\n for classresult in self.classresults:\n yield classresult", "def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst", "def __iter__(self) -> Generator:\n\t\treturn (article for article in self._articles)", "def __iter__(self):\n\n return [self]", "def __iter__(self):\n\n return [self]", "def get_instances(cls):\n raise NotImplementedError", "def __iter__(self):\n yield self", "def all(cls):\n for x in cls._dbag:\n yield cls(**cls._dbag[x])", "def __iter__(self):\n for o in self._iter:\n yield o", "def __iter__(self):\n for benchclass in sorted(self.classes.values()):\n yield benchclass", "def __iter__(self):\n return self._product_generator()", "def permutations(self, key):\n for class_ in inspect.getmro(key):\n yield class_\n if class_ is not object:\n yield object", "def iterator(self):\n yield", "def __next__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n for instresult in self.instresults:\n yield instresult", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def __iter__(self) -> object:\n return self", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def gen_extractors():\n return [klass() for klass in gen_extractor_classes()]", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def __iter__(self):\r\n return self", "def __iter__(self):\n return iter(self.__iter())", "def all_model_instances(self) -> Iterator['panda_core_data.model.Model']:\n for current_type in self.all_models:\n for current_instance in current_type.all_instances:\n yield current_instance", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def __iter__(self):\n\n return self", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in 
range(len(self)):\n yield self[i]", "def generator(self, data):\n for instance in data:\n yield (0, [str(instance.string)])", "def generator(self):\n return [None, 1]", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def yield_instances(self, instance_filter=None):\n if instance_filter and set(\"\\\"\\\\'\").intersection(instance_filter):\n raise ValueError('Invalid instance filter: %s' % instance_filter)\n page_token = None\n while True:\n params = {'maxResults': 250}\n if instance_filter:\n params['filter'] = 'name eq \"%s\"' % instance_filter\n if page_token:\n params['pageToken'] = page_token\n resp = self.call_api('/aggregated/instances', params=params, deadline=120)\n items = resp.get('items', {})\n for zone in sorted(items):\n for instance in items[zone].get('instances', []):\n yield instance\n page_token = resp.get('nextPageToken')\n if not page_token:\n break", "def __iter__(self) -> Iterator[object]:\n return iter(\n list(self._new.values()) + list(self.identity_map.values())\n )", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def __iter__(self):\n yield self.match", "def __iter__(self):\n for x in self.seq: yield x", "def __iter__(self):\r\n return self", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def __iter__(self):\n # type: () -> Iterator[Any]\n return iter(self[index] for index in range(len(self)))", "def __iter__(self):\n for run in self.runs:\n yield run", "def __iter__(self):\n return iter(())", "def generators(self):\n return self._generators", "def get_generator_class(self) -> Any:", "def __iter__(self):\n return self", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def __iter__(self):\n for tree in self._tree.subTrees():\n yield self.__class__(tree)", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def __iter__(self):\n return self.next()", "def __iter__(self):\n for plug in self.plugs:\n yield plug", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def __iter__(self):\n return (self.get_node(node_id) for node_id in self._collection.all_keys())", "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "def node_gen(self):\n for n in self.child_list:\n yield from n.node_gen\n yield self", "def __iter__(self) -> Iterator[T]:\n return self", "def __iter__(self):\n for child in self.children:\n yield child", "def __iter__(cls):\n return iter(cls.__by_number.values())", "def __iter__(self):\n for atom in self.iter_atoms():\n yield atom", "def new_generator(self):\n return self.generator_function(*self.args, **self.kwargs)", "def __iter__(self):\r\n return self._iterate()", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def __iter__(self):\n counter = 0\n while True:\n if counter < len(self.all_records):\n yield self.all_records[counter]\n else:\n yield self.next()\n counter += 1", "def __iter__(self):\n for feature in self.features:\n yield feature", "def __iter__(self):\n for b in self.x:\n yield b", "def __iter__(self):\n for x in 
self.innings:\n yield x", "def generators(self) -> List[Generator]:\n return self._generators", "def __iter__(self):\n return iter({})", "def __iter__(self) -> Generator[str, None, None]:\n\n yield from self.__dict__[\"members\"]", "def _generators(self):\n return self.free_group.generators", "def __call__(self):\n yield from self", "def classIterator(classIter):\n for attribute, value in classIter.__dict__.iteritems():\n yield attribute, value", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()" ]
[ "0.7728519", "0.764147", "0.73204434", "0.70539004", "0.6947389", "0.6733554", "0.67039716", "0.67004335", "0.67004335", "0.6692241", "0.66860837", "0.66724557", "0.6664278", "0.6646873", "0.6614654", "0.6586283", "0.6565416", "0.6558471", "0.65531677", "0.6528782", "0.64961296", "0.6495389", "0.6494068", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.6476666", "0.64687985", "0.6463204", "0.6432056", "0.6424805", "0.6424661", "0.6417883", "0.64034593", "0.63755983", "0.63755983", "0.63669455", "0.6357966", "0.6357886", "0.6357886", "0.6357886", "0.6357886", "0.63398653", "0.6338192", "0.631321", "0.63097394", "0.6295738", "0.6294294", "0.627754", "0.6276569", "0.62749976", "0.627131", "0.6269852", "0.6263442", "0.62614936", "0.6250488", "0.6242982", "0.62343764", "0.62267214", "0.62239814", "0.62054825", "0.62048346", "0.62035996", "0.6201454", "0.6195571", "0.6192514", "0.61882794", "0.61755663", "0.6174365", "0.61670804", "0.61634874", "0.61480045", "0.6136955", "0.6130327", "0.61264205", "0.6123656", "0.6121864", "0.611489", "0.6111753", "0.6107204", "0.610542", "0.6098074", "0.6098074" ]
0.0
-1
Create a new user from attributes
def create_user(self, instance, **attrs): instance = self._get_resource(_instance.Instance, instance) return self._create(_user.User, instance_id=instance.id, **attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(cls, **data):\n user = cls()\n for attribute in data:\n if hasattr(user, attribute):\n setattr(user, attribute, data[attribute])\n user.password = data[\"password\"]\n db.session.add(user)\n return user", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def users_create():", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def create_user(email, password, f_name, l_name):\n pass", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def new_user(first_name, sur_name, user_name, email, password):\n new_user = User(first_name, sur_name, user_name, email, password)\n return new_user", "def create(self, validated_data:tuple):\n user = user_details.objects.create(user_name=validated_data[0], email=validated_data[1], password=validated_data[2])\n return user", "def create_user_object(self, request):\r\n user = {\r\n \"first_name\": request.form.get(\"first_name\"),\r\n \"last_name\": request.form.get(\"last_name\"),\r\n \"age\": request.form.get(\"age\"),\r\n \"cpr_number\": request.form.get(\"CPR\"),\r\n \"email\": request.form.get(\"email\"),\r\n \"phone_number\": request.form.get(\"phone_number\"),\r\n \"password\": PasswordHasher().hash(request.form.get(\"password\")),\r\n \"bank_account\": str(BankAccount(\"Savings\", 1000.00).store_account().inserted_id),\r\n \"crypto_wallet\": str(CryptoWallet(\"Bitcoin\", 0.0045).store_account().inserted_id)\r\n }\r\n return user", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n 
self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def create_user():\n new_dict = request.get_json(silent=True)\n if type(new_dict) is dict:\n if \"email\" not in new_dict.keys():\n return jsonify({\"error\": \"Missing email\"}), 400\n elif \"password\" not in new_dict.keys():\n return jsonify({\"error\": \"Missing password\"}), 400\n else:\n user = User(email=new_dict[\"email\"], password=new_dict[\"password\"])\n for k, v in new_dict.items():\n setattr(user, k, v)\n user.save()\n return jsonify(user.to_dict()), 201\n else:\n return jsonify({\"error\": \"Not a JSON\"}), 400", "def create_user(kwargs):\n fields = f'({\",\".join(kwargs.keys())})'\n values = f'({\",\".join(kwargs.values())})'\n stmt = 'INSERT INTO users %s VALUES %s;' % (fields, values,)\n with get_conn().cursor() as cur:\n cur.execute(stmt)\n return kwargs", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def new_users(name_first, name_two, email_adress, user_name, pass_word):\n new_user = UserData(name_first, name_two, email_adress, user_name, pass_word)\n\n return new_user", "def create_by_args(self, params):\n signup_args = {}\n for arg in self.signup_args:\n signup_args[arg] = params.get(arg)\n\n # we don't use password, we use the magic raw_password\n del signup_args['password']\n signup_args['password_raw'] = params.get('password')\n prime_key = params[self.prime_key].lower()\n unique_properties = [self.prime_key]\n user_data = self.user_model.create_user(\n prime_key,\n unique_properties,\n **signup_args\n )\n\n if not user_data[0]: # user_data is a tuple\n details = \"Duplicate user id\"\n raise CustomException(error_code='', details=details)\n user = user_data[1]\n user.put()\n return user", "def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, 
last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "def create(self,validated_data):\n user_obj = User.objects.create(**validated_data)\n return user_obj", "def create(self, validated_data):\n return User.objects.create(**validated_data)", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user():\n usr = request.get_json()\n if not usr:\n abort(400, {'Not a JSON'})\n elif 'email' not in usr:\n abort(400, {'Missing email'})\n elif 'password' not in usr:\n abort(400, {'Missing password'})\n else:\n new_usr = User(**usr)\n storage.new(new_usr)\n storage.save()\n return jsonify(new_usr.to_dict()), 201", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "def create(self, **kwargs):\n\n # Normalize the address by lowercasing the domain part of the email\n # address.\n try:\n email_name, domain_part = kwargs['email'].strip().split('@', 1)\n except ValueError:\n pass\n else:\n kwargs['email'] = '@'.join([email_name.lower(), domain_part.lower()])\n \n user = User(**kwargs)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(entry):\n # only works for first + last name currently\n full_name = entry[5].split()\n email = '{first_name}-{client_id}@{domain}'.format(\n first_name=full_name[0].lower(),\n client_id=str(entry[4]).strip(), # unique email for clients with same name\n domain='example.com')\n password = 'test1234'\n dob = timezone.now() - timedelta(days=(365 * random.randint(18, 99)))\n try:\n user = get_user_model().objects.get(email=email)\n except get_user_model().DoesNotExist:\n user = get_user_model().objects.create_user(email=email, first_name=full_name[0],\n last_name=full_name[1], password=password, dob=dob)\n return user", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = 
passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create(self, validated_data):\n user = User.objects.create(\n first_name=validated_data.get('first_name'),\n middle_name=validated_data.get('middle_name'),\n last_name=validated_data.get('last_name'),\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n mobile_number=validated_data.get('mobile_number'),\n gender=validated_data.get('gender'),\n is_active=validated_data.get('is_active'),\n country=validated_data.get('country'),\n address=validated_data.get('address'),\n role=validated_data.get('role'),\n )\n if self.context['request'].data.get('file_profile_picture') is not None:\n user.profile_picture = self.context['request'].data['file_profile_picture']\n if self.context['request'].data.get('file_signature') is not None:\n user.signature = self.context['request'].data['file_signature']\n user.set_password(validated_data.get('password'))\n user.save()\n return user", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create(self, username, password, email):\n pass", "def create(self, request):\n if not hasattr(request, \"data\"):\n request.data = request.POST\n attrs = self.flatten_dict(request.data)\n\n username = attrs['username']\n email = attrs['email']\n password = attrs['password']\n same_name_count = User.objects.filter(username = username).count()\n if same_name_count:\n return RC.DUPLICATE_ENTRY\n user = User(username = username, email = email)\n user.set_password(password)\n user.save()\n user.message_set.create(message=\"Confirmation email sent to %s\" % email)\n EmailAddress.objects.add_email(user, email)\n return user", "def create():\n api_request = apireq.APIRequest(request, 'client_schema')\n if api_request.is_invalid():\n return api_request.error_text, 400\n return 
user_management.create_user(api_json['username'])", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def create_user():\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n if 'email' not in body:\n abort(400, jsonify(error=\"Missing email\"))\n if 'password' not in body:\n abort(400, jsonify(error=\"Missing password\"))\n user = models.user.User(**body)\n models.storage.new(user)\n models.storage.save()\n return make_response(jsonify(user.to_dict()), 201)", "def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user", "def new_user(cls, user):\r\n pass", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user", "def _create_user_Api(self,password,username, **extra_fields):\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n user = self.model(email=username,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def create_user(fname, lname, email, password):\n\n user = User(fname=fname, lname=lname, email=email, password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_user():\r\n if not request.is_json 
or 'name' not in request.get_json() or 'phone_number' not in request.get_json() or 'password' not in request.get_json():\r\n return bad_request('Missing required data.')\r\n try:\r\n return add_user(request)\r\n except:\r\n return bad_request(error_messages['user_exist'])", "def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def create( self , validated_data ) :\n\n user = models.UserProfile(\n email = validated_data[ 'email' ] ,\n name = validated_data[ 'name' ]\n )\n\n user.set_password( validated_data[ 'password' ] )\n user.save( )\n\n return user", "def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id", "def _create_user(self, FirstName,LastName, EmailId, MobileNo, password=None, **extra_fields):\n if not (FirstName and LastName):\n raise ValueError(\"The user's Name must be set\")\n if not EmailId:\n raise ValueError('The given EmailId must be set')\n if not password:\n raise ValueError('The given password must be set')\n if not MobileNo:\n raise ValueError('The given mobile must be set')\n EmailId = self.normalize_email(EmailId)\n user = self.model(FirstName =FirstName, LastName =LastName ,EmailId=EmailId, MobileNo=MobileNo, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user():\n try:\n\n user = User(username=request.json.get(\"username\"), score=0,)\n\n user.insert()\n\n response = jsonify({\"success\": True, \"created_user_id\": user.id})\n\n except AttributeError:\n abort(400)\n\n return response", "def create_user(fname, phone_num):\n\n user = User(fname=fname, phone_num=phone_num)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(fname, lname, email, password):\n\n user = User(fname=fname, \n lname=lname, \n email=email, \n password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n password = validated_data['password']\n )\n return user", "def createuser(self, firstname, lastname, email, address1, address2, city, state, country, zipcode, password):\n uquery = {'firstname': firstname,\n 'lastname': lastname,\n 'address1': address1,\n 'address2': address2,\n 'city': city,\n 'state': state,\n 'country' : country,\n 'zipcode' : zipcode,\n 'email': email,\n 'password': password\n }\n\n userdb = self.dbase['users']\n urecord = uquery.copy()\n urecord['created'] = self.epoch()\n emailquery = { 'email': uquery['email'] }\n uqresult= userdb.find_one(emailquery)\n\n result = {'exists': False, 'userid': None}\n if uqresult:\n result['exists'] = True\n result['userid'] = str(uqresult['_id'])\n logging.info(\"== Record Exists. Skipping update. 
{}\".format(uqresult))\n else:\n logging.info(\"== Record does not exist, creating entry \")\n uqresult = userdb.insert_one(urecord)\n uqresult = userdb.find_one(urecord)\n result['userid'] = str(uqresult['_id'])\n\n return json.dumps(result)", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def sample_user(email=user_v['email'], password=user_v['password']):\n return get_user_model().objects.create_user(email, password)", "def create_user():\n try:\n payload = _validatePayload(request)\n timestamp = int(time.time() * 1000)\n user = {\n 'name': payload.get('name'),\n 'email': payload.get('email'),\n 'password': _encodePassword(payload.get('password')),\n 'createdAt': timestamp,\n 'updatedAt': timestamp,\n }\n\n resp = table.put_item(\n Item=user,\n Expected={'email': {'Exists': False}}\n )\n return jsonify(user), 200\n except Exception as e:\n logger.info('ERROR {}'.format(str(e)))\n return _customizeErrorMessage(e)", "def post_user_obj():\n dic = {}\n dic = request.get_json(silent=True)\n if dic is None:\n abort(400, \"Not a JSON\")\n if \"password\" not in dic.keys():\n abort(400, \"Missing password\")\n if \"email\" not in dic.keys():\n abort(400, \"Missing email\")\n new_user = user.User()\n for k, v in dic.items():\n setattr(new_user, k, v)\n new_user.save()\n return jsonify(new_user.to_dict()), 201", "def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"]\n )\n\n return user", "def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def new_user(cls, user):\n pass", "def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user", "def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def create_user():\n email = request.json.get('email')\n username = request.json.get('username')\n password = request.json.get('password')\n\n details = [email, username, password]\n\n if not all(details):\n return bad_request(\"you must supply email, username and password\")\n if User.query.filter_by(email=email).first() is not None and User.query.filter_by(username=username) is not None:\n return forbidden(\"email or username already exist\")\n\n user = User(email=email, username=username)\n user.hash_password(password)\n user.save()\n\n return {'status': (user.username + ' has successfully registered')}", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create(self, body):\n try:\n user_record = UserRecord.create_user(\n email=body[\"email\"],\n password=body[\"password\"],\n 
display_name=body[\"display_name\"],\n auth=web_sdk.auth,\n )\n complete_register = body.get(\"complete_register\") or False\n user_record.make_claims({\"complete_register\": complete_register})\n user = User(\n uid=user_record.uid,\n email=user_record.email,\n display_name=user_record.display_name,\n phone_number=body.get(\"phone_number\"),\n name=body[\"name\"],\n lastname=body[\"lastname\"],\n headline=body.get(\"headline\"),\n about_me=body.get(\"about_me\"),\n complete_register=complete_register,\n link_video=body.get(\"link_video\"),\n timezone=body.get(\"timezone\"),\n location=body.get(\"location\"),\n )\n\n if \"specialities\" in body:\n user.append_specialities(body[\"specialities\"])\n if \"methods\" in body:\n user.append_methods(body[\"methods\"])\n if \"plans\" in body:\n user.append_plans(body[\"plans\"])\n\n user.add()\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}\n except KeyError as ex:\n raise HandlerException(400, \"Bad request: \" + str(ex))", "def create(self, validated_data):\n\n user = models.User(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n user.save()\n\n return user", "async def create_user(user_request: UserRequestModel):\n\n user = User.create(\n username=user_request.username,\n email=user_request.email\n )\n\n return user", "def user_create(client_id, email, password=None, first_name=None, last_name=None, user_info=None):\n # validate if email contains actually a valid email address:\n try:\n validate_email(email)\n except ValidationError:\n raise ex.UserError(\"please enter a valid email address\")\n # create account\n user = create_user(email)\n user.first_name = first_name\n user.last_name = last_name\n if password:\n user.set_password(password)\n if user_info:\n for (key, value) in user_info.iteritems():\n if key == \"social\" and value is not None: user.meta['social'] = value\n elif key == \"address\" and value is not None: user.meta['address'] = value\n elif key == \"crm\" and value is not None: user.meta['crm'] = value\n elif key == \"local\" and value is not None: user.meta['local'] = value\n \n user_info = user_to_dict(user, include_name=True)\n\n # build success result\n return user_info", "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def create_user():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n\n data = request.json\n #TODO check if request body contain required keys\n #if [\"login\", \"password\", \"user\", \"email\", \"first_name\", \"second_name\", \"phone\"].sort() != (data.keys()).sort():\n # return jsonify(status=\"err\")\n\n login = data[\"login\"]\n hash_password = raw_password_to_string(str(data[\"password\"]))\n role = \"user\"\n email = data[\"email\"]\n first_name = data[\"first_name\"]\n second_name = data[\"second_name\"]\n phone = data[\"phone\"] \n #TODO data validation\n #if login == \"\" or hash_password == \"\" or role 
== \"\" or email == \"\" or first_name == \"\" or second_name == \"\":\n # return jsonify(status=\"error\")\n\n db.session.add(User(login=login, hash_password=hash_password, role=role, email=email, first_name=first_name, second_name=second_name, phone=phone))\n try:\n db.session.commit()\n return jsonify(status=OK_STATUS)\n except:\n db.session.rollback()\n return jsonify(status=DATABASE_INTEGRITY_ERROR)", "def createUser(login_session):\r\n newUser = User_info(name=login_session['username'],\r\n email=login_session['email'],\r\n picture=login_session['picture'])\r\n session.add(newUser)\r\n session.commit()\r\n user = session.query(User_info).\\\r\n filter_by(email=login_session['email']).one()\r\n return user.id", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def _create_user(self, username, email, persona_id, nombre_completo, password, is_staff, is_superuser,\n **kwargs):\n now = timezone.now()\n if not email:\n raise ValueError(_('El email debe ser proporcionado'))\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n persona_id=persona_id,\n nombre_completo=nombre_completo,\n email=email,\n is_staff=is_staff,\n is_active=True,\n is_superuser=is_superuser,\n last_login=now,\n fecha_registro=now,\n **kwargs\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n\n # Here we actually create a new user.\n user = models.UserProfile(\n email = validated_data['email'],\n name = validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n # Here we save the object to the database.\n user.save()\n\n return user", "def new(self):\n\n for req_var in self.required_attribs:\n if req_var not in self.kwargs:\n err = \"The '%s' kwarg is required when creating a new user!\"\n self.logger.error(err % req_var)\n raise ValueError(err % req_var)\n\n self.logger.warn('Creating new user!')\n self.name = self.kwargs.get('name')\n self.email = self.kwargs.get('email').lower()\n self.created_on = datetime.now()\n\n try:\n self._id = self.mdb.insert({'email': self.email})\n except pymongo.errors.DuplicateKeyError:\n raise ValueError(\"Email '%s' is already in use!\" % self.email)\n\n if self.save(verbose=False):\n self.logger.warn('Created new user! 
%s' % self)\n else:\n raise AttributeError('New user record could not be saved!')\n\n self.update_password(self.kwargs.get('password'))", "def create_user(self, phone, password=None, **extra_fields):\n print(extra_fields)\n if not phone:\n raise ValueError('Users must have an phone number')\n if not password:\n raise ValueError('Users must have a password')\n try:\n extra_fields['role']\n except Exception:\n raise ValueError('Users must have a role')\n try:\n extra_fields['name']\n except Exception:\n raise ValueError('Users must have a name') \n user = self.model(phone=phone, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def create_user(email, password, home_zipcode):\n\n user = User(email=email, password=password, home_zipcode=home_zipcode)\n db.session.add(user)\n db.session.commit()\n return user", "def create_profile(self, user, *args, **kwargs):\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key, **kwargs)", "def CreateNewUser(self,name):\n new_user_id = self.db_manager.InsertData(name)\n self.db_manager.UpdateData(new_user_id,0)\n user_data = UserDTO()\n user_data.SetId(new_user_id)\n user_data.SetName(name)\n user_data.SetAmountMoney(0)\n\n return JsonSerializer.SerializeObject(user_data)", "def create_new_user(email, password, user_handle, age=None):\n\n return User(email=email,\n password=password,\n user_handle=user_handle,\n age=age)", "def create_user():\r\n data = request.get_json() or {}\r\n print(data)\r\n # some data checks\r\n if 'username' not in data or 'password' not in data:\r\n return bad_request('must include username and password fields')\r\n if User.query.filter_by(username=data['username']).first():\r\n return bad_request('please use a different username')\r\n user = User()\r\n # add user to database\r\n user.add_user(data)\r\n # check that the transaction was successful\r\n res = User.query.filter_by(username=data['username']).one_or_none()\r\n # return added user as query response\r\n if res:\r\n response = jsonify(res.to_dict())\r\n response.status_code = 201\r\n # else return error\r\n else:\r\n response.status_code = 403\r\n response.headers['Location'] = url_for('api.get_user', id=user.id)\r\n return response", "def create_user():\n username = request.get_json().get(\"name\", None)\n role = request.get_json().get(\"role\", None)\n email = request.get_json().get(\"email\", None)\n return jsonify(\n admin.create_user(current_app.scoped_session(), username, role, email)\n )", "def create(self, validated_data):\n\n user = models.User(\n email = validated_data['email'],\n name = validated_data['name'] \n )\n\n # This will encrypt the password first and then assign it to the user.\n user.set_password(validated_data['password'])\n\n # Save user into database.\n user.save()\n\n return user", "def create_user(fname, lname, email, username, password, category, country):\n try:\n user = User(fname=fname,\n lname=lname,\n email=email,\n username=username,\n password=password,\n preferred_category_id=category,\n preferred_country_id=country)\n\n db.session.add(user)\n db.session.commit()\n return user\n\n except IntegrityError:\n 
db.session.rollback()\n return None", "def _create_user(self, email, mobile_number, password, **extra_fields):\n\n print('model number')\n print(mobile_number)\n \n user = self.model(email=email,mobile_number = mobile_number, **extra_fields)\n user.set_password(password)\n \n user.save(using=self._db)\n return user", "def create_user(self, username, password,cpf, **extra_fields):\n\n user = self.model(username=username, cpf=cpf, **extra_fields)\n user.set_password(password)\n user.save()\n return user" ]
[ "0.7783138", "0.7630757", "0.7628778", "0.7590039", "0.7514446", "0.7495538", "0.7476639", "0.7472641", "0.7424796", "0.74176484", "0.73413366", "0.73327565", "0.73321164", "0.7310507", "0.72872263", "0.7284248", "0.7278472", "0.7245254", "0.7237018", "0.72202486", "0.72147137", "0.7211014", "0.7205506", "0.71860707", "0.718152", "0.71676177", "0.7150409", "0.71439576", "0.7139982", "0.7132148", "0.7125324", "0.7118744", "0.7112054", "0.7093816", "0.7087863", "0.70711035", "0.7057015", "0.7055743", "0.7052692", "0.7041324", "0.70383495", "0.70278484", "0.70224464", "0.70188653", "0.7016015", "0.70061165", "0.70042276", "0.70027775", "0.7001349", "0.69996125", "0.6998036", "0.6990562", "0.69905454", "0.6986608", "0.69810873", "0.69764596", "0.6974241", "0.6973169", "0.6958335", "0.6955218", "0.69541264", "0.6951181", "0.6936111", "0.6934368", "0.69326913", "0.69326174", "0.69234115", "0.69053936", "0.6899683", "0.6898657", "0.6898206", "0.6896826", "0.6894677", "0.68944746", "0.68893653", "0.6879749", "0.687078", "0.6870658", "0.68686277", "0.686704", "0.68665856", "0.686547", "0.68652916", "0.6859015", "0.6854103", "0.68534297", "0.6852377", "0.6851446", "0.68506634", "0.6848081", "0.68473095", "0.6841106", "0.6837676", "0.68279713", "0.682425", "0.6819375", "0.68180645", "0.68167114", "0.68117315", "0.68114007" ]
0.70159054
45
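The record just above pairs the query "Create a new user from attributes" with an openstacksdk-style database proxy method that resolves the instance via _get_resource and then creates a _user.User from the remaining keyword attributes. A minimal usage sketch follows, assuming that proxy is exposed as conn.database on an openstacksdk connection; the cloud name, instance ID, and user attributes are illustrative assumptions, not values taken from the dataset.

import openstack

# Hypothetical clouds.yaml entry; adjust to the target environment.
conn = openstack.connect(cloud="example-cloud")

# _get_resource accepts either an Instance object or its ID, so a bare ID
# string is enough here. The remaining keyword arguments become the new
# user's attributes, exactly as **attrs is forwarded to _create above.
user = conn.database.create_user(
    "INSTANCE_ID",
    name="alice",
    password="s3cret",
)
print(user.name)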
Find a single user
def find_user(self, name_or_id, instance, ignore_missing=True): instance = self._get_resource(_instance.Instance, instance) return self._find( _user.User, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_user(user_name):\n return User.find_by_user_name(user_name)", "def find_user(self, value, key=\"name\"):\n if not value:\n return\n\n if key.lower() not in (\"name\", \"id\", \"email\"):\n raise ValueError()\n\n if key.lower() == \"id\":\n return self.get_user(value)\n else:\n params = {key.lower(): value}\n try:\n return self.api.hq.get_users_search(**params)[0]\n except IndexError:\n self.logger.debug(\"User {}: {} not found\".format(key, value))", "def find_one(self, user_id):\n pass", "def find(user_id):\n return repository.find(user_id)", "def find(self, user_id: UserId) -> Optional[U]:\n ...", "def find_by_id(cls, _id):\n user = cls.query.filter_by(id=_id).first()\n return user", "def find(cls, username: str) -> \"User\":\n return cls.retrieve(\n bind=User, params=dict(method=\"user.getInfo\", user=username)\n )", "def find_by_id(cls, user_id):\n return UsersModel.query.filter_by(id=user_id).first()", "def find_user(self, *args, **kwargs):\n raise NotImplementedError", "def __find_matching_user(self, user):\n if not user.id in self.__users.keys():\n return user\n return self.__users[user.id]", "def checkUser():\n username_value = str(input('>>> Insert your username: '))\n\n user = users_coll.find_one(\n {'username': username_value},\n {'_id': 1, 'usr_fullname': 1}\n )\n\n return user", "def find_user_by_id(id: str) -> User:\n\n # Find the id user in the database, else return None\n return User.query.get(int(id))", "def find_user(cls, user_unique_identifier):\n # Select from the table users where email_id = email_id limit 1 .\n # return a UserModel Object .\n return cls.query.filter_by(uuid=user_unique_identifier).first( )", "def find_user():\n\tpost_json = request.get_json()\n\tif not post_json:\n\t\tabort(400)\n\temail = post_json['email']\n\tif not email:\n\t\tabort(400)\n\n\tuser = models.User.query.filter_by(email=email).first()\n\n\tif not user:\n\t\tusername = email.split(\"@\")[0]\n\t\tsame_username = models.User.query.filter_by(username=username).all()\n\t\tif len(same_username) > 0:\n\t\t\tusername = username + str(len(same_username))\n\t\tuser = models.User(\n\t\t\tusername = username,\n\t\t\temail = email,\n\t\t\tpassword = \"\"\n\t\t)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\treturn jsonify({'user_id':user.id}), 201", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def find_user(self, username=None, email=None):\n if username:\n try:\n if self.auth_username_ci:\n return (\n self.get_session.query(self.user_model)\n .filter(func.lower(self.user_model.username) == func.lower(username))\n .one_or_none()\n )\n else:\n return (\n self.get_session.query(self.user_model)\n .filter(func.lower(self.user_model.username) == func.lower(username))\n .one_or_none()\n )\n except MultipleResultsFound:\n log.error(\"Multiple results found for user %s\", username)\n return None\n elif email:\n try:\n return self.get_session.query(self.user_model).filter_by(email=email).one_or_none()\n except MultipleResultsFound:\n log.error(\"Multiple results found for user with email %s\", email)\n return None", "def find_by_id(cls, _id):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE id=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (_id,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = 
None\n\n ## Close Connection\n connection.close()\n\n return user", "def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def find_user(database: Database) -> User:\n session_id = cast(SessionId, bottle.request.get_cookie(\"session_id\"))\n session = find_session(database, session_id)\n return User(session.get(\"user\", \"\"), session.get(\"email\", \"\"), session.get(\"common_name\", \"\"))", "def get_user(id):\n pass", "def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]", "def get_user(self, user_id):\n _email = self._email_for_user_id(user_id)\n response = self._get('/users?{0}'.format(urllib.urlencode({'search': _email})))\n for _user in response:\n if _user['email'] == _email:\n return _user\n return None", "def get(id):\n return User.query.filter_by(id=id).first()", "def find_resource(self, id, session):\n return session.query(self.User).filter_by(id=id).first()", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None", "def user_by_id(user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n return user", "def get_user_by_username(username): #hiking_object.user_id\n\n return User.query.filter_by(username=username).first()", "def get_user(username):\n users = get_users()\n for user in users:\n if user['username'] == username:\n return user\n\n raise UserNotFound", "def get_user(self, user_id):\n\n i = self.gdb.nodes.indexes.get('users')\n if str(user_id).isalnum(): # numerical ID\n results = i.get('user_id', user_id) # always iterable\n else:\n results = i.get('screen_name', user_id) # always iterable\n\n if len(results) == 1:\n log.info('Found existing users, ID %s' % user_id)\n return results[0]\n else:\n log.info('No user in graph with ID %s' % user_id)\n return None", "def get_user_by_id(user_id):\n return User.query.get(user_id)", "def get_by_id(user_id: int) -> Optional[User]:\n user = User.query.filter_by(id=user_id).first()\n return user", "def find_by_id(cls, username):\n return cls.query.filter_by(username=username).first()", "def find_user_by_id(id):\n try:\n cursor.execute(\"select * from users where id = %s\", (id,))\n user = cursor.fetchone()\n user = User(id=user[0], firstname=user[1], lastname=user[2], othername=user[3], email=user[4],phonenumber=user[5], passporturl=user[6], roles=user[7], nationalid=user[8], county=user[9],password=user[10], date_created=user[11], date_modified=user[12])\n return user.json_dump()\n except Exception:\n return False", "def _findUser(username):\r\n user = None\r\n try:\r\n user = PongUser.objects.get(username=username)\r\n except User.DoesNotExist:\r\n user = None\r\n finally:\r\n return user", "def find_by_username(cls, username):\n user = 
cls.query.filter_by(username=username).first()\n return user", "def get_one_user():", "def search_for_user(self, user_name: str) -> Optional[Dict]:\n user = None\n users = self.get_users()\n if users:\n user = next((\n user\n for _, user in users.items()\n if user['name'] == user_name or user['real_name'] == user_name\n ), None)\n return user", "def find_by_username(username):\n user = User.query.filter(User.username == username).first()\n\n return user", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_type=database_pb2.UsersRequest.FIND,\n match=database_pb2.UsersEntry(global_id=_id)))\n if user_resp.result_type != database_pb2.UsersResponse.OK:\n self._logger.warning(\n 'Could not find user: {}'.format(user_resp.error))\n return None\n if not len(user_resp.results):\n self._logger.warning('Could not find user.')\n return None\n return user_resp.results[0]", "def get_user(username):\n return Users.query.filter_by(username=username).first()", "def find_user_by_username(db, username):\n users = db.tables.users\n return db.load_scalar(\n table=users, value={'username': username}, column='id')", "def user(email):\r\n return User.objects.get(email=email)", "async def get_user(id: int):\n with Session(engine) as session:\n # TODO return the user based on the ID (and an error if not)\n statement = select(User).where(User.id == id)\n user = session.exec(statement).first()\n if user == None:\n raise HTTPException(status_code=404, detail=\"User ID not found\")\n return {\"user\": user}", "def get_single_user(username):\n user = mongo.db.users.find_one({\"username\": username})\n user[\"_id\"] = str(user[\"_id\"])\n return user", "def _find_existing_user(self, username):\n users = User.objects.filter(username=username)\n if users.count() <= 0:\n return None\n else:\n return users[0]", "def find_by_identity(cls, identity):\n return User.query.filter(\n (User.email == identity) | (User.username == identity)).first()", "def maybe_find_user(user_id):\n try:\n return find(user_id)\n except KeyError:\n return None", "def find_user_or_404(mongo_id):\n user = None\n try:\n user = User.objects.get(id=mongo_id)\n except (DoesNotExist, ValidationError):\n pass\n\n if user is None:\n response = jsonify({'message': 'No user found!'})\n response.status_code = 404\n abort(response)\n\n return user", "async def get_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n currentuser = (\n request.cirrina.db_session.query(User)\n .filter(User.username == request.cirrina.web_session[\"username\"])\n .first()\n )\n\n if user_id == -1 or not currentuser.is_admin:\n user = currentuser\n else:\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\"username\": user.username, \"user_id\": user.id, \"is_admin\": user.is_admin}\n return web.json_response(data)", "def find_by_user_name(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return user", "def get_user(self, username):\n return 
self.s.query(User).filter(User.username == username).first()", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def fetch_user(uid):\n users = find_users(uid=uid)\n if users:\n return users[0]._asdict()\n return (\"Not found\", 404)", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_by_id(self):\n user_node = graph.find_one(\"User\",\n property_key=\"id\",\n property_value=self.id)\n return user_node", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return User.query.get(int(id))", "def load_user(id):\n return Users.query.get(id)", "def get_user(name):\n try:\n return User.objects.get(name=name)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\"There is no user '{}'.\".format(name))", "def load_user(uid):\n return User.query.get(uid)", "def get_user_by_id(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n return user", "def get_user(id=None, name=None):\n found_id = get_user_id(id, name)\n if not found_id:\n return\n response = utils.checked_api_call(users_api, 'get_specific', id=found_id)\n if response:\n return response.content", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def find_user_by_username(username: str) -> User:\n\n # Find user with this username, or None if there isn't any\n return User.query.filter_by(username=username).first()", "async def get_user(id: int):\n service = SearchService()\n return await service.get_user(id)", "def get_user_by_id(user_id: int) -> User:\n session = Session()\n\n # verify user_id exists\n vote_user: User = session.query(User).filter(User.id == user_id).first()\n session.close()\n\n if not vote_user:\n raise UserNotFoundException\n\n return vote_user", "def load_user():\n\n return User.query.get(int(id))", "def find_by_username(cls, username):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE username=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (username,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user", "def select_user(user_id):\n return session.query(User).filter(User.id == user_id).first()", "def findUser(username):\n connector = appEngine.connect()\n userId = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", username).fetchone()\n #selectInput = select([user]).where(user.column.userName == username)\n #db.execute(selectInput)\n return userId", "def get(user_id=None, username=None, email=None, api_key=None):\r\n user_query = User.query\r\n\r\n if username is not None:\r\n return user_query.filter(User.username == username).first()\r\n\r\n if user_id is not None:\r\n return user_query.filter(User.id == user_id).first()\r\n\r\n if email is not None:\r\n return user_query.filter(User.email == email).first()\r\n\r\n if api_key is not None:\r\n return user_query.filter(User.api_key == api_key).first()\r\n\r\n return None", "def get_user_by_id(id):\n user = session.query(User).get(id)\n return user", "def get_user(name, password):\n collection = get_collection(\"user\")\n 
user_info = collection.find_one({\"name\": name, \"password\": get_password(name, password)})\n return user_info", "def get_user(login):\n if isinstance(login,str) or isinstance(login,unicode):\n user = Session.query(User).filter(or_(User.email==login,\n User.username==login.lower())).first()\n return user\n else:\n raise Exception(login)", "def load_user(id):\n\treturn User.query.get(int(id))", "def get_user(username):\n if custom_user_model:\n return get_user_model().objects.get_by_natural_key(username)\n else:\n return get_user_model().objects.get(username=username)", "def read_user_by_id(\n user_id: int = Path(description=\"User id\", example=1),\n) -> schemas.User:\n user = next((usr for usr in users if usr.id == user_id), None)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"User with id={user_id} doesn't exist\",\n )\n return user", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def identify(cls, user_id):\n return cls.query.get(user_id)", "def get_user(username: str) -> User:\n\n user = User.select(\n lambda db_user: db_user.username == username\n ).first()\n\n if not user:\n raise UserNotFound(username=username)\n\n return user", "def get(self, username):\n return User.find_by_username_or_email(username)", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def load_user(user_id):\n return models.UserModel.query.get(int(user_id))", "def getByID(session, id):\n return session.query(User).filter(User.id == id).first()", "def load_user(id):\n\n return User.query.get(int(id))", "def get(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n\n return user", "def get_user(user_id=None):\n users = storage.all('User')\n user = users.get('User' + \".\" + user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict()), 200", "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "def user_by_name(username):\n user = User.query.filter(User.username == username).one_or_none()\n return user", "def _resolve_user(self, data: dict):\n user_email = data.get('eml')\n if not user_email:\n raise OBDControllerError('User email not found')\n\n user: User = self.db_session.query(User).filter(User.email == user_email).first()\n if not user:\n raise OBDControllerError('User does not exist')\n\n return user", "def _lookup_user(user):\n if re.search(r'\\@', user):\n return pam.User.query.filter_by(user_email=user).one()\n if re.search(r'^\\d+$', user):\n return pam.User.query.filter_by(biv_id=user).one()\n raise Exception('invalid user: {}, expecting biv_id or email'.format(user))", "def get_by_id(self, id):\n return self.session.query(User).filter_by(id=id).first()", "def get_object(self, username):\n try:\n return User.objects.get(username=username)\n except User.DoesNotExist:\n raise Http404" ]
[ "0.79601854", "0.7887714", "0.78640777", "0.77059114", "0.766751", "0.76567847", "0.7623235", "0.75812227", "0.7565286", "0.75438184", "0.7530661", "0.75195897", "0.75174814", "0.747526", "0.7461013", "0.74320114", "0.7411608", "0.74057275", "0.73754036", "0.7375327", "0.73730886", "0.7371545", "0.7352837", "0.7331281", "0.7321522", "0.7310303", "0.7299389", "0.7287031", "0.72757477", "0.72534627", "0.72265065", "0.7222176", "0.72213334", "0.72087437", "0.7198961", "0.71938545", "0.7184444", "0.71708345", "0.71674097", "0.7154163", "0.7154163", "0.7154163", "0.7154163", "0.7143724", "0.71338993", "0.71247745", "0.71196884", "0.71155727", "0.7113625", "0.711125", "0.7105013", "0.7094948", "0.7090585", "0.7087251", "0.70848924", "0.7084668", "0.7082136", "0.7080184", "0.70767283", "0.7069618", "0.7052234", "0.7052234", "0.7052234", "0.7052234", "0.70494413", "0.7045253", "0.70443535", "0.7042386", "0.70395446", "0.7034444", "0.70266515", "0.7016019", "0.70152664", "0.70134276", "0.70096904", "0.7002338", "0.6995998", "0.69904447", "0.69846326", "0.69833136", "0.6966457", "0.6964487", "0.6953354", "0.69492626", "0.6931993", "0.69241375", "0.6915924", "0.69127226", "0.69123197", "0.68941945", "0.6886417", "0.6879485", "0.68776935", "0.68769675", "0.6867803", "0.68651646", "0.6860973", "0.68509996", "0.68435746", "0.68420434" ]
0.731691
25
Return a generator of users
def users(self, instance, **query): instance = self._get_resource(_instance.Instance, instance) return self._list(_user.User, instance_id=instance.id, **query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def get_users(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_all_users_query+\" ORDER BY $username_field$\",{'username_field':self.sql_username_field,'password_field':self.sql_password_field})\n self.log.debug(\"sqlflexibleauthstore: get_users: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n self.log.debug('sqlflexibleauthstore: get_users: row %s'%str(row))\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_username_field]", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def get_users():\n return db.fetch_users()", "def _get_users(self, cursor, team):\n cursor.execute(\n 'SELECT \"user\" FROM %s WHERE team = %%s' % (\n self.user_repository.users2teams_table_name\n ),\n (team.id,)\n )\n\n for user_data in cursor.fetchall():\n yield self.user_repository.get(user_id=user_data['user'])", "def humans(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if not user.bot:\n yield user", "def iter_users(self, selector: Optional[Callable[[User], bool]]=None) -> Generator[User, None, None]:\n if selector is None:\n for user in self.all_users.values():\n yield user\n else:\n for user in self.all_users.values():\n if selector(user):\n yield user", "def user_batch():\n return [\n UserFactory(roles=RoleFactory.create_batch(randint(0, 3)))\n for _ in range(randint(3, 5))\n ]", "def get_users():\n users = functions.users()\n return users", "def getAllUsers(self):\r\n return [(ind, user) for ind, user in enumerate(self.users)]", "def generate_users(config: Config):\n users_by_id = {}\n users_by_alternative_id = {}\n for user_data in config.users:\n alternative_id = secrets.token_hex()\n user = User(user_data[\"user_id\"], user_data[\"password_hash\"], alternative_id)\n users_by_id[user.id] = user\n users_by_alternative_id[user.alternative_id] = user\n return users_by_id, users_by_alternative_id", "def listUsers(self):\n return tuple(User.create({'name':name},self._modelDataManager) for name in self.pm_getUserManager().listUsers())", "def get_all_users():\n return User.query.all()", "def all_users(self):\n return range(self.n_users)", "def get_users(self):\n return get_users(self['__store'].db, self)", "def bots(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if user.bot:\n yield user", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get_all_users():\n return Users.query.all()", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def get_user_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tusers = udb.user_list()\n\t\treturn UserList([_transform_user(u) for u in users])\n\tfinally:\n\t\tudb.close()", "def getResponsibleUsers():", "def db_users():\n return [\n {\"name\": \"Cathy\", \"email\": \"cathy@\", \"group\": \"guest\", \"password\": 
\"12345\"},\n {\"name\": \"Marry\", \"email\": \"marry@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"John\", \"email\": \"john@\", \"group\": \"guest\", \"password\": \"12345\"},\n ]", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users", "def get(self):\n\n users = [marshal(user, user_fields) for user in models.ExerciseUser.select()]\n\n return users", "def user_request_iterator(batch_size):\n\n print('Establishing connection to search API (to collect users)')\n\n for letter in 'abcdefghijklmnopqrstuvwxyz0123456789':\n page = 1\n print('Fetching users with query \"%s\"' % letter)\n while True:\n url = 'http://api.are.na/v2/search/users/'\n payload = {'q':letter, 'page':page, 'per':batch_size}\n\n\n req = requests.get(url, params=payload)\n\n user_json = req.json()\n user_data = user_json['users']\n num_pages = user_json['total_pages']\n\n if req.status_code != 200 or len(user_data) == 0:\n break\n\n print('Writing user data to csv (page %i of %i)' % (page, num_pages))\n page += 1\n\n for user in user_data:\n yield user", "def with_user0(session):\n user = get_user0(session)\n yield user", "def get_user_list():\n response = []\n for user in mongo.db.users.find():\n user[\"_id\"] = str(user[\"_id\"])\n response.append(user)\n return response", "def get_users_for(self, email):\r\n # this is a list rather than a generator because we probably want to do a len() on it right away\r\n return [address.user for address in EmailAddress.objects.filter(verified=True, email=email)]", "def list_users(BrokerId=None, MaxResults=None, NextToken=None):\n pass", "def list_users(self):\n raise NotImplementedError", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def get_users(how_many):\n users = []\n fake = Faker(['en_US', 'pt_BR'])\n docs = CPF().generate_list(n=how_many, mask=False, repeat=False)\n for i in range(how_many):\n name = re.sub(r'^(Dr|Ms|Sr|Sra)\\.\\s', '', fake.name()).strip()\n users.append({\n 'name': name,\n 'email': f'{name.lower().replace(\" \", \".\")}@{fake.free_email_domain()}',\n 'document': docs[i],\n })\n return users", "def users(self):\r\n return resource.Users(self)", "def get_users(self, query_args={}):\n endpoint = '/v3/educator/users'\n result = self.request(endpoint, query_args)\n\n users = []\n for data in result.response:\n user = User(data)\n users.append(user)\n\n return users", "def get_users():\n coll = data_access.get_user_collection()\n users = [User(**u) for u in coll.find()]\n return users", "def get_users(self, *args, **kwargs):\n\n users_data = api.get_users(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.User(creds=self.__creds, **user_data) for user_data in users_data]", "async def read_all_users(db_handler: DBHandler = Depends(database_dependency)):\n all_user_records = await db_handler.select_users()\n all_user_records = [init_BaseUser(record) for record in all_user_records]\n\n return all_user_records", "def users(db):\n users = [UserFactory(), UserFactory()]\n db.session.commit()\n return users", "def get_users_for(self, email):\n # this is a list rather than a generator because we probably want to\n # do a len() on it right away\n return [address.user for address in \\\n 
self.filter(verified=True, email=email)]", "def getInterestedUsers():", "def list_users(bookings):\n return[view_user(booking.user) for booking in bookings]", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)", "def users(self):\r\n return users.Users(self)", "def users(self):\r\n return users.Users(self)", "def users(self):\r\n return users.Users(self)", "def get_users(self):\n query = \"\"\"SELECT firstname,lastname,othernames,email,phonenumber,\\\n username,public_id,isadmin,isactive,registered\\\n from users ORDER BY registered ASC\"\"\"\n conn = self.db\n cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows", "def reviewers(self):\n for reviewer in self.get_data(\"reviewers\"):\n yield User(None, reviewer, **self._new_session_args)\n\n return", "def get_all_users(db):\n return list(db['user'].find())", "def users(self):\r\n return resources.Users(self)", "def create_users(\n self, count=1, password=\"Please bypass hashing!\", activation=False\n ):\n users = []\n for index in range(1, count + 1):\n user = User(\n username=\"sagan{}\".format(index),\n email=\"carlsagan{}@nasa.gov\".format(index),\n password=password,\n registered_date=datetime(2000, 1, 1),\n last_login_date=datetime(2000, 1, 1),\n )\n if activation:\n user.activation = Activation()\n users.append(user)\n if hasattr(self, \"repo\"):\n self.repo.add(user)\n if count == 1:\n return users[0]\n else:\n return users", "def getUsers(self) -> List[bbUser.bbUser]:\n return list(self.users.values())", "def get_users(self, date_start, date_end, project='enwiki'):\n\n # @TODO MOVE DB REFS INTO QUERY MODULE\n\n params = {\n 'date_start': format_mediawiki_timestamp(date_start),\n 'date_end': format_mediawiki_timestamp(date_end),\n }\n conn = Connector(instance=settings.PROJECT_DB_MAP[project])\n query = sub_tokens(self.QUERY_TYPES[self._query_type],\n db=escape_var(project))\n conn._cur_.execute(query, params)\n\n for row in conn._cur_:\n yield row[0]", "def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)", "def get_users(self):\n return self.get_all_dbusers()", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users", "def get_all_users():\n return session.query(User).all()", "def password_generator(num_users=1000, password_length=20):\n\n password_list = []\n for ind in range(num_users):\n password = random.randint(0, 2 ** password_length - 1)\n password_list.append(password)\n return password_list", "def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list", "def get_all_users(self):\n \n sql = \"select * from users\"\n return self._query_all(sql)", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in self.cur.fetchall():\n users.append(username[0])\n return users", "def get_user_ids():\n TOTAL_USERS = 50\n return list(numpy.random.choice(\n TOTAL_USERS, random.randint(1, TOTAL_USERS), replace=False\n ))", "def 
build_users_list():\n\n # Cannot query in cronjob (only use case for this func) without app running.\n # Must build separate connection to read file\n # con = sqlite3.connect(f\"{cur_wd}/bigbeta/site.db\")\n # cur = con.cursor()\n # users_list = [u for u in cur.execute(\"SELECT email FROM user;\")]\n\n # Get app context\n\n with bigbeta_app.app_context():\n users = User.query.all()\n user_emails = [user.email for user in users]\n\n return user_emails", "def users(self):\r\n return Users(self)", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_users(self):\n # remove some user media fields that we can't submit back\n def clean_media(entry):\n entry.pop(\"mediaid\", None)\n entry.pop(\"userid\", None)\n entry.pop(\"description\", None)\n return entry\n zabbix_users = self.conn.user.get(selectMedias=\"extend\", selectUsrgrps=\"extend\")\n zabbix_users = {user[\"alias\"].lower(): User(\n id=user[\"userid\"],\n name=user[\"name\"],\n surname=user[\"surname\"],\n alias=user[\"alias\"],\n groups=set(g[\"usrgrpid\"] for g in user[\"usrgrps\"]),\n media=[clean_media(entry) for entry in user[\"medias\"]],\n ) for user in zabbix_users}\n return zabbix_users", "def users(self):\n return users.Users(self)", "def _create_and_enroll_users(self, count):\n users = []\n for _ in range(count):\n user = UserFactory()\n CourseEnrollmentFactory.create(user=user, course_id=self.course.id)\n users.append(user)\n return users", "def __iter__(\n self,\n ) -> Generator[dict[str, str | int | bool | list[dict[str, str]]], None, None]:\n url = API_PATH[\"user_flair\"].format(subreddit=self.subreddit)\n params = {\"unique\": self.subreddit._reddit._next_unique}\n yield from self.subreddit._reddit.get(url, params=params)", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def users(self) -> List[str]:\n return self._users", "def users(self):\n from sagas.ofbiz.entities import OfEntity as e, oc\n rs=e().allUserLogin()\n for r in rs:\n print(r['userLoginId'])", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def get_user(number):\n for i in range(number):\n person = Person(\"ru\")\n user = HabrUser(\n username=person.username(template=\"U_d\"),\n email=person.email(domains=(\"yandex.ru\", \"gmail.com\")),\n password=person.password(),\n )\n user.save()", "def user_list():\n users = User.objects.all()\n return {\"users\": users}", "def get_users(self) -> List[Dict[str, Any]]:\n users = self.user_manager.get_users()\n return [\n {\n 'user_id': user.user_id,\n 'username': user.username,\n 'created_at': user.created_at.isoformat(),\n }\n for user in users\n ]", "def get_all_users():\n db = api.db.get_conn()\n return list(db.users.find({}, {\"_id\": 0, \"password_hash\": 0}))", "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def list_user():\n\tbegin = 0\n\tlength = 25\n\ttry:\n\t\tif request.json != None:\n\t\t\tbegin = int(request.json.get('begin', 0))\n\t\t\tlength = int(request.json.get('length', 25))\n\texcept:\n\t\tabort(403)\n\tif length > 100 
:\n\t\tlength = 100\n\tuserList = User.list(begin, length)\n\tif userList == None:\n\t\tabort(400)\n\treturn jsonify({'users': map(lambda(e): e.output(), userList), 'begin': begin, 'length': len(userList)})", "def users_get(): # noqa: E501\n base.check_session()\n ret = []\n for u in users.values():\n ret.append(_cleanuser(u))\n return ret", "def get_users():\n cache_key = 'GRAHAM_API_CACHED_USERS'\n cached = rd.get(cache_key)\n if cached is not None:\n return jsonify(json.loads(cached.decode('utf-8')))\n ret = []\n for user in User.select():\n ret.append({\n 'discord_id':user.user_id,\n 'user_name':user.user_name,\n 'created_ts': format_js_iso(user.created),\n 'address':user.wallet_address\n })\n rd.set(cache_key, json.dumps(ret), ex=600)\n return jsonify(ret)", "async def list_users(self) -> List[int]:\n return [\n # row[0]\n # async for row in self.conn.execute(\n # \"select userid from tg_users\",\n # )\n ]", "def get_users(self) -> List[User]:\n with self.client.create_session() as session:\n users = session.query(RDSUser).filter(RDSUser.is_active.is_(True)).all()\n\n return [self._build_user_from_record(user_record=user) for user in users]", "def users_view():\n data = get_data()\n return [{'user_id': i, 'name': 'User {0}'.format(str(i))}\n for i in data.keys()]", "def users():\n retlist = []\n rawlist = cext.users()\n for item in rawlist:\n user, hostname, tstamp = item\n user = py2_strencode(user)\n nt = _common.suser(user, None, hostname, tstamp, None)\n retlist.append(nt)\n return retlist", "def load_users(filename):\n with open(filename, 'rb') as f:\n for line in f:\n yield line.split(':', 1)[0]", "def __iter__(self):\n for itm in self._user_data:\n yield itm", "def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']", "def users(self):\n return self.get_data(\"users\")", "def get_users_list_full(self, session):\n\n users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users", "def getUsers(client, req):\n client.sendTarget(req[\"id\"], key=\"get.users\", payload={\"payload\": magic.users})", "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def users(bot, event, *args):\n yield from command.run(bot, event, *[\"convusers\", \"id:\" + event.conv_id])" ]
[ "0.7804074", "0.7264213", "0.724916", "0.72419035", "0.7206268", "0.71832883", "0.714184", "0.70211744", "0.6948952", "0.6935378", "0.69063926", "0.6886542", "0.6870021", "0.68204033", "0.6802641", "0.67643344", "0.6726554", "0.6724001", "0.6705792", "0.6705792", "0.6705792", "0.6705792", "0.6682153", "0.66286653", "0.66279644", "0.661073", "0.66059554", "0.6588465", "0.6581918", "0.6573852", "0.65429854", "0.6528288", "0.6525133", "0.65197664", "0.6514037", "0.6513945", "0.6502234", "0.64937097", "0.64847136", "0.6470234", "0.64664936", "0.64654714", "0.64551735", "0.64235723", "0.6417492", "0.6410832", "0.64050734", "0.6400735", "0.6396738", "0.6395938", "0.6395938", "0.6395938", "0.6391511", "0.63810104", "0.6368435", "0.6362837", "0.6356438", "0.63508224", "0.6349371", "0.63483876", "0.6342856", "0.63415724", "0.6341228", "0.63407314", "0.6339156", "0.6323943", "0.6319511", "0.631302", "0.63122433", "0.63106203", "0.6308928", "0.62991816", "0.6296397", "0.62709826", "0.62681067", "0.6264666", "0.62604225", "0.62548804", "0.624676", "0.62415206", "0.6238429", "0.6225131", "0.6223884", "0.62237066", "0.622169", "0.62181824", "0.62165827", "0.6209439", "0.62084174", "0.62034106", "0.6198581", "0.61982757", "0.6197171", "0.61946696", "0.6182024", "0.6181863", "0.61808944", "0.6172098", "0.61718214", "0.6171301", "0.6167236" ]
0.0
-1
Get a single user
def get_user(self, user, instance=None): instance = self._get_resource(_instance.Instance, instance) return self._get(_user.User, user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n\n return user", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get_user(self, user_id):\n uri = 'users/' + user_id\n return self.make_request(uri)", "def get(self, id):\n\t\ttry:\n\t\t\tflask_app.logger.debug('We are getting the user: %d', id)\n\t\t\treturn user_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not get user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not get user\", statusCode = \"500\")", "def get_user(user_id=None):\n users = storage.all('User')\n user = users.get('User' + \".\" + user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict()), 200", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def get(id):\n return User.query.filter_by(id=id).first()", "def get(self, user_id):\n return User.get(user_id)", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(id):\n pass", "async def get(cls, user_id):\n try:\n user = await db.one(cls.SELECT_USER, user_id=user_id)\n except exceptions.NoResultFound:\n LOGGER.error(\"Could not find user=%s.\", user_id)\n raise DatabaseError\n except SQLAlchemyError as err:\n LOGGER.error(\"Failed to fetch user=%s. Error: %s\", user_id, err)\n raise DatabaseError\n\n return user", "def get_user_by_id(user_id):\n return User.query.get(user_id)", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_one_user():", "def get_user(pk):\n user = UserService(user=pk).get_user_by_id()\n return CustomResponse(data=user).response()", "def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result", "def get_user_from_id(user_id):\n return Users.query.filter_by(id=user_id).first()", "def get(self, user_id):\n user = UserServices(public_id=user_id).get_an_item()\n if not user:\n api.abort(404)\n else:\n return user", "def get_user(id=None, name=None):\n found_id = get_user_id(id, name)\n if not found_id:\n return\n response = utils.checked_api_call(users_api, 'get_specific', id=found_id)\n if response:\n return response.content", "def get_user(self, user_id):\n return self.my_get_user(self.get_all_dbusers(), user_id)", "def get_user(self, user_id):\n return UserModel._default_manager.get(pk=user_id)", "def getUser(self, resource):\n if isinstance(resource, int):\n resource = 'users/{0}'.format(resource)\n\n res = self.getRequest(resource)\n\n if res:\n user = vsdModels.User(**res)\n return user\n else:\n return None", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def 
get_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n else:\n return jsonify(user.to_dict())", "def get_user(self, user_id):\n oauth_user = OAuthioUser.objects.filter(user__id=user_id)\n if oauth_user.exists():\n return oauth_user.get().user", "def get_user(self, user_id):\n _email = self._email_for_user_id(user_id)\n response = self._get('/users?{0}'.format(urllib.urlencode({'search': _email})))\n for _user in response:\n if _user['email'] == _email:\n return _user\n return None", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def get_user(self, user_id=None):\n raise NotImplementedError", "def get_user(username):\n return Users.query.filter_by(username=username).first()", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def get_user(user_id):\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n return user.to_dict()", "def get_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n return jsonify(usr.to_dict())\n else:\n abort(404)", "def get_user(self):\n\n user_session = self.get()\n if not user_session:\n return None\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n return us.single(user_session.login)", "def get_user_by_id(user_id):\n\n return User.query.get(user_id)", "def get_user(user_id):\n user = models.storage.get('User', user_id)\n if user is None:\n abort(404)\n return jsonify(user.to_dict())", "def get_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n return jsonify(user.to_dict())", "def get_user(id):\n user = User.query.get(id)\n return user_schema.jsonify(user)", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def getuser(gh, user):\n return gh.users(user).get()", "def get_user(self, object_id):\n return self.get_object(\"user\", object_id)", "def get_user(self):\n try:\n return User.objects.get(id=self.user_id)\n except User.DoesNotExist:\n return AnonymousProfile()", "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "def get_user(id):\n if (g.user.id == id):\n return jsonify(g.user.serialize)\n else:\n abort(400)", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "async def get_user(id: int):\n with Session(engine) as session:\n # TODO return the user based on the ID (and an error if not)\n statement = select(User).where(User.id == id)\n user = session.exec(statement).first()\n if user == None:\n raise HTTPException(status_code=404, detail=\"User ID not found\")\n return {\"user\": user}", "def get_user(username):\n if custom_user_model:\n return get_user_model().objects.get_by_natural_key(username)\n else:\n return get_user_model().objects.get(username=username)", "def getUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n return self.users[id]", "def get_user(self, u_id: int) -> Optional[Users]:\n try:\n user = self.session.query(Users).get(u_id)\n\n return user\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get user: {excpt}')\n\n return None", "def get_user(id, **options):\n\n return security_services.get_user(id, **options)", "def get_user(self, username):\n\t\treturn self.users.get(username, None)", "def user(email):\r\n return 
User.objects.get(email=email)", "def user_by_id(user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n return user", "def get_user_by_id(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n return user", "def get_user(user: User) -> User:\n if user.is_authenticated:\n return user\n else:\n return get_anonymous_user()", "def get_user(self, username):\n return self.s.query(User).filter(User.username == username).first()", "def get_user(self, *, params: Optional[dict] = None) -> \"resource_types.User\":\n\n return communicator.User(self.__requester).fetch(parameters=params)", "async def get_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n currentuser = (\n request.cirrina.db_session.query(User)\n .filter(User.username == request.cirrina.web_session[\"username\"])\n .first()\n )\n\n if user_id == -1 or not currentuser.is_admin:\n user = currentuser\n else:\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\"username\": user.username, \"user_id\": user.id, \"is_admin\": user.is_admin}\n return web.json_response(data)", "def get_user(self, user_id: int) -> dict:\n user = self.call_method('getUser', user_id=user_id)\n return user", "def get_user(self, user_id):\n try:\n return Account.objects.get(pk=user_id)\n except Account.DoesNotExist:\n return None", "def get_user(self):\n\n r = requests.get(\n self._url('/usermanagement/userinfo'),\n headers={'Authorization': self.token},\n proxies=self.proxy)\n r.raise_for_status()\n user = r.json()\n log.info('user {:s} currently logged in'.format(user['login']))\n\n return user", "def get(user_id=None, username=None, email=None, api_key=None):\r\n user_query = User.query\r\n\r\n if username is not None:\r\n return user_query.filter(User.username == username).first()\r\n\r\n if user_id is not None:\r\n return user_query.filter(User.id == user_id).first()\r\n\r\n if email is not None:\r\n return user_query.filter(User.email == email).first()\r\n\r\n if api_key is not None:\r\n return user_query.filter(User.api_key == api_key).first()\r\n\r\n return None", "def get_by_id(user_id: int) -> Optional[User]:\n user = User.query.filter_by(id=user_id).first()\n return user", "def get_user(request: Request) -> User:\n\n return _check_and_extract_user(request)", "def load_user(user_id):\n return models.UserModel.query.get(int(user_id))", "def get_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return self._connection.get_user(id64)", "def get_user(api, user_name):\r\n # Checking input:\r\n if not isinstance(user_name, str) and not isinstance(user_name, int):\r\n raise ValueError(\"You can only get user by his/her id (int) or name (str).\")\r\n\r\n # Main part:\r\n try:\r\n user = api.get_user(user_name)\r\n return user\r\n except tweepy.error.TweepError:\r\n raise UserNotFoundError(\"No Twitter user with such name/id exists.\")", "def user_get_by_id(user_id):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n else:\n return jsonify(obj.to_dict())", "async def get_user(**kwargs: Any) -> UserModel:\n user = await UserModel.objects.get_or_none(**kwargs)\n\n if not user:\n raise NotFoundError(\"User not found.\")\n\n return user", "async def get_user(id: int):\n service = SearchService()\n return 
await service.get_user(id)", "def get_user_by_id(id):\n user = session.query(User).get(id)\n return user", "def get_user(self, instance, name):\n return instance.get_user(name)", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "async def get_user(self, name=None, id=None) -> User:\n if name:\n return await self.get_user_by_username(name)\n if id:\n return await self.get_user_by_id(id)\n return None", "def get_single_user(user_id):\n\n # default error\n response_object = {\n 'status': 'fail',\n 'message': 'User does not exist'\n }\n\n # get user by query\n try:\n user = User.query.filter_by(id=int(user_id)).first()\n # user doesn't exist\n if not user:\n return jsonify(response_object), 404\n else:\n response_object = {\n 'status': 'success',\n 'data': {\n 'username': user.username,\n 'email': user.email,\n 'created_at': user.created_at\n }\n }\n return jsonify(response_object), 200\n\n # invalid id\n except ValueError:\n return jsonify(response_object), 404", "def get_user(self, user_id):\n return None # noqa: WPS324", "def get_user(self, user_name):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n resource_type = 'user'\n org_filter = None\n if self.client.is_sysadmin():\n resource_type = 'adminUser'\n org_filter = 'org==%s' % self.resource.get('href')\n query = self.client.get_typed_query(\n resource_type,\n query_result_format=QueryResultFormat.REFERENCES,\n equality_filter=('name', user_name),\n qfilter=org_filter)\n records = list(query.execute())\n if len(records) == 0:\n raise Exception('user not found')\n elif len(records) > 1:\n raise Exception('multiple users found')\n return self.client.get_resource(records[0].get('href'))", "def get(self, public_id):\n user = get_a_user(public_id)\n if not user:\n api.abort(404)\n else:\n return user", "def get(self, public_id):\n user = get_a_user(public_id)\n if not user:\n api.abort(404)\n else:\n return user", "def get_single_user(username):\n user = mongo.db.users.find_one({\"username\": username})\n user[\"_id\"] = str(user[\"_id\"])\n return user", "def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None", "def get_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n uri = url_for('get_user', user_id=user.id, _external=True)\n return jsonify({'user':{\n 'id': user.id,\n 'uri': uri,\n 'username': user.username,\n 'display_name': user.display_name,\n 'role': user.role_name\n }\n })", "def get_user(self, user_id):\n\n i = self.gdb.nodes.indexes.get('users')\n if str(user_id).isalnum(): # numerical ID\n results = i.get('user_id', user_id) # always iterable\n else:\n results = i.get('screen_name', user_id) # always iterable\n\n if len(results) == 1:\n log.info('Found existing users, ID %s' % user_id)\n return results[0]\n else:\n log.info('No user in graph with ID %s' % user_id)\n return None", "def get(self, username):\n return User.find_by_username_or_email(username)", "def get(self, user_id):\n res = self._user.get_single_user(user_id)\n\n 
if res:\n return {\n \"status\": 200,\n \"data\": [res]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"user with id {} \"\n \"was not found \".format(user_id)\n }, 404", "def get(self):\r\n return get_user(request)", "def get_user(self):\n return self.get('users/self')", "def get_a_user(public_id):\n return User.query.filter_by(public_id=public_id).first()", "def load_user(user_id):\n return app.user_models.query.get(int(user_id))", "def get_user(email, queryset=None):\n if queryset is None:\n queryset = User.objects\n return queryset.get(username=_email_to_username(email))", "def fusion_api_get_user(self, uri=None, param='', api=None, headers=None):\n return self.user.get(uri=uri, api=api, headers=headers, param=param)", "def me_get(): # noqa: E501\n s = base.check_session()\n return _cleanuser(s['user'])", "def get(user_id: int) -> User:\n try:\n user = User.objects.get(id=user_id)\n except User.DoesNotExist:\n logger.error(\n 'Getter(user_id = %d) in BaseUser throws User.DoesNotExist exception.' %\n user_id)\n raise NonUserException\n return user", "def load_user(uid):\n return User.query.get(uid)", "def get_single_user(user_id):\n response_object = {\n 'status': 'fail',\n 'message': 'User does not exist.'\n }\n try:\n user = User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify(response_object), 404\n else:\n response_object = {\n 'status': 'success',\n 'data': {\n 'username': user.username,\n 'email': user.email\n }\n }\n return jsonify(response_object), 200\n except ValueError:\n return jsonify(response_object), 404", "def GetAppEngineUser(user_id):\n email_address = GetEmailAddress(user_id)\n if email_address:\n return users.User(email_address)\n else:\n return None" ]
[ "0.8360361", "0.83258814", "0.8310465", "0.8215789", "0.8177496", "0.8172062", "0.8166535", "0.8159785", "0.81516296", "0.81001353", "0.80638343", "0.8059556", "0.8051115", "0.8048663", "0.8047433", "0.8014368", "0.80115867", "0.79899853", "0.7974474", "0.79706466", "0.7956602", "0.7950774", "0.79487514", "0.794151", "0.7922907", "0.7922907", "0.7922907", "0.7922907", "0.7915468", "0.791464", "0.79022425", "0.7892923", "0.78856957", "0.78810215", "0.78777075", "0.7845098", "0.78345203", "0.7832152", "0.7831313", "0.7812867", "0.780608", "0.7801212", "0.7799712", "0.7791573", "0.7789192", "0.77546203", "0.7748518", "0.7739268", "0.7729434", "0.77119", "0.76944494", "0.7687817", "0.76771975", "0.767193", "0.7660326", "0.76590216", "0.7652885", "0.7643078", "0.76430047", "0.76427907", "0.7640553", "0.7632371", "0.7621481", "0.7618369", "0.7617324", "0.76097643", "0.7609231", "0.7601939", "0.75970787", "0.7567003", "0.75667804", "0.7562488", "0.7561787", "0.75586855", "0.75489354", "0.7548523", "0.75471187", "0.7540427", "0.7537401", "0.75372696", "0.7533985", "0.7532812", "0.7529019", "0.7525885", "0.75253284", "0.7523374", "0.7520383", "0.75109327", "0.75029737", "0.7499316", "0.74915624", "0.74884766", "0.7480773", "0.74794406", "0.74772507", "0.74757326", "0.7473651", "0.7456653", "0.74556315", "0.74469656" ]
0.75470227
77
Save the post data when creating a new bucketlist.
def perform_create(self, serializer): serializer.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, user):\n # parse request data\n bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # create bucketlist and save bucketlist\n bucketlist = Bucketlist(name=bucketlist_name, date_created=datetime.utcnow(\n ), created_by=user.username, author=user)\n bucketlist.save()\n\n return \"Successfully created bucketlist\", 201", "def post_bucketlist():\n pass", "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n response = self.client.get(\n \"/bucketlists/1\", headers={\n \"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item.data)\n self.assertEqual(result[\"message\"],\n \"Bucket list item added successfully.\")\n self.assertEqual(resp.status_code, 201)", "def create_bucketlist(self, title, intro):\n bucketlist_ = Bucketlist(owner_id=self._id,\n title=title,\n intro=intro,\n owner=self.username)\n bucketlist_.save_to_bucketlists()", "def test_bucketlist_create(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Go to vacation', str(res.data))", "def put_bucketlist_item(self, email, password, bucketlist_id, item_id, data):\r\n headers = self.authentication_headers(email=email, password=password)\r\n return self.client.put(\r\n '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id),\r\n content_type=\"application/json\",\r\n data=json.dumps(data),\r\n headers=headers,\r\n follow_redirects=True\r\n )", "def test_bucketlist_creation(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))", "def add_bucketlist_item(self, email, password, buckelist_id, item_name):\r\n test_date = str(date(2020, 9, 22))\r\n headers = self.authentication_headers(email=email, password=password)\r\n return self.client.post(\r\n '/api/v1/bucketlist/{}/items/'.format(buckelist_id),\r\n data=json.dumps({\"name\": item_name, \"finished_by\": test_date}),\r\n content_type=\"application/json\",\r\n headers=headers,\r\n follow_redirects=True\r\n )", "def post(self, request, *args, **kwargs):\n if request.user.role != '1':\n return Response({\"error\":\"Only managers can create new checklists\"}, status=status.HTTP_403_FORBIDDEN)\n serializer = ChecklistPostSerializer(data=request.DATA)\n #if serializer.is_valid():\n print serializer.data;\n checklist = Checklist.objects.create(\n title=serializer.init_data[\"title\"],\n description=serializer.init_data[\"description\"],\n json_contents=serializer.init_data[\"json_contents\"],\n template=ChecklistTemplate.objects.get(id=1),\n assignee=Employee.objects.get(id=serializer.init_data[\"assignee\"]),\n assigner=request.user,\n address=serializer.init_data[\"address\"],\n district=serializer.init_data[\"district\"],\n date=datetime.date.today()\n )\n checklist.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n #else:\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def save_to_db(self):\n db = DBConnection()\n 
db.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING row_to_json(lists)\n \"\"\", (self.user_id, self.heading, self.display_order)\n )\n db.con.commit()\n new_list = db.cur.fetchone()[0]\n self.id = new_list[\"id\"]\n db.close()", "def create_object(self,object_data_list):\n for object_data in object_data_list:\n if self.valid_object(object_data):\n self.populate_names(object_data.get(\"planId\"))\n\n if self.unique_name(name=object_data.get(\"name\"),plan_id=object_data.get(\"planId\")):\n self.post_object(object_data)\n else:\n new_name = self.try_create_uniqe_name(object_data.get(\"name\"),object_data.get(\"planId\"))\n if new_name:\n object_data[\"name\"]= new_name\n self.post_object(object_data)\n else:\n logging.error(f'no unique name for bucket')\n self.append_response(f'no unique name for bucket')\n return self.response", "def POST_list(self):\n\t\trv = self.POST_data('/api/cleaner/{0}/list'.format(self.cleaner['_id']), data=TEST_LIST_DATA)\n\t\tself.list_id = json.loads(rv.data)['_id']", "def test_POST_list(self):\n\t\t# cleaner's lists should originally be empty\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual([], data['lists'])\n\n\t\t# after posting list, cleaner's lists should contain just id of posted list\n\t\tself.POST_list()\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual(1, len(data['lists']))\n\t\tself.assertEqual(self.list_id, data['lists'][0])", "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertNotEqual(item.name, \"bucketlist item name\")\r\n self.assertFalse(item.completed)\r\n\r\n response = self.put_bucketlist_item(email, _pword, bucketlist.id, 1, data)\r\n result = json.loads(response.data.decode('utf-8'))\r\n item2 = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(item2.name, \"bucketlist item name\")\r\n self.assertTrue(item2.completed)", "def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['message'] =\"message\"\n data['author'] = \"author\"\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)", "def test_add_bucketlist_items(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(email, _pword, bucketlist.id, \"bucketlist item name\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '201 CREATED')\r\n self.assertEqual(result['message'], 'Bucket list item added')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertLess(item_no, new_item_no)", "def test_edit_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = 
self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))", "def post_list(self, request, **kwargs):\n deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n # Force this in an ugly way, at least should do \"reverse\"\n deserialized[\"user\"] = \"/api/v1/user/%s/\" % request.user.id\n bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))\n self.is_valid(bundle, request)\n updated_bundle = self.obj_create(bundle, request=request)\n return HttpCreated(location=self.get_resource_uri(updated_bundle))", "def post(self):\n data = request.json\n return save_new_post(data=data)", "def test_duplicate_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item2 = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n result = json.loads(resp_item2.data)\n self.assertEqual(result[\"message\"], \"Item with the given name exists.\")\n self.assertEqual(resp_item2.status_code, 409)", "def post_access_control_list_create(self, resource_dict):\n pass", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def create_item(_id, item_name, description):\n data_ = Data.get_the_data(_id, Data.bucketlists)\n for data in data_:\n bucketlist = Bucketlist(data['title'],\n data['owner'],\n data['intro'],\n data['owner_id'],\n data['_id'])\n bucketlist.new_item(item_name=item_name,\n description=description)", "def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)", "def save(self):\n\t\tprint('bSlabList.save() not implemented')\n\n\t\t# headers are keys of xxxx\n\n\t\t# each element in xxx is a comma seperated row", "def test_create_bucket_list_return(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"Name\", \"Completed\")\n self.assertIsInstance(bucket, BucketList)", "def test_api_edit_bucketlist(self):\n res_post = self.client().post('/bucketlist', data={'name': 'Wake up, Eat, Code, Sleep & Repeat'})\n self.assertEqual(res_post.status_code, 201)\n res_post_in_json = json.loads(res_post.data.decode('UTF-8').replace(\"'\", \"\\\"\"))\n id = res_post_in_json['id']\n res_put = self.client().put(\n f'bucketlist/{id}',\n data={\n 'name': \"Don't forget to exercise\"\n }\n )\n self.assertEqual(res_put.status_code, 200)\n res = self.client().get(f'/bucketlist/{id}')\n self.assertIn(\"exercise\", str(res.data))", "def put(self, user, id):\n # parse request data\n if 'name' not in self.request.form:\n return \"Bucketlist not Update\", 202\n\n 
bucketlist_name = self.request.form['name']\n\n # validate bucketlist\n if not bucketlist_name:\n return \"Name cannot be empty\", 401\n\n # search for the bucketlist_id\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n # Update bucketlist and save changes\n bucketlist.name = bucketlist_name\n bucketlist.save()\n\n return \"Successfully updated bucketlist\", 201", "def commit(self,form_list):\n raise NotImplementedError", "def post(self):\n blob_key = self.request.get(\"blobkey\")\n\n database_creation.run(blob_key)", "def post(self):\n parent_key = ndb.Key(Boat, \"parent_boat\")\n boat_data = json.loads(self.request.body)\n new_boat = Boat(id=None, name=boat_data['name'], type=boat_data['type'],\n length=boat_data['length'], at_sea=True, parent=parent_key)\n new_boat.put()\n new_boat.id = '/Boat/' + new_boat.key.urlsafe()\n new_boat.put()\n boat_dict = new_boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def test_create_bucket_list_name(self):\n bucket = BucketList(\"\", \"\")\n bucket = bucket.create_bucket_list(\"\")\n self.assertEqual(bucket, \"Please provide a name for your bucket list\", )", "def bulkSave(self, objList: List[PermissionContext], tokenData: TokenData):", "def update_bucketlist():\n pass", "def test_get_bucketlist_items(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n\n self.assertEqual(resp_item.status_code, 200)\n resp_item = self.client.get('/bucketlistitems/1/items', headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp_item.status_code, 200)", "def post_list(self, object_list, bundle):\n\t\tself.method_check(request, allowed=['post'])\n\t\tdata = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n\t\ttry:\n\t\t\tuser = AppUser.objects.create_user(\n\t\t\t\tdata.get(\"username\"),\n\t\t\t\tdata.get(\"email\"),\n\t\t\t\tdata.get(\"password\")\n\t\t\t)\n\t\t\tuser.save()\n\t\texcept IntegrityError as e:\n\t\t\treturn self.create_response(request, {\n\t\t\t\t'success': False,\n\t\t\t\t'error': e\n\t\t\t})\n\n\t\treturn self.create_response(request, {\n\t\t\t'success': True\n\t\t})", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def save_tags(self, post_getlist_tags):\n cleaned_tags = []\n for name in post_getlist_tags:\n if Tag.objects.filter(name=name).exists():\n tag = Tag.objects.filter(name=name).first()\n cleaned_tags.append(tag)\n else:\n if bool(name.strip()):\n tag = Tag.objects.create(name=name)\n tag.save()\n cleaned_tags.append(tag)\n return cleaned_tags", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n 
lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def test_bucket_post_content_type_is_json(self):\n with self.client:\n response = self.client.post(\n '/bucketlists',\n headers=dict(Authorization='Bearer ' + self.get_user_token()),\n data=json.dumps(dict(name='Travel'))\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 202)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'Content-type must be json')", "def _store(self):\n self._post_item.save()\n self._attachment_item.save()\n self._marshaller.marshall(self._post_item)", "def post_object(self,object_data):\n try:\n self.update_name(name=object_data[\"name\"],plan_id=object_data[\"planId\"])\n make_request(f'{GRAPH_URL}/planner/buckets', 'POST', object_data)\n logging.info(f'Created bucket with name {object_data.get(\"name\")}')\n self.append_response(\"Ok\")\n return True\n except:\n self.append_response(\"Error\")\n return False", "def add_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$addToSet': {'bucket_list': item}}\n )\n return result", "def save(self):\n return api.put([self])", "def post_data(logged_in_apiclient):\n _, user = logged_in_apiclient\n\n input_data = {\n \"owner\": user.id,\n \"title\": \"foo title\",\n \"view_lists\": [],\n \"admin_lists\": [],\n }\n return input_data", "def post_list(self, request, **kwargs):\r\n #logger.debug(\"post list %s\\n%s\" % (request, kwargs));\r\n response = super(BaseCorsResource, self).post_list(request, **kwargs)\r\n return self.add_cors_headers(response, True)", "def post(self, dnzo_user):\n from tasks_data.task_lists import add_task_list, get_task_list\n \n task_list_name = self.request.get('task_list_name', None)\n if not task_list_name:\n self.bad_request(\"Must provide task_list_name to create a new list\")\n return\n \n new_list = add_task_list(dnzo_user, task_list_name)\n if not new_list:\n self.bad_request(\"Could not add the new task list!\")\n return\n \n self.json_response(task_list=new_list.to_dict())", "def test_create(self):\n responses.add(\n responses.Response(\n method='POST',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=create_response\n )\n )\n name = 'my-new-bucket'\n description = 'Some Description'\n backend = 'snowflake'\n created_detail = self.buckets.create(name=name,\n description=description,\n backend=backend)\n assert created_detail['id'] == 'in.c-{}'.format(name)", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = 
json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def test_update_busketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n 
update_item = self.client.put('/bucketlistitems/1/items/1',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs and museums too.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(update_item.status_code, 201)", "def post_create(self, state):\n\n self.id = self.get_flags_from_list(self.id)\n self.flags = self.get_flags_from_list(self.flags)", "def post(self, request, *args, **kwargs):\n return super().create(*args, **kwargs)", "def post(self, **kwargs):\n data = request.json\n return save_new_writer(data=data)", "def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "def create_activity(bucketlist_id):\n form = ActivityForm(request.form) \n if form.validate_on_submit():\n new_activity = Activity(form.title.data, form.description.data, form.status.data)\n new_activity.create_activity(bucketlist_id)\n\n activity_created = Markup(\"<div class='alert alert-success' role='alert'>\\\n Bucketlist activity created successfully\\\n </div>\")\n flash(activity_created)\n \n # Select the activity belonging to the current bucket and pass it to show_activities\n all_activities = Activity.activities.items()\n created_activities = {k:v for k, v in all_activities if bucketlist_id==v['bucketlist_id']}\n \n if created_activities:\n\n return redirect(url_for(\"show_activities\", form=form, data=all_activities, bucketlist_id=bucketlist_id))\n \n # Else if the activity was not created\n return redirect(url_for('show_activities', form=form, bucketlist_id=bucketlist_id))\n\n if form.errors:\n form_error = Markup(\"<div class='alert alert-danger' role='alert'>\\\n Form error. Could not create bucketlist activity *#*#*??\\\n </div>\")\n flash(form_error)\n\n # If GET\n return render_template('show_activities.html', form=form, bucketlist_id=bucketlist_id)", "def create_object(self,object_data_list):\n for object_data in object_data_list:\n if self.valid_object(object_data):\n self.populate_titles(object_data.get(\"planId\"))\n\n if self.unique_title(title=object_data.get(\"title\"),plan_id=object_data.get(\"planId\")):\n self.post_object(object_data)\n else:\n new_title = self.try_create_uniqe_title(object_data.get(\"title\"),object_data.get(\"planId\"))\n if new_title:\n object_data[\"title\"]= new_title\n self.post_object(object_data)\n else:\n logging.error(f'no unique title for Task')\n self.append_response(f'no unique title for task')\n else:\n pass\n return self.response", "def test_name_attribute_is_set_in_bucket_creation_request(self):\n with self.client:\n response = self.client.post(\n '/bucketlists',\n headers=dict(Authorization='Bearer ' + self.get_user_token()),\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'], 'failed')\n self.assertTrue(data['message'], 'Missing name attribute')", "def test_get_bucketlist_item_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n resp_item = self.client.post('/bucketlistitems/1/items',\n data=json.dumps(\n {\"name\": \"visit the busy surburbs.\"}),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n })\n self.assertEqual(resp.status_code, 201)\n get_item = self.client.get('/bucketlistitems/1/items/1', headers={\n \"Authorization\": self.token\n 
})\n self.assertEqual(resp.status_code, 201)", "def create(self, validated_data):", "def test_delete_bucketlist(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)", "def post(self):\n FeatureBusiness.add(request.get_json(), user_id=request.user_id)\n\n return {\"status\": 201}, 201", "def save(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.fileList ) )\n f.close()\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.userList ) )\n f.close()", "def post_collection():\n\n record = collection.new_record()\n record.data = request.json or request.form.to_dict()\n record.save()\n\n return get_record(record.uuid)", "def on_post(self):\n return \"Ok, the stuff is being saved\"", "def post(self):\n data = request.json\n return save_new_provider(data=data)", "def on_post_resource(self, req, resp, *args, **params):\n instance = self.get_object(**params)\n self.save_object(req.params, req, resp, instance, **params)", "def save(self):\n self.data['items'] = self._items\n if self.storage_id:\n storage.set_shop_data([self.storage_id, 'cart'], self.data)", "def createPost(request):\n\n #save the organization's post\n if request.method == 'POST':\n form = PostForm(request.user, request.POST, request.FILES)\n if form.is_valid():\n filterList = ['everyone', 'black', 'hispanic', 'female', 'lgbt', 'immigrants', 'disabled', 'poor'] \n newpost = form.save()\n\n #Add tags to the object only if in the filterlist\n tags = form.cleaned_data.get('tags')\n tags = [tag.lower() for tag in tags if tag.lower() in filterList]\n\n newpost.tags.add(*tags)\n messages.success(request, 'You have successful created the post')\n form = PostForm(request.user)\n context = {'form':form} \n return render(request, 'create_post.html', context=context)\n \n #form to fill out for the post\n form = PostForm(request.user)\n context = {'form':form} \n return render(request, 'create_post.html', context=context)", "def Create(self, domainsList) :\n\t\t...", "def manipulate_bucketlist():\n pass", "def post_state():\n if not request.is_json:\n abort(400, 'Not a JSON')\n else:\n request_body = request.get_json()\n\n if 'name' not in request_body:\n abort(400, \"Missing name\")\n else:\n state = State(**request_body)\n storage.new(state)\n storage.save()\n return jsonify(state.to_dict()), 201", "def post(self):\n data = request.json\n create_ue(data)\n return None, 201", "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n # get the json with the bucketlist\n results = json.loads(res.data.decode())\n\n # create a bucketlist item by making a POST request and add it to the created bucketlist\n res = self.client().post(\n 
'/api/v1/bucketlists/{}/items/'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token),\n data={\n \"name\": \"Eat fried crabs\"\n })\n self.assertEqual(res.status_code, 201)\n # get the json containing the created bucketlist item\n res_item = json.loads(res.data.decode())\n\n # delete the bucketlist item we just created\n res = self.client().delete(\n '/api/v1/bucketlists/{}/items/{}'.format(results['id'], res_item['id']),\n headers=dict(Authorization=\"Bearer \" + access_token), )\n self.assertEqual(res.status_code, 200)\n\n # Test to see if it exists, should return a 404\n result = self.client().get(\n '/api/v1/bucketlists/{}/items/1'.format(results['id']),\n headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_model_can_create_a_bucketlist(self):\n old_count = Job.objects.count()\n self.job.save()\n new_count = Job.objects.count()\n self.assertNotEqual(old_count, new_count)", "def _create(self, postData):\n if self.infos is None:\n r = self.connection.session.post(self.getIndexesURL(), params = {\"collection\" : self.collection.name}, data = json.dumps(postData, default=str))\n data = r.json()\n if (r.status_code >= 400) or data['error']:\n raise CreationError(data['errorMessage'], data)\n self.infos = data", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def test_add_item_using_post(self):\n pass", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def post(self):\n json = request.get_json()\n playlistlist = [Playlist.playlistfromjson(playlist) for playlist in json]\n with AccessRow(userdb, current_user.getname()) as data:\n data.setplaylists(playlistlist)\n\n return \"Success\", 200", "def save_blog(request):\n json_data = request.body\n logger.debug(json_data)", "def statePost():\n try:\n req = request.get_json()\n if 'name' not in req:\n return \"Missing name\\n\", 400\n new_obj = State(name=req['name'])\n storage.new(new_obj)\n storage.save()\n return jsonify(new_obj.to_dict()), 201\n except:\n return \"Not a JSON\\n\", 400", "def post_state():\n new_state = request.get_json()\n if new_state is None:\n abort(400, 'Not a JSON')\n if 'name' not in new_state:\n abort(400, 'Missing name')\n new_state = State(name=request.json['name'])\n storage.new(new_state)\n storage.save()\n return jsonify(new_state.to_dict()), 201", "def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer 
' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def add_task_list(request):\n data = {\"success\": False}\n try:\n title = request.POST.get(\"title\")\n user = request.user\n todolist = TodoList.objects.filter(title=title).count()\n if todolist == 0:\n todolist_obj = TodoList.objects.create(title=title,\n creator=user)\n todolist_obj.save()\n data[\"success\"] = True\n data[\"message\"] = \"Data Saved\"\n else:\n raise Exception(\"List with same name exist\")\n except Exception as ex:\n data[\"message\"] = \"Failed to save data [%s]\" % ex\n finally:\n return JsonResponse(data)", "def save(self, *args, **kwargs):\n pass" ]
[ "0.7236179", "0.6786587", "0.6767346", "0.66732574", "0.6173778", "0.6169759", "0.6156117", "0.6154787", "0.6003337", "0.5966829", "0.5952784", "0.59198797", "0.5882262", "0.58755314", "0.58628845", "0.5862723", "0.5828422", "0.5778974", "0.5684983", "0.56759155", "0.562585", "0.5612564", "0.56027037", "0.5597744", "0.55621153", "0.5560547", "0.55596524", "0.55562913", "0.554885", "0.55326265", "0.55239385", "0.54979575", "0.5461495", "0.54534495", "0.5451214", "0.53821456", "0.5359576", "0.5357775", "0.53494525", "0.5348506", "0.5340562", "0.53169763", "0.5311078", "0.5294281", "0.52797663", "0.5269447", "0.525846", "0.52394384", "0.52303314", "0.5220857", "0.52180004", "0.5200652", "0.51916516", "0.5187233", "0.51863307", "0.51852024", "0.518107", "0.51787925", "0.5175309", "0.5174399", "0.5158576", "0.51578736", "0.51489216", "0.5148339", "0.5131581", "0.51298374", "0.51184916", "0.51041436", "0.5104135", "0.51010877", "0.50873935", "0.5086449", "0.50699073", "0.5063272", "0.5061432", "0.505224", "0.5045974", "0.5044451", "0.50443673", "0.5040927", "0.50391847", "0.50359654", "0.5028012", "0.50231147", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.4989715", "0.49575806", "0.49569723", "0.49556428" ]
0.0
-1
Add a user to 'prospects' unless the user is the campaign owner or is already linked to 'workers', 'prospects', or 'blacklist'. Also decline to add prospects when the campaign is not active.
user: A TcsUser instance to link to 'prospects'
def addProspect(self, user):
    if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \
            and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():
        self.prospects.add(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True", "def check_professor(doc_user):\n info = doc_user[\"user_info\"]\n my_sharing_calendar = col_sharing.find_one({\"User\": doc_user[\"_id\"]})\n if info[\"professor\"]:\n logger.info('{}: sharing calendar start'.format(\n doc_user[\"user_id\"]))\n my_sharing_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_sharing.insert_one(my_sharing_calendar)\n return True\n \n return False", "def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True", "def add_talk(talk):\n # Check if this user is already registered\n exists = check_attendee_exists(talk.userId, talk.profile)\n if not exists[0]:\n return False\n\n talk.put()\n return True", "def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)", "def create_users(cls):\n for p in Player.objects.exclude(race__can_play=False):\n p.get_extension(GrandChallengeUser)", "def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)", "def test_user_settings_for_subscribing_other_users(self) -> None:\n user_profile = self.example_user(\"cordelia\")\n invitee_user_id = user_profile.id\n realm = user_profile.realm\n\n do_set_realm_property(\n realm, \"create_public_stream_policy\", Realm.POLICY_MEMBERS_ONLY, acting_user=None\n )\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_ADMINS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_MODERATOR, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream1\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n 
self.assert_json_error(result, \"Insufficient permission\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream1\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )\n\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_MODERATORS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_MEMBER, acting_user=None)\n # Make sure that we are checking the permission with a full member,\n # as full member is the user just below moderator in the role hierarchy.\n self.assertFalse(self.test_user.is_provisional_member)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_MODERATOR, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream2\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )\n self.unsubscribe(user_profile, \"stream2\")\n\n do_set_realm_property(\n realm, \"invite_to_stream_policy\", Realm.POLICY_MEMBERS_ONLY, acting_user=None\n )\n do_change_user_role(self.test_user, UserProfile.ROLE_GUEST, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Not allowed for guest users\")\n\n do_change_user_role(self.test_user, UserProfile.ROLE_MEMBER, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([self.test_user.id, invitee_user_id]).decode()},\n )\n self.unsubscribe(user_profile, \"stream2\")\n\n do_set_realm_property(\n realm,\n \"invite_to_stream_policy\",\n Realm.POLICY_FULL_MEMBERS_ONLY,\n acting_user=None,\n )\n do_set_realm_property(realm, \"waiting_period_threshold\", 100000, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user,\n [\"stream2\"],\n {\"principals\": orjson.dumps([invitee_user_id]).decode()},\n allow_fail=True,\n )\n self.assert_json_error(result, \"Insufficient permission\")\n\n do_set_realm_property(realm, \"waiting_period_threshold\", 0, acting_user=None)\n self.common_subscribe_to_streams(\n self.test_user, [\"stream2\"], {\"principals\": orjson.dumps([invitee_user_id]).decode()}\n )", "def test_add_coach_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_ADD_COACH, self.classrooms[1]))", "def addParticipant(self, participant):\n if len(self.participants) < self.maxParticipants:\n self.participants[participant.discordId] = participant\n else:\n raise ValueError('Max number of participants has been reached')", "def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)", "def create_investor(sender, **kwargs):\n u = kwargs[\"instance\"]\n try:\n \n if not InvestorProfile.objects.filter(username=u.username):\n inv = InvestorProfile(username=u.username,user=u)\n inv.save()\n g = DjangoGroup.objects.get(name='Investors') \n g.user_set.add(u)\n except Exception as e:\n print e", "def add_user(request):\n profile = get_object_or_404(UserProfile, user=request.user)\n\n # make sure only managers and admins can add a team\n if profile.level == 'admin' or profile.level == 
'manager':\n\n if request.method == 'POST':\n form = UserProfileForm(request.POST)\n user_email = UserForm(request.POST)\n\n if user_email.is_valid() and form.is_valid():\n user = User.objects.create_user(username=random_username(),\n email=request.POST.get('email'),\n password='EggBox900')\n messages.success(request, 'Profile added successfully')\n\n user.userprofile.first_name = form.data['first_name']\n user.userprofile.last_name = form.data['last_name']\n user.userprofile.company_id = profile.company_id\n # user.userprofile.start_date = form.data['start_date']\n # user.userprofile.end_date = form.data['end_date']\n user.userprofile.level = form.data['level']\n user.userprofile.team = Team.objects.get(pk=form.data['team'])\n user.userprofile.contract_type = form.data['contract_type']\n user.userprofile.contract_percentage = form.data['contract_percentage']\n user.userprofile.agent_goal = form.data['agent_goal']\n user.userprofile.save()\n else:\n messages.error(request, 'Update failed. Please ensure the form is valid.')\n\n users = UserProfile.objects.filter(company_id=profile.company_id)\n\n template = 'profiles/user_management.html'\n context = {\n 'users': users,\n 'profile': profile\n }\n\n return render(request, template, context)\n\n else:\n form = UserProfileForm()\n user_email = UserForm()\n\n template = 'profiles/add_user.html'\n context = {\n 'form': form,\n 'profile': profile,\n 'user_email': user_email\n }\n\n return render(request, template, context)\n else:\n messages.info(request, \"Sorry, you are not authorized to add users. Ask a Manager or Admin.\")\n\n return redirect(reverse('planning', ))", "def add_candidate(self, user):\n weight = (\n self.assignment_related_users.aggregate(models.Max(\"weight\"))[\"weight__max\"]\n or 0\n )\n defaults = {\"weight\": weight + 1}\n self.assignment_related_users.update_or_create(user=user, defaults=defaults)", "def test_can_subscribe_other_users(self) -> None:\n\n def validation_func(user_profile: UserProfile) -> bool:\n user_profile.refresh_from_db()\n return user_profile.can_subscribe_other_users()\n\n self.check_has_permission_policies(\"invite_to_stream_policy\", validation_func)", "def create_player_profile(sender, **kwargs):\n if kwargs.get('created') is True:\n PlayerProfile.objects.create(user=kwargs.get('instance'))", "def create_user_profile(email, **kwargs): # POST\n user_exists = coll(\"users\").find_one({\"_id\": email})\n\n if user_exists:\n return {\"message\": \"User already exists\"}, 400\n\n # NOTE Doesn't make sense for a person to have prizes only a team should have this\n coll(\"users\").insert_one(\n {\n \"_id\": email,\n \"skills\": kwargs[\"skills\"],\n \"prizes\": kwargs[\"prizes\"],\n \"bio\": kwargs[\"bio\"],\n \"github\": kwargs[\"github\"],\n \"interests\": kwargs[\"interests\"],\n \"seriousness\": kwargs[\"seriousness\"],\n \"team_id\": \"\",\n \"hasateam\": False,\n }\n )\n return {\"message\": \"User profile successfully created\"}, 201", "def update_user_profile(sender, instance, created, **kwargs):\n if created:\n GameplanUser.objects.create(user=instance)\n instance.gameplanuser.save()", "def add_user_to_course_cohort(cohort_name, course_id, user):\n if cohort_name is not None:\n cohort = get_cohort_by_name(course_id, cohort_name)\n try:\n add_user_to_cohort(cohort, user)\n except ValueError:\n # user already in cohort, probably because they were un-enrolled and re-enrolled\n logger.exception('Cohort re-addition')", "def save_user_receiver(sender, instance, created, *args, **kwargs):\n 
print(\"profile created\", instance)\n if created:\n new_profile = UserProfile.objects.get_or_create(owner=instance)", "def add_participant(self, address):\n if address in [u.address for u in User.all()]:\n db.run_in_transaction(self._add_participantTx, address)\n xmpp.send_invite(address, self.jid)", "def setup_whitelisted_section():\n setup_unrelated_section()\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def create(cls, user_from, user_to, round):\n grand_challenge = cls.objects.create(round=round)\n user_from = user_from.user.get_profile()\n user_to = user_to.user.get_profile()\n grand_challenge.challenge = Challenge.create(user_from.get_extension(ChallengeUser), user_to.get_extension(ChallengeUser))\n grand_challenge.challenge.accept()\n grand_challenge.save()\n return grand_challenge", "def is_attended(value, user: User):\n return value.is_attended(user)", "def test_add_coach_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_ADD_COACH, self.classrooms[1]))", "def test_teams_add_user_to_team_v2(self):\n pass", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def add_quester(self, user):\n user_node = user.get()\n for rel in graph.match(start_node=user_node, rel_type='can_complete'):\n if rel.end_node()['id'] == self.id:\n raise KeyError(\"user is already on this quest\")\n if user == self.creator:\n raise TypeError(\"creators are not eligible for their own quests.\")\n if not self.active:\n raise AttributeError(\"Quest not active.\")\n else:\n graph.create(Relationship(user_node,\n 'can_complete',\n self.quest_node))\n return True", "def add_user_to_cohort(cohort, username_or_email):\r\n user = get_user_by_username_or_email(username_or_email)\r\n previous_cohort = None\r\n\r\n course_cohorts = CourseUserGroup.objects.filter(\r\n course_id=cohort.course_id,\r\n users__id=user.id,\r\n group_type=CourseUserGroup.COHORT\r\n )\r\n if course_cohorts.exists():\r\n if course_cohorts[0] == cohort:\r\n raise ValueError(\"User {0} already present in cohort {1}\".format(\r\n user.username,\r\n cohort.name))\r\n else:\r\n previous_cohort = course_cohorts[0].name\r\n course_cohorts[0].users.remove(user)\r\n\r\n cohort.users.add(user)\r\n return (user, previous_cohort)", "def add_member(self, user):\n if user is self.owner:\n raise ValidationError('A trip owner cannot also be a member.')\n # check the user is not already a member\n if self.members.filter(pk=user.pk).exists():\n return\n self.members.add(user)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)", "def response_post_save_add(self, request, obj):\n\n # a simple hack to set the default prescribing officer\n if obj is not None and obj.prescribing_officer is None:\n obj.prescribing_officer = request.user\n obj.save()\n\n if obj is not None and obj.creator_id == 1:\n obj.creator = request.user\n 
obj.save()\n\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)", "def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()", "async def anticipation(self, ctx: commands.Context):\n role = ctx.guild.get_role(529447810127495168)\n\n if role.id not in (r.id for r in ctx.author.roles):\n await ctx.author.add_roles(role, reason=\"/anticipation\")\n embed = discord.Embed(\n colour=discord.Colour.green(),\n description=\"Anticipation Notifications successfully added.\"\n )\n await ctx.send(embed=embed)\n\n else:\n await ctx.author.remove_roles(role, reason=\"/anticipation\")\n embed = discord.Embed(\n colour=discord.Colour.red(),\n description=\"Anticipation Notifications successfully removed.\"\n )\n await ctx.send(embed=embed)", "def _update_subscribers(self):\n try:\n campaign = self.campaigns.latest('when')\n except StudyGuideCampaign.DoesNotExist:\n pass\n else:\n for student in utils.students_for_event(self.event):\n subscriber, created = StudyGuideCampaignSubscriber.objects.get_or_create(\n campaign=campaign,\n user=student.user)\n if created: # only add if it's not there already\n campaign.subscribers.add(subscriber)", "def power_play_opportunities(self, power_play_opportunities):\n\n self._power_play_opportunities = power_play_opportunities", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def test_teams_add_user_to_team_v1(self):\n pass", "def manage_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n else:\n instance.profile.save()", "def create_profile_for_new_users(sender, instance, created, **kwargs):\n if not created:\n return\n\n profile = Profile.objects.filter(user=instance).first()\n if profile is None:\n profile = Profile(user=instance)\n profile.save()", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_profile(sender, instance, signal, created, **kwargs):\n \n from tutablr_app.models import UserProfile\n \n if created:\n UserProfile.objects.get_or_create(user = instance);\n # Do additional stuff here if needed, e.g.\n # create other required related records", "def user_joined_society_handler(sender, **params):\n user = params['user']\n newest_tenure = user.society.tenures.order_by('-start_date').first()\n if newest_tenure:\n if newest_tenure.is_active() or newest_tenure.starts_soon():\n last_collection = newest_tenure.collection_schedules.order_by('-collection_date').first()\n new_tentative_end_date = get_new_tentative_end_date(last_collection.collection_date)\n newest_tenure.tentative_end_date = new_tentative_end_date\n newest_tenure.save()\n new_schedule = CollectionSchedule(\n\t\t\t\tuser=user,\n\t\t\t\tcollection_date=new_tentative_end_date,\n\t\t\t\ttenure=newest_tenure,\n )\n new_schedule.save()", "def create_user_profile(sender, instance, created, **kwargs):\n\n if created:\n user_profile = UserProfile.objects.create(user=instance)", "def test_no_enable_paid_course_registration(self):\r\n self.add_to_cart()\r\n self.request.user = self.user\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def toggle_subscription(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(\n 
user=user)\n club = self.context['club']\n\n if club in profile.subscriptions.all():\n club.subscribed_users.remove(profile)\n else:\n club.subscribed_users.add(profile)", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def make_profile_for_user(sender, instance, **kwargs):\n if kwargs['created']:\n new_profile = ImagerProfile(user=instance)\n new_profile.save()", "def hook_assign_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"ASSIGN req:%s to vol:%s\", request_id, assignee_chat_id)\n\n try:\n request_details = self.updater.persistence.bot_data[request_id]\n except KeyError:\n log.debug(\"No such request %s, ignoring\", request_id)\n return\n else:\n self.updater.dispatcher.bot_data[request_id].update(\n {\"time\": utc_short_to_user_short(data[\"time\"])}\n )\n\n # first of all, notify the others that they are off the hook and update their state accordingly\n for chat_id in request_details[\"volunteers\"]:\n if chat_id != assignee_chat_id:\n self.send_message(chat_id, c.MSG_ANOTHER_ASSIGNEE)\n updated_state = {\"state\": c.State.AVAILABLE, \"reviewed_request\": None}\n self.updater.dispatcher.user_data[chat_id].update(updated_state)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update({\"current_request\": request_id})\n self.updater.dispatcher.update_persistence()\n\n # notify the assigned volunteer, so they know they're responsible; at this point they still have to confirm\n # that they're in good health and they still have an option to cancel\n self.updater.bot.send_message(\n chat_id=assignee_chat_id,\n text=c.MSG_CAUTION,\n reply_markup=InlineKeyboardMarkup(k.caution_choices),\n )", "def setup_whitelisted_section_before_enrollment():\n setup_unrelated_section_with_unrestricted(enrollment_open=False)\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "def create_user_profile(sender, **kwargs):\n\n if kwargs['created']:\n UserProfile.objects.create(user=kwargs['instance'])", "def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)", "def contribute(request, campaign_id, template='campaign/campaign_contribution_form.html'):\r\n campaign = get_object_or_404(Campaign.objects.active(), pk=campaign_id)\r\n if not campaign.is_free:\r\n # Disable direct credit card based contribution\r\n request.user.message_set.create(message=_('That payment option is not available for this campaign.'))\r\n return HttpResponseRedirect(reverse('view_campaign', kwargs={'campaign_id':campaign_id}))\r\n err_msg = None\r\n try:\r\n qualifies, reasons = campaign.is_user_qualified(request.user)\r\n user_profile=request.user.get_profile()\r\n data = None\r\n if qualifies and request.user.first_name and request.user.last_name:\r\n # Skip the form and directly register this event attendee.\r\n data = {'first_name':request.user.first_name, \r\n 'last_name':request.user.last_name,\r\n 'birth_date':user_profile.birth_date}\r\n if data or request.method == 'POST':\r\n if request.method == 'POST':\r\n data = request.POST\r\n form = forms.DirectContributionForm(data=data, campaign=campaign, user_profile=user_profile)\r\n if form.is_valid():\r\n contribution = form.save(commit=True)\r\n _log.info('Contribution processed %s', contribution)\r\n if contribution.qty > 
1:\r\n request.user.message_set.create(message=_('Your %s contributions totalling $.2f have been processed. Thank you.' % (contribution.qty, contribution.amount)))\r\n elif not campaign.is_free:\r\n request.user.message_set.create(message=_('Your contribution of $.2f has been processed. Thank you.' % contribution.amount))\r\n else:\r\n request.user.message_set.create(message=_('You have successfully joined this free campaign. Thank you.'))\r\n return HttpResponseRedirect(reverse('view_campaign', kwargs={'campaign_id':campaign_id}))\r\n else:\r\n form = forms.DirectContributionForm(campaign=campaign, user_profile=user_profile)\r\n ctx = {'campaign':campaign, 'c':campaign, 'form':form}\r\n except CampaignError, e:\r\n request.user.message_set.create(message=e.message)\r\n return HttpResponseRedirect(reverse('view_campaign', kwargs={'campaign_id':campaign.pk}))\r\n return render_view(request, template, ctx)", "def test_teams_add_user_to_team_by_batch_v1(self):\n pass", "def add_attendee(attendee):\n # Check if this user is already registered\n exists = check_attendee_exists(attendee.userId, attendee.profile)\n if exists[0]:\n return False\n\n if (attendee.personal_page and\n not attendee.personal_page.startswith('http://')):\n attendee.personal_page = 'http://%s' % attendee.personal_page\n if (attendee.company_page and\n not attendee.company_page.startswith('http://')):\n attendee.company_page = 'http://%s' % attendee.company_page\n\n attendee.put()\n return True", "def form_valid(self, form):\n form.instance.founder = self.request.user\n print('Project Create user:', self.request.user)\n form.save()\n\n tc_lib.generate_user_matches(form)\n\n return super(ProjectCreate, self).form_valid(form)", "def create_ids_profile(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n display_name = kwargs['objectname']\n # stage the necessary JSON payload\n json_data = {\n \"profile_severity\": [\n \"CRITICAL\",\n \"HIGH\",\n \"MEDIUM\",\n \"LOW\"\n ],\n \"criteria\": [],\n \"resource_type\": \"IdsProfile\",\n \"display_name\": display_name,\n \"id\": display_name\n }\n # set value for CVSS severity, if configured by user\n if kwargs['cvss'] is not None:\n cvss = kwargs['cvss']\n cvss_criteria = {\n \"filter_name\": \"CVSS\",\n \"filter_value\": cvss,\n \"resource_type\": \"IdsProfileFilterCriteria\"\n }\n filter_operator = {\n \"operator\": \"AND\",\n \"resource_type\": \"IdsProfileConjunctionOperator\"\n }\n # update 'criteria' key in json payload\n json_data['criteria'].append(cvss_criteria)\n json_data['criteria'].append(filter_operator)\n # set value(s) for products affected, if configured by user\n if kwargs['product_affected'] is not None:\n pa = kwargs['product_affected']\n pa_criteria = {\n \"filter_name\": \"PRODUCT_AFFECTED\",\n \"filter_value\": pa,\n \"resource_type\": \"IdsProfileFilterCriteria\"\n }\n # update 'criteria' key in json payload\n json_data['criteria'].append(pa_criteria)\n response_code = patch_ips_profile_json(proxy, sessiontoken, json_data, display_name)\n if response_code == 200:\n print(f'The IDS Profile {display_name} has been created successfully')\n else:\n print(f'There was an error, please check your syntax')\n sys.exit(1)", "def create_user_start_program_advices_list_empty(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=4, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n 
StatusUser.objects.create(user=user_created)\n weight = 60\n ProfileUser.objects.create(user=user_created, starting_weight=weight,\n actual_goal_weight=10, final_weight=50)\n self.add_user_results(50, user_created, weight)\n user = HistoryUser.objects.get(user=user_created)\n user.start_questionnaire_completed = True\n user.save()\n\n return user_created", "def create_or_update_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.get_or_create(user=instance)\n instance.profile.save()", "def test_user_profile_relationship(self):\r\n user = self._create_test_user()\r\n profile = self._create_test_profile()\r\n user.profile = profile\r\n self.db.session.commit()", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n profile = UserProfile()\n profile.user = instance\n profile.email=instance.email\n profile.save()", "def test_add_duplicated_profile_requests(self):\n # We change the profile of the original flow request. Notice that in test data there is already a flow request\n profile = {\n 'code': 'PROF_002',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n flow_request = {\n 'flow_id': 'f_11111',\n 'profile': profile,\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n\n res = self._add_flow_request(flow_request=flow_request)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.json(), ERRORS_MESSAGE['INVALID_DATA'])", "def add_spawning_profile(intersection, spawning_profile):\n return intersection.add_spawning_profile(spawning_profile)", "def promoteUser(self):\n\t\t#ensure they're supposed to be here and haven't been here before\n\t\tif self.goodEventsCount >= 3 and not self.verified:\n\t\t\tself.verifiedUser=True\n\t\t\tself.put()\n\t\t\tmessage = mail.EmailMessage(\n\t\t\t\t\tsender=\"Friends with Food Admin <[email protected]>\",\n subject=\"Your account has been verified!\")\n\n\t\t\tmessage.to = self.id.email()\n\t\t\tmessage.cc = \"[email protected]\"\n\t\t\tmessage.body = \"\"\"\n\t\t\tDear %s:\n\n\t\t\tYour account on Friends with Food has been verified! Because you've \n\t\t\tshown us so many good events, we've upgraded your account. Now, you'll \n\t\t\tget notified of free food on campus ASAP! 
You'll also be able to verify\n\t\t\tevents so that everyone knows they're legit.\n\t\t\t\n\t\t\t*With great power comes great responsibility*\n\t\t\t\n\t\t\tThanks,\n\t\t\t\n\t\t\tThe Friends with Food Team\n\t\t\t\"\"\" % self.id.nickname()\n\t\t\tmessage.send()", "def setUp(self) -> None:\n super().setUp()\n\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_1_ID),\n user_id=self.USER_1_ID,\n score_category=self.SCORE_CATEGORY_1,\n score=1.5,\n onboarding_email_sent=False\n ).put()\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_2, self.USER_1_ID),\n user_id=self.USER_1_ID,\n score_category=self.SCORE_CATEGORY_2,\n score=2,\n onboarding_email_sent=False\n ).put()\n user_models.UserContributionProficiencyModel(\n id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_2_ID),\n user_id=self.USER_2_ID,\n score_category=self.SCORE_CATEGORY_1,\n score=1.5,\n onboarding_email_sent=False,\n deleted=True\n ).put()", "def setup_trial_and_user(cidc_api, monkeypatch) -> int:\n # this is necessary for adding/removing permissions from this user\n # without trying to contact GCP\n mock_gcloud_client(monkeypatch)\n\n user = Users(\n email=user_email, role=CIDCRole.CIMAC_USER.value, approval_date=datetime.now()\n )\n mock_current_user(user, monkeypatch)\n\n with cidc_api.app_context():\n TrialMetadata(\n trial_id=\"test_trial\",\n metadata_json={\n prism.PROTOCOL_ID_FIELD_NAME: trial_id,\n \"participants\": [],\n \"allowed_cohort_names\": [\"Arm_Z\"],\n \"allowed_collection_event_names\": [],\n },\n ).insert()\n\n user.insert()\n return user.id", "def create_profile_for_new_user(sender, created, instance, **kwargs):\n if created:\n profile = self.get_model('profile')(user=instance)\n profile.save()", "def create_pootle_profile(sender, instance, **kwargs):\n try:\n profile = instance.get_profile()\n except PootleProfile.DoesNotExist:\n profile = PootleProfile(user=instance)\n profile.save()", "def add_profile(self, request, *args, **kwargs):\n # Todo (mo): utilize self.get_serializer(instance=conversation, data=request.data)\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = AddProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)", "def post_save_user_model_receiver(sender, instance, created, *args, **kwargs):\n if created:\n try:\n Profile.objects.create(user=instance)\n except:\n pass", "def create_participants(self, nagg, nneu, custom):\n neu = [NeutralScooterCompany() for _ in range(nneu)]\n agg = [AggressiveScooterCompany() for _ in range(nagg)]\n parts = neu + agg\n if custom is not None:\n parts += [custom]\n self.participants = parts\n return", "def users_page(request):\n if request.method == 'POST':\n user = request.user\n form = CompetenceForm(request.POST)\n\n if form.is_valid():\n form.instance.person = request.user\n form.save()\n # return redirect('user-page')\n # competence = Competence.objects.create_competence(user, form.title_of_competence, form.level_of_competence)\n else:\n form = CompetenceForm()\n\n return render(request, 'core/user-page.html', {'form': form})", "def create_or_update_user_profile(sender, instance, created, **kwargs):\n\n # Create profile and set ACTIVE status to account -- TODO : ACTIVE STATUS\n if created:\n Profile.objects.create(user=instance, status=Status.get_or_create_status(strings.ACTIVE_STATUS))\n\n else:\n 
instance.profile.save()", "def create_or_update_user_profile(sender, instance, created, **kwargs):\n _, created = UserProfile.objects.get_or_create(user=instance)\n if created and instance.email != \"\":\n instance.profile.email = instance.email\n instance.profile.save()", "def add_volunteer(self, volunteer):\n self._volunteers += [volunteer]", "def attend_event(self, event_id):\n event = Event.objects.get(id=event_id)\n self.event_attending.add(event)\n self.save()\n event.save()", "def copy_to_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamuser in TeamUser.objects.all():\n if TeamUserCopy.objects.filter(team_id=teamuser.team_id, user_id=teamuser.user_id).count() == 0:\n TeamUserCopy.objects.create(team_id=teamuser.team_id, user_id=teamuser.user_id)\n print('Created %s %s' % (teamuser.team_id, teamuser.user_id))\n else:\n print('Already exists... skipping')", "def test_subscriptions_add_for_principal_deactivated(self) -> None:\n target_profile = self.example_user(\"cordelia\")\n post_data = dict(\n principals=orjson.dumps([target_profile.id]).decode(),\n )\n self.common_subscribe_to_streams(self.test_user, \"Verona\", post_data)\n\n do_deactivate_user(target_profile, acting_user=None)\n result = self.common_subscribe_to_streams(\n self.test_user, \"Denmark\", post_data, allow_fail=True\n )\n self.assert_json_error(\n result,\n f\"User not authorized to execute queries on behalf of '{target_profile.id}'\",\n status_code=403,\n )", "def create_user_profile(instance, created, **_):\n if created:\n Profile.objects.create(user=instance)", "def post_save_add_contact(sender, **kwargs):\n obj = kwargs['instance']\n active_campaign_list = Campaign.objects.filter(phonebook__contact__id=obj.id,\n status=CAMPAIGN_STATUS.START)\n # created instance = True + active contact + active_campaign\n if kwargs['created'] and obj.status == CONTACT_STATUS.ACTIVE \\\n and active_campaign_list.count() >= 1:\n for elem_campaign in active_campaign_list:\n try:\n Subscriber.objects.create(\n contact=obj,\n duplicate_contact=obj.contact,\n status=SUBSCRIBER_STATUS.PENDING,\n campaign=elem_campaign)\n except:\n pass", "def _add_user_props_to_hit(self, hit):\n for key in self._user_properties:\n try:\n if key in [\"user_id\", \"non_personalized_ads\"]:\n hit.update({key: self._user_properties[key]})\n else:\n if \"user_properties\" not in hit.keys():\n hit.update({\"user_properties\": {}})\n hit[\"user_properties\"].update(\n {key: {\"value\": self._user_properties[key]}}\n )\n except:\n logger.info(f\"Failed to add user property to outgoing hit: {key}\")", "def test_add_users_doesnt_add_duplicate_entry(self):\r\n role = CourseStaffRole(self.course_key)\r\n role.add_users(self.student)\r\n self.assertTrue(role.has_user(self.student))\r\n # Call add_users a second time, then remove just once.\r\n role.add_users(self.student)\r\n role.remove_users(self.student)\r\n self.assertFalse(role.has_user(self.student))", "async def test_regular_user_can_explicitly_target_themselves(self, create_embed, _):\n constants.STAFF_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author, channel=self.bot_command_channel)\n\n await self.cog.user_info(self.cog, ctx, self.author)\n\n create_embed.assert_called_once_with(ctx, self.author, False)\n ctx.send.assert_called_once()", "def add_campaign(self, campaign):\n self._campaigns += [campaign]", "async def coach(self, ctx):\r\n if ctx.guild.id == 
445092370006933505:\r\n user = ctx.author\r\n dm_channel = user.dm_channel\r\n guild_data = self.config.guild(ctx.guild)\r\n coach_id = await guild_data.coachid()\r\n coach = ctx.guild.get_role(int(coach_id))\r\n channel_id = await self.config.guild(ctx.guild).coachchannel()\r\n channel = ctx.guild.get_channel(int(channel_id))\r\n if dm_channel is None:\r\n dm_channel = await user.create_dm()\r\n lst = await guild_data.get_raw(\"neededlist\")\r\n player_data = self.config.member(ctx.author)\r\n\r\n def check(m):\r\n return m.channel == dm_channel and m.author == user\r\n\r\n try:\r\n if user.id in lst:\r\n await ctx.send(\"You already have a coaching request pending please stay patient or contact our staff if its been over 48 hrs since your coaching request\")\r\n else:\r\n await ctx.send(\"Please check your DM's...\")\r\n await user.send(\"Please tell us your In game name?, Type 'stop' to stop the process\")\r\n ign = await self.bot.wait_for('message', timeout=60, check=check)\r\n ign_use = ign.content\r\n new_ign = ign.content.lower()\r\n if new_ign == \"stop\":\r\n raise UserEnd\r\n await user.send(\"Please tell us your Player Tag?, Type 'stop' to stop the process\")\r\n tag = await self.bot.wait_for('message', timeout=60, check=check)\r\n tag_use = tag.content\r\n new_tag = tag.content.lower()\r\n if new_tag == \"stop\":\r\n raise UserEnd\r\n await user.send(\"What time do you prefer for coaching? (Times in UTC only), Type 'stop' to stop the process\")\r\n time = await self.bot.wait_for('message', timeout=60, check=check)\r\n time_use = time.content\r\n np = time.content.lower()\r\n if np == \"stop\":\r\n raise UserEnd\r\n await user.send(\"What archatypes do you prefer to play?\")\r\n deck = await self.bot.wait_for('message', timeout=60, check=check)\r\n new_deck = deck.content.lower() # I know I could have made a function to check this but my brain is not working\r\n deck_use = deck.content\r\n if new_deck == \"stop\":\r\n raise UserEnd\r\n\r\n await user.send(\"You will be contacted by one of our coaches please stay patient.\")\r\n await channel.send(\"{} New coaching request from {}\".format(coach.mention, user.mention))\r\n await self.emb(ctx, \"Discord Name\", \"In Game Name\", \"Player Tag\", \"Preferred Time\", \"Deck Type\", user.mention, ign_use, tag_use, time_use, deck_use)\r\n lst.append(user.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await player_data.ign.set(ign_use)\r\n await player_data.tag.set(tag_use)\r\n await player_data.time.set(time_use)\r\n await player_data.deck_type.set(deck_use)\r\n\r\n except asyncio.exceptions.TimeoutError:\r\n await user.send(\"Timeout...\") # not sure where to send these messages\r\n return\r\n except UserEnd:\r\n await user.send(\"Stopped!\") # not sure where to send these messages\r\n return\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_subscribe_to_stream_post_policy_moderators_stream(self) -> None:\n member = self.example_user(\"AARON\")\n stream = self.make_stream(\"stream1\")\n # Make sure that we are testing this with full member which is just below the moderator\n # in the role hierarchy.\n self.assertFalse(member.is_provisional_member)\n do_change_stream_post_policy(\n stream, Stream.STREAM_POST_POLICY_MODERATORS, acting_user=member\n )\n result = self.common_subscribe_to_streams(member, [\"stream1\"])\n json = self.assert_json_success(result)\n self.assertEqual(json[\"subscribed\"], {member.email: [\"stream1\"]})\n 
self.assertEqual(json[\"already_subscribed\"], {})", "def check_and_add_default_teams(user):\n for course in user.course.all():\n user_courses_without_teams = course.teams.exclude(\n team_id__icontains='default_team_'\n )\n team_id = f'default_team_for_{course.frontend_course_id}'\n default_team = list(course.teams.filter(team_id=team_id))\n\n if user_courses_without_teams.exists():\n # delete this student from default team\n user.teams.remove(*default_team)\n else:\n # add this student to default team\n user.teams.add(*default_team)\n\n user.save()", "def save(self, *args, **kwargs):\n create = self.id is None\n # Strip out the user keyword argument, since the super save method\n # does not expect it.\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(Member, self).save(*args, **kwargs)\n # Only register if the object is not being updated\n if create:\n self._register(user=user)\n else:\n # User and UserProfile already exist so save them too\n self.userprofile.save()\n self.userprofile.user.save()", "def user_post_save(sender, instance, created, **kwargs):\n\t\tif created == True:\n\t\t\tup = UserProfile()\n\t\t\tup.user = instance\n\t\t\tup.save()", "def add_users_to_team(team, users):\n assignment = team.assignment_fk\n if len(TeamMember.objects.filter(team_fk=team)) + len(users) > assignment.max_num_team_members:\n raise Exception('Maximum number of team members exceeds')\n\n with transaction.atomic():\n for user in users:\n if TeamMember.objects.filter(team_fk=team, user_fk=user):\n raise Exception('Some users have had belonged team')\n TeamMember.objects.create(team_fk=team, user_fk=user,\n assignment_fk=assignment, is_leader=False)\n\n return True", "def add_skills_to_profile():\n # get specific objects\n profile = storage.get(\"Profile\", profile_id)\n skills = storage.get(\"Skills\", skills_id)\n if profile is not None and skills is not None:\n # check every skill in profile\n for profile_skill in profile.skills:\n # if the given skill is already linked to profile, return\n if profile_skill.id == skills.id:\n return jsonify(skills.to_dict()), 200\n # if skill is not in profile, append skill and save\n profile.skills.append(skills)\n profile.save()\n return jsonify(skills.to_dict()), 201\n\n # if id not in database, abort\n abort(404)", "def forwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n if hasattr(referral, \"user\"):\n referral.users.add(referral.user)\n referral.save()", "def conference_registration(self, request, reg=True):\n prof = self.profile_service.get_profile_from_user() # get user Profile\n\n # check if conf exists given websafeConfKey\n # get conference; check that it exists\n wsck = request.websafeConferenceKey\n conf = ndb.Key(urlsafe=wsck).get()\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % wsck)\n\n # register\n if reg:\n # check if user already registered otherwise add\n if wsck in prof.conferenceKeysToAttend:\n raise ConflictException(\n \"You have already registered for this conference\")\n\n # check if seats avail\n if conf.seatsAvailable <= 0:\n raise ConflictException(\"There are no seats available.\")\n\n # register user, take away one seat\n prof.conferenceKeysToAttend.append(wsck)\n conf.seatsAvailable -= 1\n retval = True\n\n # unregister\n else:\n # check if user already registered\n if wsck in prof.conferenceKeysToAttend:\n\n # unregister user, add back one seat\n prof.conferenceKeysToAttend.remove(wsck)\n 
conf.seatsAvailable += 1\n retval = True\n else:\n retval = False\n\n # write things back to the datastore & return\n prof.put()\n conf.put()\n return BooleanMessage(data=retval)", "def save_user_ref(sender, created, instance, **_):\n if created:\n UserExtend.objects.create(user=instance)\n UserSettings.objects.create(user=instance)", "def connect_user(self, user):\n\t\tis_user_added = False\n\t\tif not user in self.users.all():\n\t\t\tself.users.add(user)\n\t\t\tself.save()\n\t\t\tis_user_added = True\n\t\telif user in self.users.all():\n\t\t\tis_user_added = True\n\t\treturn is_user_added" ]
[ "0.57529235", "0.57101214", "0.56048286", "0.549154", "0.52644926", "0.516734", "0.51544005", "0.50623524", "0.49674854", "0.49344334", "0.48778322", "0.48658597", "0.48227632", "0.481681", "0.4816524", "0.48090467", "0.48052084", "0.47986007", "0.4791924", "0.47789344", "0.47755465", "0.47655156", "0.4752161", "0.47516337", "0.47505718", "0.4746542", "0.4709363", "0.47012112", "0.4699854", "0.46997496", "0.46997496", "0.46997496", "0.468685", "0.46840233", "0.4680401", "0.46755132", "0.46608788", "0.46598732", "0.4653721", "0.46521178", "0.46488962", "0.46433002", "0.46401206", "0.46345705", "0.46329427", "0.46316847", "0.46230507", "0.46200785", "0.46152642", "0.46138063", "0.4611925", "0.46104655", "0.46027818", "0.46006763", "0.4586657", "0.45791322", "0.4573566", "0.45687005", "0.45680785", "0.45626473", "0.45565626", "0.4550838", "0.45505604", "0.4544945", "0.45385593", "0.45191127", "0.45107755", "0.45074463", "0.45018753", "0.4499488", "0.4495001", "0.44894874", "0.44875628", "0.44827548", "0.44794616", "0.44662932", "0.44658947", "0.44579184", "0.44520506", "0.44505826", "0.44488105", "0.4447912", "0.44474494", "0.44427347", "0.44384515", "0.4434648", "0.44336092", "0.4433317", "0.44283426", "0.4421991", "0.4415756", "0.44155833", "0.44058728", "0.43995127", "0.43967348", "0.43965068", "0.43964812", "0.43958116", "0.43936512", "0.43927875" ]
0.8156176
0
Remove the user from the lists of workers and prospects, if applicable, and add the user to the blacklist. Note that adding somebody as a worker removes the person from the blacklist. user: A TcsUser instance to link to the blacklist
def addToBlacklist(self, user):
    if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():
        self.blacklist.add(user)
        if self.prospects.filter(pk=user.id).exists():
            self.prospects.remove(user)
        if self.workers.filter(pk=user.id).exists():
            self.workers.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None", "async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")", "def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "async def blacklist_add(self, ctx: commands.Context, target, *, reason: str = \"No reason given.\"):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n guild = None\r\n\r\n try:\r\n check = await self.check_user(target.id, table)\r\n except Exception:\r\n guild = discord.utils.get(self.bot.guilds, id=int(target))\r\n if not guild:\r\n return\r\n\r\n check = await self.check_user(int(target), 
table)\r\n\r\n if not check[0]:\r\n if isinstance(target, discord.User):\r\n await self.add_blacklist(target.id, table, reason)\r\n else:\r\n await self.add_blacklist(int(target), table, reason)\r\n\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n if not isinstance(target, discord.User):\r\n embed = discord.Embed(color=self.bot.colors.red,\r\n description=f\"Your guild / server has been blacklisted. \"\r\n f\"If you wish to know the reason, join the \"\r\n f\"[Support server]({self.bot.invite_url})\")\r\n await guild.owner.send(embed=embed)\r\n await guild.leave()\r\n self.bot.logger.info(f\"Added guild with ID {target} to blacklist.\")\r\n else:\r\n self.bot.logger.info(f\"Added user with ID {target.id} to blacklist\")\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is already blacklisted.\")", "def nuke_users(modeladmin, request, queryset):\n users = None\n form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})\n contenttype = ContentType.objects.get_for_model(queryset.model)\n # Because we want this action available from comments or user admin lists, sort out content type\n ctype_as_string = unicode(contenttype)\n if ctype_as_string == 'user':\n users = queryset\n if ctype_as_string == 'comment':\n # build list of unique users within comment list.\n users = []\n for comment in queryset:\n if not comment.user in users:\n users.append(comment.user)\n\n if ctype_as_string == 'contact':\n # build list of unique users from contact list.\n users = []\n for c in queryset:\n if c.user and c.user not in users:\n users.append(c.user)\n if not users:\n # we haven't built out a content-type appropriate user list.\n return HttpResponse(\"Error finding content type: %s\" % contenttype)\n\n if 'apply_blacklist' in request.POST: # we're returning from the intermediate page and are ready to do some work.\n form = BlacklistForm(request.POST)\n if form.is_valid():\n reason = form.cleaned_data['reason']\n spammer = form.cleaned_data['is_spammer']\n for user in users:\n # Deactivate user accounts\n # Note: Update is more efficient,\n # but we can't use it because we may have a list (from comments)\n # rather than a proper queryset.\n user.is_active = False\n user.save()\n\n for c in user.comment_comments.all(): # remove their comments from public view.\n if spammer:\n c.delete()\n else:\n c.is_public = False\n c.is_removed = True\n c.save()\n for c in user.contact_set.all(): # and contact messages\n if spammer:\n c.delete()\n else:\n c.publish = False\n c.save()\n # remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.\n for s in Session.objects.all():\n decoded_session = s.get_decoded()\n if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:\n s.delete()\n # and add them to the blacklist\n blacklist = Blacklist(\n user = user,\n blacklister = request.user,\n reason = reason,\n )\n blacklist.save()\n\n if spammer:\n resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'\n else:\n resp_str = 'Any related accounts and comments will still be visible in the admin.'\n\n count = len(users)\n if count == 1:\n modeladmin.message_user(request, \"%s was removed and blocked from the site. %s\" % (users[0].username, resp_str))\n else:\n modeladmin.message_user(request, \"%s users were removed and blocked from the site. 
%s\" % (count, resp_str))\n return HttpResponseRedirect(request.get_full_path())\n else:\n return HttpResponse(\"error!\")\n # We haven't captured intermediate page data. Go there...\n return render(request, 'admin/blacklist.html', {'users': users, 'form': form})", "def blacklist_remove():\n db = unitdata.kv()\n blacklist = db.get(BLACKLIST_KEY, [])\n for device in get_devices():\n try:\n blacklist.remove(device)\n except ValueError:\n raise Error('{}: Device not in blacklist.'.format(device))\n db.set(BLACKLIST_KEY, blacklist)\n db.flush()", "def test_remove_from_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.feature_test.remove_from_blacklist(3)\n self.assertFalse(3 in Feature(\"testing\").blacklist)", "async def blacklist(self, ctx, add_or_remove: AddOrRemove = None, id: int = 0):\n # view\n if add_or_remove is None or not id:\n return await ctx.send(f\"```py\\n{self.bot._blacklist}\\n```\")\n\n # add\n elif add_or_remove is True:\n if id not in self.bot._blacklist:\n self.bot._blacklist.add(id)\n else:\n return await ctx.send(\"That id is already blacklisted!\")\n # remove\n else:\n if id in self.bot._blacklist:\n self.bot._blacklist.remove(id)\n else:\n return await ctx.send(\"That id is not blacklisted!\")\n\n # confirm\n self.bot.dump_blacklist()\n await ctx.send(\"Done!\")", "def update_exam_blacklist(sender, instance, **kwargs):\n exams = Exam.objects.filter(\n course_instance__instructors=instance.instructor)\n if instance.permission_allowed is False:\n exams.exclude(blacklisted=True).update(blacklisted=True)\n else:\n for exam in exams:\n if exam.has_permission():\n exam.blacklisted = False\n exam.save()", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def BUM(tw, user, action):\n\n if (user in twStart.WHITE_LIST_USERS):\n return\n\n if(action == \"B\"):\n print(_(\"Blocked: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.blocks.create(user_id=usrId, skip_status=1, include_entities=False)\n return\n elif (action == \"M\"):\n print(_(\"Muted: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.users.mutes(user_id=usrId)\n return\n elif(action == \"U\"):\n print(_(\"Unfollowed: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.friendships.destroy(user_id=usrId)\n return", "def addProspect(self, user):\n if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \\\n and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():\n self.prospects.add(user)\n return self\n return None", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "def addWhitelist(self, user, guildId):\n flag = self.con.addUserToWhitelist(user, guildId)\n\n if flag:\n self.whitelist[str(guildId)].append(user)\n\n return flag", "def anti_bot(self, message):\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n bot_creation_date = self._get_creation_date(msg_list[1])\n viewers = self.ts.fetch_chatters_from_API()['viewers']\n mod_list = self.ts.get_mods()\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n whitelist = json.load(f)\n for viewer in viewers:\n if self._get_creation_date(viewer) == bot_creation_date and viewer not in whitelist:\n self.ts.send_message('/ban {}'.format(viewer))\n mod_str = ', 
'.join(mod_list)\n self._add_to_whisper_queue(viewer, 'We\\'re currently experiencing a bot attack. If you\\'re a human and were accidentally banned, please whisper a mod: {}'.format(mod_str))", "def delWhitelist(self, user, guildId):\n flag = False\n\n for item in self.whitelist[str(guildId)]:\n if str(user) == item:\n flag = True\n break\n\n if flag:\n if self.con.removeUserOfWhitelist(user, guildId):\n self.whitelist[str(guildId)].remove(user)\n\n return flag", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "async def oauth_whitelist(self, ctx, target: Union[Role, utils.User]):\n whitelisted = self.bot.config[\"oauth_whitelist\"]\n\n # target.id is not int??\n if target.id in whitelisted:\n whitelisted.remove(target.id)\n removed = True\n else:\n whitelisted.append(target.id)\n removed = False\n\n await self.bot.config.update()\n\n embed = Embed(color=self.bot.main_color)\n embed.title = \"Success\"\n\n if not hasattr(target, \"mention\"):\n target = self.bot.get_user(target.id) or self.bot.modmail_guild.get_role(\n target.id\n )\n\n embed.description = (\n f\"{'Un-w' if removed else 'W'}hitelisted \" f\"{target.mention} to view logs.\"\n )\n\n await ctx.send(embed=embed)", "def whitelist(self, message):\n user = self.ts.get_user(message)\n msg_list = self.ts.get_human_readable_message(message).lower().split(' ')\n try:\n with codecs.open('whitelist.json', 'r', 'utf-8') as f:\n holder_list = json.load(f)\n except json.decoder.JSONDecodeError:\n holder_list = []\n if msg_list[1] not in holder_list:\n holder_list.append(msg_list[1])\n with codecs.open('whitelist.json', 'w', 'utf-8') as f:\n json.dump(holder_list, f, ensure_ascii=False)\n self._add_to_whisper_queue(user, '{} has been added to the whitelist'.format(msg_list[1]))\n else:\n self._add_to_whisper_queue(user, '{} is already in the whitelist!'.format(msg_list[1]))", "async def blacklist_member(self, ctx, *members):\n successes = []\n fails = []\n for member_arg in members:\n try:\n member = await commands.MemberConverter().convert(ctx, member_arg)\n except commands.errors.BadArgument:\n fails.append(f\"Cannot find member {member_arg}\")\n else:\n if member == ctx.author:\n fails.append(\"You cannot blacklist yourself!\")\n continue\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO blacklisted_member (user_id, guild_id)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n user_id = VALUES(user_id)\n \"\"\",\n member.id,\n ctx.guild.id,\n )\n try:\n self.bot.cache.blacklist[str(ctx.guild.id)][\"member\"].add(member.id)\n except KeyError:\n self.bot.cache.blacklist[str(ctx.guild.id)] = {\n \"member\": {member.id},\n \"command\": set(),\n }\n successes.append(f\"Blacklisted {member.mention}\")\n\n await util.send_tasks_result_list(ctx, successes, fails)", "def user_disappears(self, user):\n pass", "async def cmd_galtoguserwl(self, ctx, user_id): \n\n # ===== CHECK IF INPUT IS VALID\n try:\n user_id = int(user_id.replace(\"<\", '').replace(\"@\", '').replace(\"!\", '').replace(\">\", ''))\n except (IndexError, ValueError):\n await 
ctx.send_help('galtoguserwl', delete_after=Gallery.delete_after)\n return \n\n # ===== REMOVE OR ADD USER TO THE WHITELIST\n ret_msg = \"\"\n\n if user_id in self.cogset['user_wl']:\n self.cogset['user_wl'].remove(user_id)\n ret_msg = f'<@{user_id} has been **removed** from the gallery whitelist.'\n \n else:\n self.cogset['user_wl'].append(user_id)\n ret_msg = f'<@{user_id} has been **added** to the gallery whitelist.'\n\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "def blacklist_meme(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"update memes set blacklisted = 1 where meme_id = ?\", (id, ))\n self.conn.commit()\n cursor.close()", "def test_remove_from_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.feature_test.remove_from_whitelist(3)\n self.assertFalse(3 in Feature(\"testing\").whitelist)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def remove(self, user_id):\n pass", "async def remove_user_from_blacklist(user_id: int) -> int:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n await db.execute(\"DELETE FROM blacklist WHERE user_id=?\", (user_id,))\n await db.commit()\n rows = await db.execute(\"SELECT COUNT(*) FROM blacklist\")\n async with rows as cursor:\n result = await cursor.fetchone()\n return result[0] if result is not None else 0", "def copy_to_user(self, user):\n user_pod = user.profile.get_user_pod()\n checklist_copy = self\n checklist_copy.pk = None\n checklist_copy.collaborators.clear()\n checklist_copy.save()\n\n # Copy all the checklist entries, but don't save the checked\n # state or any of the optional details - the new checklist\n # should be \"blank\"\n copied_entries = []\n for entry in self.entries:\n item_copy = ChecklistEntry(plant_name=entry.plant_name,\n checklist=checklist_copy)\n copied_entries.append(item_copy)\n\n ChecklistEntry.objects.bulk_create(copied_entries)\n\n # Assign ownership of the new checklist to the user\n ownership = ChecklistCollaborator(collaborator=user_pod,\n checklist=checklist_copy, is_owner=True)\n ownership.save()", "async def member(self, ctx: commands.Context, member: discord.Member):\n\n if str(member.id) in self.user_blacklist:\n self.user_blacklist.remove(str(member.id))\n removed = True\n else:\n self.user_blacklist.append(str(member.id))\n removed = False\n\n await ctx.send(\n f\"{'Un' if removed else None}Blacklisted **{member.name}#{member.discriminator}**\"\n )\n return", "def test_remove_from_blacklist_with_string(self):\n email = '[email protected]'\n self.feature_test.add_to_blacklist(email)\n self.feature_test.remove_from_blacklist(email)\n self.assertFalse(email in Feature(\"testing\").blacklist)", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn 
self.connect_and_execute(data_interaction)[1][0]", "def add_to_whitelist(self, instance_id, creator, reason):\n item = {\n \"InstanceID\": instance_id,\n \"Creator\": creator,\n \"Reason\": reason,\n \"EmailSent\": False\n }\n self.delete_from_low_use(instance_id)\n response = self.whitelist.put_item(Item=item)\n return response", "async def blacklist_show(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n check = await self.check_user(target.id, table)\r\n\r\n if check[0]:\r\n embed = discord.Embed(color=self.bot.colors.neutral)\r\n if isinstance(target, discord.User):\r\n entry = await self.get_blacklist_entry(target.id, table)\r\n u = discord.utils.get(self.bot.users, id=target.id)\r\n if u:\r\n embed.set_author(name=f\"User {u} ({u.id})\", icon_url=u.avatar_url_as(static_format=\"png\"))\r\n else:\r\n embed.set_author(name=f\"User {u.id}\")\r\n else:\r\n entry = await self.get_blacklist_entry(target, table)\r\n g = discord.utils.get(self.bot.guilds, id=target)\r\n if g:\r\n embed.set_author(name=f\"Guild {g} ({g.id})\", icon_url=g.icon_url_as(static_format=\"png\"))\r\n else:\r\n embed.set_author(name=f\"Guild {g.id}\")\r\n embed.add_field(name=\"Reason:\", value=entry['reason'])\r\n await ctx.send(embed=embed)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "def blacklist_add():\n db = unitdata.kv()\n blacklist = db.get(BLACKLIST_KEY, [])\n for device in get_devices():\n if not os.path.exists(device):\n raise Error('{}: No such file or directory.'.format(device))\n if device not in blacklist:\n blacklist.append(device)\n db.set(BLACKLIST_KEY, 
blacklist)\n db.flush()", "def blacklist_token(token, user):\r\n user = User.query.filter_by(username=user).first()\r\n user.login_status = False\r\n token = Token.query.filter_by(token=token).first()\r\n token.blacklist = True\r\n db.session.commit()\r\n return {'Message': 'You have successfully logged out', \"Status\": \"Success\"}, 201", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def test_teams_remove_user_from_team_v2(self):\n pass", "async def ban_users(request: Request, user_list: list[User]) -> ModBan:\n conn: Connection = request.state.db_conn\n users = [user.user_id for user in user_list]\n\n records = await conn.fetch(\"SELECT * FROM users WHERE user_id=any($1::bigint[])\", tuple(users))\n db_users = [record[\"user_id\"] for record in records]\n\n non_db_users = set(users) - set(db_users)\n\n async with conn.transaction():\n # Ref:\n # https://magicstack.github.io/asyncpg/current/faq.html#why-do-i-get-postgressyntaxerror-when-using-expression-in-1\n await conn.execute(\"UPDATE users SET is_banned=TRUE WHERE user_id=any($1::bigint[])\", db_users)\n await conn.execute(\"UPDATE pixel_history SET deleted=TRUE WHERE user_id=any($1::bigint[])\", db_users)\n\n await request.state.canvas.sync_cache(conn, skip_check=True)\n\n return ModBan(banned=db_users, not_found=list(non_db_users))", "def is_blacklisted(self, user_id, blacklist_user_id):\n try:\n result = self.table.select(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id)).execute()\n if result.rowcount >= 1:\n return True\n elif result.rowcount == 0:\n return False\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "async def role_blacklist(\n self, ctx: Context, target: Member, role_title: str, *, reason: str = \"None given.\"\n ) -> None:\n\n role_title = role_title.lower()\n\n guild = ctx.guild\n target_role_id = self._get_role_id_from_title(guild, role_title)\n\n if target_role_id is None:\n role_names = \", \".join(self.get_guild_role_names(guild))\n await ctx.send(\"Invalid role name. 
Possible roles are {}.\".format(role_names))\n return\n elif target is None:\n await ctx.send(\"Member not found.\")\n return\n else:\n\n # Send a message, send a mod note and create a server log note\n if discord.utils.get(target.roles, name=role_title):\n # Note that we can still unban if the user doesn't have the given role\n await target.remove_roles(discord.utils.get(target.roles, name=role_title),\n reason=\"Removed due to blacklisting user.\")\n\n self.config.hset(\"guild:{}:roles:roles:{}:bans\".format(guild.id, role_title), target.id, -1)\n\n # I actually think the way I handled the expiring tags was super clever, should remember that this is a\n # thing in redis\n\n # self.config.set(\"user:{}:role_blacklist:{}\".format(target.id, role_title), str(duration), ex=expire_time)\n\n server_logs = self.bot.get_cog(\"ServerLogs\")\n if server_logs:\n\n await server_logs.handle_external_embed(ctx, \"was blacklisted from {}.\".format(role_title),\n priority=True,\n member=target,\n **{\"Moderator responsible\": ctx.message.author.name,\n \"Reason\": reason,\n \"Duration\": \"Permanent\",\n \"Expires\": \"Never\"})\n\n await ctx.send(\"User {} has been role-banned successfully.\".format(str(target)))", "def add_excl_parts(db, usernames):\n desc = \"Replicating the effect \" + \\\n \"of priming with common vs rare ideas in individual \" + \\\n \"brainstorming with revised interface\"\n exp_id= 'tN33ATDiCukWfj5G7'\n # exps = db.experiments.find()\n exp = db.experiments.find_one({'_id': exp_id})\n\n db.experiments.update({'_id': exp_id},\n {'$set': {'excludeUsers': list(usernames), 'description': desc}})\n # exp['excludeUsers'] = list(usernames)\n exp = db.experiments.find_one({'_id': exp_id})\n print exp['excludeUsers']\n print exp['description']", "def deregister_users(self):\n user_list = self.request.POST.getlist(\"user\")\n if not user_list:\n self.messages.warning(\"Ingen brukere krysset av!\")\n\n for username in user_list:\n try:\n\n event = self.get_object()\n user = User.objects.get(username=username)\n\n log_eventregistration_change(\n current_user=self.request.user,\n user=user,\n event=event,\n action=DELETION,\n )\n\n self.get_object().deregister_user(user, respect_closed=False)\n\n # to log deletion we need registration\n except (User.DoesNotExist, UserRegistrationException) as ex:\n self.messages.warning(\n f\"Kunne ikke fjerne {username} fra påmeldingslisten. \"\n f\"Returnert error var: {type(ex).__name__}: {str(ex)}. \"\n \"Ta kontakt med WebKom, og oppgi denne feilmeldingen \"\n \"dersom du tror dette er en feil.\"\n )", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def reject_user_application(self, user):\n if self.is_moderator \\\n and self.has_perm('accounts.reject_user_application'):\n user.moderator = self\n user.moderator_decision = user.REJECTED\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied", "def setup_whitelisted_section():\n setup_unrelated_section()\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "async def admin_remove(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if isAlready:\n query = \"DELETE FROM wormhole_admin WHERE admin = ? AND name = ?\"\n self.bot.db_query(query, (user.id, wormhole))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-removed\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-admin\", user=user.name\n )\n )", "def remove_blacklisted(blacklist, ssc, output_dir):\n print('[INFO] Writing blacklisted corpus to {}...'.format(output_dir))\n # assuming there is only 1 SSC, so take index 0\n ssc_filepaths = list(get_filepaths(ssc))[0]\n # for faster lookup\n blacklist = set(blacklist)\n for filepath in ssc_filepaths:\n with open(filepath, 'r') as f:\n # remove blacklisted entities\n lines = f.readlines()\n for i in range(1, len(lines) - 1):\n previous_tag = 'O' if lines[i-1] == '\\n' else lines[i-1].strip().split('\\t')[1]\n next_tag = 'O' if lines[i+1] == '\\n' else lines[i+1].strip().split('\\t')[1]\n single_token_entity = (previous_tag != 'I-' and next_tag != 'I-')\n blacklisted = tuple(lines[i].strip().split('\\t')) in blacklist\n if single_token_entity and blacklisted:\n lines[i] = '{}\\tO\\n'.format(lines[i].split('\\t')[0])\n # write blacklisted copy to disk\n corpus_name = os.path.basename(ssc) + '_blacklisted'\n output_directory = os.path.join(output_dir, corpus_name)\n make_dir(output_directory)\n output_filepath = os.path.join(output_directory, os.path.basename(filepath))\n with open(output_filepath, 'w') as f:\n for line in lines:\n f.write(line)", "def remove_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 1:\n self.users_hat[user1_index, user2_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user2_index} was not following user {user1_index}\")\n if 
self.users_hat[user2_index, user1_index] == 1:\n self.users_hat[user2_index, user1_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user1_index} was not following user {user2_index}\")", "def liberate_user(cls, user):\n liberated = False\n if user.mobile_phone and cls.remove_banned_number(user.mobile_phone):\n liberated = True\n\n if user.add_mobile_phone and cls.remove_banned_number(user.mobile_phone):\n liberated = True\n\n if user.landline_phone and cls.remove_banned_number(user.landline_phone):\n liberated = True\n\n if user.add_landline_phone and cls.remove_banned_number(user.add_landline_phone):\n liberated = True\n\n if user.mobile_phone and cls.remove_suspicious_number(user.mobile_phone):\n liberated = True\n\n if user.add_mobile_phone and cls.remove_suspicious_number(user.add_mobile_phone):\n liberated = True\n\n if user.landline_phone and cls.remove_suspicious_number(user.landline_phone):\n liberated = True\n\n if user.add_landline_phone and cls.remove_suspicious_number(user.add_landline_phone):\n liberated = True\n\n if liberated:\n cls.signals.user_liberated.send(cls, user=user)\n\n return liberated", "def test_listing_from_wall_when_blocked_some_users(self):", "def share_to_user(self, user):\n user_pod = user.profile.get_user_pod()\n user_access = ChecklistCollaborator(collaborator=user_pod,\n checklist=self, is_owner=False)\n user_access.save()", "def remove_registrar(contest, user):\n _remove_role(contest, user, pcm.Registrar)", "def results_blacklist(self, value):\n if self.uuid is not None:\n raise AttributeError(\"can't set attribute on a launched task\")\n self._results_blacklist = value", "def remove_users(self, *users):\r\n pass", "def test_teams_remove_user_from_team_v1(self):\n pass", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "async def unban(self, user: User):\n coro = self._state.remove_team_ban(self.id, user.id)\n await coro", "def unsend_scheduled_messages_after(self, time_cutoff):\n for user_id in self.user_id_to_scheduled_message_ts:\n for scheduled_ts in list(self.user_id_to_scheduled_message_ts[user_id]):\n if scheduled_ts >= time_cutoff:\n # The below if statement is likley redundant\n if scheduled_ts in self.user_id_to_scheduled_message_ts[user_id]:\n self.user_id_to_scheduled_message_ts[user_id].remove(scheduled_ts)", "async def on_message(self, message: discord.Message):\n if not message.author.bot:\n guild_info = server_setup.get_guild_info(message.guild)\n warned_users = guild_info[\"warnedUsers\"]\n\n if profanity.contains_profanity(message.content):\n await message.delete()\n await message.channel.send(f\"{message.author.mention} that is not allowed!\")\n\n try:\n found_user = False\n\n for user in warned_users:\n if user[\"userID\"] == message.author.id:\n found_user = True\n amount_of_warns = user[\"numOfWarns\"]\n amount_of_warns += 1\n user[\"numOfWarns\"] = amount_of_warns\n\n if amount_of_warns >= 15:\n await message.author.ban(reason=\"15 warnings reached.\")\n await message.channel.send(\n f\"{message.author.mention} has been banned for reaching 15 warnings.\")\n if amount_of_warns == 5 or amount_of_warns == 10:\n await message.author.kick(reason=f\"{amount_of_warns} warnings reached.\")\n await message.channel.send(\n f\"{message.author.mention} has been kicked for reaching {amount_of_warns} warnings.\")\n\n if not found_user:\n warn_user_info = {\n \"userID\": message.author.id,\n \"numOfWarns\": 1\n }\n\n warned_users.append(warn_user_info)\n\n guild_info[\"warnedUsers\"] = warned_users\n 
server_setup.update_guild(guild_info=guild_info)\n\n except:\n traceback.print_exc()\n print(\"User could not be warned or kicked.\")", "def leave(self, user):\n membership = self.check_membership(user)\n if membership is not None and membership.role != 'O':\n if membership.role == 'B':\n membership.role = 'LB'\n else:\n membership.role = 'L'\n membership.save()", "def rem_list(action, user):\n \n try:\n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n l.delete()\n \n # Remove\n userprofile = user.get_profile()\n board = userprofile.get_board(action['boardId'])\n board.lists.remove(action['listId'])\n userprofile.save()\n except:\n # the list or the board doesn't exist.\n pass", "def block_user(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/blocked-users.html', user=user, likes=likes)", "async def add_user_to_blacklist(user_id: int) -> int:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n await db.execute(\"INSERT INTO blacklist(user_id) VALUES (?)\", (user_id,))\n await db.commit()\n rows = await db.execute(\"SELECT COUNT(*) FROM blacklist\")\n async with rows as cursor:\n result = await cursor.fetchone()\n return result[0] if result is not None else 0", "def restricted_teams(self, user):\n return []", "def update_users_in_range(self, user_list):\r\n self.users_in_range = []\r\n for user in user_list:\r\n if user != self:\r\n for freq_range in user.currently_used_frequencies:\r\n received_power = self.calculate_signal_power(user, freq_range)\r\n if received_power > settings.power_threshold:\r\n tmp_freq = freq_range[:]\r\n if tmp_freq not in self.users_in_range:\r\n self.users_in_range.append(tmp_freq)", "def build_blacklist(blk_lst):\n bad_pair_dict = {}\n for stu in blk_lst:\n if stu[0] in bad_pair_dict: # Appends additional student to stu[0]'s blacklist\n bad_pair_dict[stu[0]].add(stu[1])\n else: # Adds stu[0] to the blacklist dict with the set of themself and their banned partner\n bad_pair_dict[stu[0]] = {stu[0], stu[1]}\n if stu[1] in bad_pair_dict: # Mirrors the actions taken above now for stu[1]\n bad_pair_dict[stu[1]].add(stu[0])\n else: # Mirrors the actions taken above now for stu[1]\n bad_pair_dict[stu[1]] = {stu[0], stu[1]}\n return bad_pair_dict", "def get_banned_user_obj_list(user_obj):\n banned_user_obj_list = [user_banned_list_obj.banned_user for user_banned_list_obj in user_obj.banned_user_set.all()]\n return banned_user_obj_list", "def lobbyUserPart(self, __userID):\n\n\t\t# Make sure the user is in mp lobby\n\t\tif (__userID in self.usersInLobby):\n\t\t\t# Part lobby and #lobby channel\n\t\t\tself.usersInLobby.remove(__userID)", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def refund_unmatched_entries(self, user_id_list):\n for user_id in user_id_list:\n entry = self.user_entries[user_id].pop()\n # Create a celery task to refund this entry.\n refund_unmatched_entry.delay(entry)", "def doBlackWhiteList(value, bwfilter, matcher = str.startswith, onEmpty = None, preferWL = True):\n\t(blacklist, whitelist) = splitBlackWhiteList(bwfilter)\n\tcheckMatch = lambda item, matchList: True in map(lambda x: matcher(item, x), matchList)\n\tvalue = filter(lambda x: not checkMatch(x, blacklist), QM(value or not preferWL, value, whitelist))\n\tif len(whitelist):\n\t\treturn filter(lambda x: checkMatch(x, whitelist), value)\n\treturn QM(value or bwfilter, value, onEmpty)", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)", "def ban_user(self, user):\n # salvo l'id dell'utente o del bot\n # print(\"Sto negando l'accesso all'user \" + str(user['id']))\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, False, False, True))", "def _add_user(self):\n args = {}\n args[\"name\"] = False\n #Loop until valid name given\n while not args[\"name\"]: #While name not set\n args[\"name\"] = input(\"Please enter the username of the user you would like to add: \").lower()\n args[\"userID\"] = self._get_user_id(args[\"name\"])\n if not args[\"userID\"]:\n args[\"name\"] = False\n #Get more input\n args[\"webhook_url\"] = input(\"Please enter the Discord WebHook URL for this user: \")\n args[\"override\"] = None\n #Loop until override info completed\n while args[\"override\"] == None:\n userInput = input(\"Override authentication user? 
y/n: \")\n if userInput.lower() == \"y\":\n args[\"override\"] = True\n args[\"overrideUser\"] = False\n #Loop until valid user given\n while not args[\"overrideUser\"]:\n args[\"overrideUser\"] = input(\"Please enter the Twitch username that you would like to authenticate with: \").lower()\n args[\"overrideUserID\"] = self._get_user_id(args[\"overrideUser\"])\n if not args[\"overrideUserID\"]:\n args[\"overrideUser\"] = False\n #Get oauth input, removing 'oauth:' from beginning\n args[\"overrideOauth\"] = input(\"Please enter the oauth token for the Twitch account, omitting 'oauth:': \")\n if args[\"overrideOauth\"].startswith(\"oauth:\"): #If the oauth token starts with oauth:, remove it\n args[\"overrideOauth\"] = args[\"overrideOauth\"][6:]\n elif userInput.lower() == \"n\":\n args[\"override\"] = False\n else:\n print(\"That is not a valid input.\")\n args[\"blacklist\"] = input(\"Please enter a space separated list of users to blacklist: \")\n return(args)", "def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)", "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def set_blacklist(self):\n\n for name in self.__ipset:\n if self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')", "async def whitelist_member(self, ctx, *members):\n successes = []\n fails = []\n for member_arg in members:\n try:\n member = await commands.MemberConverter().convert(ctx, member_arg)\n except commands.errors.BadArgument:\n fails.append(f\"Cannot find member {member_arg}\")\n else:\n await self.bot.db.execute(\n \"DELETE FROM blacklisted_member WHERE guild_id = %s AND user_id = %s\",\n ctx.guild.id,\n member.id,\n )\n self.bot.cache.blacklist[str(ctx.guild.id)][\"member\"].discard(member.id)\n successes.append(f\"Unblacklisted {member.mention}\")\n\n await util.send_tasks_result_list(ctx, successes, fails)", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. 
you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "async def trainer_rm(ctx, user: discord.User):\r\n \r\n trainer_data = load_file(file_path_trainer)\r\n trainer = user.id \r\n #await bot.say(trainer) tester to see if user ID -> string for trainer variable\r\n if trainer not in trainer_data[\"Trainers\"]:\r\n await bot.say(\"This trainer is not registered or has already been removed.\")\r\n \r\n else:\r\n remove_trainer(user)\r\n await bot.say(user.mention + \" has been removed.\")", "def removeFriend(self, user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.FRIENDUPDATE.format(userId=user.id)\n return self.query(url, self._session.delete)", "def add_user_with_status_unrequested(user):\r\n _add_user(user, CourseCreator.UNREQUESTED)", "def change_user_mailing_lists(sender, instance, action, reverse, model, pk_set, **kwargs):\n\tmail = instance.associated_user.email\n\tusername = instance.associated_user.first_name+\" \"+instance.associated_user.last_name\n\t#if groups are going to be added\n\tif action == \"post_add\":\n\t\tgroups = instance.groups_as_string\n\t\tgroups = groups.split(\", \")\n\t\t#put all added groups_as_string\n\t\tfor group in groups:\n\t\t \trequests.post(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members\".format(group),\n\t\t auth=('api', settings.MAILGUN_API_KEY),\n\t\t data={'subscribed': True,\n\t\t \t 'name':username,\n\t\t 'address': mail})\n\t#if groups are going to be removed\n\tif action == \"pre_clear\": \n\t\t#put the removed groups from a set in a list\n\t\tprevious = UserProfile.objects.get(pk=instance.pk)\n\t\tgrplst = previous.groups_as_string.split(\", \")\n\t\t#loop over list\n\t\tfor grp in grplst:\n\t\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))", "def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)", "async def remove_users_manually_marked_as_guests(\n registry: RedisResourceRegistry, app: web.Application\n) -> None:\n alive_keys, dead_keys = await registry.get_all_resource_keys()\n\n user_ids_to_ignore = set()\n for entry in chain(alive_keys, dead_keys):\n user_ids_to_ignore.add(int(entry[\"user_id\"]))\n\n guest_user_ids = await get_guest_user_ids(app)\n logger.info(\"GUEST user id candidates to clean %s\", guest_user_ids)\n\n for guest_user_id in guest_user_ids:\n if guest_user_id in user_ids_to_ignore:\n logger.info(\n \"Ignoring user '%s' as it previously had alive or dead resource keys \",\n guest_user_id,\n )\n continue\n\n await remove_guest_user_with_all_its_resources(\n app=app,\n user_id=guest_user_id,\n )", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say 
(\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)" ]
[ "0.74218553", "0.7352311", "0.7122453", "0.67325133", "0.6435446", "0.6364521", "0.6361342", "0.6294082", "0.6188603", "0.58658487", "0.5860538", "0.58456814", "0.58305186", "0.58287203", "0.57763344", "0.57541144", "0.56883603", "0.5684058", "0.55988926", "0.55928737", "0.5575431", "0.5563341", "0.55434424", "0.5528555", "0.5486386", "0.54816455", "0.54713535", "0.54643005", "0.5445286", "0.5443467", "0.5440424", "0.5430859", "0.54259855", "0.5420396", "0.5401794", "0.53925484", "0.5385865", "0.53710854", "0.53559065", "0.53475773", "0.5332454", "0.5311031", "0.5304728", "0.5259866", "0.5259674", "0.5250264", "0.5239465", "0.52249557", "0.5219506", "0.52112806", "0.5206998", "0.5206307", "0.52019733", "0.51971984", "0.517307", "0.51653934", "0.5153798", "0.51489526", "0.5148439", "0.5147295", "0.5127441", "0.5125992", "0.51235986", "0.51232225", "0.51223326", "0.5087151", "0.5087049", "0.50826627", "0.508024", "0.5070383", "0.50545156", "0.50490385", "0.50401855", "0.50360787", "0.5035757", "0.5032314", "0.502469", "0.5020585", "0.50184804", "0.50166243", "0.50066596", "0.5001577", "0.5000449", "0.49996075", "0.49993083", "0.49982476", "0.49962685", "0.49847144", "0.4982929", "0.4979071", "0.49784732", "0.49770996", "0.49766746", "0.49718776", "0.49715742", "0.49683723", "0.49680337", "0.4962802", "0.49616423", "0.49419403" ]
0.81987846
0
Remove the user from 'prospects' and 'blacklist', if applicable, and add the user to 'workers'. Note that adding somebody as a worker removes the person from the blacklist. user: A TcsUser instance to link to workers
def addWorker(self, user):
    if (user != self.owner) and not self.workers.filter(pk=user.id).exists():
        self.workers.add(user)
        if self.prospects.filter(pk=user.id).exists():
            self.prospects.remove(user)
        if self.blacklist.filter(pk=user.id).exists():
            self.blacklist.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def removeWorker(self, user):\n if user == self.owner:\n return None\n # Without these queries, there's no way to tell if anything actually gets removed.\n # Calling remove() on a user that is not in the set does not raise an error.\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n return self\n return None", "def addProspect(self, user):\n if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \\\n and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists():\n self.prospects.add(user)\n return self\n return None", "async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "async def blacklist_remove(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n\r\n if isinstance(target, discord.User):\r\n check = await self.check_user(target.id, table)\r\n target = target.id\r\n else:\r\n check = await self.check_user(int(target), table)\r\n target = int(target)\r\n\r\n if check[0]:\r\n await self.remove_blacklist(target, table)\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "def add_to_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.insert().values( user_id=user_id,\n blacklisted_id=blacklist_user_id).execute()\n except sqlalchemy.exc.IntegrityError as e:\n if e.orig.args[0] == 1062 :\n # duplicate entry, don't care !\n pass\n elif e.orig.args[0] == 1452 :\n self.log(e, self.identifier)\n raise egg_errors.UnknownUserOrBadgeIDException\n else:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible\n except Exception as e:\n self.log(e, 
self.identifier)\n raise egg_errors.QueryNotPossible", "def copy_to_user(self, user):\n user_pod = user.profile.get_user_pod()\n checklist_copy = self\n checklist_copy.pk = None\n checklist_copy.collaborators.clear()\n checklist_copy.save()\n\n # Copy all the checklist entries, but don't save the checked\n # state or any of the optional details - the new checklist\n # should be \"blank\"\n copied_entries = []\n for entry in self.entries:\n item_copy = ChecklistEntry(plant_name=entry.plant_name,\n checklist=checklist_copy)\n copied_entries.append(item_copy)\n\n ChecklistEntry.objects.bulk_create(copied_entries)\n\n # Assign ownership of the new checklist to the user\n ownership = ChecklistCollaborator(collaborator=user_pod,\n checklist=checklist_copy, is_owner=True)\n ownership.save()", "def test_teams_remove_user_from_team_v2(self):\n pass", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "def toggle_interested(self):\n user = self.context['request'].user\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n workshop = self.context['workshop']\n\n if workshop in profile.interested_workshops.all():\n workshop.interested_users.remove(profile)\n else:\n workshop.interested_users.add(profile)", "async def blacklist_add(self, ctx: commands.Context, target, *, reason: str = \"No reason given.\"):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n guild = None\r\n\r\n try:\r\n check = await self.check_user(target.id, table)\r\n except Exception:\r\n guild = discord.utils.get(self.bot.guilds, id=int(target))\r\n if not guild:\r\n return\r\n\r\n check = await self.check_user(int(target), table)\r\n\r\n if not check[0]:\r\n if isinstance(target, discord.User):\r\n await self.add_blacklist(target.id, table, reason)\r\n else:\r\n await self.add_blacklist(int(target), table, reason)\r\n\r\n await ctx.message.add_reaction(self.bot.custom_emojis.tick)\r\n if not isinstance(target, discord.User):\r\n embed = discord.Embed(color=self.bot.colors.red,\r\n description=f\"Your guild / server has been blacklisted. 
\"\r\n f\"If you wish to know the reason, join the \"\r\n f\"[Support server]({self.bot.invite_url})\")\r\n await guild.owner.send(embed=embed)\r\n await guild.leave()\r\n self.bot.logger.info(f\"Added guild with ID {target} to blacklist.\")\r\n else:\r\n self.bot.logger.info(f\"Added user with ID {target.id} to blacklist\")\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is already blacklisted.\")", "def test_teams_remove_user_from_team_v1(self):\n pass", "def nuke_users(modeladmin, request, queryset):\n users = None\n form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})\n contenttype = ContentType.objects.get_for_model(queryset.model)\n # Because we want this action available from comments or user admin lists, sort out content type\n ctype_as_string = unicode(contenttype)\n if ctype_as_string == 'user':\n users = queryset\n if ctype_as_string == 'comment':\n # build list of unique users within comment list.\n users = []\n for comment in queryset:\n if not comment.user in users:\n users.append(comment.user)\n\n if ctype_as_string == 'contact':\n # build list of unique users from contact list.\n users = []\n for c in queryset:\n if c.user and c.user not in users:\n users.append(c.user)\n if not users:\n # we haven't built out a content-type appropriate user list.\n return HttpResponse(\"Error finding content type: %s\" % contenttype)\n\n if 'apply_blacklist' in request.POST: # we're returning from the intermediate page and are ready to do some work.\n form = BlacklistForm(request.POST)\n if form.is_valid():\n reason = form.cleaned_data['reason']\n spammer = form.cleaned_data['is_spammer']\n for user in users:\n # Deactivate user accounts\n # Note: Update is more efficient,\n # but we can't use it because we may have a list (from comments)\n # rather than a proper queryset.\n user.is_active = False\n user.save()\n\n for c in user.comment_comments.all(): # remove their comments from public view.\n if spammer:\n c.delete()\n else:\n c.is_public = False\n c.is_removed = True\n c.save()\n for c in user.contact_set.all(): # and contact messages\n if spammer:\n c.delete()\n else:\n c.publish = False\n c.save()\n # remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.\n for s in Session.objects.all():\n decoded_session = s.get_decoded()\n if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:\n s.delete()\n # and add them to the blacklist\n blacklist = Blacklist(\n user = user,\n blacklister = request.user,\n reason = reason,\n )\n blacklist.save()\n\n if spammer:\n resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'\n else:\n resp_str = 'Any related accounts and comments will still be visible in the admin.'\n\n count = len(users)\n if count == 1:\n modeladmin.message_user(request, \"%s was removed and blocked from the site. %s\" % (users[0].username, resp_str))\n else:\n modeladmin.message_user(request, \"%s users were removed and blocked from the site. %s\" % (count, resp_str))\n return HttpResponseRedirect(request.get_full_path())\n else:\n return HttpResponse(\"error!\")\n # We haven't captured intermediate page data. 
Go there...\n return render(request, 'admin/blacklist.html', {'users': users, 'form': form})", "def delete_from_blacklist(self, user_id, blacklist_user_id):\n try:\n self.table.delete().where(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id )).execute() \n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")", "def join_player(self, data, user):\n self.remove(user)\n\n user.room = \"100\"\n user.x = \"0\"\n user.y = \"0\"\n user.frame = \"0\"\n\n self.add(user)", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def add_new_user(network, user, games):\n if user not in network:\n network[user] = [[], games]\n return network", "def remove(self, user_id):\n pass", "def create_retirement_request_and_deactivate_account(user):\n # Add user to retirement queue.\n UserRetirementStatus.create_retirement(user)\n\n # Unlink LMS social auth accounts\n UserSocialAuth.objects.filter(user_id=user.id).delete()\n\n # Change LMS password & email\n user.email = get_retired_email_by_email(user.email)\n user.set_unusable_password()\n user.save()\n\n # TODO: Unlink social accounts & change password on each IDA.\n # Remove the activation keys sent by email to the user for account activation.\n Registration.objects.filter(user=user).delete()\n\n # Delete OAuth tokens associated with the user.\n retire_dot_oauth2_models(user)\n AccountRecovery.retire_recovery_email(user.id)", "def add_excl_parts(db, usernames):\n desc = \"Replicating the effect \" + \\\n \"of priming with common vs rare ideas in individual \" + \\\n \"brainstorming with revised interface\"\n exp_id= 'tN33ATDiCukWfj5G7'\n # exps = db.experiments.find()\n exp = db.experiments.find_one({'_id': exp_id})\n\n db.experiments.update({'_id': exp_id},\n {'$set': {'excludeUsers': list(usernames), 'description': desc}})\n # exp['excludeUsers'] = list(usernames)\n exp = db.experiments.find_one({'_id': exp_id})\n print 
exp['excludeUsers']\n print exp['description']", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "async def cmd_galtoguserwl(self, ctx, user_id): \n\n # ===== CHECK IF INPUT IS VALID\n try:\n user_id = int(user_id.replace(\"<\", '').replace(\"@\", '').replace(\"!\", '').replace(\">\", ''))\n except (IndexError, ValueError):\n await ctx.send_help('galtoguserwl', delete_after=Gallery.delete_after)\n return \n\n # ===== REMOVE OR ADD USER TO THE WHITELIST\n ret_msg = \"\"\n\n if user_id in self.cogset['user_wl']:\n self.cogset['user_wl'].remove(user_id)\n ret_msg = f'<@{user_id} has been **removed** from the gallery whitelist.'\n \n else:\n self.cogset['user_wl'].append(user_id)\n ret_msg = f'<@{user_id} has been **added** to the gallery whitelist.'\n\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "def setup_whitelisted_section():\n setup_unrelated_section()\n\n # whitelist user to the course\n cs61a = Course.objects.get(name=\"CS61A\")\n user = User.objects.get(username=\"demo_user\")\n cs61a.whitelist.add(user)", "def remove_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 1:\n self.users_hat[user1_index, user2_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user2_index} was not following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 1:\n self.users_hat[user2_index, user1_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user1_index} was not following user {user2_index}\")", "def add_candidate(self, user):\n weight = (\n self.assignment_related_users.aggregate(models.Max(\"weight\"))[\"weight__max\"]\n or 0\n )\n defaults = {\"weight\": weight + 1}\n self.assignment_related_users.update_or_create(user=user, defaults=defaults)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def remove_judge(contest, user):\n _remove_role(contest, user, pcm.Judge)", "async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()", "def _add_user(self):\n args = {}\n args[\"name\"] = False\n #Loop until valid name given\n while not args[\"name\"]: #While name not set\n args[\"name\"] = input(\"Please enter the username of the user you would like to add: \").lower()\n args[\"userID\"] = self._get_user_id(args[\"name\"])\n if not args[\"userID\"]:\n args[\"name\"] = False\n #Get more input\n args[\"webhook_url\"] = input(\"Please enter the Discord WebHook URL for this user: \")\n args[\"override\"] = None\n #Loop until override info completed\n while args[\"override\"] == None:\n userInput = input(\"Override authentication user? y/n: \")\n if userInput.lower() == \"y\":\n args[\"override\"] = True\n args[\"overrideUser\"] = False\n #Loop until valid user given\n while not args[\"overrideUser\"]:\n args[\"overrideUser\"] = input(\"Please enter the Twitch username that you would like to authenticate with: \").lower()\n args[\"overrideUserID\"] = self._get_user_id(args[\"overrideUser\"])\n if not args[\"overrideUserID\"]:\n args[\"overrideUser\"] = False\n #Get oauth input, removing 'oauth:' from beginning\n args[\"overrideOauth\"] = input(\"Please enter the oauth token for the Twitch account, omitting 'oauth:': \")\n if args[\"overrideOauth\"].startswith(\"oauth:\"): #If the oauth token starts with oauth:, remove it\n args[\"overrideOauth\"] = args[\"overrideOauth\"][6:]\n elif userInput.lower() == \"n\":\n args[\"override\"] = False\n else:\n print(\"That is not a valid input.\")\n args[\"blacklist\"] = input(\"Please enter a space separated list of users to blacklist: \")\n return(args)", "def remove_users(self, *users):\r\n pass", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! 
Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "async def trainer_rm(ctx, user: discord.User):\r\n \r\n trainer_data = load_file(file_path_trainer)\r\n trainer = user.id \r\n #await bot.say(trainer) tester to see if user ID -> string for trainer variable\r\n if trainer not in trainer_data[\"Trainers\"]:\r\n await bot.say(\"This trainer is not registered or has already been removed.\")\r\n \r\n else:\r\n remove_trainer(user)\r\n await bot.say(user.mention + \" has been removed.\")", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove_registrar(contest, user):\n _remove_role(contest, user, pcm.Registrar)", "def make_donor(self):\n self.user.is_staff = False\n self.user.is_superuser = False\n self.user.groups.remove(get_group_by_name(self.ADMIN_GROUP))\n self.user.groups.remove(get_group_by_name(self.AMBASSADOR_GROUP))\n self.user.save()", "def change_user_mailing_lists(sender, instance, action, reverse, model, pk_set, **kwargs):\n\tmail = instance.associated_user.email\n\tusername = instance.associated_user.first_name+\" \"+instance.associated_user.last_name\n\t#if groups are going to be added\n\tif action == \"post_add\":\n\t\tgroups = instance.groups_as_string\n\t\tgroups = groups.split(\", \")\n\t\t#put all added groups_as_string\n\t\tfor group in groups:\n\t\t \trequests.post(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members\".format(group),\n\t\t auth=('api', settings.MAILGUN_API_KEY),\n\t\t data={'subscribed': True,\n\t\t \t 'name':username,\n\t\t 'address': mail})\n\t#if groups are going to be removed\n\tif action == \"pre_clear\": \n\t\t#put the removed groups from a set in a list\n\t\tprevious = UserProfile.objects.get(pk=instance.pk)\n\t\tgrplst = previous.groups_as_string.split(\", \")\n\t\t#loop over list\n\t\tfor grp in grplst:\n\t\t\trequests.delete(\"https://api.mailgun.net/v3/lists/{}@arenbergorkest.be/members/{}\".format(grp,mail),auth=('api', settings.MAILGUN_API_KEY))", "def leave(self, user):\n membership = self.check_membership(user)\n if membership is not None and membership.role != 'O':\n if membership.role == 'B':\n membership.role = 'LB'\n else:\n membership.role = 'L'\n membership.save()", "def test_remove_from_team_forbidden(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=['Team_foo'])\n req = User.create(name='requestor', email='[email protected]',\n user_type='user')\n user.put()\n req.put()\n\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_teams': []},\n headers=self.login_headers(req),\n status=403,\n )\n\n # Not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n 
self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)", "def add_new_user(network, user, games):\n if not user in network:\n network[user] = {'connections': [], 'games': games}\n return network", "async def kick(self, user: User):\n coro = self._state.remove_team_member(self.id, user.id)\n await coro", "def accept(self, user):\n # If the user is already a collaborator on the project, we don't make another\n # But we do still consider the invitation accepted\n self.project.collaborators.get_or_create(\n user = user,\n defaults = dict(role = Collaborator.Role.CONTRIBUTOR)\n )\n self.delete()", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def add_user_with_status_unrequested(user):\r\n _add_user(user, CourseCreator.UNREQUESTED)", "def addWhitelist(self, user, guildId):\n flag = self.con.addUserToWhitelist(user, guildId)\n\n if flag:\n self.whitelist[str(guildId)].append(user)\n\n return flag", "def remove_user_from_govern(self, request, pk=None, user_id=None):\n try:\n user = UserProfile.objects.get(id=user_id, organization__id=pk)\n except ObjectDoesNotExist:\n raise ResourceNotFound\n else:\n user.organization = None\n user.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def BUM(tw, user, action):\n\n if (user in twStart.WHITE_LIST_USERS):\n return\n\n if(action == \"B\"):\n print(_(\"Blocked: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.blocks.create(user_id=usrId, skip_status=1, include_entities=False)\n return\n elif (action == \"M\"):\n print(_(\"Muted: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.users.mutes(user_id=usrId)\n return\n elif(action == \"U\"):\n print(_(\"Unfollowed: {0}\").format(user))\n # TODO: Uncomment the code below\n # tw.friendships.destroy(user_id=usrId)\n return", "async def _kill_player(self, ctx: Context, *, user: discord.Member):\n\n guild = ctx.guild\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in user.roles:\n return await ctx.send(_(\"User doesn't have player role.\"))\n\n try:\n await user.remove_roles(player_role)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n dead_id = await self.config.guild(guild).dead_id()\n dead_role = discord.utils.get(guild.roles, id=dead_id)\n\n await user.add_roles(dead_role)\n\n await ctx.message.add_reaction(CHECK_MARK)", "def remove_users_from_team(team, users):\n team_member_list = []\n for user in users:\n member_list = TeamMember.objects.filter(team_fk=team, user_fk=user)\n if not member_list:\n raise Exception('Some users do not belong this team')\n team_member_list.append(member_list[0])\n \n if any([m.is_leader for m in team_member_list]):\n 
team.delete()\n else:\n for m in team_member_list:\n m.delete()", "def add_user(self, user):\n\t\tself.users[user.username] = user", "def _add_to_whisper_queue(self, user, message):\n whisper_tuple = (user, message)\n self.whisper_message_queue.appendleft(whisper_tuple)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "def test_remove_from_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.feature_test.remove_from_blacklist(3)\n self.assertFalse(3 in Feature(\"testing\").blacklist)", "def deactivate_user_setup():\n\n app.active_users.pop(current_user.get_id(), None)\n try:\n requests.post(\"http://localhost:9090\",\n json={\"active_users\": app.active_users})\n except:\n logger.info('Unable to send updated list of active users.')\n return Response('500')\n logger.info('Deactivated messages for user '\n + current_user.get_id() + '.')\n return Response('200')", "def test_remove_from_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.feature_test.remove_from_whitelist(3)\n self.assertFalse(3 in Feature(\"testing\").whitelist)", "def promote_to_admin(worker):\n if 'reply_to_message' not in worker.source.message:\n worker.answer_to_the_message(\"You have to reply to the user's message to promote him/her to admin.\")\n return False\n user_data = worker.source.message['reply_to_message']['from']\n if user_data['is_bot']:\n worker.answer_to_the_message(\"Can't register bot as participant.\")\n return False\n participant = get_from_Model(Participant, id=user_data['id'])\n if not participant:\n participant = register_participant(user_data)\n gspd = get_from_Model(participant.groupspecificparticipantdata_set, _mode='direct',\n participant_group=worker.source.administrator_page.participant_group)\n if not gspd:\n gspd = register_groupspecificparticipantdata(\n participant=participant,\n participant_group=worker.source.administrator_page.participant_group\n )\n if gspd.is_admin:\n worker.answer_to_the_message(\n f\"The user is already an admin in the {worker.source.administrator_page.participant_group.title}.\")\n return\n create_participantgroupbinding(gspd, Role.objects.get(value='admin'))\n worker.answer_to_the_message(f\"Congratulations, {participant.name} is now an admin.\")", "async def admin_remove(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if isAlready:\n query = \"DELETE FROM wormhole_admin WHERE admin = ? 
AND name = ?\"\n self.bot.db_query(query, (user.id, wormhole))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-removed\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-admin\", user=user.name\n )\n )", "def deregister_users(self):\n user_list = self.request.POST.getlist(\"user\")\n if not user_list:\n self.messages.warning(\"Ingen brukere krysset av!\")\n\n for username in user_list:\n try:\n\n event = self.get_object()\n user = User.objects.get(username=username)\n\n log_eventregistration_change(\n current_user=self.request.user,\n user=user,\n event=event,\n action=DELETION,\n )\n\n self.get_object().deregister_user(user, respect_closed=False)\n\n # to log deletion we need registration\n except (User.DoesNotExist, UserRegistrationException) as ex:\n self.messages.warning(\n f\"Kunne ikke fjerne {username} fra påmeldingslisten. \"\n f\"Returnert error var: {type(ex).__name__}: {str(ex)}. \"\n \"Ta kontakt med WebKom, og oppgi denne feilmeldingen \"\n \"dersom du tror dette er en feil.\"\n )", "def _purge_user(self, user):\n self.user_order.remove(user)\n del self.user_queue[user]\n del self.user_skip[user]", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "async def remove_users_manually_marked_as_guests(\n registry: RedisResourceRegistry, app: web.Application\n) -> None:\n alive_keys, dead_keys = await registry.get_all_resource_keys()\n\n user_ids_to_ignore = set()\n for entry in chain(alive_keys, dead_keys):\n user_ids_to_ignore.add(int(entry[\"user_id\"]))\n\n guest_user_ids = await get_guest_user_ids(app)\n logger.info(\"GUEST user id candidates to clean %s\", guest_user_ids)\n\n for guest_user_id in guest_user_ids:\n if guest_user_id in user_ids_to_ignore:\n logger.info(\n \"Ignoring user '%s' as it previously had alive or dead resource keys \",\n guest_user_id,\n )\n continue\n\n await remove_guest_user_with_all_its_resources(\n app=app,\n user_id=guest_user_id,\n )", "def liberate_user(cls, user):\n liberated = False\n if user.mobile_phone and cls.remove_banned_number(user.mobile_phone):\n liberated = True\n\n if user.add_mobile_phone and cls.remove_banned_number(user.mobile_phone):\n liberated = True\n\n if user.landline_phone and cls.remove_banned_number(user.landline_phone):\n liberated = True\n\n if user.add_landline_phone and cls.remove_banned_number(user.add_landline_phone):\n liberated = True\n\n if user.mobile_phone and cls.remove_suspicious_number(user.mobile_phone):\n liberated = True\n\n if user.add_mobile_phone and cls.remove_suspicious_number(user.add_mobile_phone):\n liberated = True\n\n if user.landline_phone and cls.remove_suspicious_number(user.landline_phone):\n liberated = True\n\n if user.add_landline_phone and cls.remove_suspicious_number(user.add_landline_phone):\n liberated = True\n\n if liberated:\n cls.signals.user_liberated.send(cls, user=user)\n\n return liberated", "def remove_slaves(self, *, user: str, identity_file: str):\n self.load_manifest(user=user, identity_file=identity_file)\n\n partial_func = functools.partial(\n remove_slaves_node,\n user=user,\n identity_file=identity_file,\n services=self.services,\n cluster=self)\n hosts = [self.master_ip] + self.slave_ips\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)", "def add_user(user):\n new_user = models.Leaderboard(username=user, score=100)\n db.session.add(new_user)\n db.session.commit()\n all_people = models.Leaderboard.query.all()\n users = []\n for person in all_people:\n users.append(person.username)\n return users", "def save_user(self):\n\n User.user_list.append(self)", "def reject_user_application(self, user):\n if self.is_moderator \\\n and self.has_perm('accounts.reject_user_application'):\n user.moderator = self\n user.moderator_decision = user.REJECTED\n user.decision_datetime = timezone.now()\n user.save()\n\n return user\n\n else:\n raise PermissionDenied", "def register(self, user) -> None:\n self._all_members[user.name] = user\n if type(user).__name__ == 'LeaderUser':\n self._leaders.append(user)", "def create_users(cls):\n for p in Player.objects.exclude(race__can_play=False):\n p.get_extension(GrandChallengeUser)", "def user_to_user(self, user_to_user):\n\n self._user_to_user = user_to_user", "def unfriend(self, removee):\n\t\tremover_friend_list = self # person terminating the friendship\n\n\t\t# Remove friend from friend request\n\t\tremover_friend_list.remove_friend(removee)\n\n\t\t# Remove friend from the removeee friend list\n\t\tfriends_list = 
FriendList.objects.get(user=removee)\n\t\tfriends_list.remove_friend(self.user)", "def removeOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass", "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def add(self, user):\n int_id = user.get_int_id(self.rooms)\n self.rooms[user.room][\"users\"].append(user)\n\n # Games\n if self.rooms[user.room][\"isGame\"] == \"true\":\n user.send([\"jg\", int_id, user.room])\n # Rooms\n else:\n user.send([\"jr\", int_id, user.room, self.get_strings(user.room)])\n self.packet.send_room([\"ap\", int_id, user.get_string()], user.room)", "def save_user(self):\n User.user_list.append(self)", "def save_user(self):\n User.user_list.append(self)", "def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True", "def save_users(user):\n user.save_user()", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def add_co_worker(self, employee):\n self.co_worker_list.append(employee)\n self.original_co_worker_list.append(employee)", "def add_user_to_team(self, for_user, to_manager):\n for_user = User.get_user_by_username(for_user)\n manager = User.get_user_by_username(to_manager)\n # @Todo test inheritance of get_user_by_username\n self.access_handler.check_add_user_to_team(for_user, manager)\n manager.add_user_to_team(for_user)", "def test_remove_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n f.friends.remove(u)\n self.assertIs(u in f.friends.all(), False)\n self.assertIs(f in u.friends.all(), False)", "def createNewUser(self, userList, UserObj):\n if(self.adminAccess):\n userList.append(UserObj)\n \n return userList", "def create_worker_and_join_task(self, user_name, user_password, task_name, display, logger, user_org='TREE', verbose=False):\r\n config = 'cloud'\r\n created = False\r\n while not created:\r\n try:\r\n # Create context for the cloud communications\r\n ffl.Factory.register(config, fflapi.Context, fflapi.User, fflapi.Aggregator, fflapi.Participant)\r\n try:\r\n fflapi.create_user(user_name, user_password, user_org, self.credentials_filename)\r\n except Exception as err:\r\n display('The user %s is already registered in pycloudmessenger platform.' 
%user_name, logger, verbose)\r\n context = ffl.Factory.context(config, self.credentials_filename, user_name, user_password, encoder=serializer.Base64Serializer)\r\n\r\n # Join task\r\n user = ffl.Factory.user(context)\r\n with user:\r\n try:\r\n result = user.join_task(task_name)\r\n display('Worker %s has joined task %s' %(user_name, task_name), logger, verbose)\r\n created = True\r\n except Exception as err:\r\n display('Error - %' %str(err).split(':')[1], logger, verbose)\r\n except Exception as err:\r\n print(err)\r\n display('Waiting for Master...', logger, verbose)\r\n time.sleep(5)\r\n pass\r\n\r\n # Create the comms object \r\n participant = ffl.Factory.participant(context, task_name=task_name)\r\n return participant", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)", "def remove_users(caller, role, *users):\r\n # can always remove self (at this layer)\r\n if not(len(users) == 1 and caller == users[0]):\r\n _check_caller_authority(caller, role)\r\n role.remove_users(*users)", "async def _clear_heist(self, ctx, user: discord.Member):\r\n author = ctx.message.author\r\n await self.thief.member_clear(user)\r\n await ctx.send(\"```{} administratively cleared {}```\".format(author.name, user.name))", "def use(self, user, expected_shape):\n if self._frozen:\n raise ValueError('cannot mutate frozen internal')\n self._check_shape(user, expected_shape)\n self._users.add(user)", "def test_teams_add_user_to_team_by_batch_v1(self):\n pass", "def process_user(user_name):\n \n try:\n user_pwd = getpwnam(user_name)\n except KeyError:\n print('Error: User {0} is not recognized.'.format(user_name))\n sys.exit(25)\n \n qstat = subprocess.getoutput(\"qstat -f\").split('-'.center(81, '-')) #81 -'s\n \n node_list = []\n pending_jobs = ''\n pending_search = '#'.center(79, '#') #denotes pending jobs in qstat 79 #'s\n #Weeding out nonessential nodes\n for node in qstat:\n if user_name in (node.split()):\n if pending_search in node: #Taking pending jobs out\n if \".crc.nd.edu\" in node:\n # This means its the last node. We must only accept up tp the pending jobs ONLY. Below we are doing that and taking out an\n # Additional newline by stripping it but adding one back in to keep formatting correct. (there were two instead of one).\n tempNode = (node[:node.find(pending_search)].rstrip())+'\\n'\n if user_name in tempNode:\n node_list.append(tempNode)\n pending_jobs += (node[node.find(pending_search):]) #reaping pending jobs\n else:\n node_list.append(node)\n \n final_list = []\n \n numU_jobs = 0 # Will hold the number of jobs attributed to the specified user\n numU_cores = 0 # The number of cores the user is currently using. 
Starts at 0 and counts up as jobs encountered.\n \n for host in node_list:\n # Grabbing the node's name in qstat and making a Node() instance of it\n temp_node = Node((host.split()[0]))\n host_used_cores = host.split()[2].split('/')[1]\n host_total_cores = host.split()[2].split('/')[2]\n # If within the first line of the node there is a 'd' at the end, disable it\n if len(host.split('\\n')[0]) == 6 and host.split()[5] == 'd':\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n else: \n temp_node.set_disabled_switch(False)\n \n temp_node.set_cores(host_total_cores, host_used_cores)\n # In qstat -F, qf:min_cpu . . . . is the last item before the jobs are listed, \n # 28 is how many char's that string is (don't want it)\n node_stat= host[host.find('qf:min_cpu_interval=00:05:00') + 28\\\n :host.find('\\n---------------------------------------------------------------------------------\\n')]\n \"\"\"Possibly do a host.split('\\n') and join the rest of 2 - end\"\"\"\n\n # There is always an extra '\\n' in here, so subtract 1 to get rid of it\n num_jobs = len(node_stat.split('\\n')) -1\n # If there are any jobs, parse them and gather info\n if num_jobs > 0:\n # Python is non-inclusive for the right operand, and we want to \n # skip another extra '\\n' so start at 1, and want to go num_jobs\n for i in range(1, num_jobs + 1):\n info = node_stat.split('\\n')[i].split()\n temp_job = Job(info[2], info[3], info[7])\n temp_job.set_id(info[0])\n temp_job.set_priority(info[1])\n temp_node.add_job(temp_job)\n if info[3] == user_name:\n numU_jobs += 1 #info[3] is the user-name of job, if == spec. user, increment user_jobs\n numU_cores += int(info[7]) # info[7] is the number of cores occupied by the user's job\n \n final_list.append(temp_node)\n \n pending_list = []\n if len(pending_jobs): #As long as the user has pending jobs T if len != 0\n p_lines = pending_jobs.split('\\n')\n pending_list.append((p_lines[0] + '\\n' + p_lines[1] + '\\n' + p_lines[2] + '\\n'))\n for i in range(3, len(p_lines)):\n if p_lines[i].find(user_name) != (-1):\n pending_list.append(p_lines[i])\n \n if len(sys.argv) == 4:\n if sys.argv[3] == '--details':\n print_detailed_user(final_list, pending_list, user_name, numU_jobs, numU_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[3])\n show_usage(23)\n else:\n print_short_user(final_list, pending_list, user_name, numU_jobs, numU_cores)" ]
[ "0.7682945", "0.6766191", "0.6598563", "0.62174505", "0.60659224", "0.5727882", "0.56629395", "0.5472015", "0.54637945", "0.54543555", "0.53860176", "0.53835195", "0.53404295", "0.5299187", "0.5298164", "0.5247873", "0.5234306", "0.5230454", "0.52074", "0.51751196", "0.5164737", "0.51634574", "0.51532143", "0.5134601", "0.51339114", "0.5132001", "0.51275533", "0.508116", "0.5071606", "0.5069833", "0.5065321", "0.5060675", "0.50530535", "0.5045693", "0.50407153", "0.50332326", "0.5024095", "0.5020234", "0.49717793", "0.4965035", "0.49620995", "0.49490118", "0.49269807", "0.49236828", "0.49229455", "0.491712", "0.49075958", "0.49007607", "0.48913598", "0.4889436", "0.48893628", "0.4886212", "0.48821607", "0.48794094", "0.48781472", "0.48758203", "0.48742473", "0.48685342", "0.4856828", "0.48538363", "0.48499694", "0.48493716", "0.4848282", "0.48309332", "0.48263967", "0.48202544", "0.481968", "0.48029372", "0.4782481", "0.4781575", "0.47786844", "0.47741792", "0.47730377", "0.4772547", "0.47691306", "0.47670898", "0.476554", "0.4764567", "0.47631484", "0.47575775", "0.47538152", "0.47482288", "0.47473806", "0.4746979", "0.47404206", "0.47404206", "0.4733771", "0.47328472", "0.47284603", "0.47222406", "0.47216922", "0.47184065", "0.47169608", "0.47138846", "0.47136176", "0.47136018", "0.4709067", "0.47045496", "0.4704501", "0.47013223" ]
0.7764532
0
Return True if the campaign authorizes the user to provide data to the campaign. This is the case if the user owns or works for the campaign. Otherwise, return False.
def authorizes(self, user):
    return self.owner == user or self.workers.filter(pk=user.id).exists()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authorized(self):\n return self.authorization is not None", "def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False", "def has_permission(self, request, view):\n user = request.user\n try:\n user.user_client\n return True\n except Exception:\n return False", "def is_authorized(self) -> bool:\n\t\tif \"access_token\" in session:\n\t\t\tif session.get(\"access_token\") is not None:\n\t\t\t\tif \"user\" in session:\n\t\t\t\t\treturn True\n\t\treturn False", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def has_permission(self):\n\n org_id = self.kwargs.get(\"pk1\", False)\n # i'm checking the org_id is truthy here since some org user views don't\n # have `/org/pk1/` for example the initial publish landing page.\n if org_id and not self.request.user.organisations.filter(id=org_id).exists():\n return False\n\n return super().has_permission() and self.request.user.is_org_user", "def user_has_access(self, user):\n if self.visibility == self.PUBLIC:\n return True\n elif self.visibility == self.PRIVATE and self.created_by == user:\n return True\n elif self.visibility in (self.ORG_ONLY, self.ORG_ONLY_NO_EXTERNAL):\n if user.external and self.visibility == self.ORG_ONLY_NO_EXTERNAL:\n return False\n elif self.organization.memberships.filter(user=user).count() >= 1:\n return True\n return False", "def is_authorized(self, request, obj=None):\r\n return True", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def has_ownership(self):\n user = self.request.user\n object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def _evaluate_has_auths(self, context, user, partner):\n # User fulfills initial authorization\n fulfills_auth = True\n # Checking if user has agreed to terms and conditions, otherwise\n # they shouldn't be authorized to access the collection\n user_agreed_terms = user.userprofile.terms_of_use\n\n if partner.authorization_method in [Partner.EMAIL, Partner.CODES, Partner.LINK]:\n partner_renew = True\n final_auth = fulfills_auth and user_agreed_terms and partner_renew\n else:\n final_auth = fulfills_auth and user_agreed_terms\n # User has authorizations, link to collection page\n context[\"has_auths\"] = final_auth\n\n return context", "def can_retrieve(self, user):\n return user.has_perm('agenda.can_see')", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def is_authorised_representative(self):\n if not hasattr(self, '_is_authorised_representative'):\n self._is_authorised_representative = hasattr(self, 'authorised_representative')\n\n return self._is_authorised_representative", "def user_can_access(self, user):\n return user.profile.certificates().filter(id=self.id).exists()", "def has_permission(self, request, view):\n if request.method == \"POST\":\n return not (request.user and is_authenticated(request.user))\n\n return request.user and is_authenticated(request.user)", "def has_permission(self, request, view):\n if request.user.is_authenticated():\n return True\n return False", "def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return author_id == self.author.id\n return False", "def has_permission(self, request, view):\n 
if request.user.merchant:\n return True\n\n return False", "def can_be_accessed(self, user):\n if self.shared_with_everyone:\n return True\n\n if self.user == user or self.users_allowed.filter(pk=user.pk).exists():\n return True\n\n for group in self.groups_allowed.all():\n if user.groups.filter(pk=group.pk).exists():\n return True\n\n return False", "def authorize(self, action, author_id=None):\n if Identity.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False", "def is_authenticated(self) -> bool:\n return self.requester.uuid is not None", "def is_authorized(self, request, object=None):\n if request.user.is_superuser:\n return True\n\n if not object:\n # if the object is unavailable, then leave the authorization to the apply_limits method\n # or to the over-rides of this method\n return True\n\n if request.method == 'GET' and hasattr(object, 'is_public'):\n is_public = False\n obj_visibility_attr = object.is_public\n if isinstance(obj_visibility_attr, types.FunctionType):\n is_public = obj_visibility_attr()\n elif isinstance(obj_visibility_attr, types.BooleanType):\n is_public = obj_visibility_attr\n else:\n raise ImproperlyConfigured('The is_public method of {obj} should either be a function or a boolean'.format(\n obj=object\n ))\n return is_public\n else:\n course = None\n if isinstance(object, Course):\n course = object\n elif hasattr(object, 'course'):\n course = getattr(object, 'course')\n elif hasattr(object, 'get_course') and isinstance(getattr(object, 'get_course'), types.FunctionType):\n course = getattr(object, 'get_course')()\n if course and course.instructor == request.user:\n return True\n else:\n return False", "def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False", "def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return self.admin_id == author_id\n return False", "def authorize(self, request, **kwargs):\n return True", "def can_view(self, user):\n if self.applicant == user:\n return True\n elif user.has_perm('funding.view_all_applications'):\n # Fundihg commitee\n return True\n elif user.has_perm('funding.make_application_decisions'):\n # Fundihg manager - should have the view permissions, but just in case\n return True\n return False", "def authorized(self, user):\n\n return self.admin.id.getUnhashed() == user.id.getUnhashed()", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_organizer(self, user):\n return self.main_organizer == user or self.team.filter(id=user.id).exists()", "def can_approve(self, user, **data):\n raise Return(False)", "def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))", "def view(self, user, action, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n # TODO check groups in request maybe ? 
dunno\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, action, *args)", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def has_view_permission(self, request, obj=None):\n user = request.user\n if obj and type(obj) is Client:\n return obj.is_user_in_sales_contacts_of_client(user) or obj.is_user_in_support_contacts_of_client(user)\n return True", "def is_valid(self):\n return self.user.is_authenticated", "def has_object_permission(self, request, view, obj):\n if request.user.is_authenticated:\n return obj.author != request.user\n return False", "def is_organizer(user):\n if user is None:\n return False\n url = app.config['USERS_ENDPOINT'] + 'authorization'\n response = requests.post(url, data={'user_id': user['user_id']})\n return response.json()['is_organizer'] is True", "def can_be_viewed_by(self,user):\n return True", "def is_authorized(self, user_view):\n authenticated = False\n for email in self.emails:\n if user_view[email]:\n self.valid_email = email\n authenticated = True\n return authenticated", "def has_object_permission(self, request, view, obj):\n\n # Users can always see and edit their own comments\n if obj.create_user == request.user:\n return True\n\n # And see but not edit those from their others in their own\n # organization\n if obj.create_user.organization == request.user.organization and \\\n request.method in permissions.SAFE_METHODS:\n return True\n\n # Government roles can always view comments\n # and can view or edit privileged comments with correct permission\n if request.user.is_government_user:\n # read\n if request.method in permissions.SAFE_METHODS:\n if obj.privileged_access:\n return request.user.has_perm('DOCUMENTS_VIEW')\n return True\n\n # write\n if request.method not in permissions.SAFE_METHODS:\n if obj.privileged_access:\n return request.user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW')\n return True\n\n # not authorized\n return False", "def can_accept(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can override / update decisions, if required\n # But we still need to have had a offer made\n if self.status in ['G', 'A', 'N']:\n return True\n # Applicants can only decide on granted applications\n if self.status == 'G':\n if self.applicant == user:\n return True\n return False", "def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n if self.kind == \"persona_profile\":\n p = Persona.request_persona(self.author_id)\n return p.id == author_id\n elif self.kind == \"group_profile\":\n # Everyone can update\n if action == \"update\":\n return True\n # Only author can insert and delete\n elif self.author_id == author_id:\n return True\n\n elif self.kind == \"index\":\n p = Persona.query.filter(Persona.index_id == self.id)\n return p.id == author_id\n return False", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)", "def has_object_permission(self, request, view, account):\n if request.user.is_authenticated():\n 
if request.user.is_staff:\n return True\n return account.username == request.user.username\n return False", "def is_post_authorised(self, params):\n if 'id' not in params or not params['id']:\n return True\n else:\n return self.is_get_authorised(params['id'])", "def has_object_permission(self, request, view, user):\n return user == request.user or request.user.is_superuser", "def has_object_permission(self, request, view, obj):\n if request.method in SAFE_METHODS:\n return True\n return obj.author == request.user", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def has_authority(self, user):\n UserModel = get_user_model()\n ADMINISTRATOR = UserModel.ROLE_MAP[UserModel.ADMINISTRATOR]\n result = True\n\n if not (user.is_superuser or user.role == ADMINISTRATOR):\n try:\n self.memberships.get(user=user)\n except Membership.DoesNotExist:\n result = False\n\n return result", "def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False", "def authorize(self, action, author_id=None):\n return False", "def current_user_has_permission(query: 'Query') -> bool:\n return acl.current_user_has_permission(data_set_acl_resources[query.data_set.id])", "def user_has_permission(self, id: int, user: User) -> bool:\n return self.get_queryset().filter(pk=id).filter_for_user(user).exists()", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def has_permission(self, request):\n\t\treturn request.user.is_active", "def is_available(self):\n if not is_optional_task_permissions_revoking_enabled():\n return False\n if not ITask.providedBy(self.context):\n return False\n if not self.context.get_review_state() in FINAL_TASK_STATES:\n return False\n if api.user.has_permission('cmf.ManagePortal'):\n return True\n issuer = self.context.get_issuer_actor()\n return issuer.identifier == api.user.get_current().id", "async def permits(self, identity, permission, context=None):\r\n return self.authorized_userid(identity) is not None", "def has_object_permission(self, request, view, obj):\n # if the user is trying to retrieve to create a item.. it will return true\n if request.method in permissions.SAFE_METHODS:\n return True\n # check if the user is trying to don't do a SAFE_METHODS, put,patch,delete and if the feed owner is doing it or another different user.. 
and it will return true if match or false if not\n return obj.user_profile.id == request.user.id", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def can_view(self, user):\r\n return True", "def has_permission(self, request, view):\n\n # Fallback to has_object_permission unless it's a POST\n if request.method != 'POST':\n return True\n\n # Need this information to make a decision\n if 'privileged_access' not in request.data and \\\n 'document' in request.data:\n return False\n\n document = request.data['document']\n privileged_access = request.data['privileged_access']\n\n found = Document.objects.filter(id=document).first()\n\n if not found:\n return False\n\n if found.create_user.organization != request.user.organization and \\\n not request.user.is_government_user:\n return False\n\n return DocumentCommentPermissions.user_can_comment(\n request.user,\n found,\n privileged_access\n )", "def has_access(self):\n self._has_access = False\n\n if self.read_contact_info is not None:\n if self.read_contact_info['USERNAME'] == consts.USERNAME or \\\n consts.USERNAME in consts.REGISTEREDADMINS:\n self._has_access = True\n\n return self._has_access", "def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author", "def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author", "def is_organization(self):\n return self.user_id is None", "def authorize(self, username):\n user = User(self).get(username)\n if not user:\n raise DatabaseError(\"User does not exist.\")\n return user['Permissions'] == 'MANAGER'", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def can_be_viewed_by(self,user):\n\n # check whether everyone is allowed to view this. 
Anymous user is the only member of group\n # 'everyone' for which permissions can be set\n anonymousUser = get_anonymous_user()\n\n if anonymousUser.has_perm(\"view_ComicSiteModel\",self):\n return True\n else:\n # if not everyone has access, check whether given user has permissions\n return user.has_perm(\"view_ComicSiteModel\",self)", "def has_permission(self, request, view):\n\n is_authenticated = request.user.is_authenticated()\n safe_request = request.method in permissions.SAFE_METHODS\n return is_authenticated and safe_request", "def requires_auth(self):\n return True", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False", "def can_edit(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can update things later, if required\n return True\n # Applicants can only edit the application before the final review step\n if self.status in ('S', 'U'):\n if self.applicant == user:\n return True\n return False", "def get_viewable(self, user):\n if user.get('role') in ('admin', 'manager', 'engineer'):\n return True\n return user['name'] == self.doc.get('customer')", "def is_organizer(self, event):\n return event.user_id == self.id", "def has_permission(self, request, view):\n classroom_id = view.get_related_classroom_id()\n return models.OrganizationAccess.objects.filter(\n **self.role_filter,\n organization__playlists__classrooms__id=classroom_id,\n user__id=request.user.id,\n ).exists()", "def has_permission(self, request, view):\n usuario = request.user\n return str(usuario) == \"AnonymousUser\"", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def authorized(self):\n\n return PyFunceble.cli.facility.CredentialLoader.is_already_loaded()", "def view(self, user, notificationrequested, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor and user.group == notificationrequested.group:\n return True\n\n return self.admin_permission(user, notificationrequested, *args)", "def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False", "def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def is_authorized():\n return CentralStorageClient.token is not None", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, 
config.SESSION_EXPIRES)", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def has_object_permission(self, request, view, obj):\n\n try:\n Contact.objects.get(user=request.user)\n\n except Contact.DoesNotExist:\n return False\n\n return True", "def with_grant(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"with_grant\")", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def consent_check():\n\n auth = current.auth\n\n person_id = auth.s3_logged_in_person()\n if not person_id:\n return None\n\n has_role = auth.s3_has_role\n if has_role(\"ADMIN\"):\n required = None\n elif has_role(\"VOUCHER_ISSUER\"):\n required = [\"STORE\", \"RULES_ISS\"]\n else:\n required = None\n\n if required:\n consent = current.s3db.auth_Consent(required)\n pending = consent.pending_responses(person_id)\n else:\n pending = None\n\n return pending", "def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if request.user.profile.role == UserRole.CLIENT and obj.owner != request.user:\n return False\n if request.user.profile.role == UserRole.EXECUTOR and obj.executor != request.user:\n return False\n return True" ]
[ "0.71512544", "0.6903803", "0.6882612", "0.6824171", "0.6759655", "0.671842", "0.67081565", "0.6686441", "0.66475916", "0.6640897", "0.6619891", "0.66168815", "0.6604757", "0.6577445", "0.65759784", "0.65525156", "0.65457594", "0.65178764", "0.6516392", "0.65048504", "0.6494996", "0.647402", "0.6460558", "0.64525473", "0.6438828", "0.6427834", "0.64154124", "0.64137524", "0.6401163", "0.63827974", "0.63827974", "0.63827974", "0.6380446", "0.63795644", "0.63765246", "0.6365286", "0.6352808", "0.6345689", "0.63387936", "0.6323501", "0.63177866", "0.62876153", "0.6280434", "0.6278163", "0.6270238", "0.6269757", "0.62682503", "0.62682503", "0.62643105", "0.62624115", "0.6259307", "0.6258833", "0.6256862", "0.6254743", "0.6250495", "0.6244801", "0.62433684", "0.6242202", "0.624216", "0.6233676", "0.62284786", "0.6221698", "0.62208384", "0.62155503", "0.62054485", "0.6203002", "0.61990476", "0.61976403", "0.6193683", "0.6193683", "0.6187405", "0.6187349", "0.618071", "0.618071", "0.618071", "0.6179503", "0.6164978", "0.6148491", "0.6139987", "0.61303717", "0.6121355", "0.61209935", "0.6120677", "0.61137867", "0.6112592", "0.61123574", "0.61075145", "0.6107239", "0.6100416", "0.60969657", "0.6096592", "0.6084731", "0.60843676", "0.6082127", "0.60662097", "0.6065897", "0.6064566", "0.60614854", "0.60614854", "0.60492194" ]
0.68715215
3
Return campaign workers who do not already own a campaign.
def getOwnerOptions(self):
    # TODO
    return self.workers.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crew_needing_reports(self):\n reports = self.ccreport_set.all().values_list('crew_chief', flat=True)\n return self.ccinstances.exclude(crew_chief__in=reports)", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def _find_inaccessible_workflows(self, prune_nodes):\n\n referrer_map = self._build_referrer_map()\n\n removed_referring_nodes = frozenset(\n node for referrers in referrer_map.values()\n for node in referrers\n if node in prune_nodes)\n\n return frozenset(\n workflow for (workflow, referrers) in six.iteritems(referrer_map)\n if all(referrer in removed_referring_nodes for referrer in referrers))", "def workers(self):\n return self.worker_list", "def get_blocked_workers(self) -> List[dict]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT worker_id FROM workers\n WHERE is_blocked = ?\n \"\"\",\n (True,),\n )\n results = c.fetchall()\n return results", "def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns", "def get_joined_buddies(self):\n return self._buddies.values()", "def get_waiting_worker(self):\r\n if self.item in (Item.A, Item.B):\r\n for worker in self.workers:\r\n if not worker.working and self.item not in worker.items:\r\n return worker", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_all_worker_prior(self):\r\n return self._workers_prior", "def get_ready_worker(self):\r\n for worker in self.workers:\r\n if Item.P in worker.items:\r\n return worker", "def give_workers_list(self):\n return self._workers", "def get_all_candidates_in_queue(self):\n all_queued_ids = [t.gaid for t in self.c.select(queued=1)]\n all_relaxed_ids = [t.gaid for t in self.c.select(relaxed=1)]\n\n in_queue = [qid for qid in all_queued_ids\n if qid not in all_relaxed_ids]\n return in_queue", "def common_cities_excluding(members, member_to_exclude, city_sets):\n\n cities = common_cities(members, city_sets)\n cities = [x for x in cities\n if x not in city_sets[member_to_exclude]]\n\n return cities", "def get_worker_id_list(self):\r\n return self._workers_id", "def customers_presence(self):\n return self._customers_presence", "def availableWorkersDuringPeriod(self, begin, end):\n availableWorkers = []\n for worker in self._workers:\n if worker.availableInPeriod(begin, end):\n availableWorkers.append(worker)\n return availableWorkers", "def getMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 1):\n r.append(p)\n return r", "def find_not_in(people, filter):\n new_people = set()\n for person in people.intersection(filter):\n new_people.update(set(person.parents) - filter)\n if len(person.parents) < 2:\n new_people.add(\"[Unknown parent(s) of %r]\" % person)\n return new_people", "def _find_memberless_constituencies(self):\n constituencies = Constituency.objects.filter(\n end=None, # Constituency currently exists/is not historical\n mp=None,\n )\n\n self.stdout('Constituencies with missing MP:')\n for constituency in constituencies:\n self.stdout(f'[{constituency.parliamentdotuk}] 
{constituency.name} {constituency.start}')", "def members(self):\r\n return self.exclude(contributor__username=u'anonymous')", "def getNeutralCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 0):\n r.append(p)\n return r", "def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]", "def workers(self):\n return list(self._workers.keys())", "def wait_for_workers(self):\r\n stop = False\r\n workers = self.aggregator.get_participants()\r\n\r\n while not stop: \r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(1)\r\n participant = resp.notification['participant']\r\n workers.append(participant)\r\n print('Task %s: participant %s has joined' % (self.task_name, participant))\r\n except Exception as err:\r\n print(\"Task %s: joined %d participants out of %d\" % (self.task_name, len(workers), self.Nworkers))\r\n #print(err)\r\n #print('Check here: error')\r\n #import code\r\n #code.interact(local=locals())\r\n pass\r\n\r\n if len(workers) == self.Nworkers:\r\n stop = True\r\n\r\n workers = self.aggregator.get_participants()\r\n return list(workers.keys())", "def getAvailableWorkers(day, labDays,workers):\n availableWorkers = []\n for worker in workers:\n if labDays[day] in worker.getWorkDays():\n availableWorkers.append(worker)\n return availableWorkers", "def get_all_game_players_but_indicated(self, user):\n return GamePlayer.objects.filter(Q(game=self) & ~Q(player=user))", "def load_coworkers(args):\n if os.path.isfile(args.coworkers):\n with open(args.coworkers, 'r') as c:\n list_coworkers = json.load(c)\n else:\n list_coworkers = []\n coworkers = []\n for coworker_set in list_coworkers:\n for pair in itertools.combinations(coworker_set, 2):\n # print(\"pair is {}\".format(pair))\n coworkers.append(set(pair))\n return coworkers", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def get_available_invitees(self):\n return User.objects.exclude(pk=self.request.user.pk)", "def workers_status(self):\n workers = []\n for agent in self.agents_status():\n workers += agent['workers']\n return workers", "def __get_ids_of_all_unrelaxed_candidates__(self):\n\n all_unrelaxed_ids = set([t.gaid for t in self.c.select(relaxed=0)])\n all_relaxed_ids = set([t.gaid for t in self.c.select(relaxed=1)])\n all_queued_ids = set([t.gaid for t in self.c.select(queued=1)])\n\n actually_unrelaxed = [gaid for gaid in all_unrelaxed_ids\n if (gaid not in all_relaxed_ids and\n gaid not in all_queued_ids)]\n\n return actually_unrelaxed", "def get_work_subscriptions(self, use_threading=False):\r\n \r\n subs = self.get_subscriptions(use_threading)\r\n return list(filter(lambda obj: isinstance(obj, Work), subs))", "def getWorkers(self):\n return self.workers", "def get_absent_client_names(self, clients):\n return list(set(self.get_all_client_names()) - set(clients))", "def breakingteam_set_competing(self):\n return self.breakingteam_set.filter(break_rank__isnull=False)", "def find_campaigns_as_admin(call_profile):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n user = call_profile.user\n local_group = find_local_group_by_user(user)\n if local_group is not None and has_call_permission_for_local_group(\n user,\n local_group,\n 
'calls.change_callcampaign'\n ):\n return local_group.callcampaign_set.all().order_by(\n '-date_created'\n )\n\n \"\"\"Otherwise return empty list\"\"\"\n return CallCampaign.objects.none()", "def get_worker_addresses(self) -> List[str]:", "def GetExclusiveResources(self):\n res = set(self.exclusive_resources)\n if self.parent:\n res |= self.parent.GetExclusiveResources()\n return res", "def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))", "def check_rooms(self, exclude=[]):\n stmt = Session.query(Lesson.room, Lesson.day, Lesson.order,\n Lesson.schedule_id)\n stmt = stmt.group_by(Lesson.room, Lesson.order, Lesson.day, Lesson.schedule_id)\n stmt = stmt.having(func.count(Lesson.room)>1)\n stmt = stmt.filter(not_(Lesson.room.in_(exclude)))\n stmt = stmt.subquery()\n q = Session.query(Lesson).join((stmt, and_(\n Lesson.room == stmt.c.room,\n Lesson.day == stmt.c.day,\n Lesson.order == stmt.c.order,\n Lesson.schedule_id == stmt.c.schedule_id)))\n q = q.order_by(Lesson.day, Lesson.order, Lesson.room)\n\n conflicts = q.all()\n if len(conflicts) == 0:\n return []\n rooms = [[conflicts.pop(0), conflicts.pop(0)]]\n for c in conflicts:\n prev = rooms[-1][-1]\n if c.room == prev.room and c.day == prev.day and c.order == \\\n prev.order and c.schedule_id == prev.schedule_id:\n rooms[-1].append(c)\n else:\n rooms.append([c])\n return rooms", "def get_available_cops():\n allIncidents = Incident.get_all()\n cops = []\n \n for i in allIncidents:\n if(inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem):\n cops.append(i['operations_center']['id'])\n \n allReports = RelatoDeSituacao.get_all()\n \n for r in allReports:\n if (\n inicioAmostragem <= r.data_hora and \n r.data_hora <=terminoAmostragem and\n 'cop' in r.relator and # todos tem que ter o COP\n 'id' in r.relator['cop'] # todos tem que ter o id \n ):\n cops.append(r.relator['cop']['id'])\n \n return set(cops)", "def find_users_missing_standup():\n token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']\n sc = SlackClient(token)\n channels = sc.api_call('channels.list')['channels']\n standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()\n members = standup['members']\n messages = sc.api_call('channels.history', channel=standup['id'])['messages']\n messages_within_last_10_hours = filter(check_in_date_range, messages) \n users_posted = (i['user'] for i in messages_within_last_10_hours if\n 'user' in i.keys())\n difference = set(members).difference(users_posted)\n return difference", "def to_exclude(self):\n reporting_period = self.message_number - 3\n one_day_ago = now() - datetime.timedelta(hours=24)\n\n return PollingReport.objects.filter(\n period_number=reporting_period,\n creation_date__gte=one_day_ago,\n ).values_list('phone_number', flat=True)", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def get_partner_requests(request):\n try:\n partner_requests = PartnerRequest.objects.filter(to_user=request.user)\n except:\n partner_requests = []\n\n return partner_requests", "def get_workers(actor_id):\n try:\n Actor.from_db(actors_store[actor_id])\n except KeyError:\n raise WorkerException(\"actor not found: {}'\".format(actor_id))\n try:\n workers = 
json.loads(workers_store[actor_id])\n except KeyError:\n return []\n return workers", "def excluded(cls):\n return []", "def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])", "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def blacklisted_emails(self):\n blacklisted_emails = []\n for stream in self.related_streams():\n blacklisted_emails = blacklisted_emails + stream.blacklisted_emails()\n return blacklisted_emails", "def get_dead(self):\n return ReadingSet(set([x for x in self._set if not x.alive]))", "def get_already_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(\n seller_id=seller_id, status=2\n )", "def channels(self):\n return [channel for channel in self.client.channels if channel.has_nick(self)]", "def exclude_foreign_resources(returned_resource_set, expected_resource_set):\n expected_owners = {res.owner for res in expected_resource_set}\n return [\n res for res in returned_resource_set\n if res.owner in expected_owners\n ]", "def exclude_words(self, words):\n idcs = []\n for i in range(len(self)):\n if not self.transcript(i) in words:\n idcs.append(i)\n subset = self.sub_set(idcs)\n return subset", "def queue_people(_):\n today = datetime.now()\n today_date = datetime(today.year, today.month, today.day)\n not_checked_today = Tracker.select().where(\n (\n (Tracker.friends_last_retrieved.is_null())\n | (Tracker.friends_last_retrieved < today_date)\n )\n & (Tracker.participant == True)\n )\n return not_checked_today.count() > 0", "def _remote_worker_ids_for_metrics(self) -> List[int]:\n return self.workers.healthy_worker_ids()", "def collect_coexist(self):\r\n co_list = []\r\n ner_dictKeyList = list(self.ner_dict.keys())\r\n for words in self.ner_sents:\r\n co_ners = set(ner_dictKeyList).intersection(set(words))\r\n co_info = self.combination(list(co_ners))\r\n co_list += co_info\r\n if not co_list:\r\n return []\r\n return {i[0]: i[1] for i in Counter(co_list).most_common()}", "def get_targeted_campaign_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n campaign.id,\n campaign_extension_setting.extension_feed_items\n FROM campaign_extension_setting\n WHERE\n campaign_extension_setting.extension_type = 'PROMOTION'\n AND campaign.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n campaign_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.campaign_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching campaign with ID: '{row.campaign.id}'\")\n campaign_ids.append(row.campaign.id)\n\n return campaign_ids", "def get_free_sessions(self):\n return [session for session in self.sessions if not session.is_booked()]", "def get_unbroken_instances(self):\n return self._get_cond_instance(cond=0)", "def _filter_optouts_from_recipients(to_list, course_id):\r\n optouts = Optout.objects.filter(\r\n course_id=course_id,\r\n user__in=[i['pk'] for i in to_list]\r\n ).values_list('user__email', flat=True)\r\n optouts = set(optouts)\r\n # Only count the num_optout for the first time the optouts are calculated.\r\n # We assume that the number will not change on retries, and so we don't need\r\n # to calculate it each time.\r\n num_optout = len(optouts)\r\n to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]\r\n return 
to_list, num_optout", "def related_reports(self):\n return Report.objects.exclude(contact_email__isnull=True).filter(contact_email__iexact=self.contact_email).order_by('status', '-create_date')[:1000]", "async def get_participants(self):\n for i in range(self.num):\n def check(m):\n if m.content.lower().strip() == \"i\" and m.author not in self.participants:\n return True\n\n return False\n\n # Wait with a timeout of 2 minutes and check each message with check(m)\n reply = await client.wait_for_message(timeout=120, channel=self.channel, check=check)\n\n if reply: # A user replied with a valid check\n asyncio.ensure_future(\n client.say(self.message,\n \"{} has entered! `{}/{}`. Type `I` to join!\".format(\n reply.author.mention, i + 1, self.num))\n )\n self.participants.append(reply.author)\n\n # Remove the message if bot has permissions\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n else:\n # At this point we got no reply in time and thus, gathering participants failed\n await client.say(self.message, \"**The {} game failed to gather {} participants.**\".format(\n self.name, self.num))\n started.pop(started.index(self.channel.id))\n\n return False\n\n return True", "def active_comics():\n # FUTURE: Should not include ended comics?\n return Comic.objects.exclude(active=False)", "def init_workers(self):\n worker_list = []\n for number in range(0, self.staff):\n worker = self.get_worker()\n worker.job_id = self.job_id\n worker.number = number + 1\n worker_list.append(worker)\n self.worker_list = worker_list\n return worker_list", "def get_running_condor_jobs(self):\n return Utils.condor_q(selection_pairs=[[\"taskname\",self.unique_name]], extra_columns=[\"jobnum\"])", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "def get_all_exclusives(self):\r\n if self.exclusives is None:\r\n self._propagate_exclusives()\r\n return self.exclusives", "def __calculate_emptiness (self, scheduling_unit):\n difference = 0\n \n for date in self.workers.get_dates ( ):\n for person in self.mapper.get_all_people ( ):\n for turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if person.is_scheduled_exact (scheduling_unit, turnus, date):\n difference += 1\n \n needed = self.workers.get_workers_by_type (date, scheduling_unit, turnus)\n \n difference -= needed\n return difference", "def _filter_cid(self, cids):\n return [cid for cid in cids if cid is not None]", "def get_worker_list(self):\n return [{WORKER_ID_KEY: worker_id, REGISTRATION_STATUS_KEY: value}\n for worker_id, value in self.registered_workers.items()]", "def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names", "def candidates(self):\n return self.related_users.all()", "def getHostileCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() >=2):\n r.append(p)\n return r", "def cohorted_discussions(self):\r\n config = self.cohort_config\r\n if config is None:\r\n return set()\r\n\r\n return set(config.get(\"cohorted_discussions\", []))", "def get_invalid(cls, instance):\n\n others = [i for i in list(instance.context) if\n i is not instance and\n 
set(cls.families) & get_families(i)]\n if not others:\n return []\n\n other_ids = defaultdict(list)\n for other in others:\n for _id, members in get_instance_node_ids(other).items():\n other_ids[_id].extend(members)\n\n # Take only the ids with more than one member\n invalid = list()\n ids = get_instance_node_ids(instance)\n for _id, members in ids.iteritems():\n if _id in other_ids:\n cls.log.error(\"ID found on multiple nodes: '%s'\" % members)\n cls.log.debug(\"Clashes with: %s\" % (other_ids[_id],))\n invalid.extend(members)\n\n return invalid", "def get_pending_instances(self):\n return [instance for instance in self.instances.itervalues()\n if InstanceState.REQUESTED <= instance.state < InstanceState.RUNNING]", "def findUnoccupied( board, occupation):\n return [ j for j in xrange(len(board.positions))\n if not occupation.has_key(j) ]", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def likely_regressions(self):\n return set([label for label, count in self.regressions.items() if count == 0])", "def find_workers(\n self, worker_name: Optional[str] = None, provider_type: Optional[str] = None\n ) -> List[Worker]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from workers\n WHERE (?1 IS NULL OR worker_name = ?1)\n AND (?2 IS NULL OR provider_type = ?2)\n \"\"\",\n (worker_name, provider_type),\n )\n rows = c.fetchall()\n return [\n Worker(self, str(r[\"worker_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def objects_in_use(self):\n return set()", "def cancel_node(self, node: DOMNode) -> list[Worker]:\n workers = [worker for worker in self._workers if worker.node == node]\n for worker in workers:\n worker.cancel()\n return workers", "def get_all_non_motif_assoc_cur_site_insts():\n return get_all_cur_site_insts().filter(site_type=\"non_motif_associated\")", "def _find_homeless_mps(self):\n mps = Person.objects.filter(\n active=True,\n house__name=HOUSE_OF_COMMONS,\n constituency=None,\n )\n\n self.stdout('MPs with missing constituency:')\n for mp in mps:\n self.stdout(f' [{mp.parliamentdotuk}] {mp.name} has no constituency')", "def teammates(self):\n return [\n p for p in self.roster.participants\n if p.participant_id != self.participant_id\n ]", "def _get_private_team_ids_to_exclude(self, course_module):\n if has_access(self.request.user, 'staff', course_module.id):\n return set()\n\n private_teamset_ids = [ts.teamset_id for ts in course_module.teamsets if ts.is_private_managed]\n excluded_team_ids = CourseTeam.objects.filter(\n course_id=course_module.id,\n topic_id__in=private_teamset_ids\n ).exclude(\n membership__user=self.request.user\n ).values_list('team_id', flat=True)\n return set(excluded_team_ids)", "def find_conclusions(self):\n conc = []\n self.rule.right.visit_find_premises(conc)\n self.conclusions = conc", "def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)", "def avoids(w, forbidden):\n\treturn set(w).isdisjoint(set(forbidden))", "def one_off(self):\n return self.filter(total_billing_cycles__isnull=False)", "def get_subscriptions_to_self(self):\n return self._roster.get_my_subscribers()\n return self._roster.get_my_subscribers()", "def 
wait_for_workers_to_join(self, display, logger, verbose=False):\r\n with self.aggregator:\r\n workers = self.aggregator.get_participants()\r\n\r\n if workers:\r\n if len(workers) == self.Nworkers:\r\n display('Participants have already joined', logger, verbose)\r\n return workers\r\n\r\n display('Waiting for workers to join (%d of %d present)' %(len(workers), self.Nworkers), logger, verbose)\r\n\r\n ready = False\r\n while not ready:\r\n try:\r\n with self.aggregator:\r\n resp = self.aggregator.receive(3000)\r\n participant = resp.notification['participant']\r\n display('Participant %s joined' %participant, logger, verbose)\r\n except Exception as err:\r\n raise err\r\n\r\n if len(workers) == self.Nworkers:\r\n ready = True\r\n\r\n return workers", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def test_get_non_existent_campaigns_returns_empty_list(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"campaigns\": []})", "def campaign(self):\n if self.campaign_count > self.campaign_max_count and self.__reserved_members:\n self.__expedition.append(self.__reserved_members.pop())\n self.campaign_count = 0\n else:\n self.campaign_count += 1", "def campaign(self):\n if self.campaign_count > self.campaign_max_count and self.__reserved_members:\n self.__expedition.append(self.__reserved_members.pop())\n self.campaign_count = 0\n else:\n self.campaign_count += 1" ]
[ "0.6192415", "0.6118456", "0.5496211", "0.5446709", "0.5353705", "0.5315112", "0.5248027", "0.5240531", "0.5227237", "0.5211369", "0.5207625", "0.5090434", "0.50866437", "0.507655", "0.5067131", "0.50648826", "0.5036302", "0.5027716", "0.50187", "0.50136185", "0.5004204", "0.4992099", "0.49854133", "0.49833414", "0.498121", "0.4966103", "0.49598026", "0.4919331", "0.4915649", "0.4905241", "0.48899695", "0.48688728", "0.48670796", "0.4843081", "0.48138875", "0.48108873", "0.4808317", "0.4806371", "0.4801306", "0.48011175", "0.47875118", "0.4779248", "0.47756466", "0.47710383", "0.47678292", "0.47673413", "0.4766626", "0.47595122", "0.4754721", "0.4754041", "0.47419733", "0.47392735", "0.4729103", "0.47241533", "0.47233924", "0.47211286", "0.47091034", "0.47047314", "0.46861354", "0.46570456", "0.46539584", "0.46500868", "0.46496642", "0.46483943", "0.46437493", "0.46383372", "0.46288574", "0.46281123", "0.46228063", "0.4621167", "0.46118185", "0.4611051", "0.46049908", "0.45927322", "0.45862988", "0.45847145", "0.45835862", "0.4576609", "0.45719036", "0.45704225", "0.45702755", "0.45665854", "0.45618552", "0.45614752", "0.45580208", "0.4543308", "0.45331848", "0.45201167", "0.45192555", "0.45121315", "0.45079896", "0.4507915", "0.4507057", "0.45027757", "0.45003116", "0.44827658", "0.44822195", "0.44726235", "0.44674388", "0.44674388" ]
0.4997855
21
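Illustrative note on the getOwnerOptions record above: the stored document is a TODO stub that returns every worker, while its query asks for workers who do not already own a campaign. A minimal sketch of that exclusion, assuming a Django Campaign model whose owner ForeignKey exposes the reverse accessor 'owned_campaigns' (an assumed related_name, not taken from the record):

def getOwnerOptions(self):
    # Sketch only: keep workers who own no campaign.
    # 'owned_campaigns' is an assumed related_name and is not part of the record.
    return self.workers.exclude(owned_campaigns__isnull=False)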
Return active constituent voters who have not been contacted since the last election and have not been served to a supporter in the last two days. Don't limit the size of the result set here; let APIs do that.
def getVotersToContact(self):
    two_days_ago = date.today() - timedelta(2)
    year_ago = date.today() - timedelta(365)
    return self.voters.filter(
        Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),
        Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),
        campaignstovoters__is_active=True,
        is_active=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def getVotersToDial(self):\n return self.getVotersToContact().exclude(\n (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)),\n (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))", "def getUnconfirmedVolunteers(self, query):\n query = Volunteer.query(Volunteer.confirmed == False)\n return query", "async def get_non_voters(self, guild: discord.Guild, uservotes: dict):\n\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for member in guild.members:\n if player_role in member.roles:\n userkey = f\"{member.name}#{member.discriminator}\"\n if userkey not in uservotes:\n uservotes[userkey] = \"No vote\"\n\n return uservotes", "def remove_expired(cls):\n max_trailers = 10\n current_trailers = cls.get_all(collection='approved_trailers')\n current_trailers.reverse()\n queued_trailers = cls.get_all(collection='queued_trailers')\n\n if len(current_trailers) >= max_trailers and len(queued_trailers) > 0:\n for trailer in current_trailers:\n time_active = trailer.date.timetuple().tm_yday - datetime.now().timetuple().tm_yday\n if time_active >= 14 and len(queued_trailers) > 0:\n cls.move(trailer, 'approved_trailers', 'archived_trailers')\n cls.move(queued_trailers[0], 'queued_trailers', 'approved_trailers')", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def find_outdated_game_dates(self):\n state = 'preview.gameData.status.detailedState'\n old = self._db.Games.find({state : {'$nin' : ['Final']}})\n return set([x['date'] for x in old])", "def find_users_missing_standup():\n token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']\n sc = SlackClient(token)\n channels = sc.api_call('channels.list')['channels']\n standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()\n members = standup['members']\n messages = sc.api_call('channels.history', channel=standup['id'])['messages']\n messages_within_last_10_hours = filter(check_in_date_range, messages) \n users_posted = (i['user'] for i in messages_within_last_10_hours if\n 'user' in i.keys())\n difference = set(members).difference(users_posted)\n return difference", "def get_voters():", "def get_voters():", "def get_current_visitors():\n return Visitor.objects.filter(acknowledged=False).order_by(\"arrival_time\")", "def get_expired_invoices(self):\n return self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n )", "def restricted_teams(self, user):\n return []", "def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])", "def get_available_invitees(self):\n return User.objects.exclude(pk=self.request.user.pk)", "def upcoming_meetups_query(cls):\r\n # Warning, this 
timestamp inequality is actually done as a string comparison\r\n # in the db for some reason. BUT, since epoch seconds won't get another digit\r\n # for another 275 years, we're good for now...\r\n return Meetup._query(Meetup.c.timestamp > time.time() - g.meetup_grace_period, data=True, sort='_date')", "def has_victim(self):\n # first-party\n from tcex.api.tc.v3.victims.victim_filter import VictimFilter\n\n victims = VictimFilter(Tql())\n self._tql.add_filter('hasVictim', TqlOperator.EQ, victims, TqlType.SUB_QUERY)\n return victims", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]", "def get_recent_matches(self, limit=None):\n matches = (Match.objects\n .filter(Q(winner=self) | Q(loser=self))\n .order_by('-played_time'))\n return matches[:limit or MATCH_RESULT_LIMIT]", "def get_active_deposits():\n skip = 0\n graphql_client = GraphQLClient('https://api.thegraph.com/subgraphs/name/miracle2k/all-the-keeps')\n members = GqlQuery().fields(['address']).query('members').generate()\n bondedECDSAKeep = GqlQuery().fields(['totalBondAmount', members]).query('bondedECDSAKeep').generate()\n deposits_query = GqlQuery().fields(['id', 'lotSizeSatoshis', bondedECDSAKeep]).query('deposits', input={\n \"first: 1000 skip: $skip where\": \"{currentState: ACTIVE}\"}).operation('query', name='GetActiveDeposits',\n input={\"$skip\": \"Int!\"}).generate()\n\n params = {\"skip\": skip}\n result = jsonpickle.decode(graphql_client.execute(deposits_query, variables=params))[\"data\"][\"deposits\"]\n deposits = result\n while len(result) == 1000:\n params[\"skip\"] += 1000\n result = jsonpickle.decode(graphql_client.execute(deposits_query, variables=params))[\"data\"][\"deposits\"]\n deposits += result\n return deposits", "def get_recent_matches(self, limit=None):\n return (Match.objects\n .filter(company=self)\n .order_by('-played_time')[:limit or MATCH_RESULT_LIMIT]\n )", "def get_users_with_missing_data() -> Set[str]:\n users_data = {user[\"_source\"][\"VENDOR_UUID\"] for user in Handlers.elastic_handler.get_all_today_data(\n _type=\"status\",\n date_start=dt.date.today() + dt.timedelta(days=1),\n date_end=dt.date.today() + dt.timedelta(days=7),\n )}\n\n all_tokens = Handlers.token_handler.get_all_today_data(_type=\"token\")\n to_dict = {dict_[\"_source\"][\"VENDOR_UUID\"]: dict_[\"_source\"][\"TOKEN\"] for dict_ in all_tokens}\n\n return set(dict(filter(lambda item_tup: item_tup[0] not in users_data, to_dict.items())).values())", "def eligible(cls, lost_count):\n return cls.base_query().filter(lost=lost_count)", "def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())", "def getAvailableTimeslots(self, allTimeslots) -> [Timeslot]:\r\n # List with all Timeslots any of the Teachers is not available at.\r\n notAvailableTimeslotsTeachers = flatMap(lambda t: t.not_available_timeslots, self.teachers)\r\n # notAvailableTimeslotsTeachers = [item for sublist in map(lambda t: t.not_available_timeslots, self.teachers) for item in sublist]\r\n # If Lesson can only take place on forenoon, create list with all afternoon timeslots.\r\n if self.course.only_forenoon:\r\n notAvailableTimeslotsForenoon = list(filter(lambda t: t.number not in Timeslot.getForenoonTimeslotNumbers(), allTimeslots))\r\n else:\r\n notAvailableTimeslotsForenoon = []\r\n\r\n timeslots = [x for x in allTimeslots if x not in (notAvailableTimeslotsTeachers + notAvailableTimeslotsForenoon)]\r\n if 
self.available_timeslots: # If list is not empty. Else no restrictions.\r\n timeslots = [x for x in timeslots if x in self.available_timeslots]\r\n\r\n return timeslots", "def newbies(self):\n newness = datetime.now() - timedelta(days=self.DAYS_FOR_NEWBIE_CHECK)\n newbies = (\n self.valid_choices.filter(\n Q(roster__accounthistory__start_date__gte=newness)\n & Q(roster__accounthistory__end_date__isnull=True)\n )\n .distinct()\n .order_by(\"db_key\")\n )\n return list(newbies)", "def get_unresolved_future_prices():\n #TODO this is inefficient, hits the db A LOT\n latest_bitcoin_time = get_latest_bitcoin_time()\n\n potentially_unresolved = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time\n #TODO would like a __gt condition somehow\n )\n\n unresolved_future_prices = []\n for p in potentially_unresolved:\n has_no_returned_amounts_from_before_window = Returned_Amount.objects.filter(to_prediction__future_price=p, from_received_amount__time__lt=F('from_received_amount__prediction__future_price__time_window_closes')).count() == 0\n if has_no_returned_amounts_from_before_window:\n has_received_amounts_from_before_window = Received_Amount.objects.filter(prediction__future_price=p, time__lt=F('prediction__future_price__time_window_closes')).count() > 0\n if has_received_amounts_from_before_window:\n bitcoin_price_exists = Bitcoin_Price.objects.filter(time=p.time_to_match_price).count() == 1\n if bitcoin_price_exists:\n unresolved_future_prices.append(p)\n\n return unresolved_future_prices\n\n \"\"\"\n The following commented-out method:\n - assumes that there is always a bitcoin_price for every minute before the\n last bitcoin_price\n - assumes that every future_prediction before the last returned_amount has\n been evaluated\n ...I am not willing to make these assumptions\n \n latest_bitcoin_time = get_latest_bitcoin_time()\n\n try:\n latest_returned_amount = Returned_Amount.objects.order_by('-from_received_amount__prediction__future_price__time_to_match_price')[0]\n latest_returned_time = latest_returned_amount.from_received_amount.prediction.future_price.time_to_match_price\n except IndexError:\n latest_returned_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\n unresolved_future_prices = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time,\n time_to_match_price__gt=latest_returned_time\n )\n\n return unresolved_future_prices\n \"\"\"", "def get_queryset(self):\n unitlist = get_units_visible_to_user(self.request.user)\n\n return Candidate.objects.filter(\n appointments__committee__unit__in=unitlist,\n )", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)", "def available_days(self):\n dates = []\n for date in self.tweets_list.available_dates:\n if date[1] not in dates and len(dates) < 10:\n dates.append(date[1])\n return dates", "def candidates(self):\n return self.related_users.all()", "def get_all_elections(self) -> list:", "def losses(self):\n return [g for g in self.games if g.winner is not self.team]", "def get_recent_matches_with_player(self, player, limit=None):\n matches = (Match.objects\n .filter(\n Q(winner=self, loser=player) |\n Q(loser=self, winner=player)\n )\n .order_by('-played_time'))\n return matches[:limit or MATCH_RESULT_LIMIT]", "def get_free_sessions(self):\n return [session for session in self.sessions if not session.is_booked()]", "def find_inactive_devices(db, 
cutoff=IDLE_CUTOFF):\n app_log.info(f\"Checking for devices inactive since {cutoff}\")\n cur = db.execute(r\"{CALL getLastActivityBefore(?)}\", (cutoff,))\n return cur.fetchall()", "def find_inactive_event_users(self, collection='activity', activeperiod_days=7):\n now = self.now\n time_filt = {}\n if collection == 'attendeelogs':\n time_key = 'changedDatetime'\n else:\n time_key = 'time'\n if activeperiod_days != None:\n now = self.now\n time_filt = {\n \"time\" : { \n \"$lt\": now,\n \"$gte\": now - timedelta(days=activeperiod_days)\n }\n }\n active_users = self.client.smartsleep[collection].distinct(\"userId\", time_filt)\n inactive_users = set(self.find_active_users(activeperiod_days)) - set(active_users)\n return list(inactive_users)", "def tempfeeder_exp_nonzerotest_users():\n\n return [ user for user in tempfeeder_exp().user_ids if all(tempfeeder_exp()[user]['Load']['2005-10-01 00:00':]) ]", "def online_users(room):\n threshold = datetime.now() - timedelta(seconds=10)\n authorizations = models.Authorization.gql(\"WHERE room = :room AND last_checked_in >= :threshold\", room=room, threshold=threshold).fetch(1000)\n return [x.user for x in authorizations]", "def upcoming(self):\n return self.filter(start__gte=timezone.now())", "def get_curr_events(self):\n today = datetime.date.today()\n return self.s.query(Event).filter(Event.time > today).all()", "def undead(self):\n cutoff = datetime.utcnow() - timedelta(seconds=HEARTBEAT_FAILED)\n return self.status_in(\"active\").filter(heartbeat__lt=cutoff)", "def get_not_voted_cards(cls, user, card_set_id):\n user_card_set_votes_id = user.votes.filter(\n card__card_set_id=card_set_id,\n ).values_list('card__id', flat=True)\n\n return Card.objects.filter(\n card_set_id=card_set_id,\n ).exclude(pk__in=user_card_set_votes_id)", "def queue_people(_):\n today = datetime.now()\n today_date = datetime(today.year, today.month, today.day)\n not_checked_today = Tracker.select().where(\n (\n (Tracker.friends_last_retrieved.is_null())\n | (Tracker.friends_last_retrieved < today_date)\n )\n & (Tracker.participant == True)\n )\n return not_checked_today.count() > 0", "def get_available_teams(self):\n teams = self.request.user.team_set.filter(competition__is_open=True)\n if not teams.exists():\n msg = \"Can't send invites at this time. 
You're not\"\n msg += \" registered for any open competitions\"\n messages.error(self.request, msg)\n raise Http404(msg)\n return teams", "def getValidCertifications(self):\n certs = []\n today = date.today()\n for c in self.getCertifications():\n validfrom = c.getValidFrom() if c else None\n validto = c.getValidTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if (today >= validfrom and today <= validto):\n certs.append(c)\n return certs", "def old_cells_die(celllist, tnow):\n survivors = [cell for cell in celllist\n if tnow - cell.birthtime <= cf.tlifeN]\n return survivors", "def get_non_current_conferences_for_series(self, series,\n current_conference=None):\n #print 'get_non_current_conferences_for_series()'\n #print ' series: %s' % series\n #print ' current_conference: %s' % current_conference\n if current_conference is None:\n current_conference = self.get_current_conference_for_series(series)\n #print ' current_conference: %s' % current_conference\n # Get the other conferences in the series\n return Resource.objects.filter(conference_series=series).\\\n exclude(id=current_conference.id).all()", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets", "def newly_off_waitlist_rsvps(self, old_admitted):\n new_admitted = set(self.admitted_set())\n return new_admitted - old_admitted", "def non_current_championships():\n current_championships = (Alfa_Romeo+Ferrari+Haas+McLaren+Mercedes+Racing_Point+Red_Bull+Renault+Toro_Rosso+Williams).constructors_championships_years\n non_current_championships = []\n year = 1958\n while year < 2020:\n if year not in current_championships:\n non_current_championships.append(year)\n year += 1\n return f\"The F1 Constructors' Championships won by teams no longer on the grid are: \\n{non_current_championships}\"", "def _computesuspendedset(repo):\n suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))\n return set(r for r in getrevs(repo, 'obsolete') if r in suspended)", "def top30_clients(self):\n clients = self.clients_sorted_by_rentals()\n return clients[:int(0.3 * len(clients))]", "def get_remaining_events(index_disappeared,to_destroy):\n index_cp = index_disappeared[:]\n for i,deb,fin in to_destroy:\n index_cp = [(x,y,z) for x,y,z in index_cp if (x!=deb and x!=fin)]\n return index_cp", "def get_pending_instances(self):\n return [instance for instance in self.instances.itervalues()\n if InstanceState.REQUESTED <= instance.state < InstanceState.RUNNING]", "def get_available_rental_instruments(self) -> list:\n self.cursor.execute(\"\"\"\n SELECT DISTINCT name, brand, monthly_cost, ri_id AS id\n FROM rental_instrument AS ri\n WHERE NOT EXISTS\n (SELECT 1 FROM rental AS r\n WHERE ri.ri_id = r.ri_id \n AND CURRENT_DATE < end_date\n AND terminated IS NULL)\n \"\"\")\n self.db.commit()\n return self._cursor_result()", "def expired(self, *args, **kwargs):\n # Expired messages are those that have been delivered, AND have a\n # set `expire_on` attribute.\n #\n # OR, those messages that were never delivered, but are no longer\n # relevant (ie. 
they're too old).\n now = timezone.now()\n since = timezone.now() - timedelta(days=3) # 3 days ago\n\n return self.get_queryset().filter(\n Q(expire_on__lte=now) |\n Q(deliver_on__lte=since, success=None) |\n Q(deliver_on__lte=since, success=False)\n )", "def get_all_game_players_but_indicated(self, user):\n return GamePlayer.objects.filter(Q(game=self) & ~Q(player=user))", "def all_inactive(self, now):\n for timestamps in self._timestamps.itervalues():\n timestamps.last_active = min(now - CANDIDATE_INACTIVE, timestamps.last_active)", "def get_allowed_vos():\n return get_vos(filter_by_existing_users(filter_out_bans(read_mapfiles(), read_banfile())))", "def recent(self):\n return self.filter(\n start_date__lte=self.current().end_date + timezone.timedelta(days=1),\n end_date__gte=self.current().start_date - timezone.timedelta(days=1),\n )", "def limited_infection(self, user, target_num, version):\n num_infected = 0\n\n # Checks to make sure we don't have a target_num greater than number of uninfected users\n uninfected = [u for u in self.users if u.version != version]\n if target_num > len(uninfected):\n self.infect_all(version)\n\n while num_infected < target_num:\n # Infect the given user first\n if user.version != version:\n user.version = version\n num_infected += 1\n\n # Infect the uninfected students of user\n uninfected_students = [s for s in user.students if s.version != version]\n for student in uninfected_students:\n student.version = version\n num_infected += 1\n\n # Find the next user to infect\n user = self.find_best_user(target_num - num_infected, version)\n\n # If there is no best user we have infected everyone\n if user is None:\n break", "def get_excluded_dates(self):\n raise NotImplementedError", "def get_excluded_dates(self):\n raise NotImplementedError", "def get_recent_contacts(user, limit=5, timespan_days=14) -> typing.List[Contact]:\n timespan_recent = datetime.now().astimezone() - timedelta(days=timespan_days)\n contacts_recent = (\n Contact.objects.filter(interactions__was_at__gt=timespan_recent)\n .filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_recent)", "def list_inactive_users(datab, eng, timeperiod):\n now = datetime.\\\n datetime.now()\n time = period_check(now, timeperiod)\n active_users_for_timeperiod = []\n users = []\n group_association = {}\n user, session, group_assoc, group = datab.galaxy_user, datab.galaxy_session, datab.user_group_association, \\\n datab.galaxy_group\n sele = select([user.email])\\\n .where(user.deleted == False)\\\n .where(user.id == session.user_id)\\\n .where(session.update_time > time)\\\n .group_by(user.email)\n with eng.connect() as conn:\n result = conn.execute(sele)\n for row in result:\n active_users_for_timeperiod.append(row[0])\n sele = select([user.email])\\\n .where(user.deleted == False)\n with eng.connect() as conn:\n result = conn.execute(sele)\n for row in result:\n users.append(row[0])\n sele = select([group.name, user.email])\\\n .where(group.id == group_assoc.group_id)\\\n .where(user.id == group_assoc.user_id)\n with eng.connect() as conn:\n result = conn.execute(sele)\n for row in result:\n if row[0] in group_association:\n group_association[row[0]].append(row[1])\n else:\n group_association[row[0]] = [row[1]]\n\n all_inactive_users = set(users) - set(active_users_for_timeperiod)\n external_inactive_users = []\n for user in all_inactive_users:\n if user not in group_association[\"pasteur_users\"]:\n external_inactive_users.append(user)\n 
message = \"Total users: {0}\\n\" \\\n \"Actives users in the last {1} month(s): {7}\\n {3} \\n\" \\\n \"Inactives users in the last {1} month(s): {2}\\n{4}\\n\" \\\n \"Not Pasteur and inactives users in the last {1} month(s): {5}\\n{6}\\n\".format(len(users), timeperiod,\n len(all_inactive_users), \" \".join(active_users_for_timeperiod),\n\n \" \".join(all_inactive_users),\n len(external_inactive_users),\n \" \".join(external_inactive_users), len(active_users_for_timeperiod))\n sendmail(ADMINS_EMAILS, message, \"Galaxy Pasteur inactive users Report\")\n\n return", "def _get_instances_pending_events(self):\n\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n stats = conn.get_all_instance_status()\n next_token = stats.next_token\n while next_token != None:\n next_stats = conn.get_all_instance_status(next_token=next_token)\n stats.extend(next_stats)\n next_token = next_stats.next_token\n ret = []\n for stat in stats:\n if stat.events:\n for event in stat.events:\n if re.match('^\\[Completed\\]', event.description):\n continue\n ret.append([stat.id, event.code, event.not_before])\n if len(ret) > 0:\n instances = get_instances([stat[0] for stat in ret])\n for stat in ret:\n stat.insert(1, instances[stat[0]])\n return ret", "def teammates(self):\n return [\n p for p in self.roster.participants\n if p.participant_id != self.participant_id\n ]", "def EndCriteria(Vote):\n if (time.time() - Vote['VoteInfo']['timeout']) < Vote['VoteInfo']['StartTime']:\n return True\n if Vote['TotalVotes'] == len(livingPlayers)-2:\n return True", "def get_unhealthy_instances(self):\n unhealthy = []\n for instance in self.instances.itervalues():\n if instance.state == InstanceState.RUNNING_FAILED:\n unhealthy.append(instance)\n continue # health report from epuagent (or absence of it) is irrelevant\n\n if instance.health not in _HEALTHY_STATES:\n\n # only allow the zombie state for instances that are\n # terminated\n if (instance.state < InstanceState.TERMINATED or\n instance.health == InstanceHealthState.ZOMBIE):\n unhealthy.append(instance)\n\n return unhealthy", "def get_last_matches(self):\n if not isinstance(self.__root, NegationNode):\n return []\n # this is the node that contains the pending matches\n first_unbounded_negative_node = self.__root.get_first_unbounded_negative_node()\n if first_unbounded_negative_node is None:\n return []\n first_unbounded_negative_node.flush_pending_matches()\n # the pending matches were released and have hopefully reached the root\n return self.get_matches()", "def active_comics():\n # FUTURE: Should not include ended comics?\n return Comic.objects.exclude(active=False)", "def members_voted(self):\r\n return MembersVoted(self)", "def in_past(self):\n return self.election_date < datetime.date.today()", "def get_queryset(self):\n return Investor.objects.order_by('-kyc_date')[:5]", "def course_course_recommendations(self, course_id: str, max_recs: int = 10) -> np.ndarray:\n users = np.array(self.leads_user_item_matrix[self.leads_user_item_matrix.loc[:, course_id] == 1].index)\n recs = np.array([])\n\n for user_id in users:\n user_courses = self.requested_courses(user_id)\n\n new_recs = user_courses[user_courses != course_id]\n recs = np.unique(np.concatenate([new_recs, recs], axis=0))\n\n if len(recs) > max_recs:\n break\n\n return recs[:max_recs]", "def upcoming(self):\n now = timezone.now()\n # construct a datetime based on now but with zero hour/minute/second\n today = datetime(\n now.year, now.month, now.day, 
tzinfo=timezone.get_default_timezone()\n )\n return self.filter(end_time__gte=today).order_by_start()", "def get_upcoming_sessions(self):\n return [session for session in self.sessions if not session.is_complete()]", "def fetch_elections(self):\n payload = {\"key\": self._api_key} \n response = requests.get(self._url, params=payload)\n try: \n response.raise_for_status() \n return response.json()\n except requests.exceptions.HTTPError as error:\n # Error in request \n logging.error(error)\n except requests.exceptions.RequestException as error:\n # Catastrophic error \n logging.error(error)\n raise", "def _get_closer_drivers(self, passenger):\n logging.info(\"[_get_closer_drivers] Busca los choferes cercanos.\")\n nearest = []\n for driver in db.drivers.find({'available': True}):\n if self._calculate_distance(passenger, driver) < self.max_distance:\n user_data = self._get_data_user(driver['_id'])\n user_data['user'].pop('cars')\n user_data['user'].pop('_ref')\n nearest.append({'driver': user_data['user'], 'position': {'lat': driver['lat'], 'lon': driver['lon']}})\n logging.info(\"[_get_closer_drivers] Se encontraron \" + str(len(nearest)) + \" choferes cercanos.\")\n return nearest", "def get_all_non_motif_assoc_cur_site_insts():\n return get_all_cur_site_insts().filter(site_type=\"non_motif_associated\")", "def get_active(self):\n return self.get_challenges().filter(status='A')", "def test_list_inactive_users(self):\r\n # for now just make sure we can get a 200 call on it.\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/accounts/inactive',\r\n params=params,\r\n status=200)\r\n # by default we shouldn't have any inactive users\r\n data = json.loads(res.body)\r\n users = [u for u in data['users']]\r\n for u in users:\r\n self.assertEqual(0, u['invite_ct'], \"Count should be 0 to start.\")", "def _get_requests(self, since=None):\n if since is None:\n return self._finished_requests\n # Find the first element newer than 'since' using bisect\n left, right = 0, len(self._finished_requests)\n while left < right:\n middle = (left + right) // 2\n if since <= self._finished_requests[middle].end_time:\n right = middle\n else:\n left = middle + 1\n result = self._finished_requests[left:]\n return result", "def active(self) -> models.QuerySet[PersistentMessage]:\n start_date_filter = models.Q(display_from__lte=tz_now())\n end_date_filter = models.Q(display_until__gte=tz_now()) | models.Q(\n display_until__isnull=True\n )\n return self.filter(start_date_filter).filter(end_date_filter)", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def getConfirmedVolunteers(self, query):\n query = Volunteer.query(Volunteer.confirmed == True)\n return query", "def _get_remaining(self):\n remaining = []\n for game_info in self.steam_keys:\n if game_info[1] not in self.steam_keys_given:\n remaining.append(game_info[0])\n return remaining", "def get_invited_polls(self):\n\n invited_polls = []\n for poll_user in PollUser.objects.filter(user=self):\n invited_polls.append(poll_user.poll)\n\n return invited_polls", "def player_has_active_games(self, player):\n return self.filter(active=True, finished=False, player=player)", "def get_all_nodes_without_provider(self):\n\n no_provider_nodes = []\n # create list of all nodes without provider and more than tier1_threshold customers\n for node in self.nodes():\n tier1 = True\n\n # check that node is not a customer of any node\n if not self.has_providers(node):\n 
no_provider_nodes.append(node)\n\n return no_provider_nodes", "def all_inactive(self, now):\n for timestamps in self._timestamps.itervalues():\n timestamps.last_active = min(now - CANDIDATE_INACTIVE, timestamps.last_active)\n timestamps.last_walk = min(now - CANDIDATE_WALK_LIFETIME, timestamps.last_walk)\n timestamps.last_stumble = min(now - CANDIDATE_STUMBLE_LIFETIME, timestamps.last_stumble)", "def _filter_committees_failing_weak_representation(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if unique_approval_scores[party] >= self.n / self.k}\n possible_committees = [committee for committee in committees if parties_deserving_representation.issubset(set(committee))]\n return possible_committees", "def vwho():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'vavailable': vavailable(), 'veta': data['vetas'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def long_waiters_die(celllist, tnow):\n survivors = []\n for sublist in celllist:\n newsub = []\n for cell in sublist:\n if tnow - cell.GCentrytime <= cf.tlifeGC:\n newsub.append(cell)\n survivors.append(newsub)\n return survivors", "def get_past_reminders(self, now=None):\n now = now or datetime.datetime.now()\n store = self.load_data(default=[])\n return [\n reminder_info\n for reminder_info in store\n if reminder_info['datetime'] < now\n ]", "def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200" ]
[ "0.6028861", "0.5758767", "0.5515115", "0.54826623", "0.5425473", "0.5403842", "0.53461355", "0.5314975", "0.5251976", "0.5220554", "0.5220554", "0.5193499", "0.51809436", "0.51725066", "0.517217", "0.5158285", "0.51561767", "0.5152371", "0.51470834", "0.51421833", "0.51222056", "0.51121193", "0.5101381", "0.5098493", "0.5082552", "0.5066987", "0.50647473", "0.5043818", "0.5035207", "0.5031032", "0.5029981", "0.5010582", "0.5000646", "0.49901694", "0.4985118", "0.49764192", "0.49731606", "0.4969864", "0.49687278", "0.4949284", "0.49486518", "0.4943488", "0.49307257", "0.49203226", "0.4914278", "0.49070996", "0.49046242", "0.490335", "0.49011862", "0.48975387", "0.48787922", "0.48784575", "0.48642683", "0.48535213", "0.48455307", "0.4841221", "0.48377237", "0.48302302", "0.48211837", "0.48173496", "0.48147202", "0.4810684", "0.4803741", "0.48021927", "0.4799807", "0.4797727", "0.4797727", "0.4797154", "0.4791008", "0.478999", "0.47885814", "0.47861394", "0.47794625", "0.4778246", "0.47782037", "0.47705486", "0.47627735", "0.4757903", "0.47528982", "0.4752766", "0.47519615", "0.47500658", "0.47487032", "0.47450438", "0.4742235", "0.47417995", "0.47310883", "0.47289068", "0.47246873", "0.47231066", "0.47185054", "0.47184774", "0.4706067", "0.47053385", "0.47049946", "0.47037756", "0.47035813", "0.47019583", "0.46901447", "0.4682741" ]
0.65486276
0
Return active constituent voters with valid phone contact information who have not been contacted since the last election. Don't limit the size of the result set here; let APIs do that.
def getVotersToDial(self): return self.getVotersToContact().exclude( (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)), (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))
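A note on the "let APIs do that" clause in the query above: the stored method intentionally returns an unsliced queryset and leaves any size cap to the calling layer. A minimal sketch of such a caller follows; the Campaign model name, import path, view function, and page size are hypothetical and not part of this record.

from django.http import JsonResponse
from myapp.models import Campaign  # hypothetical app path; model name assumed

def list_voters_to_dial(request, campaign_id, page_size=50):
    # Hypothetical API-side cap: slicing a Django queryset emits a SQL LIMIT,
    # so only page_size rows are fetched even though getVotersToDial() itself
    # applies no limit.
    campaign = Campaign.objects.get(pk=campaign_id)
    voters = campaign.getVotersToDial()[:page_size]
    return JsonResponse({"voter_ids": [v.pk for v in voters]})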
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "def get_all_active_members(debug, contactsUrl):\n\n valid_date = str(datetime.date.today() - datetime.timedelta(days=7)) # 7 days ago in yyyy-mm-dd format\n\n #params = {'$filter': 'member eq true AND Status eq Active',\n # '$async': 'false'}\n params = {'$filter': \"member eq true AND ( Status eq Active OR ( Status eq PendingRenewal AND 'Renewal due' ge \" + valid_date + \"))\",\n '$async': 'false'}\n request_url = contactsUrl + '?' + urllib.parse.urlencode(params)\n if debug: print('Making api call to get contacts')\n return api.execute_request(request_url).Contacts", "def to_exclude(self):\n midnight = now().replace(hour=0, minute=0, microsecond=0)\n return CenterOpen.objects.filter(\n creation_date__gte=midnight,\n ).values_list('phone_number', flat=True)", "def get_recent_contacts(user, limit=5, timespan_days=14) -> typing.List[Contact]:\n timespan_recent = datetime.now().astimezone() - timedelta(days=timespan_days)\n contacts_recent = (\n Contact.objects.filter(interactions__was_at__gt=timespan_recent)\n .filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_recent)", "def get_active_contact(self):\n list_contact = Contact.objects.filter(phonebook__campaign=self.id,\n status=CONTACT_STATUS.ACTIVE).all()\n if not list_contact:\n return False\n return list_contact", "def get_not_contacted(self, seller_id):\n return self.contactcampaignstatus_set.filter(seller_id=seller_id, status__in=[1, 3])", "def get_last_conversations(self):\n email_token = auth.current_user()[0]\n user_data, last_messages = self.friend_database.get_conversations(email_token)\n last_messages = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in last_messages]\n for i in range(len(last_messages)):\n last_messages[i][\"timestamp\"] = last_messages[i][\"timestamp\"].isoformat()\n response = []\n for i in range(len(last_messages)):\n response.append({\"user\": user_data[i], \"last_message\": last_messages[i]})\n return json.dumps(response), 200", "def imps_by_me(self):\n return self.caller.roster.accounthistory_set.last().initiated_contacts.all()", "def get_frequent_contacts(user, limit=5) -> typing.List[Contact]:\n contacts_frequent = (\n Contact.objects.filter(user=user)\n .annotate(count=Count(\"interactions\"))\n .order_by(\"-count\")[:limit]\n )\n return list(contacts_frequent)", "def previous_imps_by_me(self):\n return FirstContact.objects.filter(\n from_account__in=self.caller.roster.previous_history\n )", "def get_all_candidates(self) -> list:", "def get_allowed_vos():\n return get_vos(filter_by_existing_users(filter_out_bans(read_mapfiles(), read_banfile())))", "def getValidCertifications(self):\n certs = []\n today = date.today()\n for c in self.getCertifications():\n validfrom = c.getValidFrom() if c else None\n validto = c.getValidTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if (today >= validfrom and today <= validto):\n certs.append(c)\n return certs", "def active_comics():\n # FUTURE: Should not include ended comics?\n 
return Comic.objects.exclude(active=False)", "def get_explicit_community_match(self) -> list:\n return self.matching", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbrosters.difference(rosters)\n return notactive", "def get_candidates(data):\n return data.groups[\"Candidates\"].objects", "def get_active(self):\n return self.get_challenges().filter(status='A')", "def get_clients_to_be_reactivated(file=\"db.json\") -> List[Client]:\n with TinyDB(file) as db:\n query = Query()\n result = db.search(query[\"rem date\"].test(contact_now))\n output = []\n for client in result:\n output.append(Client(client[\"first name\"], client[\"last name\"],\n client[\"last visit\"], client[\"rem date\"],\n client[\"email\"]\n ))\n return output", "def candidates(self):\n return self.related_users.all()", "def candidates_all():\n return jsonify(candidate.get_candidates())", "def get_recipients(self):\n return [\n self.obj.activity.owner\n ] + [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n slot_participant.participant.status == 'accepted'\n )\n ]", "def teammates(self):\n return [\n p for p in self.roster.participants\n if p.participant_id != self.participant_id\n ]", "def ldap_get_live_onfloor():\n members = []\n onfloor = _ldap_get_group_members('onfloor')\n for member in onfloor:\n if ldap_get_roomnumber(member) and not ldap_is_eboard(member):\n members.append(member)\n\n return members", "def customers_presence(self):\n return self._customers_presence", "def get_candidates(self, cloud_name, jobs, count, return_only_all_idle=False):\n\n asg_info = self.phantom_client.get_autoscale_groups_info(self.phantom_client.asg.name)\n all_instances_info = asg_info[self.phantom_client.asg.name]['instances']\n instances = self.phantom_client.get_alive_instnaces(all_instances_info)\n\n localjobs = copy.copy(jobs)\n\n idle_list = []\n nonidle_list = []\n for instance in instances:\n if instances[instance]['cloud_name'] != cloud_name:\n continue\n job_matching_found = False\n for job in localjobs.list:\n if instances[instance]['public_dns'] == job.node:\n #nonidle_list.append( (instance, job.running, instances[instance]) )\n nonidle_list.append( (instance, job.progress, instances[instance]) )\n\n localjobs.list.remove(job)\n job_matching_found = True\n break\n if not job_matching_found:\n idle_list.append( (instance, instances[instance]) )\n\n # Truncate idle list if needed (in case there are more idle instances than count)\n # Does not do anything if count >= len(idle_list)\n\n if return_only_all_idle:\n # DONE if this flag is set\n return idle_list\n\n idle_list = idle_list[:count]\n\n if idle_list:\n idle_list_str = \"\"\n for instance in idle_list:\n idle_list_str += \"%s:%s,\" % (instance[0], instance[1]['public_dns'])\n LOG.info(\"OO found idle candidates for termination in %s: %s\" % (cloud_name, idle_list_str))\n\n # Sort by the run time in the decreasing order\n sorted_nonidle_list = sorted(nonidle_list, key=operator.itemgetter(1), reverse=True)\n\n remaining_count = count - len(idle_list)\n # Truncate sorted non-idle list if needed (in case remaining_count < len(sorted_nonidle_list))\n sorted_nonidle_list = sorted_nonidle_list[:remaining_count]\n\n sorted_nonidle_list_instances_only = []\n if sorted_nonidle_list:\n nonidle_list_str = \"\"\n for atuple in sorted_nonidle_list:\n nonidle_list_str += \"%s:%s:%s,\" % 
(atuple[0], atuple[2]['public_dns'], atuple[1])\n sorted_nonidle_list_instances_only.append((atuple[0], atuple[2] ))\n LOG.info(\"OO found non-idle candidates for termination in %s: %s\" % (cloud_name, nonidle_list_str))\n\n total_found = len(idle_list)+len(sorted_nonidle_list_instances_only)\n if not total_found == count:\n LOG.info(\"OO can't supply enough (%d) instances for termination. Found only %d\", count, total_found)\n\n return idle_list, sorted_nonidle_list_instances_only", "def retrieve_exact_commutes(self):\n for destination in self.tenants:\n try:\n results = retrieve_exact_commute_rent_algorithm(self.homes[:NUMBER_OF_EXACT_COMMUTES_COMPUTED],\n destination,\n destination.commute_type,\n with_traffic=destination.traffic_option)\n\n # Store the results to the homes\n for i in range(len(results)):\n duration_seconds = results[i][0][0]\n distance_meters = results[i][0][1]\n if duration_seconds is not None and distance_meters is not None:\n self.homes[i].exact_commute_times[destination] = int(duration_seconds / 60)\n\n except Distance_Matrix_Exception as e:\n print(\"Caught: \" + e.__class__.__name__)", "def clients_with_team_access(self):\n from lastuser_core.models.client import CLIENT_TEAM_ACCESS\n return [cta.client for cta in self.client_team_access if cta.access_level == CLIENT_TEAM_ACCESS.ALL]", "def test_special_contacts(self):\n\n vcards = []\n\n # Generate a contact with no email address\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with the same name but different phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = \"Mr.\"\n current_contact.last_name = \"Smiley\"\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with no name\n current_contact = bt_contacts_utils.VCard()\n current_contact.email = \"{}@gmail.com\".format(\n bt_contacts_utils.generate_random_string())\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with random characters in its name\n current_contact = bt_contacts_utils.VCard()\n current_contact.first_name = bt_contacts_utils.generate_random_string()\n current_contact.last_name = bt_contacts_utils.generate_random_string()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n # Generate a 2nd contact with only a phone number\n current_contact = bt_contacts_utils.VCard()\n current_contact.add_phone_number(\n bt_contacts_utils.generate_random_phone_number())\n vcards.append(current_contact)\n\n bt_contacts_utils.create_new_contacts_vcf_from_vcards(\n self.contacts_destination_path, PSE_CONTACTS_FILE, vcards)\n\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n\n return self.connect_and_verify(phone_numbers_added)", "def list_valid(self):\n contacts = AccountHistory.objects.claimed_impressions(self.caller.roster)\n if \"list\" in 
self.switches:\n if \"previous\" in self.switches:\n contacts = AccountHistory.objects.filter(\n contacted_by__in=self.caller.roster.previous_history\n )\n self.msg(\n \"{wCharacters you have written first impressions of:{n %s\"\n % \", \".join(str(ob.entry) for ob in contacts)\n )\n return\n qs = AccountHistory.objects.unclaimed_impressions(self.caller.roster)\n if \"outstanding\" in self.switches:\n impressions = self.imps_of_me.filter(private=False, from_account__in=qs)\n authors_and_imps = [\n '{c%s{n: \"%s\"' % (ob.writer, ob.summary) for ob in impressions\n ]\n self.msg(\n \"First Impressions you have not yet reciprocated: \\n%s\"\n % \"\\n\".join(authors_and_imps)\n )\n return\n location = \"\"\n if \"here\" in self.switches:\n location = \"at your location \"\n qs = qs.filter(entry__character__db_location=self.caller.location)\n # filter out masked people\n qs = [\n ob\n for ob in qs\n if ob.entry.player.username.capitalize() == str(ob.entry.character)\n ]\n players = sorted(\n set(ob.entry.player for ob in qs), key=lambda x: x.username.capitalize()\n )\n self.msg(\n \"{wPlayers %syou haven't written a first impression for yet:{n %s\"\n % (location, \", \".join(str(ob) for ob in players))\n )", "def get_recent_matches(self, limit=None):\n return (Match.objects\n .filter(company=self)\n .order_by('-played_time')[:limit or MATCH_RESULT_LIMIT]\n )", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def get_due_contacts(user) -> typing.List[Contact]:\n contacts = (\n Contact.objects.filter(user=user)\n .order_by(\"name\")\n .prefetch_related(\"interactions\")\n .all()\n )\n contacts = filter(lambda c: c.get_urgency() > 0, contacts)\n contacts = sorted(contacts, key=lambda c: c.get_urgency(), reverse=True)\n return list(contacts)", "def find_users_missing_standup():\n token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_SLACK_TOKEN))['Plaintext']\n sc = SlackClient(token)\n channels = sc.api_call('channels.list')['channels']\n standup = (i for i in channels if i['name'] == SLACK_CHANNEL).next()\n members = standup['members']\n messages = sc.api_call('channels.history', channel=standup['id'])['messages']\n messages_within_last_10_hours = filter(check_in_date_range, messages) \n users_posted = (i['user'] for i in messages_within_last_10_hours if\n 'user' in i.keys())\n difference = set(members).difference(users_posted)\n return difference", "def get_all_elections(self) -> list:", "def list_contacts(self):\n return self.contacts", "def get_queryset(self):\n unitlist = get_units_visible_to_user(self.request.user)\n\n return Candidate.objects.filter(\n appointments__committee__unit__in=unitlist,\n )", "def get_victors(self):\n if self.is_game_over():\n scores = [p.get_score() for p in self.state.get_players()]\n if len(scores) == 0:\n return []\n max_score = max(scores)\n victors = []\n for p in self.state.get_players():\n if p.get_color() not in self.violators and p.get_score() == max_score:\n victors.append(self.players[p.get_color()])\n return victors\n else:\n return None", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def get_recipients(self):\n return [\n slot_participant.participant.user for slot_participant\n in self.obj.slot_participants.all()\n if (\n slot_participant.status == 'registered' and\n 
slot_participant.participant.status == 'accepted'\n )\n ]", "def online_users(room):\n threshold = datetime.now() - timedelta(seconds=10)\n authorizations = models.Authorization.gql(\"WHERE room = :room AND last_checked_in >= :threshold\", room=room, threshold=threshold).fetch(1000)\n return [x.user for x in authorizations]", "def get_partner_requests(request):\n try:\n partner_requests = PartnerRequest.objects.filter(to_user=request.user)\n except:\n partner_requests = []\n\n return partner_requests", "def queue_people(_):\n today = datetime.now()\n today_date = datetime(today.year, today.month, today.day)\n not_checked_today = Tracker.select().where(\n (\n (Tracker.friends_last_retrieved.is_null())\n | (Tracker.friends_last_retrieved < today_date)\n )\n & (Tracker.participant == True)\n )\n return not_checked_today.count() > 0", "def get_cached_contacts(self):\n return list(self._replacement_cache)", "def remove_contact(self, date_limit):\n for provider in ServiceProvider.objects.filter(end_at__lt=date_limit, history=None):\n # Check for history versions\n for sp in ServiceProvider.objects.filter(history=provider.pk):\n for contact in Contact.objects.filter(sp=sp):\n self.output(\n sp.entity_id + \": Removing contact (history): \" + contact.firstname + \" \" + contact.lastname\n )\n if not self.list_only:\n contact.delete()\n for contact in Contact.objects.filter(sp=provider):\n self.output(provider.entity_id + \": Removing contact: \" + contact.firstname + \" \" + contact.lastname)\n if not self.list_only:\n contact.delete()", "def get_voters():", "def get_voters():", "def _online(self):\n user = self._user_obj\n not_invited = user.friends.personally_invited(status=False)\n online = not_invited.filter(fb_uid__in=online_friends(self.request))\n selection = online.order_by('?')[:4]\n return {\n 'friends': selection,\n 'title': 'Will your friends vote?',\n 'description': (\n \"It's in your hands. Here are four friends who are likely \"\n \"voters. They're online right now, and some of them may be \"\n \"able to vote early. 
Can you get them to vote?\"\n ),\n 'attributes': {\n 'online': True,\n 'likely_voter': True,\n 'early_voting': True,\n }\n }", "def getUnconfirmedVolunteers(self, query):\n query = Volunteer.query(Volunteer.confirmed == False)\n return query", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def fetch_elections(self):\n payload = {\"key\": self._api_key} \n response = requests.get(self._url, params=payload)\n try: \n response.raise_for_status() \n return response.json()\n except requests.exceptions.HTTPError as error:\n # Error in request \n logging.error(error)\n except requests.exceptions.RequestException as error:\n # Catastrophic error \n logging.error(error)\n raise", "def end_effector_contacts(self, physics):\n return self.collect_contacts(physics, self._end_effector_geom_ids)", "def find_inactive_devices(db, cutoff=IDLE_CUTOFF):\n app_log.info(f\"Checking for devices inactive since {cutoff}\")\n cur = db.execute(r\"{CALL getLastActivityBefore(?)}\", (cutoff,))\n return cur.fetchall()", "def valid_choices(self):\n last_week = datetime.now() - timedelta(days=self.NUM_DAYS)\n return Character.objects.filter(\n Q(roster__roster__name=\"Active\")\n & ~Q(roster__current_account=self.caller.roster.current_account)\n & Q(roster__player__last_login__isnull=False)\n & Q(\n Q(roster__player__last_login__gte=last_week)\n | Q(roster__player__db_is_connected=True)\n )\n & Q(roster__player__is_staff=False)\n & ~Q(roster__player__db_tags__db_key=\"staff_npc\")\n ).distinct()", "def get_previous_partial_payments(self, validated_data):\n payments = Pago.objects.all()\n property = payments.filter(codigoInmueble=validated_data['codigoInmueble'])\n partial_payments = property.filter(valorPagado__lt=1_000_000)\n return partial_payments", "def incoming_phone_numbers(self):\r\n return numbers.IncomingPhoneNumbers(self)", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def voterContactCount(self, user):\n return self.votercontact_set.filter(user=user).count()", "def get_available_invitees(self):\n return User.objects.exclude(pk=self.request.user.pk)", "def active(self) -> models.QuerySet[PersistentMessage]:\n start_date_filter = models.Q(display_from__lte=tz_now())\n end_date_filter = models.Q(display_until__gte=tz_now()) | models.Q(\n display_until__isnull=True\n )\n return self.filter(start_date_filter).filter(end_date_filter)", "def get_active_deposits():\n skip = 0\n graphql_client = GraphQLClient('https://api.thegraph.com/subgraphs/name/miracle2k/all-the-keeps')\n members = GqlQuery().fields(['address']).query('members').generate()\n bondedECDSAKeep = GqlQuery().fields(['totalBondAmount', members]).query('bondedECDSAKeep').generate()\n deposits_query = GqlQuery().fields(['id', 'lotSizeSatoshis', bondedECDSAKeep]).query('deposits', input={\n \"first: 1000 skip: $skip where\": \"{currentState: ACTIVE}\"}).operation('query', name='GetActiveDeposits',\n input={\"$skip\": \"Int!\"}).generate()\n\n params = {\"skip\": skip}\n result = jsonpickle.decode(graphql_client.execute(deposits_query, 
variables=params))[\"data\"][\"deposits\"]\n deposits = result\n while len(result) == 1000:\n params[\"skip\"] += 1000\n result = jsonpickle.decode(graphql_client.execute(deposits_query, variables=params))[\"data\"][\"deposits\"]\n deposits += result\n return deposits", "def _get_live_games(self):\n response = requests.get(self._get_score_url())\n if response.status_code == 200:\n return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]", "def candidates_retrieve_for_api(office_id, office_we_vote_id):\n # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers\n # a ballot data lookup from Google Civic, like voterBallotItems does\n\n if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):\n status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'\n json_data = {\n 'status': status,\n 'success': False,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': 0,\n 'candidate_list': [],\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n candidate_list = []\n candidates_to_display = []\n google_civic_election_id = 0\n try:\n candidate_list_object = CandidateCampaignListManager()\n results = candidate_list_object.retrieve_all_candidates_for_office(office_id, office_we_vote_id)\n success = results['success']\n status = results['status']\n candidate_list = results['candidate_list']\n except Exception as e:\n status = 'FAILED candidates_retrieve. ' \\\n '{error} [type: {error_type}]'.format(error=e, error_type=type(e))\n handle_exception(e, logger=logger, exception_message=status)\n success = False\n\n if success:\n # Reset office_we_vote_id and office_id so we are sure that it matches what we pull from the database\n office_id = 0\n office_we_vote_id = ''\n for candidate in candidate_list:\n one_candidate = {\n 'id': candidate.id,\n 'we_vote_id': candidate.we_vote_id,\n 'ballot_item_display_name': candidate.display_candidate_name(),\n 'candidate_photo_url_large': candidate.we_vote_hosted_profile_image_url_large\n if positive_value_exists(candidate.we_vote_hosted_profile_image_url_large)\n else candidate.candidate_photo_url(),\n 'candidate_photo_url_medium': candidate.we_vote_hosted_profile_image_url_medium,\n 'candidate_photo_url_tiny': candidate.we_vote_hosted_profile_image_url_tiny,\n 'party': candidate.political_party_display(),\n 'order_on_ballot': candidate.order_on_ballot,\n 'kind_of_ballot_item': CANDIDATE,\n }\n candidates_to_display.append(one_candidate.copy())\n # Capture the office_we_vote_id and google_civic_election_id so we can return\n if not positive_value_exists(office_id) and candidate.contest_office_id:\n office_id = candidate.contest_office_id\n if not positive_value_exists(office_we_vote_id) and candidate.contest_office_we_vote_id:\n office_we_vote_id = candidate.contest_office_we_vote_id\n if not positive_value_exists(google_civic_election_id) and candidate.google_civic_election_id:\n google_civic_election_id = candidate.google_civic_election_id\n\n if len(candidates_to_display):\n status = 'CANDIDATES_RETRIEVED'\n else:\n status = 'NO_CANDIDATES_RETRIEVED'\n\n json_data = {\n 'status': status,\n 'success': True,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': google_civic_election_id,\n 'candidate_list': candidates_to_display,\n }\n else:\n json_data = {\n 'status': status,\n 'success': False,\n 'office_id': office_id,\n 
'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': google_civic_election_id,\n 'candidate_list': [],\n }\n\n return HttpResponse(json.dumps(json_data), content_type='application/json')", "def Vcontacts(\n # Selectors\n leftSelector='', rightSelector='',\n # Left side positive filters\n chainLeftIn='',resiNumLeftIn='',resiNameLeftIn='',atomSerialLeftIn='',\n atomNameLeftIn='',\n # Left side negative filters\n chainLeftOut='',resiNumLeftOut='',resiNameLeftOut='', atomSerialLeftOut='',\n atomNameLeftOut='',\n # Right side positive filters\n chainRightIn='',resiNumRightIn='',resiNameRightIn='',atomSerialRightIn='',\n atomNameRightIn='',\n # Right side negative filters\n chainRightOut='',resiNumRightOut='',resiNameRightOut='',atomSerialRightOut='',\n atomNameRightOut='',\n # Contact Area\n contactAreaMin='',contactAreaMax='',\n # Minimal distance\n minimalDistanceMin='',minimalDistanceMax='',\n # Sequence separation\n seqSeparationMin='',seqSeparationMax='',\n # Misc.\n model='', solvent='False', color='white', invert='False', opacity='1',\n # Server connection\n host='127.0.0.1', port='8888',\n # Debug mode\n debug='False'\n ):\n\n # Logger level\n logging_level = logging.INFO if not Bool(debug) else logging.DEBUG\n\n # Init logger\n logging.basicConfig(format='%(levelname)s:%(message)s', level=logging_level)\n\n # Loggin error wrapper\n logging.parser_error = CallCounter(logging.error)\n\n # Get model from selectors\n sele_model = get_selectors_model(leftSelector, rightSelector)\n\n if sele_model:\n model = sele_model\n else:\n model = get_model(model)\n\n params = params_parser(solvent, color, invert, opacity)\n\n if logging.parser_error.counter != 0:\n return\n\n # Append atom serials\n atomSerialLeftIn = atomSerialLeftIn + get_serials(leftSelector)\n atomSerialRightIn = atomSerialRightIn + get_serials(rightSelector)\n\n # Compose query commands\n Vfilter = compose(\n # Left side positive filters\n chainLeftIn, resiNumLeftIn, resiNameLeftIn, atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut, resiNumLeftOut, resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn, resiNumRightIn, resiNameRightIn, atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut, resiNumRightOut, resiNameRightOut, atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin, contactAreaMax,\n # Minimal distance\n minimalDistanceMin, minimalDistanceMax,\n # Sequence separation\n seqSeparationMin, seqSeparationMax\n )\n\n\n query = json.dumps({\n 'filter': Vfilter,\n 'params': params\n })\n\n try:\n # Create TCP client obj\n client = TCPClient(host, port)\n # Start TCP client\n client.start()\n except Exception as e:\n logging.critical(e)\n logging.info('Server might not be running')\n return\n\n try:\n # Check if server has PDB file\n if not client.check_file(model):\n client.send_file(model)\n\n cgo_path = client.get_cgo(model, query)\n\n except socket.timeout as e:\n logging.error(\"Connection time out.\")\n return\n except Exception as e:\n logging.error(\"Server side error\")\n return\n\n del client\n\n # draw CGOs\n draw_CGO(cgo_path)\n\n return", "def get_people_invited(self, users):\n invited = []\n for user in users:\n if Room.verify_if_is_invited(user):\n invited.append(user)\n return invited", "def getCandidate(self):\n data = self.data.copy()\n for p in self.pruned:\n if p in data:\n data.remove(p)\n tmp = data.copy()\n for d in tmp:\n if d in data:\n pastart = 
[self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n return data", "def on_call_email_addresses(self):\n if self._on_call_email_addresses is not None:\n return self._on_call_email_addresses\n\n url = 'https://{}.pagerduty.com/api/v1/users/on_call'.format(self.pager_duty_domain_prefix)\n on_call = self._make_request(url, headers={'Authorization': 'Token token=' + self.pager_duty_token})\n users = set() # users can be in multiple schedule, this will de-dupe\n\n for user in on_call['users']:\n for schedule in user['on_call']:\n if schedule['level'] <= self.escalation_level:\n users.add(user['email'])\n\n log.info('Found %d users on-call', len(users))\n self._on_call_email_addresses = users\n return users", "def contacts(self):\r\n return contacts.Contacts(self)", "def list_active_emails():\n db_customers = Customers.select().where(Customers.status)\n LOGGER.debug(\"Returning list of active customer emails\")\n email_list = [x.email_address for x in db_customers]\n LOGGER.info(\"Email list: %s\", email_list)\n return email_list", "def get_available_teams(self):\n teams = self.request.user.team_set.filter(competition__is_open=True)\n if not teams.exists():\n msg = \"Can't send invites at this time. You're not\"\n msg += \" registered for any open competitions\"\n messages.error(self.request, msg)\n raise Http404(msg)\n return teams", "def fetch_contact_messages(self, org, contact, created_after, created_before):\n pass", "def contact_list(self):\n return self._contact_list", "def candidates_retrieve_for_api(office_id=0, office_we_vote_id=''): # candidatesRetrieve\n # NOTE: Candidates retrieve is independent of *who* wants to see the data. Candidates retrieve never triggers\n # a ballot data lookup from Google Civic, like voterBallotItems does\n\n if not positive_value_exists(office_id) and not positive_value_exists(office_we_vote_id):\n status = 'VALID_OFFICE_ID_AND_OFFICE_WE_VOTE_ID_MISSING'\n json_data = {\n 'status': status,\n 'success': False,\n 'office_id': office_id,\n 'office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': 0,\n 'candidate_list': [],\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n candidate_object_list = []\n candidate_dict_list = []\n google_civic_election_id = 0\n try:\n candidate_list_manager = CandidateListManager()\n results = candidate_list_manager.retrieve_all_candidates_for_office(\n office_id=office_id,\n office_we_vote_id=office_we_vote_id)\n success = results['success']\n status = results['status']\n candidate_object_list = results['candidate_list']\n except Exception as e:\n status = 'FAILED candidates_retrieve. 
' \\\n '{error} [type: {error_type}]'.format(error=e, error_type=type(e))\n handle_exception(e, logger=logger, exception_message=status)\n success = False\n\n if success:\n candidate_manager = CandidateManager()\n candidate_object_list_modified = []\n for candidate in candidate_object_list:\n if not positive_value_exists(candidate.contest_office_name):\n candidate = candidate_manager.refresh_cached_candidate_office_info(candidate)\n candidate_object_list_modified.append(candidate)\n results = generate_candidate_dict_list_from_candidate_object_list(\n candidate_object_list=candidate_object_list_modified,\n office_id=office_id,\n office_we_vote_id=office_we_vote_id)\n candidate_dict_list = results['candidate_dict_list']\n google_civic_election_id = results['google_civic_election_id']\n office_id = results['office_id']\n\n if len(candidate_dict_list) > 0:\n status += 'CANDIDATES_RETRIEVED '\n else:\n status += 'NO_CANDIDATES_RETRIEVED '\n\n json_data = {\n 'status': status,\n 'success': success,\n 'contest_office_id': office_id, # Deprecate\n 'contest_office_we_vote_id': office_we_vote_id,\n 'google_civic_election_id': google_civic_election_id, # Deprecate\n 'candidate_list': candidate_dict_list,\n }\n\n return HttpResponse(json.dumps(json_data), content_type='application/json')", "def _remove_potential_false_positives_for_consent_version(self, df):\n\n # Pandas: find all the records we want to keep and make a new dataframe out of the result. Inverts the\n # \"and\" conditions above for the known false positives in order to find everything but those records\n filtered_df = df.loc[(df.sync_status != int(ConsentSyncStatus.NEEDS_CORRECTING)) |\\\n (df.hpo != 'VA') | (df.va_consent_for_non_va == 0) |\\\n (df.missing_file == 1) | (df.invalid_dob == 1) | (df.invalid_age_at_consent == 1) |\\\n (df.checkbox_unchecked == 1) | (df.non_va_consent_for_va == 1)]\n\n print(f'Filtered count for consent version false positives: {df.shape[0] - filtered_df.shape[0]}')\n\n return filtered_df", "def peer_list_active(self):\n return self.client.call('GET', self.name + 'peer-list/active')", "def get_recent_matches(self, limit=None):\n matches = (Match.objects\n .filter(Q(winner=self) | Q(loser=self))\n .order_by('-played_time'))\n return matches[:limit or MATCH_RESULT_LIMIT]", "def get_available_companies_and_people(team):", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def getLatestValidCalibration(self):\n calibration = None\n lastfrom = None\n lastto = None\n for c in self.getCalibrations():\n validfrom = c.getDownFrom() if c else None\n validto = c.getDownTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if not calibration \\\n or validto > lastto \\\n or (validto == lastto and validfrom > lastfrom):\n calibration = c\n lastfrom = validfrom\n lastto = validto\n return calibration", "def get_contacts(self, count=-1, excluded_guid=None):\n current_len = len(self._contacts)\n if current_len == 0 or count == 0:\n return []\n\n if count < 0:\n count = current_len\n else:\n count = min(count, current_len)\n\n if excluded_guid is None:\n # Get the last `count` contacts.\n contact_list = self._contacts[-count:]\n else:\n contact_list = []\n for contact in reversed(self._contacts):\n if contact.guid == excluded_guid:\n continue\n contact_list.append(contact)\n if len(contact_list) >= count:\n break\n return contact_list", "def 
support_contacts(self):\n return self._support_contacts", "def _find_memberless_constituencies(self):\n constituencies = Constituency.objects.filter(\n end=None, # Constituency currently exists/is not historical\n mp=None,\n )\n\n self.stdout('Constituencies with missing MP:')\n for constituency in constituencies:\n self.stdout(f'[{constituency.parliamentdotuk}] {constituency.name} {constituency.start}')", "def get_conversations(self):\n\t\treturn self.conversations", "def get_users_with_missing_data() -> Set[str]:\n users_data = {user[\"_source\"][\"VENDOR_UUID\"] for user in Handlers.elastic_handler.get_all_today_data(\n _type=\"status\",\n date_start=dt.date.today() + dt.timedelta(days=1),\n date_end=dt.date.today() + dt.timedelta(days=7),\n )}\n\n all_tokens = Handlers.token_handler.get_all_today_data(_type=\"token\")\n to_dict = {dict_[\"_source\"][\"VENDOR_UUID\"]: dict_[\"_source\"][\"TOKEN\"] for dict_ in all_tokens}\n\n return set(dict(filter(lambda item_tup: item_tup[0] not in users_data, to_dict.items())).values())", "def getAttendeesVocab(self):\n # TODO: this currently allows us to add active members to a\n # sitting. That means we can't add members to sittings of past\n # parliaments. Is this OK?\n parliament = self.aq_parent.aq_parent\n teams = parliament.getTeams()\n members = []\n for team in teams:\n members.extend(team.getActiveMembers())\n return DisplayList([(m.UID(), m.getFullname()) for m in members if m])", "def vwho():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'vavailable': vavailable(), 'veta': data['vetas'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def get_current_visitors():\n return Visitor.objects.filter(acknowledged=False).order_by(\"arrival_time\")", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def upcoming_meetups_query(cls):\r\n # Warning, this timestamp inequality is actually done as a string comparison\r\n # in the db for some reason. 
BUT, since epoch seconds won't get another digit\r\n # for another 275 years, we're good for now...\r\n return Meetup._query(Meetup.c.timestamp > time.time() - g.meetup_grace_period, data=True, sort='_date')", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def available_phone_numbers(self):\r\n return numbers.AvailablePhoneNumbers(self)", "def get_invited_polls(self):\n\n invited_polls = []\n for poll_user in PollUser.objects.filter(user=self):\n invited_polls.append(poll_user.poll)\n\n return invited_polls", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def getLatestValidCertification(self):\n cert = None\n lastfrom = None\n lastto = None\n for c in self.getCertifications():\n validfrom = c.getValidFrom() if c else None\n validto = c.getValidTo() if validfrom else None\n if not validfrom or not validto:\n continue\n validfrom = validfrom.asdatetime().date()\n validto = validto.asdatetime().date()\n if not cert \\\n or validto > lastto \\\n or (validto == lastto and validfrom > lastfrom):\n cert = c\n lastfrom = validfrom\n lastto = validto\n return cert", "def get_messages(self, since_timestamp=0):\n return filter(lambda x: x.timestamp > since_timestamp,\n self.chat_messages)", "def get_unresolved_future_prices():\n #TODO this is inefficient, hits the db A LOT\n latest_bitcoin_time = get_latest_bitcoin_time()\n\n potentially_unresolved = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time\n #TODO would like a __gt condition somehow\n )\n\n unresolved_future_prices = []\n for p in potentially_unresolved:\n has_no_returned_amounts_from_before_window = Returned_Amount.objects.filter(to_prediction__future_price=p, from_received_amount__time__lt=F('from_received_amount__prediction__future_price__time_window_closes')).count() == 0\n if 
has_no_returned_amounts_from_before_window:\n has_received_amounts_from_before_window = Received_Amount.objects.filter(prediction__future_price=p, time__lt=F('prediction__future_price__time_window_closes')).count() > 0\n if has_received_amounts_from_before_window:\n bitcoin_price_exists = Bitcoin_Price.objects.filter(time=p.time_to_match_price).count() == 1\n if bitcoin_price_exists:\n unresolved_future_prices.append(p)\n\n return unresolved_future_prices\n\n \"\"\"\n The following commented-out method:\n - assumes that there is always a bitcoin_price for every minute before the\n last bitcoin_price\n - assumes that every future_prediction before the last returned_amount has\n been evaluated\n ...I am not willing to make these assumptions\n \n latest_bitcoin_time = get_latest_bitcoin_time()\n\n try:\n latest_returned_amount = Returned_Amount.objects.order_by('-from_received_amount__prediction__future_price__time_to_match_price')[0]\n latest_returned_time = latest_returned_amount.from_received_amount.prediction.future_price.time_to_match_price\n except IndexError:\n latest_returned_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\n unresolved_future_prices = Future_Price.objects.filter(\n time_to_match_price__lte=latest_bitcoin_time,\n time_to_match_price__gt=latest_returned_time\n )\n\n return unresolved_future_prices\n \"\"\"", "async def get_participants(self):\n for i in range(self.num):\n def check(m):\n if m.content.lower().strip() == \"i\" and m.author not in self.participants:\n return True\n\n return False\n\n # Wait with a timeout of 2 minutes and check each message with check(m)\n reply = await client.wait_for_message(timeout=120, channel=self.channel, check=check)\n\n if reply: # A user replied with a valid check\n asyncio.ensure_future(\n client.say(self.message,\n \"{} has entered! `{}/{}`. Type `I` to join!\".format(\n reply.author.mention, i + 1, self.num))\n )\n self.participants.append(reply.author)\n\n # Remove the message if bot has permissions\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n else:\n # At this point we got no reply in time and thus, gathering participants failed\n await client.say(self.message, \"**The {} game failed to gather {} participants.**\".format(\n self.name, self.num))\n started.pop(started.index(self.channel.id))\n\n return False\n\n return True", "def top30_clients(self):\n clients = self.clients_sorted_by_rentals()\n return clients[:int(0.3 * len(clients))]", "def _get_instances_pending_events(self):\n\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n stats = conn.get_all_instance_status()\n next_token = stats.next_token\n while next_token != None:\n next_stats = conn.get_all_instance_status(next_token=next_token)\n stats.extend(next_stats)\n next_token = next_stats.next_token\n ret = []\n for stat in stats:\n if stat.events:\n for event in stat.events:\n if re.match('^\\[Completed\\]', event.description):\n continue\n ret.append([stat.id, event.code, event.not_before])\n if len(ret) > 0:\n instances = get_instances([stat[0] for stat in ret])\n for stat in ret:\n stat.insert(1, instances[stat[0]])\n return ret" ]
[ "0.6920536", "0.57854915", "0.5222731", "0.50926673", "0.5040607", "0.502312", "0.50023216", "0.48708686", "0.48509404", "0.48265207", "0.48217788", "0.48114735", "0.47936308", "0.47893497", "0.47839564", "0.47823417", "0.47818604", "0.4778862", "0.47737798", "0.47727492", "0.47644523", "0.47436103", "0.4732465", "0.47127", "0.47012612", "0.4696725", "0.4692097", "0.46916696", "0.4673781", "0.4667727", "0.46611717", "0.46602547", "0.46569824", "0.46567994", "0.46405014", "0.46353593", "0.4629785", "0.4629196", "0.4609319", "0.46000522", "0.45939112", "0.45931968", "0.45826313", "0.45794436", "0.45750338", "0.4574286", "0.4574286", "0.45542866", "0.4552785", "0.4544313", "0.45351398", "0.45295408", "0.45130685", "0.4510641", "0.4508172", "0.45071828", "0.45059845", "0.4498744", "0.44959447", "0.4494402", "0.4494318", "0.44890064", "0.44887704", "0.44832233", "0.4481072", "0.4481037", "0.44673055", "0.44672325", "0.44626212", "0.44616103", "0.44595876", "0.44579428", "0.44551063", "0.44539985", "0.44534954", "0.44474763", "0.44455928", "0.44443315", "0.44437414", "0.44407", "0.44402444", "0.44364384", "0.44351786", "0.44317794", "0.44316423", "0.44309747", "0.44248232", "0.44123864", "0.44095042", "0.44078487", "0.44076133", "0.44042817", "0.43986487", "0.43956932", "0.43745586", "0.43636987", "0.43614367", "0.43604678", "0.4359222", "0.43584043" ]
0.671149
1
Return active constituent voters in proximity to given latitude and longitude coordinates. (Database constraints require all rows in the voters table to have valid address information.) Don't limit the size of the result set here; let APIs do that.
def getVotersDoorToDoor(self, latitude, longitude): pass
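The stored document for this query is only a stub (pass), so the proximity behavior is described solely by the query text. A minimal sketch of one possible implementation follows, assuming a GeoDjango/PostGIS backend, a PointField named location on the voter model, and the same activity filters used by getVotersToContact in the snippets above; the field name, SRID, and half-mile radius are assumptions, not part of the dataset.

from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D

def getVotersDoorToDoor(self, latitude, longitude):
    # Sketch only: assumes each voter row carries a geocoded PointField named
    # `location` (the query notes every row has valid address data, so no null
    # check is needed). The half-mile radius is an arbitrary walking distance.
    center = Point(float(longitude), float(latitude), srid=4326)
    return self.voters.filter(
        campaignstovoters__is_active=True,
        is_active=True,
        location__distance_lte=(center, D(mi=0.5)),
    )

As with getVotersToDial, no limit is applied here; callers are expected to slice the returned queryset.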
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def users_nearby(self, meters):\n location = Location.objects.get(id=self.most_recent_location_id)\n lng = location.position['coordinates'][0]\n lat = location.position['coordinates'][1]\n\n nearby_locations = Location.objects(position__near=[lng, lat], position__max_distance=meters)\n\n nearby_user_ids = []\n\n for loc in nearby_locations:\n nearby_user_ids.append(loc.uid)\n\n return SallasanaUser.objects.filter(id__in=nearby_user_ids)", "def proximity_search(self, latitude, longitude, radius):\n\n hashcode = geohash.encode(latitude=latitude, longitude=longitude)\n centerpoint = (latitude, longitude)\n\n tmp_hashcode = ''\n for x in hashcode:\n # Go through the hashcode character by character\n tmp_hashcode += x\n lat, lng, delta_lat, delta_lng = geohash.decode(tmp_hashcode,\n delta=True)\n overall_lat = 2 * 1000 * haversine(\n point1=(latitude - delta_lat, longitude),\n point2=(latitude + delta_lat, longitude)\n )\n overall_lng = 2 * 1000 * haversine(\n point1=(latitude, longitude-delta_lng),\n point2=(latitude, longitude+delta_lng)\n )\n\n dist = min(overall_lng, overall_lat)\n if dist < radius:\n tmp_hashcode = tmp_hashcode[:-1]\n break\n\n if tmp_hashcode == '':\n raise ValueError('Radius larger than earth')\n\n precision = len(tmp_hashcode)\n\n search_hashes = self._get_adjoining_hashes(hashcode=hashcode,\n precision=precision)\n search_hashes.append(tmp_hashcode)\n\n possible_points = []\n result_values = []\n\n for search_hash in search_hashes:\n possible_points.extend(self.storage.values(prefix=search_hash))\n\n for point_id in possible_points:\n point = self.points_by_id[point_id]\n dist = 1000 * haversine(centerpoint, point)\n if dist <= radius:\n result_values.append((point_id, dist))\n\n sorted_results = sorted(result_values, key = lambda x: x[1])\n final_results = [x[0] for x in sorted_results]\n return final_results", "def get_near_cities_from_user_coordinates(user_coordinates):\n data = pandas.read_csv('city_coordinates.tsv', sep='\\t')\n cities = data['city_ascii']\n latitudes, longitudes = data['lat'], data['lng']\n distance_list = []\n for city, lat, lng in zip(cities, latitudes, longitudes):\n try:\n distance = geodesic((lat, lng), user_coordinates).km\n distance_list.append(((lat, lng), city, distance))\n except Exception:\n continue\n distance_list_sorted = sorted(distance_list, key=lambda x: x[-1])\n return [elem[-2] for elem in distance_list_sorted[:100]]", "def get_vehicles(self, latitude, longitude):\n result = self.get_zones(latitude, longitude)\n if result and \"zone_id\" in result[0]:\n return self.__request(\n \"GET\",\n \"https://api.voiapp.io/v1/vehicles/zone/{}/ready\".format(\n result[0][\"zone_id\"]\n ),\n )", "def CreateLocationByProximityQuery(self):\n self.CenterPoint = self.randomGenerator.CreateRandomLattitudeLongitude()\n self.SearchDistanceMiles = self.randomGenerator.CreateRandomNumber(self.minDistanceMiles,self.maxDistanceMiles)\n self.SearchDistanceMeters = GeoConversion.ConvertMilesToMeters(self.SearchDistanceMiles)\n QueryParameterDic = {}\n QueryParameterDic['SearchType'] ='LocationByProximity'\n QueryParameterDic['MaxResults'] = self.maxSearchResults\n QueryParameterDic['MaxDistanceMeters'] = GeoConversion.ConvertMilesToMeters(self.SearchDistanceMiles)\n QueryParameterDic['CenterPt'] = self.CenterPoint\n QueryParameterDic['HasShopsOnly'] = 'True'\n QueryParameterDic['FilterByTrueDistance'] = 'True'\n\n # Get a list of GpGridCells to search\n gpSearch = GpSearch()\n gpSearch.MaxSearchCellCount = self.MaxSearchCellCount\n centerGpPoint = 
GpPoint(self.CenterPoint[0],self.CenterPoint[1])\n gpSearch.ComputeSearchListForMetersProximity(centerGpPoint,self.SearchDistanceMeters)\n self.FinalSearchResolution = gpSearch.FinalSearchResolution\n self.SearchGpGridCells = gpSearch.SearchCellList\n\n QueryParameterDic['SearchGridCells'] = self.SearchGpGridCells\n return QueryParameterDic", "def query(self, points):\n return self.locate_points(points)", "def get_nearby_location(request):\n latitude, longitude = latlang(request)\n point = Point(float(longitude), float(latitude), srid=4326)\n locations = Location.objects.filter(point__distance_lte=(point, D(km=100)))\n return JsonResponse(json.dumps([serializer(location) for location in locations]), safe=False)", "def variant_hotspots(most_prevalent=100, verbose=True):\n verbose = ast.literal_eval(str(verbose))\n most_prevalent = int(most_prevalent)\n ROW = 'Position'\n # Fetch & store all positions\n with database.make_connection() as connection:\n cursor = r.table(TABLE).pluck(ROW).run(connection)\n positions = [int(e[ROW]) for e in cursor]\n # Count occurences at positions\n counts = Counter(positions)\n mp = counts.most_common(most_prevalent)\n # Now extract out\n header = \"Counts,Position,LocusTag,Product\"\n results = []\n results.append(header)\n if verbose:\n print header\n with database.make_connection() as connection:\n for element in mp:\n first_hit = list(r.table(TABLE).filter(r.row[ROW] == int(element[0])).pluck('Position', 'LocusTag').run(connection))[0]\n product = '\"'+list(r.table('reference_features').filter({'LocusTag': first_hit['LocusTag']}).pluck('Product').run(connection))[0]['Product']+'\"'\n cur = '%i,%i,%s,%s' % (element[1], first_hit['Position'], first_hit['LocusTag'], product)\n results.append(cur)\n if verbose:\n print cur\n return results", "def set_vehicles_coordinates(self):\n coordinates = []\n\n for car_number in range(1, self.number_of_found_cars + 1):\n # Find pixels with each car_number label value\n nonzero = (self.binary_map == car_number).nonzero()\n\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n x_min_max = [np.min(nonzerox), np.max(nonzerox)]\n y_min_max = [np.min(nonzeroy), np.max(nonzeroy)]\n\n coordinates.append([int(np.mean(x_min_max)), int(np.mean(y_min_max)), int(x_min_max[1]-x_min_max[0]), int(y_min_max[1]-y_min_max[0])])\n\n self.coord_of_found_cars = coordinates", "def find_parking_spot(lat_u, long_u, timestamp):\n\n coord_u = project_long_lag_coord_into_cartesian([[lat_u, long_u]])\n xu, yu = coord_u[0][0], coord_u[0][1]\n user_point = Point(xu, yu)\n\n # Create my list of polygons from json\n json_data = cast_json_into_list()\n polygons_lat_long_coord = json_coordinates(json_data)\n\n # project lat-long to a plan\n polygons_cartesians_coord = [project_long_lag_coord_into_cartesian(_) for _ in polygons_lat_long_coord]\n\n # creates a list of polygons\n polygons_list = np.array([Polygon(_) for _ in polygons_cartesians_coord])\n\n # list of distances\n distances_list = np.array([distance_user_point_to_polygons(user_point, polygons_list)])\n distances_list_scaled = np.array([np.round(SCALING_FACTOR*elt, 0).astype(int) for elt in distances_list]).ravel()\n\n # Calls the probability\n probas = calculate_probs(timestamp)\n probas_display = calculate_probs(timestamp)\n print(probas)\n\n # Gets the names of the places\n places_name = json_names(json_data)\n\n # Combined metric\n #probas_s = (probas - np.array(probas).mean()) / np.array(probas).std()\n #distances_list_scaled_s = 
(distances_list_scaled - distances_list_scaled.mean()) / distances_list_scaled.std()\n #print(probas_s)\n #print(distances_list_scaled_s)\n metrics = np.array([round(10**5*probas[i]**4/distances_list_scaled[i],2) for i in range(len(places_name))])\n\n #print(places_name)\n #print(distances_list_scaled.ravel())\n #print(probas)\n\n # Creates the dataframe\n df = pd.DataFrame.from_dict(data={'place': places_name, 'distance': distances_list_scaled.ravel(), 'chance': probas_display, 'score': metrics})\n df = df.sort_values('score', ascending=False)\n\n json_table = df.to_json(orient=\"split\")\n print(json_table)\n return json_table", "def search_nearby(self, fields: dict) -> list[dict]:\r\n results: list = []\r\n\r\n if \"location\" not in fields.keys():\r\n geolocate: dict = self.get_current_locate()\r\n fields[\"location\"] = geolocate[\"location\"]\r\n\r\n if \"radius\" not in fields.keys():\r\n fields[\"radius\"] = 1000\r\n\r\n fields[\"type\"] = \"restaurant\"\r\n\r\n for i in range(1):\r\n places = self.gmaps.places_nearby(**fields)\r\n if places[\"status\"] != \"OK\":\r\n continue\r\n results.extend(places[\"results\"])\r\n try:\r\n # Update attribute to get next 20 places.\r\n fields = {\r\n \"page_token\": places[\"next_page_token\"]\r\n }\r\n # 連続実行するとエラー(Google側の仕様)\r\n time.sleep(2)\r\n except KeyError:\r\n # 最大で60件まで それ以上検索すると next_page_token がなくなる\r\n break\r\n\r\n return results", "def _bycoord(self, coord):\n query = \"\"\"SELECT * \n FROM ppmxl \n WHERE circle(coord,0.0006) @> circle(point(%f,%f),0) LIMIT 1;\"\"\" % coord\n result = self.corot.query(query)\n return result", "def get_viable_positions(self, index, radius, cutoff, num_pts=None):\n assert (radius > cutoff), \"radius larger than cutoff distance\"\n assert self.host_zeotype is not None, \"host MAZE-sim cannot be none\"\n\n guess_positions = self.sphere_sample(radius, num_pts)\n host_pos = self.host_zeotype.get_positions()[index]\n viable_positions = []\n\n for pos in guess_positions:\n dist = self.min_distance(pos + host_pos)\n if dist > cutoff:\n viable_positions.append(pos + host_pos)\n\n return viable_positions", "def list_near_location(coords):\n latitude, longitude = coords\n # Quick check to ensure coordinates are within range of Great Britain\n if not location.check_bounds(latitude, longitude):\n raise NotFound(\"Latitude and longitude coordinates are too far from \"\n \"Great Britain.\")\n\n stops = models.StopPoint.in_range(latitude, longitude,\n db.undefer(models.StopPoint.lines))\n groups = _group_lines_stops(stops)\n\n return render_template(\"location.html\", latitude=latitude,\n longitude=longitude, list_stops=stops, groups=groups)", "def get_nearest(infected_coordinates, uninfected_coordinates, d):\n # Create tree from the GPS coordinates of uninfected users\n tree = BallTree(uninfected_coordinates, leaf_size=15, metric='haversine')\n indices,distances=tree.query_radius(infected_coordinates, r=d,return_distance=True)\n indices=indices.transpose()\n distances=distances.transpose()\n return indices,distances", "def get_features_near_me(self,collection,point,radius,earth_radius=3963.2): #km = 6371\n x,y = point\n res = self.client['rephie'][collection].find( { 'geometry': { '$geoWithin': { '$centerSphere': [ [x, y ] , radius/earth_radius ] } }} )\n \n return self._make_result_list(res)", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def get_near(self,map):\n near_cells = []\n for i in range(self.x-1, self.x+2):\n for j in 
range(self.y-1, self.y+2):\n if(i>=0 and i<map.size and j>=0 and j<map.size): near_cells.append(map.search(i,j))\n return near_cells", "def nearby(cls, lat: float, lon: float, radius: float) -> List[Place]:\n formatted_point = cls._format_point_postgis(lat, lon)\n distance = cls._postgis_distance(formatted_point)\n query = (\n cls.query.with_entities(cls, distance)\n .filter(distance < radius)\n .order_by(distance)\n .limit(DEFAULT_LIMIT)\n .all()\n )\n return cls._set_distances(query)", "def voronoi(points, buffer_percent=100):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Create fake sitepoints around the point extent to correct for infinite polygons\n # For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity\n xs,ys = list(zip(*uniqpoints))[:2]\n pointswidth = max(xs) - min(xs)\n pointsheight = max(ys) - min(ys)\n xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )\n midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )\n #bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer\n bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer\n classpoints.extend([_Point(*corner) for corner in bufferbox])\n\n # Compute Voronoi\n vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)\n\n # Turn unordered result edges into ordered polygons\n polygons = list()\n for sitepoint,polyedges in list(poly_dict.items()):\n polyedges = [edge[1:] for edge in polyedges]\n poly = list()\n firststart,firstend = polyedges.pop(0)\n poly.append(firstend)\n while polyedges:\n curend = poly[-1]\n for i,other in enumerate(polyedges):\n otherstart,otherend = other\n if otherstart == curend:\n poly.append(otherend)\n ##print otherstart,otherend\n polyedges.pop(i)\n break\n elif otherend == curend:\n ##print otherend,otherstart\n poly.append(otherstart)\n polyedges.pop(i)\n break\n # Get vertices from indexes\n try: sitepoint = uniqpoints[sitepoint]\n except IndexError:\n sitepoint = None # fake bbox sitepoints shouldnt be in the results\n poly = [vertices[vi] for vi in poly if vi != -1]\n polygons.append((sitepoint, poly))\n\n # Maybe clip parts of polygons that stick outside screen?\n # ...\n\n return polygons", "def alive_neighbors(live_coords, coord):\n if not live_coords or not coord:\n return False\n x, y = coord\n neighbors = [[(x - 1), y], [(x - 1), (y - 1)], [(x - 1), (y + 1)],\n [(x + 1), y], [(x + 1), (y - 1)], [(x + 1), (y + 1)],\n [x, (y - 1)], [x, (y + 1)]]\n intersection = [value for value in neighbors if value in live_coords]\n return len(intersection)", "def pointPotential(x,y,q,posx,posy):\n k = 8.99e9\n V = (k * q) / (sqrt(x**2 + (y - sqrt((posx**2 + posy**2)))**2))\n return V", "def queryPoint(self, x, y):\n self.makeTree()\n dists, inds = self.nntree.query((x, y), self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n r = self.frametracks.ix[self.frametracks.index.values.take(inds.compress((dists > selfdistance) & ~np.isinf(dists)))]\n if len(r) == self.nnmaxcount:\n logging.warning('Too many neighbors around (%f, %f); incrase nnmaxcount' \\\n % (x, y))\n return r", 
"def get_voronoi_vertices(self, epsilon=2.5e-4, distance_threshold=0, width_buffer=10):\n voro = Voronoi(self._structure.get_extended_positions(width_buffer)+epsilon)\n xx = voro.vertices\n if distance_threshold > 0:\n cluster = AgglomerativeClustering(\n linkage='single',\n distance_threshold=distance_threshold,\n n_clusters=None\n )\n cluster.fit(xx)\n xx = get_average_of_unique_labels(cluster.labels_, xx)\n xx = xx[np.linalg.norm(xx-self._structure.get_wrapped_coordinates(xx, epsilon=0), axis=-1)<epsilon]\n return xx-epsilon", "def get_candidate_locations(cur_location, radius, row_num, col_num):\n cur_y, cur_x = cur_location\n delta = int(radius)\n max_x = cur_x + delta if cur_x + delta < col_num else col_num - 1\n min_x = cur_x - delta if cur_x - delta >= 0 else 0\n max_y = cur_y + delta if cur_y + delta < row_num else row_num - 1\n min_y = cur_y - delta if cur_y - delta >= 0 else 0\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(cur_x, cur_y, x, y) < radius:\n candidates.append((y, x))\n return candidates", "def get_coordinates(ipaddress):\n\n if Config.use_geoip:\n response = reader.city(ipaddress)\n lat = float(response.location.latitude)\n lon = float(response.location.longitude)\n return [lat, lon]\n else:\n return False", "def near(self, meters):\n lng = self.position['coordinates'][0]\n lat = self.position['coordinates'][1]\n\n return Location.objects(position__near=[lng, lat], position__max_distance=meters)", "def features_search(df, type_, keywords):\n PLACES_KEY = os.environ[\"PLACES_KEY\"]\n output_file = \"json\"\n radius = \"1500\"\n lst = []\n\n for i in range(len(df)):\n coor = df[\"latitude\"][i].astype(str) + \", \" + df[\"longitude\"][i].astype(str)\n url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/\"+ output_file +\"?location=\"+coor +\"&radius=\" +radius+ \"&type=\"+type_+\"&keyword=\"+keywords + \"&key=\"+ PLACES_KEY\n res = requests.get(url)\n data = res.json()\n lst.append(len(data))\n \n return lst", "def get_in_radius(radius):\n def _return_values(user):\n \"\"\"\n :user Dict containing the users information, by keeping this in a nested function if more or less information\n is required to be returned it can be modified in one location rather than multiple locations.\n Returns a predefined dict of values to minimise duplicated code.\n :return: dictionary of user values\n \"\"\"\n return {\"id\":user[\"id\"],\n \"first_name\":user[\"first_name\"],\n \"last_name\": user[\"last_name\"]\n }\n\n users_in_range = [] # dictionary to store user information as we only want first name and last name.\n london = (51.30, 0.5) # Create a position point for london using its latitude and longitude\n\n # ** Note: 'City' is not included in the data returned by the users end point ut it is if you call the\n # users individually i could do this using for loop but that would cause 1000 calls to the API each time\n # the end point is called so instead i've opted to do 2 calls and parsing the data in the API.\n # This should minimise the number of requests being sent to the API.\n\n # First i will get all the users and compute their current distance from london and checking if that is within\n # the radius specified by the end user (radius component of the url), Then i will get all users listed as being\n # in the city of london and checking if those customers are already in the list by creating a list of ids.\n\n # If they are in the list they are discarded if they are not then their first name, last name and id 
are added\n # to the array, since the requirements did not specify what information was to be returned only those three values\n # are returned (This minimises the data protection implications)\n\n url = \"/users\"\n response = requests.get(f\"{host}{url}\")\n\n for user in json.loads(response.text):\n # Creation location point for the current user and use haversine to compute the distance between the user and\n # london in miles\n user_location = (float(user[\"latitude\"]), float(user[\"longitude\"]))# (lat, lon)\n distance = haversine(london, user_location, unit='mi')\n\n # if the distance is 50 miles or less then add the users first and last name to the users_in_range dict using\n if distance <= float(radius):\n users_in_range.append(_return_values(user))\n\n # Get the used defined as 'living in london' this is not clear in the instructions so i have made the 'assumption'\n # That the city value corresponds to their current city of residence.\n url = \"/city/London/users\"\n response = requests.get(f\"{host}{url}\")\n\n # Parse through the list or returned users and filter entries which already exist and append ones that dont to the\n # list to be returned\n for user in json.loads(response.text):\n if not user[\"id\"] in [user[\"id\"] for user in users_in_range]:\n users_in_range.append(_return_values(user))\n\n # convert the list into a json payload and return using\n return json.dumps(users_in_range)", "def get(self, prox):\n response = hereService.getCityByLatLong(prox)\n return response", "def things_near(self, location, radius = None):\n if radius is None:\n radius = self.perceptible_distance\n radius2 = radius * radius\n return [ thing for thing in self.things if distance2(location, thing.location) == radius2 ]", "def query_coords(self, jsonify=False):\n log.debug('Scanning for image boundary coordinates ...')\n\n results = self.query(table=self.name,\n cols=self.coord_cols)\n\n coords = {}\n for cell in results:\n (lat, lng) = cell.cq.split(',')\n if coords.get(cell.row) is not None:\n coords[cell.row].append([float(lat), float(lng)])\n else:\n coords[cell.row] = []\n coords[cell.row].append([float(lat), float(lng)])\n\n if jsonify:\n coords = json.dumps(coords)\n\n log.info('Image boundary coordinates scan complete')\n\n return coords", "def browse(self, lat, lon):\n places = self.filter(active=True).order_by('-id')[:10]\n items = []\n for item in places:\n item.distance = item.compute_distance(lat, lon)\n item.orientation = self.orientation(int(item.compute_orientation(lat,lon)))\n items.append(item)\n return items", "def get_neighbours(self, coords, filter = None, size = 1):\n\t\tif filter is None:\n\t\t\tfilter = lambda pos: True\n\t\tgrid = self.map\n\t\tresult = [pos for pos in grid.env_keys(coords, size) if filter(pos)]\n\t\tresult.sort(key = lambda pos: get_distance_2(pos, coords))\n\t\treturn result", "def create_local_voxmap(sampler, point, xd=10, yd=10, zd=10, voxel_size=1):\n \n # minimum and maximum north coordinates\n north_min = point[0] - xd\n north_max = point[0] + xd\n \n # minimum and maximum east coordinates\n east_min = point[1] - yd\n east_max = point[1] + yd\n \n # miniumum and maximum altitude\n alt_min = point[2] - zd\n alt_max = point[2] + zd\n \n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min))) // voxel_size\n east_size = int(np.ceil((east_max - east_min))) // voxel_size\n alt_size = int(np.ceil((alt_max - alt_min))) // voxel_size\n\n # Create an empty grid\n 
voxmap = np.zeros((north_size, east_size, alt_size), dtype=np.bool)\n \n #maximum distance between point and outer voxels\n d_voxmap = np.sqrt((xd**2+yd**2) + (zd/2)**2)\n \n #maximum distance between obstacle center and outer borders\n d_obstacle = np.max(np.array([ \n LA.norm(np.array(p.coords[0]) - \n np.array(p.coords[2])) / 2 \n for p in polygons]))\n \n #maximum combined distances between voxmap center and obstacle centers\n d_max = d_voxmap + d_obstacle\n\n #all obstacles in vincinity\n idxs = list(sampler._tree.query_radius(point[:2], r=d_max))[0]\n \n #loop over closeby obstacles\n for i in idxs:\n \n #current obstacle\n p = polygons[i]\n \n #get the obstacle bounds (north_min, north_max, east_min, east_max)\n bounds = [\n np.min([vals[0] for vals in p.coords]),\n np.max([vals[0] for vals in p.coords]),\n np.min([vals[1] for vals in p.coords]),\n np.max([vals[1] for vals in p.coords]),\n 0.,\n p.height\n ]\n \n #discretize obstacle bounds according to voxel size\n obstacle = [\n int(bounds[0] - north_min) // voxel_size,\n int(bounds[1] - north_min) // voxel_size,\n int(bounds[2] - east_min) // voxel_size,\n int(bounds[3] - east_min) // voxel_size,\n int(bounds[4] - alt_min) // voxel_size,\n int(bounds[5] - alt_min) // voxel_size\n ]\n \n #correct for out-of-bound values\n if obstacle[0]<0:\n obstacle[0]=0\n if obstacle[1]>voxmap.shape[0]-1:\n obstacle[1]=voxmap.shape[0]-1\n if obstacle[2]<0:\n obstacle[2]=0\n if obstacle[3]>voxmap.shape[1]-1:\n obstacle[3]=voxmap.shape[1]-1\n if obstacle[4]<0:\n obstacle[4]=0\n if obstacle[5]>voxmap.shape[2]-1:\n obstacle[5]=voxmap.shape[2]-1\n \n #add collision information to the voxmap\n voxmap[obstacle[0]:obstacle[1]+1,\n obstacle[2]:obstacle[3]+1,\n obstacle[4]:obstacle[5]+1] = True\n \n #collect collision information for the ground floor\n floor = int(0-alt_min)//voxel_size\n\n #if voxmap collides with ground floor: add collision information\n if floor>=0:\n voxmap[:,:,:floor]=True\n \n #return the voxmap\n return voxmap", "def happy():\n # Query all venues\n results = session.query(VP.name, VP.latitude, VP.longitude).all()\n \n # Create a dictionary from the row data and append to a list of all_venue\n all_venues = []\n for name, lat, lon in results:\n venue_dict = {}\n venue_dict[\"name\"] = name\n venue_dict[\"latitude\"] = lat\n venue_dict[\"longitude\"] = lon\n all_venues.append(venue_dict)\n \n return jsonify(all_venues)", "def voxelize(points,leaf = 0.1):\n if (type(points) == pclpy.pcl.PointCloud.PointXYZRGB):\n cloud = points\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZRGB()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZRGB()\n else:\n cloud = pclpy.pcl.PointCloud.PointXYZ(points)\n voxel_filter = pclpy.pcl.filters.VoxelGrid.PointXYZ()\n filtered_pointcloud = pclpy.pcl.PointCloud.PointXYZ()\n \n voxel_filter.setLeafSize(leaf,leaf,leaf)\n voxel_filter.setInputCloud(cloud)\n \n voxel_filter.filter(filtered_pointcloud)\n if type(points) == pclpy.pcl.PointCloud.PointXYZRGB:\n return filtered_pointcloud\n else:\n return filtered_pointcloud.xyz", "def near():\n\targs = request.args\n\n\tif 'limit' in args: limit = int(args.get('limit'))\n\telse: limit = 1\n\n\tif 'lat' in args and 'lng' in args:\n\t\tlat = float(args.get('lat'))\n\t\tlng = float(args.get('lng'))\n\n\telse:\n\t\treturn jsonify(success=False, reason='wrong_arguments')\n\n\tdocs = findWithInCircle([lat,lng],6)\n\n\treturn json_util.dumps({\n\t\t'success': True, 'docs': docs\n\t})", "def reverse_geolocate(\n cls, lat: float, lon: float, weighted: bool = False\n 
) -> List[Place]:\n formatted_point = cls._format_point_postgis(lat, lon)\n distance = cls._postgis_distance(formatted_point)\n\n ordering = (distance + 1) / (Place.popularity + 1) if weighted else distance\n\n query = (\n cls.query.with_entities(cls, distance)\n .filter(cls._postgis_buffered_intersect(formatted_point))\n .order_by(ordering)\n .limit(DEFAULT_LIMIT)\n .all()\n )\n return cls._set_distances(query)", "def _valid_neighbors(location, some_num):\n xloc, yloc = location\n vector = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n ret_v = []\n for vect in vector:\n xpos = xloc + vect[0]\n ypos = yloc + vect[1]\n if xpos <= 0 or ypos <= 0:\n continue\n if xpos > some_num or ypos > some_num:\n continue\n ret_v.append((xpos, ypos))\n return ret_v", "def geocoded(self):\n return self.get_queryset().filter(latitude__isnull=False,\n longitude__isnull=False)", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def get_con_ver(vertices, distances, nbr_ver, nbr_pt_num = 100):\n dists_ranked = np.sort(distances)\n #if total vertices are less than nbr_pt_num, use all vertices\n if(vertices.shape[0]<nbr_pt_num):\n nearby_pts=vertices\n #Otherwise, use 100 nearest points for NN and 500 for TIN interpolation\n else:\n nearby_pts_idx = distances < dists_ranked[nbr_pt_num]\n nearby_pts=vertices[nearby_pts_idx]\n \n con_ver = np.concatenate((nbr_ver, nearby_pts))\n # remove duplicated points\n con_ver = misc.unique_rows(con_ver)\n return con_ver", "def find_near_location():\n return render_template(\"location.html\", latitude=None, longitude=None,\n list_stops=None)", "def vincenty(p1, p2):\n # Note: GeoPy expects (latitude, longitude) pairs.\n return geopy.distance.vincenty(\n (p1.y, p1.x),\n (p2.y, p2.x)\n ).miles", "def get(self):\n # TODO: catch ValueError and raise proper 400 exception with data about error instead of 500\n lon = float(self.get_query_argument(\"lon\"))\n lat = float(self.get_query_argument(\"lat\"))\n distance = int(self.get_query_argument(\"distance\", 5000))\n limit = int(self.get_query_argument(\"limit\", 20))\n offset = int(self.get_query_argument(\"offset\", 0))\n\n cursor = self.db_conn.places.find({\n \"loc\": {\n \"$near\": {\n \"$geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat]\n },\n \"$maxDistance\": distance\n }\n },\n \"box\": {\"$exists\": True}\n },\n {\"_id\": 0, \"box\": 0}).skip(offset).limit(limit)\n\n res = yield cursor.to_list(length=limit)\n\n raise gen.Return(res)", "def closest_node(self, where, cartesian=False, threshold=None, vincenty=False, haversine=False):\n\n if not vincenty or not haversine:\n if cartesian:\n x, y = self.grid.x, self.grid.y\n else:\n x, y = self.grid.lon, self.grid.lat\n dist = np.sqrt((x - where[0])**2 + (y - where[1])**2)\n elif vincenty:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([vincenty_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, 
where_pt_rep)])*1000\n elif haversine:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([haversine_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n index = np.argmin(dist)\n if threshold:\n if dist.min() < threshold:\n index = np.argmin(dist)\n else:\n index = None\n\n return index", "def get_cities_sorted_location(request):\n latitude, longitude = latlang(request)\n point = Point(float(longitude), float(latitude), srid=4326)\n locations = Location.objects.filter(point__distance_lte=(point, D(km=200))).annotate(distance=Distance(\"point\", point)).order_by(\"distance\")[:10]\n return JsonResponse(json.dumps([serializer_distance(location) for location in locations]), safe=False)", "def compute_marker_proximity(self, coords):\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n return np.empty((0,1)), np.empty(0, dtype=\"int\")\n\n d, p = self.kdtree.query( coords, distance_upper_bound=self.thickness )\n\n fpts = np.where( np.isinf(d) == False )[0]\n\n proximity = np.zeros((coords.shape[0],1))\n proximity[fpts] = self.ID\n\n return proximity, fpts", "def filter_nearby_points(self, point: Tuple[float], radius: float):\n\n nearby = self.tree.query_ball_point(point, radius)\n return nearby", "def proximity_voltage(x):\n y = VOLTAGE_TASK['value']\n rate = 1 / (y * 0.02)\n diff = abs(y - x)\n return 1 - rate * diff", "def getVenue(lat, lon, name, radius=300, addr=''):\n # Construct the client object\n client = foursquare.Foursquare(CLIENT_ID, CLIENT_SECRET, redirect_uri='http://fondu.com/oauth/authorize')\n\n # Return all venues within radius of lat,lon\n ll = str(lat) + \",\" + str(lon)\n radius = str(radius)\n venues = client.venues.search(params={'v': VERSION, 'll': ll, 'intent': 'browse', \n 'radius': radius, 'limit': 100 })[\"venues\"]\n # Returns a list of dictionaries, each is a \"compact venue\"\n print \"Returned\", len(venues) , \"venues within\", radius ,\"meters\"\n print venues[0]\n \n # pull out just venue name and its distance from lat, lon\n venue_deets = [(ven[\"name\"], ven[\"location\"][\"distance\"], ven[\"location\"][\"address\"]) for ven in venues]\n \n # sort by distance away\n venue_deets = sorted(venue_deets, key=lambda x: x[1])\n venue_names = [x[0] for x in venue_deets]\n venue_addr = [x[2] for x in venue_deets]\n print venue_names\n \n # grab the \"foursquare\" version of the name\n if name in venue_names:\n # name supplied exactly matches foursquare name\n fs_name = name\n else:\n # look for close matches to supplied name\n \n # defaults set: returns a max of 3 matches with minimum score of 0.6 in similarity\n fs_name = difflib.get_close_matches(name, venue_names, n=3, cutoff=0.5)\n print fs_name\n \n if len(fs_name)<1:\n # hopefully this doesn't happen!\n #raise ValueError(\"ERROR: venue not found\")\n # match on address instead\n add_name = difflib.get_close_matches(addr, venue_addr, n=3, cutoff=0.5)\n print add_name\n return -1\n elif len(fs_name)>1:\n # if more than one match returned take closest venue\n dists = [venue_deets[venue_names.index(n)][1] for n in fs_name]\n fs_name = fs_name[dists.index(min(dists))] # return closest\n else:\n fs_name = fs_name[0]\n \n \n # details of desired venue\n print \"Name given =\", name\n print \"Name in foursquare =\", fs_name\n print \"Distance from original lat, long =\", venue_deets[venue_names.index(fs_name)][1],\"meters\"\n desired_venue_id = [ven for ven in 
venues if ven[\"name\"]==fs_name][0][\"id\"]\n\n \n # Now get \"complete venue\" information, that has more details on venue properties\n venue_url = \"https://api.foursquare.com/v2/venues/\" + desired_venue_id\n venue_url += \"?client_id=\" + CLIENT_ID\n venue_url += \"&client_secret=\" + CLIENT_SECRET\n venue_url += \"&v=\" + VERSION\n venue_url += \"&m=foursquare\"\n\n complete_venue = json.load(urllib2.urlopen(venue_url))[\"response\"][\"venue\"]\n \n \n # fields that help grab pertinent information\n descriptors = ['phrases', 'categories', 'attributes', 'tags', 'tips']\n\n words = ''\n venue_type = []\n for desc in descriptors:\n if desc in complete_venue:\n field = complete_venue[desc] \n \n # scan over phrases field\n if desc=='phrases':\n for f in field:\n print \"printing from 'sample'\"\n if 'sample' in f:\n if 'text' in f['sample']:\n print f['sample']['text'], type(f['sample']['text'])\n words += f['sample']['text'] + ' '\n print \"printing from 'phrase'\"\n if 'phrase' in f:\n print f['phrase'], type(f['phrase'])\n words += f['phrase'] + ' '\n \n # scan over categories field\n if desc=='categories':\n for f in field:\n if 'name' in f:\n print f['name'], type(f['name'])\n words += f['name'] + ' '\n venue_type.append(f['name'])\n \n # scan over attributes field\n if desc=='attributes':\n if 'groups' in field:\n gr = field['groups']\n for f in gr:\n if 'name' in f:\n print f['name'], type(f['name'])\n words += f['name'] + ' '\n \n # scan over tags field\n if desc=='tags':\n for f in field:\n print f, type(f),\n words += f + ' '\n print ''\n \n \n # scan over tips field\n if desc=='tips':\n if 'groups' in field:\n gr = field['groups']\n for group in gr:\n if 'items' in group:\n for item in group['items']:\n if 'text' in item:\n print item['text'], type(item['text'])\n words += item['text'] + ' '\n print ''\n \n # scrape all words for things indicating beer, coffee, food, liquor, wine\n words = word_tokenize(words)\n words = [x.lower() for x in words]\n \n service_flag = [0,0,0,0,0]\n print sorted(SERVICES)\n for i, (service, rel_words) in enumerate(sorted(SERVICES.items())):\n print service\n cnt = 0\n for word in rel_words:\n print difflib.get_close_matches(word.lower(), words, n=5, cutoff=0.99)\n cnt += len(difflib.get_close_matches(word.lower(), words, n=5, cutoff=0.99))\n print cnt, \"\"\n if cnt>=1:\n service_flag[i] = 1\n print service_flag\n print \"\"\n \n print words\n hours_id = None\n if 'hours' in complete_venue:\n print complete_venue['hours'], '\\n'\n else:\n print \"No hours in venue information\\n\"\n print \"\"\n\n \n rating = None\n if 'rating' in complete_venue:\n print 'rating =', complete_venue['rating'], '\\n'\n rating = complete_venue['rating']\n print type(rating)\n else:\n print \"No rating in venue information\\n\"\n print \"\"\n \n nLikes = None\n if 'likes' in complete_venue:\n print 'likes =', complete_venue['likes']['count'], '\\n'\n nLikes = complete_venue['likes']['count']\n print type(nLikes)\n else:\n print \"No likes in venue information\\n\"\n \n print \"\"\n \n if (len(venue_type)<0):\n venue_type = None\n # phrases \n # List of phrases commonly seen in this venue's tips, as well as a sample tip snippet and the number of \n # tips this phrase appears in.\n \n # categories\n # An array, possibly empty, of categories that have been applied to this venue. One of the categories \n # will have a field primary indicating that it is the primary category for the venue. For the complete \n # set of categories, see venues/categories. 
\n \n # attributes\n # Attributes associated with the venue, such as price tier, whether the venue takes reservations, and \n # parking availability. \n \n # tags\n # An array of string tags applied to this venue.\n \n # rating\n # Numerical rating of the venue (0 through 10). Returned as part of an explore result, excluded in \n # search results. Not all venues will have a rating.\n \n # tips\n # Contains the total count of tips and groups with friends and others as groupTypes. Groups may change \n # over time. \n \n # reasons?\n \n # likes \n # The count of users who have liked this venue, and groups containing any friends and others \n # who have liked it. The groups included are subject to change. \n \n # hours\n # Contains the hours during the week that the venue is open along with any named hours segments in a \n # human-readable format. For machine readable hours see venues/hours", "def getGridPoints(x, y, robot):\r\n roundedGrid = (round(x), round(y))\r\n total_radius = (robot.RADIUS + robot.BALL_RADIUS) / robot.grid.scale\r\n scanAmount = math.ceil(total_radius)\r\n scan = range(-scanAmount, scanAmount + 1)\r\n corners = ((0, 0), (0, 1), (1, 1), (1, 0))\r\n points = []\r\n for i in scan:\r\n for j in scan:\r\n for corner in corners:\r\n newX = roundedGrid[0] + i + corner[0]\r\n newY = roundedGrid[1] + j + corner[1]\r\n if grid_distance(newX, newY, x, y) < total_radius:\r\n points.append((newX, newY))\r\n\r\n return points", "def query_restaurants_by_location(collection, radius, lat, lon):\n results = collection.find(\n {'location': {'$nearSphere': {'$geometry': {'type': \"Point\",\n 'coordinates': [float(lon), float(lat)]},\n '$maxDistance': radius}}}, {\"_id\": 0})\n\n return results", "def query_ten(self, table_name_activities, table_name_trackpoints):\n\n query = (\n \"SELECT Activity.id,lat,lon \"\n \"FROM %s INNER JOIN %s on Activity.id = TrackPoint.activity_id \"\n \"WHERE user_id='112' and \"\n \"EXTRACT(YEAR FROM date_time) = 2008 \"\n \"and transportation_mode='walk' \"\n \"ORDER BY date_time ASC\"\n )\n\n self.cursor.execute(\n query % (table_name_activities, table_name_trackpoints))\n rows = self.cursor.fetchall()\n\n activity_dict = dict()\n for row in rows:\n if row[0] in activity_dict:\n activity_dict[row[0]].append((row[1], row[2]))\n else:\n activity_dict[row[0]] = [(row[1], row[2])]\n\n distance = 0\n for value in activity_dict.values():\n for i in range(len(value) - 1):\n distance += haversine(value[i], value[i + 1], unit=\"km\")\n\n print(distance)", "def voxelize_points(points, pc_bbox_center, voxel_resolution, num_voxels_per_dim, pc_center_in_voxel_grid):\n\n # this is the voxel grid we are going to return\n voxel_grid = np.zeros((num_voxels_per_dim,\n num_voxels_per_dim,\n num_voxels_per_dim), dtype=np.bool)\n\n # take the points and convert them from meters to voxel space coords\n centered_scaled_points = np.floor(\n (points - np.array(pc_bbox_center) + np.array(\n pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)\n\n # remove any points that are beyond the area that falls in our voxel grid\n mask = centered_scaled_points.max(axis=1) < num_voxels_per_dim\n centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # remove any points that are outside of the region we are voxelizing\n # as they are to small.\n mask = centered_scaled_points.min(axis=1) > 0\n centered_scaled_points = 
centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid,\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # treat our remaining points as ints, since we are already in voxel coordinate space.\n # this points shoule be things like (5, 6, 7) which represent indices in the voxel grid.\n csp_int = centered_scaled_points.astype(int)\n\n # create a mask from our set of points.\n mask = (csp_int[:, 0], csp_int[:, 1], csp_int[:, 2])\n\n # apply the mask to our voxel grid setting voxel that had points in them to be occupied\n voxel_grid[mask] = 1\n\n return voxel_grid", "def test_make_veto_proximity(self):\n veto_intervals = self.st.get_array(self.run, 'veto_intervals')\n self.st.set_config(dict(event_time_range=[0,\n int(veto_intervals['endtime'][-1])]))\n self.st.make(self.run, 'event_basics')\n for c in self.st.get_iter(self.run, 'veto_proximity'):\n print(c)\n return self.st", "def find_5near_markets(lon, lat):\r\n engine = get_sql_engine()\r\n fmarkets5 = text(\r\n \"\"\"\r\n SELECT\r\n \"NAME\" as name, \"ADDRESS\" as address,\r\n \"TIME\" as time, geom,\r\n ST_X(geom) as lon, ST_Y(geom)as lat,\r\n ST_Distance(ST_SetSRID(ST_MakePoint(:lon, :lat), 4326)::geography, geom::geography) AS distance\r\n FROM farmers_markets\r\n ORDER BY 7 ASC\r\n LIMIT 5\r\n \"\"\"\r\n )\r\n near_markets = gpd.read_postgis(fmarkets5, con=engine, params={\"lon\": lon, \"lat\": lat})\r\n return near_markets", "def determine_addresses(self, x1, y1, d):\n rez = []\n addresses = self.__repository.get_all()\n for address in addresses:\n x2 = address.get_x()\n y2 = address.get_y()\n distance = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n if distance < d:\n rez.append([address, distance])\n return rez", "def vincenty(lon0, lat0, a1, s):\n\n lon0 = np.deg2rad(lon0)\n lat0 = np.deg2rad(lat0)\n a1 = np.deg2rad(a1)\n s = np.deg2rad(s)\n\n sina = np.cos(lat0) * np.sin(a1)\n\n num1 = np.sin(lat0) * np.cos(s) + np.cos(lat0) * np.sin(s) * np.cos(a1)\n den1 = np.sqrt(\n sina**2 + (np.sin(lat0) * np.sin(s) - np.cos(lat0) * np.cos(a1)) ** 2\n )\n lat = np.rad2deg(np.arctan2(num1, den1))\n\n num2 = np.sin(s) * np.sin(a1)\n den2 = np.cos(lat0) * np.cos(s) - np.sin(lat0) * np.sin(s) * np.cos(a1)\n L = np.arctan2(num2, den2)\n lon = np.rad2deg(lon0 + L)\n\n return lon, lat", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #Assign each of the neighbours\n # Top-left to the top-right\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # Left to right\n left = (row, col - 1)\n # The '(row, col)' coordinates passed to this\n # function are situated here\n right = (row, col + 1)\n \n # Bottom-left to bottom-right\n bottom_left = (row + 1, col - 1)\n bottom_center = (row + 1, col)\n bottom_right = (row + 1, col + 1)\n \n return [top_left, top_center, top_right,\n left, right,\n bottom_left, bottom_center, bottom_right]", "def voxelize(self, points):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch", "def 
find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def lane_waypt_to_query_dist(\n query_xy_city_coords: np.ndarray, nearby_lane_objs: List[LaneSegment]\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n per_lane_dists: List[float] = []\n dense_centerlines: List[np.ndarray] = []\n for nn_idx, lane_obj in enumerate(nearby_lane_objs):\n centerline = lane_obj.centerline\n # densely sample more points\n sample_num = 50\n centerline = interp_arc(sample_num, centerline[:, 0], centerline[:, 1])\n dense_centerlines += [centerline]\n # compute norms to waypoints\n waypoint_dist = np.linalg.norm(centerline - query_xy_city_coords, axis=1).min()\n per_lane_dists += [waypoint_dist]\n per_lane_dists = np.array(per_lane_dists)\n min_dist_nn_indices = np.argsort(per_lane_dists)\n return per_lane_dists, min_dist_nn_indices, dense_centerlines", "def rangeQuery(self, x):\n \n neighbors = []\n \n for y in range(len(self.df)):\n q = self.df[y, :2]\n if self.dist(x, q) <= self.epsilon:\n neighbors.append(y)\n \n return neighbors", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def query_points(self, point, precision=5):\n # Generate the geohash.\n (latitude, longitude) = point\n\n hashcode = geohash.encode(latitude, longitude, precision)\n log.debug('Point \"%s\" geohash is: \"%s\"' % (point, hashcode))\n\n results = self.regex_query(self.spatial_index_name,\n '.*_%s.*' % hashcode)\n\n files = {'center_point_match': []}\n for cell in results:\n files['center_point_match'].append(cell.cq)\n\n return files", "def get(self, request, *args, **kwargs):\n address = ProfileAddress.objects.filter(profile__user__is_verified=True)\n address = address.exclude(city__isnull=True)\n coordinates = [c.city.get_coords() for c in address]\n\n serializer = CityCoordinateSerializer(coordinates, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def itervaluesnear(self, key, radius):\n\n for k, v in self.iteritemsnear(key, radius):\n yield v", "def find_5near_hospitals(lon, lat):\r\n engine = get_sql_engine()\r\n hospital5 = text(\r\n \"\"\"\r\n SELECT\r\n \"HOSPITAL_NAME\" AS name, \"STREET_ADDRESS\" as address,\r\n \"PHONE_NUMBER\" as contact, geom,\r\n\t ST_X(geom) AS lon, ST_Y(geom) AS lat,\r\n\t ST_Distance(ST_SetSRID(ST_MakePoint(:lon, :lat), 4326)::geography, geom::geography) AS distance\r\n FROM philly_hospital\r\n ORDER BY 7 ASC\r\n LIMIT 5\r\n \"\"\"\r\n )\r\n near_hospital = gpd.read_postgis(hospital5, con=engine, params={\"lon\": lon, \"lat\": lat})\r\n return near_hospital", "def _extract_coords_loc_entities(loc_entities: Iterable[GeoLocation]):\n return [\n (loc[\"Latitude\"], loc[\"Longitude\"])\n for loc in loc_entities\n if \"Latitude\" in loc and \"Longitude\" in loc\n ]", "def retrieve_all_approximate_commutes(self):\n commute_cache_updater.update_commutes_cache_rent_algorithm(self.homes, self.tenants, accuracy=CommuteAccuracy.APPROXIMATE)\n for destination in self.tenants:\n lat_lng = \"\"\n\n # If the commute type is walking or bicycling then we need to generate a lat and lng for the destination\n # We do it here so we can save the lat and lng for every home\n if 
destination.commute_type.commute_type == CommuteType.BICYCLING or \\\n destination.commute_type.commute_type == CommuteType.WALKING:\n # Pulls lat/lon based on address\n lat_lng_result = geolocator.maps_requester(gmaps_api_key).\\\n get_lat_lon_from_address(destination.full_address)\n\n if lat_lng_result == -1:\n continue\n else:\n lat_lng = (lat_lng_result[0], lat_lng_result[1])\n\n self.populate_approx_commutes(self.homes, destination, lat_lng_dest=lat_lng)", "def query_region(self, coords, radius=3.5 * u.arcsec):\n try:\n idx = coords.separation(self.coords) < radius\n except:\n idx = coords.separation(self.coords) < radius * u.arcsec\n\n return self.table[idx]", "def query_region(self, coords, radius=3.5 * u.arcsec):\n try:\n idx = coords.separation(self.coords) < radius\n except:\n idx = coords.separation(self.coords) < radius * u.arcsec\n\n return self.table[idx]", "def fetch_voxel_neighbors(x, y, z, vtk_volume):\n s = (vtk_volume.GetScalarComponentAsFloat(x-1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x+1, y, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y-1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y+1, z, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z-1, 0),\n vtk_volume.GetScalarComponentAsFloat(x, y, z+1, 0))\n return s", "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "def get_city_points(city):\n for item in coordinate_list:\n if item[0] == city:\n return (item[1], item[2])", "def get_neighbour(self, loc):\n y_lim, x_lim = np.shape(self.map)\n y, x = loc\n neighbour_cords = [(y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)]\n neighbour_cells = []\n for cords in neighbour_cords:\n curr_y, curr_x = cords\n if curr_y < 0 or curr_y >= y_lim:\n pass\n elif curr_x < 0 or curr_x >= x_lim:\n pass\n else:\n neighbour_cells.append(self.map[cords])\n\n return neighbour_cells", "def find_5near_stations(lon, lat):\r\n engine = get_sql_engine()\r\n bikestation5 = text(\r\n \"\"\"\r\n SELECT name, \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes, geom,\r\n\t ST_X(geom) as lon, ST_Y(geom)as lat,\r\n\t ST_Distance(ST_SetSRID(ST_MakePoint(:lon, :lat), 4326)::geography, geom::geography) AS distance\r\n FROM indego_rt1130\r\n ORDER BY 7 ASC\r\n LIMIT 5\r\n \"\"\"\r\n )\r\n near_bike = gpd.read_postgis(bikestation5, con=engine, params={\"lon\": lon, \"lat\": lat})\r\n return near_bike", "def query_voting_results(vr_data, precinct, queries):\r\n\r\n query_result = 0\r\n # Get the voting results for the precinct\r\n vr_data = vr_data[ vr_data['precinct'] == precinct ]\r\n \r\n # for each of the queries return the remaining that match the conditions\r\n for col, row in queries:\r\n if len( vr_data[ vr_data[col] == row ] ) > 0:\r\n vr_data = vr_data[ vr_data[col] == row ]\r\n else:\r\n vr_data = []\r\n \r\n if len(vr_data) > 0:\r\n query_result = int(vr_data.iloc[0]['votes'])\r\n\r\n return query_result", "def get_distance(film_coordinates, latitude, longitude):\n film_distance = []\n for film in film_coordinates.keys():\n user_coordinates = (latitude, longitude)\n film_coord = (film[0], film[1])\n\n distance = great_circle(user_coordinates, film_coord).kilometers\n film_distance.append((distance, film[0], film[1], 
film_coordinates[film]))\n\n film_distance.sort(key=lambda x: x[0])\n return film_distance[:10]", "def getSearchSpaceCoords(self):", "def get_all_locations(self):", "def near(self, pose):\n # type: (Pose) -> Set[Pose]\n return [p for p in self.nodes if self.dist(p, pose) <= self.r_near]", "def get_cars_location():\n cars = Car.query.all()\n car_locations = []\n\n for car in cars:\n carid = car.carid\n locationstr = car.location\n\n #skip cars without location.\n if locationstr is None or locationstr == '':\n continue\n \n #get lat and long.\n location = locationstr.split(',')\n lat = float(location[0].strip())\n long = float(location[1].strip())\n\n car_locations.append([carid, lat, long])\n\n return jsonify(car_locations)", "def geo_proximity_locations(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleGeoProximityLocationResult']]:\n return pulumi.get(self, \"geo_proximity_locations\")", "def truck_get_customers(request):\n\n # Check if the request type if POST\n if request.method == \"POST\":\n # Deserialize the JSON because it will be in bytes\n body = json.loads(request.body)\n # Make success true\n body[\"success\"] = True\n\n # First update the truck with its coordinates\n result = Coordinates.objects.filter(user_id=body['truck_id'])\n if not result.exists() or result[0].user_id.role != str(Role.truck):\n # Make success false if something goes wrong\n body[\"success\"] = False\n # Return the body JSON\n return JsonResponse(body)\n # The result variable is immutable. So, put it to a new coordinates\n # object\n coordinates = result[0]\n coordinates.latitude = truck_latitude = body[\"latitude\"]\n coordinates.longitude = truck_longitude = body[\"longitude\"]\n # Save the coordinates object\n coordinates.save()\n\n # Get all customers within a radius\n result = Coordinates.objects.filter(user_id__role=str(Role.customer)).values()\n result = [entry for entry in result] # Convert queryset to list\n body[\"customers\"] = []\n for i in range(len(result)):\n if haversine(truck_latitude, truck_longitude, result[i]['latitude'], result[i]['longitude']):\n # Filter the customers\n body[\"customers\"].append(result[i])\n\n # Return the body JSON\n return JsonResponse(body)\n else:\n # Return method not allowed\n return HttpResponse(status=405)", "def nearlonlat_zl(lon,lat,lonp,latp): # needed for the next function get_FVCOM_bottom_temp \r\n # approximation for small distance \r\n cp=np.cos(latp*np.pi/180.) 
\r\n dx=(lon-lonp)*cp\r\n dy=lat-latp \r\n xi=np.argmin(abs(dx)) \r\n yi=np.argmin(abs(dy))\r\n min_dist=111*np.sqrt(dx[xi]**2+dy[yi]**2)\r\n return xi,yi,min_dist", "def query_six(self, table_name_activities, table_name_trackpoints):\n\n query = (\n \"SELECT t1.user_id, t1.lat, t1.lon, t2.user_id, t2.lat, t2.lon \"\n \"FROM (SELECT user_id, lat, lon, date_time FROM %s inner join %s on Activity.id=TrackPoint.activity_id) as t1, \"\n \"(SELECT user_id, lat, lon, date_time FROM Activity inner join TrackPoint on Activity.id=TrackPoint.activity_id) as t2 \"\n \"where t1.user_id != t2.user_id \"\n \"AND ABS(TIMESTAMPDIFF(SECOND,t1.date_time, t2.date_time)) <= 60\"\n )\n\n self.cursor.execute(\n query % (table_name_activities, table_name_trackpoints))\n rows = self.cursor.fetchall()\n print(tabulate(rows, headers=self.cursor.column_names))\n\n user_dict = dict()\n for row in rows:\n if haversine((row[1], row[2]), (row[4], row[5]), unit=\"km\") <= 0.1:\n if row[0] in user_dict:\n user_dict[row[0]].append(row[3])\n else:\n user_dict[row[0]] = [row[3]]\n users = 0\n for value in users_dict.values():\n users += len(value)\n\n users = users / 2\n\n print(users)\n return users", "def find_objects_within_radius(self, radius):\n\n if type(radius) != float:\n radius = float(radius)\n\n objects_nearby = []\n\n for item in self.object_store:\n\n if item.Position == None:\n continue\n\n if math.sqrt(math.pow((item.Position.X - self.agent.Position.X), 2) + math.pow((item.Position.Y - self.agent.Position.Y), 2) + math.pow((item.Position.Z - self.agent.Position.Z), 2)) <= radius:\n objects_nearby.append(item)\n\n return objects_nearby", "def _obtener_autos_cercanos(self, x, y):\n\n\t\t\"\"\"Convierte a metros\"\"\"\n\n\t\tx = vincenty((0,x), origen).meters\n\t\ty = vincenty((y,0), origen).meters\n\t\t\n\t\tconductores = mongo.db.conductores\n\t\tquery = \"if(this.posicion){if((Math.pow(this.posicion.lng-\"+str(x)+\",2)+Math.pow(this.posicion.lat-\"+str(y)+\",2)) <= \"+str(self.distanciaMaxima)+\") return this}\"\n\t\treturn conductores.find({\"estado\": \"libre\", \"$where\": query})", "def vector(self):\n return np.array([self.lat, self.lng])", "def maxmind_geocode():\n reader = maxminddb.open_database('GeoLite2-City.mmdb')\n asn = maxminddb.open_database('GeoLite2-ASN.mmdb')\n\n unique_ips = session.query(UniqueVictims).all()\n\n for ip in unique_ips:\n try:\n current_ip = reader.get(ip.ip)\n asn_ip = asn.get(ip.ip)\n ip.lat = current_ip['location']['latitude']\n ip.long = current_ip['location']['longitude']\n if 'city' in current_ip:\n ip.city = current_ip['city']['names']['en']\n if 'country' in current_ip:\n ip.country = current_ip['country']['names']['en']\n if asn_ip:\n ip.isp = asn_ip['autonomous_system_organization']\n except TypeError:\n continue\n session.commit()", "def get_addresses(cls, prov_name, city_name, tv=None, nv=None):\n\n params = {'Provincia': prov_name,\n 'Municipio': city_name}\n if tv:\n params['TipoVia'] = tv\n else:\n params['TipoVia'] = ''\n if nv:\n params['NombreVia'] = nv\n else:\n params['NombreVia'] = ''\n\n url = cls.URL_LOCATIONS_BASE.format(\"/OVCCallejero.asmx/ConsultaVia\")\n response = requests.get(url, params=params)\n xml = response.content\n\n sleep(config['sleep_time'])\n return DotMap(xmltodict.parse(xml, process_namespaces=False, xml_attribs=False))", "def query_region(self, point):\n result = []\n indexes = []\n for didx, dpoint in enumerate(self.data):\n if dpoint != point:\n if self.l2_distance(dpoint, point) <= self.eps:\n result.append(dpoint)\n 
indexes.append(didx)\n return result, indexes", "def get_states_near_place(self, latitude=PARIS_LATITUDE, longitude=PARIS_LONGITUDE, radius=DEFAULT_RADIUS):\n\n states = []\n for state in self.get_states():\n distance = state.get_distance_to(latitude, longitude)\n if distance and distance <= radius:\n states.append(state)\n return states", "def get_neighbors(point):\n pt = point.copy()\n output= [point.copy() for i in range(4)]\n output[0:2] = map(Point.setY, output[0:2], [pt.getY()+ i for i in range(-1,2,2)])\n output[2:4]= map(Point.setX, output[2:4], [pt.getX()+ i for i in range(-1,2,2)])\n return output", "def query_radius(self,\n location: np.ndarray,\n r: float) -> np.ndarray:\n lat = location[0, 0]\n lon = location[0, 1]\n dist = vec_haversine(self.lats, self.lons, lat, lon)\n return np.argwhere(dist <= r)", "def voxelize(self, points):\n voxels, coors, num_points, voxel_centers = [], [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.voxel_layer(res)\n res_voxel_centers = (\n res_coors[:, [2, 1, 0]] + 0.5) * res_voxels.new_tensor(\n self.voxel_layer.voxel_size) + res_voxels.new_tensor(\n self.voxel_layer.point_cloud_range[0:3])\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxel_centers.append(res_voxel_centers)\n\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n voxel_centers = torch.cat(voxel_centers, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n\n voxel_dict = dict(\n voxels=voxels,\n num_points=num_points,\n coors=coors_batch,\n voxel_centers=voxel_centers)\n return voxel_dict", "def _get_closer_drivers(self, passenger):\n logging.info(\"[_get_closer_drivers] Busca los choferes cercanos.\")\n nearest = []\n for driver in db.drivers.find({'available': True}):\n if self._calculate_distance(passenger, driver) < self.max_distance:\n user_data = self._get_data_user(driver['_id'])\n user_data['user'].pop('cars')\n user_data['user'].pop('_ref')\n nearest.append({'driver': user_data['user'], 'position': {'lat': driver['lat'], 'lon': driver['lon']}})\n logging.info(\"[_get_closer_drivers] Se encontraron \" + str(len(nearest)) + \" choferes cercanos.\")\n return nearest" ]
[ "0.5678817", "0.5506488", "0.54521376", "0.5356175", "0.5350751", "0.5323278", "0.5296326", "0.5275836", "0.5268494", "0.52337134", "0.5233509", "0.5190683", "0.51781356", "0.51566523", "0.513223", "0.51218677", "0.51183736", "0.51013875", "0.50991535", "0.5097977", "0.508264", "0.5078686", "0.5074377", "0.50434405", "0.5024731", "0.50220037", "0.50186425", "0.49920973", "0.49736077", "0.4952807", "0.49311405", "0.4925727", "0.49219882", "0.4918755", "0.49174255", "0.49143618", "0.4907577", "0.4905862", "0.48908526", "0.48781446", "0.48568928", "0.48468637", "0.4840945", "0.4836843", "0.48354015", "0.48125792", "0.48122558", "0.4810527", "0.48099956", "0.48088402", "0.47861028", "0.4769696", "0.47614735", "0.4759758", "0.4759151", "0.47568333", "0.47543937", "0.47541165", "0.47459757", "0.4742994", "0.47419295", "0.47385773", "0.473669", "0.47324514", "0.47289136", "0.4723357", "0.47166142", "0.47152147", "0.4712147", "0.4704617", "0.4703943", "0.46996045", "0.46986306", "0.46986306", "0.46929613", "0.46902794", "0.46878305", "0.4686736", "0.46864215", "0.46791625", "0.4677177", "0.46765122", "0.4675023", "0.4670489", "0.46686202", "0.46683216", "0.4667946", "0.4667039", "0.46597198", "0.4658534", "0.46561196", "0.46552733", "0.46542913", "0.46442991", "0.4641313", "0.46401456", "0.46356243", "0.4627334", "0.46272224", "0.46250492" ]
0.5194998
11
Remove the user from 'workers' or 'prospects', if applicable.

user: A TcsUser instance to remove from workers
def removeWorker(self, user):
    if user == self.owner:
        return None
    # Without these queries, there's no way to tell if anything actually gets removed.
    # Calling remove() on a user that is not in the set does not raise an error.
    if self.workers.filter(pk=user.id).exists():
        self.workers.remove(user)
        return self
    if self.prospects.filter(pk=user.id).exists():
        self.prospects.remove(user)
        return self
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n self.blacklist.remove(user)\n return self\n return None", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def remove(self, user_id):\n pass", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "async def removeuser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be removed from a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=False, send_messages=False)\n await ctx.message.delete()", "def _remove(self, room, regex, user):\n regexes_for_room = self.notifications[room]\n users_for_regex = regexes_for_room[regex]\n\n # users may have been added multiple times in the past, so make sure\n # we remove them all.\n while user in users_for_regex:\n users_for_regex.remove(user)\n\n if not users_for_regex:\n # remove regex from room when there are no users left to notify\n del regexes_for_room[regex]", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove_judge(contest, user):\n _remove_role(contest, user, pcm.Judge)", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def _purge_user(self, user):\n self.user_order.remove(user)\n del self.user_queue[user]\n del self.user_skip[user]", "def test_teams_remove_user_from_team_v1(self):\n pass", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def test_teams_remove_user_from_team_v2(self):\n pass", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return 
http.Request('DELETE', url), parsers.parse_empty", "def delete_user(self, user):\n self.delete(user)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def remove_user(self, user):\n\n data = user.to_json()\n key = \"%s:%s\" % (self.channel_id, user.username)\n\n logging.info(data)\n # remove our users timestamp\n affected = self.redis_server.zrem(ENVIRONMENT['REDIS_PREFIX'] + 'users_timestamp',key)\n logging.info(\"removed user timestamp(%d): %s\" % (affected, key))", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "async def kick(self, user: User):\n coro = self._state.remove_team_member(self.id, user.id)\n await coro", "def remove_users(self, *users):\r\n pass", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)", "def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists():\n self.workers.remove(user)\n return self\n return None", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "async def remove_user(app: web.Application, user_id: int) -> None:\n try:\n await delete_user(app, user_id)\n except Exception: # pylint: disable=broad-except\n logger.warning(\n \"User '%s' still has some projects, could not be deleted\", user_id\n )", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)", "def test_teams_remove_customer_from_workgroup_v1(self):\n pass", "def remove_ticket(self, user):\n # Get the first ticket that matches the query.\n ticket = RaffleTicket.objects.filter(raffle_prize=self, user=user)[0]\n ticket.delete()", "def remove_user_from_govern(self, request, pk=None, user_id=None):\n try:\n user = UserProfile.objects.get(id=user_id, organization__id=pk)\n except ObjectDoesNotExist:\n raise ResourceNotFound\n else:\n user.organization = None\n user.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete_user(self):\n\n User.user_list.remove(self)", "def remove_user(self, login):\n\t\tif login in self.users_by_name:\n\t\t\tuser = self.users_by_name[login]\n\t\t\tif not user.system:\n\t\t\t\tself.users.pop(user.id, None)\n\t\t\t\tdel(self.users_by_name[login])\n\t\t\t\tself.sync()", "def remove_users(caller, role, *users):\r\n # can always remove self (at this layer)\r\n if not(len(users) == 1 and caller == users[0]):\r\n _check_caller_authority(caller, role)\r\n role.remove_users(*users)", "async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()", "def delete_user(self, user, instance_m):\n from resela.model.User import authenticate\n if user:\n mikrotik_m = MikrotikManager()\n lab_m = 
LabManager(current_user.session)\n group_m = GroupManager(current_user.session)\n user_m = UserManager(current_user.session)\n\n # Remove router conf\n mikrotik_m.unbind_vpn_to_vlan(user.email)\n mikrotik_m.delete_vpn_user(user.email)\n\n instance_list = instance_m.list(\n detailed=True,\n search_opts={'all_tenants': True, 'user_id': user.id}\n )\n\n for instance in instance_list:\n instance_name = instance.name.split('|')\n lab_name = instance_name[0] + '|' + instance_name[1]\n lab = lab_m.find(name=lab_name)\n instance_m.delete_instance(\n user_m=self,\n session=current_user.session,\n lab=lab,\n instance_id=instance.id\n )\n\n teacher_group = group_m.find(name='teachers')\n\n try:\n user_m.check_in_group(user=user, group=teacher_group)\n snapshot_factory = lab_m.find(\n name='snapshotFactory|{}'.format(user.email))\n\n session = authenticate(\n credentials=current_user.token,\n project_domain_name='snapshotFactory',\n project_name=snapshot_factory.name\n )\n\n security_handler = SecurityGroupHandler(session=session)\n\n for sec_group in security_handler.list()['security_groups']:\n if sec_group['tenant_id'] == snapshot_factory.id and \\\n 'internet' in sec_group['name']:\n security_handler.delete(sec_group['id'])\n\n lab_m.delete(snapshot_factory)\n\n except ksa_exceptions.NotFound:\n # Removing students will cause en exception as they are not found.\n # Does not need to be handled.\n pass\n\n # Remove user from db\n try:\n user_model = UserModel.query.get(user.id)\n DATABASE.session.delete(user_model)\n DATABASE.session.commit()\n except Exception:\n # Ignore user not in database\n pass\n\n # Remove user from openstack\n removed = self.delete(user)\n\n if not removed:\n print('User was not deleted:', user.id)\n raise Exception(' user not deleted')", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def remove_user(self, team, params={}, **options):\n path = \"/teams/%s/removeUser\" % (team)\n return self.client.post(path, params, **options)", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = 
controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def del_co_worker(self, employee):\n self.co_worker_list.remove(employee)", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "async def trainer_rm(ctx, user: discord.User):\r\n \r\n trainer_data = load_file(file_path_trainer)\r\n trainer = user.id \r\n #await bot.say(trainer) tester to see if user ID -> string for trainer variable\r\n if trainer not in trainer_data[\"Trainers\"]:\r\n await bot.say(\"This trainer is not registered or has already been removed.\")\r\n \r\n else:\r\n remove_trainer(user)\r\n await bot.say(user.mention + \" has been removed.\")", "def mutate(self, info, user_id):\n del info\n assert self is None, \"Root `self` expected to be `None`!\"\n\n OnChatMessageSent.unsubscribe(group=f\"user_{user_id}\")\n\n return KickOutUser(success=True)", "def delete_user():", "def remove_slaves(self, *, user: str, identity_file: str):\n self.load_manifest(user=user, identity_file=identity_file)\n\n partial_func = functools.partial(\n remove_slaves_node,\n user=user,\n identity_file=identity_file,\n services=self.services,\n cluster=self)\n hosts = [self.master_ip] + self.slave_ips\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)", "def test_resource_user_resource_remove_user_from_user_groups_delete(self):\n pass", "def remove_users(users_to_remove: list, users_dict: dict,\n end_of_service: str) -> None:\n for reciever in users_to_remove:\n if reciever in users_dict:\n send_message(reciever,\n 'Subscription expired\\n',\n end_of_service,\n users_dict[reciever]['carrier'])\n del users_dict[reciever]", "def remove_users(self, *users):\r\n entries = CourseAccessRole.objects.filter(\r\n user__in=users, role=self._role_name, org=self.org, course_id=self.course_key\r\n )\r\n entries.delete()\r\n for user in users:\r\n if hasattr(user, '_roles'):\r\n del user._roles", "async def _clear_heist(self, ctx, user: discord.Member):\r\n author = ctx.message.author\r\n await self.thief.member_clear(user)\r\n await ctx.send(\"```{} administratively cleared {}```\".format(author.name, user.name))", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' 
% username)", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def removeFriend(self, user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.FRIENDUPDATE.format(userId=user.id)\n return self.query(url, self._session.delete)", "async def _kill_player(self, ctx: Context, *, user: discord.Member):\n\n guild = ctx.guild\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n if player_role not in user.roles:\n return await ctx.send(_(\"User doesn't have player role.\"))\n\n try:\n await user.remove_roles(player_role)\n except discord.Forbidden:\n return await ctx.send(\n _(\n \"I either don't have permissions to manage\"\n \" roles or the `{}` role is above my highest role!\"\n ).format(player_role.name)\n )\n\n dead_id = await self.config.guild(guild).dead_id()\n dead_role = discord.utils.get(guild.roles, id=dead_id)\n\n await user.add_roles(dead_role)\n\n await ctx.message.add_reaction(CHECK_MARK)", "def remove_user(self, username):\n del self.user_table[username]", "def cleanup_user(self, cleanup_request):\n user_name = cleanup_request.message.user_name\n self.logger.debug(\"Clean up after user %r\", user_name)\n\n self.logger.debug(\"Removing requests of user %r\", user_name)\n for request in self._requests[:]:\n if request.worker.name == user_name and not request.server_request:\n self._requests.remove(request)\n\n self.logger.debug(\"Releasing locked resources of user %r\", user_name)\n resources = ResourceData.objects.filter(owner=user_name)\n if resources.count() == 0:\n self.logger.debug(\"User %r didn't lock any resource\", user_name)\n\n else:\n resources.update(owner=\"\", owner_time=None)\n self.logger.debug(\"User %r was successfully cleaned\", user_name)\n\n return SuccessReply()", "def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed", "def remove_user(self, workspace, params={}, **options):\n path = \"/workspaces/%s/removeUser\" % (workspace)\n return self.client.post(path, params, **options)", "def test_remove_user(self):\n pass", "def remove_users_from_team(team, users):\n team_member_list = []\n for user in users:\n member_list = TeamMember.objects.filter(team_fk=team, user_fk=user)\n if not member_list:\n raise Exception('Some users do not belong this team')\n team_member_list.append(member_list[0])\n \n if any([m.is_leader for m in team_member_list]):\n team.delete()\n else:\n for m in team_member_list:\n m.delete()", "async def remove_guest_user_with_all_its_resources(\n app: web.Application, user_id: int\n) -> None:\n logger.debug(\"Will try to remove resources for user '%s' if GUEST\", user_id)\n if not await is_user_guest(app, user_id):\n logger.debug(\"User is not GUEST, skipping cleanup\")\n return\n\n await remove_all_projects_for_user(app=app, user_id=user_id)\n await remove_user(app=app, user_id=user_id)", "def userPart(self, __userID):\n\n\t\tconnectedUsers = self.connectedUsers\n\t\tif (__userID in connectedUsers):\n\t\t\tconnectedUsers.remove(__userID)", "def delete_account(user):\n\n # first delete all 
owned categories and all the items in those\n # categories, including items that other users added to the category.\n for category in user.categories:\n for item in category.items:\n db.session.delete(item)\n db.session.delete(category)\n db.session.commit()\n\n # then delete all remaining owned items\n for item in user.items:\n db.session.delete(item)\n db.session.commit()\n\n # finally, delete the user\n db.session.delete(user)\n db.session.commit()", "async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. 
\"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def remove_registrar(contest, user):\n _remove_role(contest, user, pcm.Registrar)", "def remove_user_from_project(tas_project, user_ref):\n keycloak_client = KeycloakClient()\n user = get_user(user_ref)\n keycloak_client.update_membership(tas_project.chargeCode, user.username, \"delete\")\n\n return True", "def del_user(self, username):\n pass", "def delete_user():\n #TODO user delete\n pass", "async def admin_remove(self, ctx: MyContext, wormhole: str, user: discord.User):\n if not self.check_wh_exists(wormhole):\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-exists\", name=wormhole\n )\n )\n return\n if not self.check_is_admin(wormhole, ctx.author.id):\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-admin\"))\n return\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n isAlready = len(self.bot.db_query(query, (wormhole, user.id))) > 0\n if isAlready:\n query = \"DELETE FROM wormhole_admin WHERE admin = ? AND name = ?\"\n self.bot.db_query(query, (user.id, wormhole))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.admin-removed\")\n )\n else:\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.not-admin\", user=user.name\n )\n )", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def stop(self, user):\n self.logger.info(\"Stopping {}\".format(user))\n return self.director.stop(user)", "def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)", "def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")", "def delete_user_collection(another_user_id, user_id):\n\n db_session.query(Collection_User).filter(and_(Collection_User.user_id ==\n user_id, Collection_User.another_user_id == another_user_id)).delete()\n db_session.commit()\n update_collection_num(user_id, another_user_id, False)", "async def unplonk(ctx, user: typing.Union[discord.Member, discord.User]):\n await bot.plonk.delete(user.id)\n await r(ctx, f'Unplonked **{user}**')", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def remove_member(self, id, user):\n request = self.request_builder('orgs.teams.remove_member',\n id=id, user=user)\n return self._delete(request)", "def _user_delete(sender, instance, using, **kwargs):\n Booking.objects.filter(requester=instance).update(\n requester=get_sentinel_user(instance.group)\n )", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def del_user(self, name):\n del 
self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]", "def remove_user(cloud_list, user_id, adminu, adminpw):\n url_success = ['Success', 'success']\n for cloud in cloud_list:\n try:\n resp = urllib2.urlopen('%s/services/users/%s?operation=delete&user=%s&password=%s' %\n (cloud, user_id, adminu, adminpw))\n contents = resp.read()\n except urllib2.HTTPError, error:\n contents = error.read()\n except urllib2.URLError:\n contents = 'failed'\n output(contents, cloud, user_id, url_success, '')", "def remove_user(self, username): # remove only users from json file\n return self._user(username=username, remove=True)", "def kill_user_processes(user):\n for pid in get_user_processes(user):\n kill(pid)", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def wipe_user(user_name):\n user_name = urllib.unquote(user_name) # Username is coming straight from the url bar.\n user = User.query.filter(User.user_name==user_name).first()\n delete_user(user)", "def delusers(self, args):\n\n if len(args) < 2:\n print(self.addusers.__doc__)\n return\n\n gname = args[0]\n users = args[1:]\n\n g = sr.group(gname)\n\n if not g.in_db:\n print(\"Group '%s' not found.\" % ( gname ))\n return\n\n not_members = g.user_rm( users )\n g.save()\n\n for uname in not_members:\n print(\"Unable to remove non-member '%s' from '%s'\" % ( gname, uname ))", "def delete_users(project):\n for user_id in project.user_id.all():\n project.user_id.remove(user_id.pk)\n project.save()", "def removeOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass" ]
[ "0.6894938", "0.68542784", "0.685336", "0.67998946", "0.6638343", "0.6393456", "0.63634795", "0.6293484", "0.6282741", "0.6280759", "0.6247314", "0.62332475", "0.6231962", "0.6227879", "0.61912465", "0.6178004", "0.6158775", "0.6148308", "0.6134449", "0.6126714", "0.6120114", "0.60983396", "0.6096013", "0.606499", "0.5955821", "0.5939528", "0.5925723", "0.5921348", "0.5916311", "0.59012145", "0.58971006", "0.58971006", "0.58971006", "0.58745545", "0.5860761", "0.58505446", "0.5838628", "0.5816247", "0.58034956", "0.5793691", "0.5789163", "0.5784681", "0.5779461", "0.5777171", "0.57746196", "0.5770486", "0.57687575", "0.5762873", "0.57434314", "0.5737735", "0.57268333", "0.57217485", "0.57199764", "0.5700944", "0.5692091", "0.56885487", "0.5688173", "0.56859475", "0.5681384", "0.56790423", "0.5678291", "0.5676155", "0.5672018", "0.566497", "0.5657416", "0.56469357", "0.5640149", "0.56293446", "0.561604", "0.5612923", "0.559429", "0.55890685", "0.5577992", "0.55743104", "0.55696183", "0.5563441", "0.5550521", "0.55462307", "0.55400854", "0.5536567", "0.55279297", "0.55246943", "0.5512064", "0.5499752", "0.5493575", "0.54835063", "0.5477116", "0.54745257", "0.5466946", "0.5466898", "0.5458567", "0.5428476", "0.5421473", "0.5419582", "0.5419546", "0.5412158", "0.5400838", "0.5396411", "0.53901035", "0.53863776" ]
0.7817377
0
Return the number of voters a user has contacted for the campaign.
def voterContactCount(self, user): return self.votercontact_set.filter(user=user).count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def present_voter_cnt(self):\n\n return len(self._present_voters())", "def people_count(self):\n return len(self.__users)", "def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])", "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def abstain_voter_cnt(self):\n\n return len(self._abstain_voters())", "def yay_voter_cnt(self):\n\n return len(self._yay_voters())", "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago),\n campaignstovoters__is_active=True,\n is_active=True)", "async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. 
**{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")", "def get_voters():", "def get_voters():", "def get_user_view_count(self, username):\n self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return self.cur.fetchone()[0]", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def get_number_of_char_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'CharitableSponsor'])\n return n_agents", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}", "def bounced_member_count(self):\n return self._bounced_member_count", "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def get_amount_users() -> User:\n return User.objects.all().count()", "async def membercount(ctx, *args):\n if ctx.message.channel.is_private:\n await bot.delete_message(ctx.message)\n return\n\n g = ctx.message.server\n\n gid = g.id\n membs = str(len(g.members))\n membs_on = str(len([m for m in g.members if not m.status == Status.offline]))\n users = str(len([m for m in g.members if not m.bot]))\n users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))\n bots = str(len([m for m in g.members if m.bot]))\n bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))\n created = str(g.created_at)\n \n em = Embed(title=\"Membercount\")\n em.description = \"```\\n\" \\\n \"Members: %s (%s)\\n\" \\\n \" Users: %s (%s)\\n\" \\\n \" Bots: %s (%s)\\n\" \\\n \"Created: %s\\n\" \\\n \"```\" % (membs, membs_on, users, users_on, bots, bots_on, created)\n\n await client.send_message(ctx.message.channel, embed=em)\n await client.delete_message(ctx.message)", "def candidate_count(self):\n return self.candidate_set.count()", "def candidate_count(self):\n return self.candidate_set.count()", "def getNumVassals(self, iPlayer):\n\t\tiCounter = 0\n\t\tfor iCiv in range(con.iNumPlayers):\n\t\t\tif iCiv != iPlayer:\n\t\t\t\tif gc.getPlayer(iCiv).isAlive():\n\t\t\t\t\tif gc.getTeam(gc.getPlayer(iCiv).getTeam()).isVassal(iPlayer):\n\t\t\t\t\t\tiCounter += 1\n\t\treturn iCounter", "def num_votes(self):\n return sum(self.votes_per_count)", "async def vouch(self, ctx, user: discord.Member=None):\n\n if user:\n if user.id == self.bot.user.id:\n user = ctx.message.author\n response = \"- thanks for vouching for me, your robot overlord.\"\n await self.bot.say(user.mention + 
response)\n\n elif user.id == ctx.message.author.id:\n response = \"- you can't vouch for yourself, you silly goose\"\n await self.bot.say(user.mention + response)\n\n else:\n # see if this author has previously vouched for this user.\n for item in self.vouchers:\n if item['VOUCHER'] == ctx.message.author.display_name:\n if item['USER'] == user.display_name:\n response = \" you already vouched for this user\"\n await self.bot.say(ctx.message.author.mention +\n response)\n return\n\n # check if USER has already been vouched, record the new name\n for item in self.vouchers:\n if item['USER'] == user.display_name:\n if not item['VOUCHER'] == \\\n ctx.message.author.display_name:\n # case: we have a USER who has already been vouched\n # vouched for again, by a different discord member\n item['VOUCHER'] = item['VOUCHER'] + \", \" + \\\n ctx.message.author.display_name\n fileIO(\"data/vouchers/vouchers.json\", \"save\",\n self.vouchers)\n await self.bot.say(ctx.message.author.mention +\n \", recorded.\")\n await self.bot.say(user.display_name +\n \" now has multple vouches.\")\n return\n\n # record the vouching\n self.vouchers.append({\"VOUCHER\": ctx.message.author.display_name,\n \"USER\": user.display_name, \"ID\": user.id,\n \"DATE\": str(\"{:%B %d, %Y}\".format(\n datetime.datetime.now()))})\n fileIO(\"data/vouchers/vouchers.json\", \"save\", self.vouchers)\n response = \" - your voucher for \" + user.mention + \\\n \" has been recorded.\"\n await self.bot.say(ctx.message.author.mention + response)\n\n else:\n response = \"Usage: !vouch <user>\"\n await self.bot.say(response)", "def get_members_count(self, *args, **kwargs):\n return self.bot.get_chat_members_count(self.id, *args, **kwargs)", "def vsvrcount(self) :\n\t\ttry :\n\t\t\treturn self._vsvrcount\n\t\texcept Exception as e:\n\t\t\traise e", "def get_vehicle_count(self):\n return len(self.vehicles)", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def get_interested_users(self, obj):\n return obj.interested_users.count()", "def member_count(ctx, verbosity):\n\n if verbosity is not None:\n logging.basicConfig(level=getattr(logging, verbosity))\n else:\n logging.getLogger(__name__).addHandler(logging.NullHandler())\n\n ma = MailmanAdmin(os.environ['GEOUSAGE_MAILMAN_ADMIN_URL'],\n os.environ['GEOUSAGE_MAILMAN_ADMIN_PASSWORD'])\n\n click.echo(ma.member_count)", "def count_revisions_by_user(self):\n return self.run_query(f\"count({self.r}/contributor[id = 5558])\")", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})", "def election_count(self):\n return self.candidate_set.values(\"election_id\").distinct().count()", "def member_count(self):\n return len(self.members)", "async def _count(\n self, ctx: Context, user: discord.Member, channel: discord.TextChannel = None\n ):\n\n if not channel:\n channel = ctx.channel\n\n count = 0\n async with ctx.typing():\n async for message in channel.history(limit=None):\n if message.author.id == user.id:\n count += 1\n\n await ctx.send(_(\n \"{} has sent **{}** messages in {} channel.\"\n ).format(user.name, count, channel.mention))", "def GetVendorCount(self):\n 
regionVectorData = self.VectorData[self.SelectedRegion]\n return regionVectorData['Vendors']", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "def get_count(username):\n return get_contributor(username)[\"count\"]", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def get_videos_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'videos')", "def count(self):\n return self.vcount", "def get_total_collisions(self):\n return self.count_collisions", "def nclients(self, r):\r\n return len(self.clients(r))", "def numberOfPlayers(self):\r\n return len(self.playerPreparers)", "def get_number_of_contributors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Contributor'])\n return n_agents", "def num_attendees(self):\r\n n = sum([c.qty for c in self.contribution_set.all()])\r\n return n", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def get_already_contacted_count(self, seller_id):\n return self.get_already_contacted(seller_id).count()", "def num_servos(self) -> int:\n return self._num_servos", "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "def test_n_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=2, registered=3, two_tasks=True)\r\n total_volunteers = cached_apps.n_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 5\" % total_volunteers\r\n assert total_volunteers == 5, err_msg", "def member_count(self):\n\n url = '{}/members'.format(self.url)\n headers = {\n 'User-Agent': 'GeoUsage (https://github.com/geopython/GeoUsage)'\n }\n\n LOGGER.debug('Fetching URL: {}'.format(url))\n response = requests.post(url,\n headers=headers,\n data={'adminpw': self.password})\n LOGGER.debug('Parsing HTML')\n\n element = re.search(r'(\\d+) members total', response.text).group(0)\n members = int(element.split('members total')[0].strip())\n\n return members", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def participant_count(self) -> int:\n return self.participants.count() + 1", "def getPlayerCount(self):\n return self.sandboxplayergroupplayer_set.filter(quit=False).count()", "def get_user_video_count(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n return done", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return 
int(_api_response['data']['items'][0][_object_type]['count'])", "def active_member_count(self):\n return self._active_member_count", "def number_of_subscribers(subreddit):\n URL = 'https://api.reddit.com/r/{}/about'.format(subreddit)\n header = {'User-Agent': 'Custom-User'}\n\n resp = requests.get(URL, headers=header).json()\n try:\n return resp['data']['subscribers']\n except Exception:\n return 0", "def get_not_contacted_count(self, seller_id):\n return self.get_not_contacted(seller_id).count()", "def count_party_votes(votes):\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def total_candidate_mentions(self):\n total_candidate_mentions = self.sentence_data().loc[:, self.candidates].sum(axis = 0, skipna = True)\n total_candidate_mentions = total_candidate_mentions.to_frame(name = 'count').rename_axis('candidate').reset_index()\n \n self._total_candidate_mentions = total_candidate_mentions\n \n return self._total_candidate_mentions", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def number_of_subscribers(subreddit):\n header = {\"User-agent\": \"darth\"}\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n response = (requests.get(url, headers=header))\n if response.status_code != 200:\n return 0\n return response.json().get('data').get('subscribers')", "def getNumPlayers(self):\n return len(self.__colordict__.keys())", "def get_visits_count(visit_container):\r\n return visit_container.visits.all().count()", "def get_buildings_for_user_count(user):\n return BuildingSnapshot.objects.filter(\n super_organization__in=user.orgs.all(),\n canonicalbuilding__active=True,\n ).count()", "def number_performers(self):\n return len(self.touches['device_id'].unique().tolist())", "async def update_member_count():\n guild = bot.get_guild(SERVER_ID)\n channel_prefix = \"Members\"\n vc = discord.utils.find(lambda c: channel_prefix in c.name, guild.voice_channels)\n mem_count = guild.member_count\n joined_today = len([m for m in guild.members if m.joined_at.date() == datetime.datetime.today().date()])\n left_channel = discord.utils.get(guild.text_channels, name=CHANNEL_LEAVE)\n left_messages = await left_channel.history(limit=200).flatten()\n left_today = len([m for m in left_messages if m.created_at.date() == datetime.datetime.today().date()])\n await vc.edit(name=f\"{mem_count} Members (+{joined_today}/-{left_today})\")\n print(\"Refreshed member count.\")", "def NumberOfRobots(self):\n\t\treturn len(self.__robotList)", "def count_votes(self):\n return self.annotate(sum=Sum('value'))", "def invitation_received_no(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n qs_count = Relationship.objects.invitation_received(profile_obj).count()\n return {'invites_num': qs_count}\n return {}", "def count_party_votes(votes: dict) -> dict:\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def GetPerVendorLocationCount(self):\n regionVectorData = 
self.VectorData[self.SelectedRegion]\n return regionVectorData['PerVendorLocations']", "def vscr_ratchet_group_session_get_participants_count(self, ctx):\n vscr_ratchet_group_session_get_participants_count = self._lib.vscr_ratchet_group_session_get_participants_count\n vscr_ratchet_group_session_get_participants_count.argtypes = [POINTER(vscr_ratchet_group_session_t)]\n vscr_ratchet_group_session_get_participants_count.restype = c_uint\n return vscr_ratchet_group_session_get_participants_count(ctx)", "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "def utilization(user, ressource):\n if ressource == 'accounts':\n return Account.objects.filter(vhost__in=list(get_vhosts(user))).count()\n return None", "def members_voted(self):\r\n return MembersVoted(self)", "def user_vs_vehicle_comparison(allotment, rates, vehicle_rate):\n\tnumber_of_users = 0\n\tfor user in allotment:\t\t\n\t\tif rates[user] >= vehicle_rate:\n\t\t\tnumber_of_users += 1\n\n\treturn number_of_users", "def test_n_registered_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=0, registered=3)\r\n registered_volunteers = cached_apps.n_registered_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 3\" % registered_volunteers\r\n assert registered_volunteers == 3, err_msg", "async def count(ctx):\n users = len(set(bot.get_all_members()))\n servers = len(bot.servers)\n\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name = \"Servers im Modding: \", value = servers)\n embed.add_field(name = \"Users im Serving: \",value = users)\n embed.add_field(name = \"Add me: \", value = \"Type m.botinfo\")\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)", "def max_guests(appetite: list[int], cake: list[int]) -> int:\n guest_count = 0\n\n appetite_index = len(appetite) - 1\n cake_index = len(cake) - 1\n\n while appetite_index >= 0 and cake_index >= 0:\n appetite_size = appetite[appetite_index]\n cake_size = cake[cake_index]\n\n if cake_size >= appetite_size:\n # cake is fed\n cake_index -= 1\n guest_count += 1\n\n # else, the person is skipped\n appetite_index -= 1\n\n return guest_count", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def countPlayers():\n\n db = connect()\n c = db.cursor()\n query = (\"SELECT count(players.id) AS count_player FROM players;\")\n c.execute(query)\n count_player = c.fetchone()[0]\n db.close()\n return count_player", "def getNumberOfViews(self) -> int:\n ...", "def 
get_printers_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetPrintersCount', self.handle)", "def get_num_tigers(self) -> int:\n return len(self.get_all_tiger_positions())", "def comitentes_count(self):\n return self.expedientepersona_set.filter(comitente=True).count()" ]
[ "0.6371157", "0.63698155", "0.63018495", "0.60611516", "0.6054053", "0.6027923", "0.6018428", "0.59854454", "0.5949555", "0.59482515", "0.5914123", "0.58427924", "0.57395315", "0.57395315", "0.57389605", "0.5730802", "0.57079923", "0.57007176", "0.56978345", "0.5668029", "0.5659213", "0.56583387", "0.5647654", "0.560794", "0.5596477", "0.5596477", "0.5586592", "0.55668133", "0.5557891", "0.55402887", "0.5530064", "0.5528317", "0.55247056", "0.55125856", "0.5476241", "0.5460573", "0.544858", "0.54445195", "0.54261833", "0.5424498", "0.5420583", "0.54135144", "0.54004836", "0.53994834", "0.5370732", "0.5362814", "0.5362503", "0.5361401", "0.535564", "0.53411293", "0.5324483", "0.5316809", "0.5312692", "0.5310226", "0.5298235", "0.52917546", "0.52818847", "0.5275594", "0.5273447", "0.5270051", "0.52698064", "0.5269068", "0.5260506", "0.52590674", "0.5253387", "0.52328527", "0.52274084", "0.5223158", "0.5221641", "0.5218765", "0.5204818", "0.51964504", "0.5172781", "0.5164841", "0.5155445", "0.5147143", "0.5144209", "0.5132892", "0.51322997", "0.5132171", "0.51290303", "0.51241225", "0.5123368", "0.51149094", "0.5111117", "0.5108302", "0.51080114", "0.51023954", "0.5086524", "0.5086262", "0.5084307", "0.5075318", "0.50705904", "0.5069347", "0.50691503", "0.50653", "0.5063478", "0.50619304", "0.50608605", "0.50605005" ]
0.7715844
0
Returns an indented representation of the nested dictionary.
def pretty_repr(self, num_spaces=4): def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def pretty_print(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key) + \":\")\n if isinstance(value, dict):\n pretty_print(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))", "def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)", "def prettyPrintDictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(\"{ }\")\r\n return\r\n\r\n # Recursive case\r\n stream.write(\"{\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n keys.sort()\r\n for key in keys : # Sorted order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(\"}\")", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def dict_pretty_print(D: dict, indent_lvl=0):\n print(\"Using 3 decimal places.\")\n base_indent = indent_lvl * \" \"\n indent = (indent_lvl+2)*\" \"\n print(f\"{base_indent}\" + \"{\")\n for key, value in D.items():\n print(f\"{indent}{key}: \", end=\"\")\n if type(value) is dict:\n print(\"\")\n dict_pretty_print(value, indent_lvl + 2)\n else:\n print(f\"{value:.3f}\")\n print(f\"{base_indent}\" + \"}\")", "def pretty_print(dictionary: dict):\n return 
json.dumps(dictionary, indent=4)", "def print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def prettyPrintODictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n global OTabRepr\r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(OTabEmpty[OTabRepr]) # \"o{ }\"\r\n return\r\n\r\n # Recursive case\r\n stream.write(OTabLeft[OTabRepr]) # \"o{\"\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n for key in keys : # Insertion order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\"(\"+repr(key)+\", \")\r\n else :\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\")\")\r\n \r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(OTabRight[OTabRepr]) # \"}\"\r", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def format_dictionary(dct, indent=4):\n return json.dumps(dct, indent=indent, sort_keys=True)", "def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))", "def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def _pretty_print(self, json_dict):\n if self.prettyprint:\n return \"\\n\" + json.dumps(json_dict, indent=self.indent)\n return json.dumps(json_dict)", "def tree_view(dictionary, level=0, sep=\"| \"):\n return \"\".join([\"{0}{1}\\n{2}\".format(sep * level, k,\n tree_view(v, level + 1, sep=sep) if isinstance(v, dict)\n else \"\") for k, v in dictionary.items()])", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def prettify(tree, indent=0):\n for key, value in six.iteritems(tree):\n if key == FILE_MARKER:\n if value:\n print((' ' * indent + str(value)))\n else:\n print((' ' * indent + str(key)))\n if isinstance(value, dict):\n prettify(value, indent+1)\n else:\n print((' ' * (indent+1) + str(value)))", "def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def pretty(d, indent=0):\n sp = \" \"\n t = \"\"\n \n if isinstance(d, dict):\n l = len(d)\n c = 0\n t += \"<type 'dict'>:{\\n\"\n for key, value in 
d.items():\n t += sp * (indent + 1) + \"'\" + str(key) + \"':\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"}\"\n elif isinstance(d, list):\n l = len(d)\n c = 0\n t += \"<type 'list'>:[\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"]\"\n elif isinstance(d, tuple):\n l = len(d)\n c = 0\n t += \"<type 'tuple'>:(\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \")\"\n else:\n t += str(type(d)) + \":'\" + str(d) + \"'\"\n \n return t", "def _pretty_print(value, indent=''):\n keys = list(value.keys())\n keys.sort()\n for k in keys:\n v = value[k]\n if type(v) == dict:\n print(\"%s%s:\"%(indent, k))\n _pretty_print(v, indent+' ')\n elif type(v) == str:\n if '\\n' in v:\n print(indent+'%s: |'%k)\n for l in v.split('\\n'):\n print(indent+' '+l)\n else:\n print(\"%s%s: %s\"%(indent, k, v))\n else:\n dump = yaml.dump(v)\n # #1617\n # newer versions of python-yaml append the '...' document end\n # syntax. as YAML functions fine w/o it, and as it is\n # confusing to users who are just getting a single scalar, we\n # strip it\n if dump.endswith('\\n...\\n'):\n dump = dump[:-4]\n \n sys.stdout.write(\"%s%s: %s\"%(indent, k, dump))", "def ppdict(d):\n print '{'\n keys=d.keys()\n keys.sort()\n for k in keys:\n spacing=\" \" * (16-(len(repr(k))+1))\n print \"%s:%s%s,\" % (repr(k),spacing,repr(d[k]))\n print '}'", "def dumps(self, indent=1):\n str_keys_dict = OrderedDict({str(k): v for k, v in self.items()})\n for k, v in str_keys_dict.items():\n if isinstance(v, dict):\n str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()})\n for k1, v1 in str_keys_dict[k].items():\n if isinstance(v1, dict):\n str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()})\n return json.dumps(str_keys_dict, indent=indent)", "def print_recursive(value, indent=0):\n tabs = lambda count: '' + str(' ' * (indent + count))\n if isinstance(value, dict):\n to_print = '{}{}'.format(tabs(1), '{')\n for key, item in value.iteritems():\n to_print += '\\n{}{}:\\n{}'.format(tabs(2), key, print_recursive(item, indent + 2))\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', '}')\n if isinstance(value, list):\n to_print = '{}['.format(tabs(1))\n for item in value:\n to_print += '\\n' + print_recursive(item, indent + 1)\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', ']')\n if isinstance(value, str) or isinstance(value, unicode):\n return tabs(1) + '\\'' + value + '\\''\n if len(str(value)) > 0:\n return tabs(1) + str(value) + ''\n return ''", "def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], 
str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output", "def _pretty_json_dump(d):\n return json.dumps(d, sort_keys=True, indent=3)", "def pretty_repr(x: Any, num_spaces: int = 4) -> str:\n\n if isinstance(x, FrozenDict):\n return x.pretty_repr()\n else:\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return pretty_dict(x)", "def dictree(in_dict, verbose=False, spaces=None, levels=True, attrs=False, **kwargs):\n try:\n assert hasattr(in_dict, 'keys')\n except AssertionError:\n try:\n assert hasattr(in_dict, 'attrs')\n except:\n raise TypeError('dictree: Input must be dictionary-like')\n\n if not spaces:\n spaces = ''\n print('+')\n\n if 'toplev' in kwargs:\n toplev = kwargs['toplev']\n else:\n toplev = True\n try:\n if toplev and attrs:\n dictree(in_dict.attrs, spaces = ':', verbose = verbose, levels = levels, attrs=attrs, toplev=True)\n toplev = False\n except:\n pass\n\n # TODO, if levels is True why check again?\n if levels:\n try:\n assert levels is True\n except AssertionError:\n levels -= 1\n if levels == 0:\n levels = None\n\n try:\n for key in sorted(in_dict.keys()):\n bar = '|____' + str(key)\n if verbose:\n typestr = str(type(in_dict[key])).split(\"'\")[1]\n #check entry for dict-like OR .attrs dict\n try:\n dimstr = in_dict[key].shape\n dimstr = ' ' + str(dimstr)\n except AttributeError:\n try:\n dimstr = len(in_dict[key])\n dimstr = ' [' + str(dimstr) + ']'\n except:\n dimstr = ''\n print(spaces + bar + ' ('+ typestr + dimstr + ')')\n else:\n print(spaces + bar)\n if hasattr(in_dict[key], 'attrs') and attrs:\n dictree(in_dict[key].attrs, spaces = spaces + ' :', verbose = verbose, levels = levels, attrs=attrs, toplev=False)\n if hasattr(in_dict[key], 'keys') and levels:\n dictree(in_dict[key], spaces = spaces + ' ', verbose = verbose, levels = levels, attrs=attrs, toplev=False)\n except:\n pass\n return None", "def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result", "def dump(self):\n\n d = OrderedDict()\n d[\"Predicates\"] = self.predicates\n d[\"Initial 
State\"] = self.init\n d[\"Goal State\"] = self.goal\n d[\"Actions\"] = self.actions\n #d[\"Types\"] = self.types\n d[\"Parent Types\"] = self.parent_types\n #d[\"Objects\"] = self.objects\n d[\"Obj -> Type Mapping\"] = self.obj_to_type\n #d[\"Type -> Obj Mapping\"] = self.type_to_obj\n\n for k, v in d.items():\n print(\"*** %s ***\" % k)\n if isinstance(v, dict):\n if len(v) == 0:\n print(\"\\t<no items>\")\n for k, val in v.items():\n print(\"\\t%s -> %s\" % (k, str(val)))\n elif hasattr(v, '__iter__'):\n if len(v) == 0:\n print(\"\\tNone\")\n elif k == \"Actions\":\n for action in self.actions:\n action.dump(lvl=1)\n else:\n print(\"\\t\" + \"\\n\\t\".join([str(item) for item in v]))\n else:\n print(\"\\t\" + str(v))\n print(\"\")", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def pretty_print(data, indent=4):\n if type(data) == dict:\n print(json.dumps(data, indent=indent, sort_keys=True))\n else:\n print(data)", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)", "def pprint(self, indent: str = \"\"):\n\n from os import linesep\n\n res = self.__str__() + linesep\n child_indent = f\"{indent} \"\n\n pos = -1\n for x in self.children:\n pos += 1\n if pos == len(self.children) - 1:\n res += f\"{child_indent}└── {x.pprint(child_indent)}\"\n else:\n res += f\"{child_indent}├── {x.pprint(child_indent)}\"\n return res", "def _prettify_attributes(self, config_entry, indentation_level):\n def get_string_representation(singular):\n return \"{0}: {1}{2}\".format(singular['@name'], str(singular['@value']), os.linesep)\n \n indent_level = indentation_level * 2\n string_representation = \"\"\n \n if 'attribute' in config_entry:\n if type(config_entry['attribute']) == list:\n for entry in config_entry['attribute']:\n string_representation = \"{0}{1}{2}\".format(string_representation, \" \"*indent_level, get_string_representation(entry))\n else:\n string_representation = \"{0}{1}\".format(\" \"*indent_level, get_string_representation(config_entry['attribute']))\n \n if len(string_representation) > 0 and string_representation[-1] == os.linesep:\n return string_representation[:-1]\n \n return string_representation", "def dumps(data):\n def _dump(d, indent=0):\n for key, value in six.iteritems(d):\n if isinstance(value, dict):\n yield '%s%s {\\n' % (' ' * indent, _escape(key))\n for subs in _dump(value, indent + 2):\n yield subs\n yield '%s}\\n' % (' ' * indent)\n elif isinstance(value, list):\n yield '%s%s = {\\n' % (' ' * indent, _escape(key))\n for subvalue in value:\n if type(subvalue) == dict:\n yield '%s{\\n' % (' ' * (indent + 2))\n for subs in _dump(subvalue, indent + 4):\n yield subs\n yield '%s}\\n' % (' ' * (indent + 2))\n else:\n yield '%s%s\\n' % (' ' * (indent + 2),\n _escape(subvalue))\n\n yield '%s}\\n' % (' ' * indent)\n elif type(value) == bool:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value).lower()))\n else:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value)))\n return ''.join(list(_dump(data)))", "def prettify(self):\n return self._config_dict", "def test_nested_dict(self):\n self.assertLines(\n {\n 'foo': 'foo',\n 'bar': {\n 'a': 'apple',\n 'b': 'banana',\n },\n 'zoo': 'hoo',\n },\n [\n 'foo: foo zoo: hoo',\n 'foo: foo zoo: hoo bar: a: apple b: banana',\n ])", "def _to_string(self) -> str:\n\n string_list = []\n for key, value in 
self.__dict__.items():\n if isinstance(value, dict):\n string_list.append(key)\n string_list.extend('\\n'.join([\"Key: {:24}\\tValue: {}\".format(_key, _value) for _key, _value in value.items()]))\n else:\n string_list.append(\"Key: {:24}\\tValue: {}\\n\".format(key, value))\n return ''.join(string_list)", "def json_pretty(self):\n\n return json.dumps(self.definition, indent=4, cls=JSONEncoder)", "def json_pretty(self):\n\n return json.dumps(self.definition, indent=4, cls=JSONEncoder)", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def pretty_print(name, input, val_width=40, key_width=0):\n\n # root\n pretty_str = name + ': {\\n'\n\n # determine key width\n for key in input.keys(): key_width = max(key_width, len(str(key)) + 4)\n\n # cycle keys\n for key in input.keys():\n\n val = input[key]\n\n # round values to 3 decimals..\n if type(val) == np.ndarray: val = np.round(val, 3).tolist()\n\n # difficult formatting\n val_str = str(val)\n if len(val_str) > val_width:\n val_str = pprint.pformat(val, width=val_width, compact=True)\n val_str = val_str.replace('\\n', '\\n{tab}')\n tab = ('{0:' + str(4 + key_width) + '}').format('')\n val_str = val_str.replace('{tab}', tab)\n\n # more difficult formatting\n format_str = '{0:' + str(4) + '}{1:' + str(key_width) + '} {2:' + str(val_width) + '}\\n'\n pretty_str += format_str.format('', key + ':', val_str)\n\n # close root object\n pretty_str += '}'\n\n return pretty_str", "def _pretty(item):\n ivs = \"IVs: \\n\"\n for stat, value in item.get(\"ivs\", {}).items():\n ivs += f\" {value} {stat}\\n\"\n\n evs = \"EVs: \\n\"\n for stat, value in item.get(\"evs\", {}).items():\n evs += f\" {value} {stat}\\n\"\n\n moves = \"\\n\"\n for move, acquired in item.get(\"moves\", {}).items():\n moves += f\" [{'x' if acquired else ' '}] {move}\\n\"\n\n return f\"\"\"\n-------------------------------------------------------------------------------\n{item['Pokemon']} - {item.get('Index')}\n{item.get('nickname', item['Pokemon'])}\n\nAbility: {item.get('ability')}\nNature: {item.get('nature')}\n{ivs}\n{evs}\n{moves}\n\"\"\"", "def pretty_str(self) -> str:\n return _yaml_dump(self.to_ordered_dict())", "def build_dict(self, d):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n colon = self.art_type([self.string_type(':')], baseline=0)\n def concat_no_breakpoint(k,v):\n k = self.build(k)\n v = self.build(v)\n elt = k + colon + v\n elt._breakpoints.remove(k._l)\n elt._breakpoints.remove(k._l + 1)\n return elt\n repr_elems = self.concatenate(\n (concat_no_breakpoint(k,v) for k,v in d.iteritems()),\n comma)\n return self.build_container(repr_elems,\n self.left_curly_brace, self.right_curly_brace)", "def print_dd_dict( self, ):\n print( self._dd_dict )", "def simple_formatter(entry, fp, indent=0):\n for key, value in six.iteritems(entry):\n if isinstance(value, dict):\n print('{}{}:'.format(' ' * indent, key))\n simple_formatter(value, fp, indent + 1)\n else:\n print('{}{}: {}'.format(' ' * indent, key, value), file=fp)", "def prettyprint(\n D,\n indent=0,\n width=0,\n maxdepth=None,\n step=4,\n only_keys=None,\n output=sys.stdout,\n _key_prefix='',\n _exclude=None):\n # be sure we do not try to recursively dump `D`\n if _exclude is None:\n _exclude = set()\n _exclude.add(id(D))\n for k, v in sorted(D.iteritems()):\n leading_spaces = indent * ' '\n full_name = \"%s%s\" % (_key_prefix, k)\n if only_keys is not None:\n try:\n # is `only_keys` a filter function?\n if not only_keys(str(full_name)):\n continue\n except 
TypeError:\n # no, then it must be a list of key names, check for\n # keys having the same number of dots as in the prefix\n level = _key_prefix.count('.')\n found = False\n for name in only_keys:\n # take only the initial segment, up to a \"level\" dots\n dots = min(name.count('.'), level) + 1\n prefix = str.join('.', name.split('.')[:dots])\n if str(full_name) == prefix:\n found = True\n break\n if not found:\n continue\n # ignore excluded items\n if id(v) in _exclude:\n continue\n # To make a 'key' valid in YAML it must not start with one of the following chars\n sk = str(k)\n sk = sk if sk[0] not in u'\\0 \\t\\r\\n\\x85\\u2028\\u2029-?:,[]{}#&*!|>\\'\\\"%@`' else \"'%s'\" % sk\n first = str.join('', [leading_spaces, sk, ': '])\n if isinstance(\n v, (dict, UserDict.DictMixin, UserDict.UserDict, OrderedDict)):\n if maxdepth is None or maxdepth > 0:\n if maxdepth is None:\n depth = None\n else:\n depth = maxdepth - 1\n sstream = StringIO.StringIO()\n prettyprint(v, indent + step, width, depth, step,\n only_keys, sstream, full_name + '.', _exclude)\n second = sstream.getvalue()\n sstream.close()\n elif maxdepth == 0:\n second = \"...\"\n elif isinstance(v, (list, tuple)):\n second = str.join(', ', [str(item) for item in v])\n else:\n second = str(v)\n # wrap overlong lines, and always wrap if the second part is multi-line\n if (width > 0 and len(first) + len(second)\n > width) or ('\\n' in second):\n first += '\\n'\n # indent a multi-line block by indent+step spaces\n if '\\n' in second:\n lines = second.splitlines()\n # keep indentation relative to first line\n dedent = 0\n line0 = lines[0].expandtabs(step)\n while line0[dedent].isspace():\n dedent += 1\n # rebuild `second`, indenting each line by (indent+step) spaces\n second = ''\n for line in lines:\n second = str.join('', [\n second,\n ' ' * (indent + step),\n line.rstrip().expandtabs(step)[dedent:],\n '\\n'\n ])\n # there can be multiple trailing '\\n's, which we remove here\n second = second.rstrip()\n # finally print line(s)\n output.write(first)\n output.write(second)\n output.write('\\n')", "def print_pairs(self, d, level=0):\n for k, v in d.iteritems():\n if type(v) is dict:\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self.print_pairs(v, level + 1)\n elif k == \"output\":\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self._write('%s\\n' % v)\n else:\n self._write('%s%s : %s\\n' % (\"\\t\" * level, k.upper(), v))", "def pprint(self,obj):\n return(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def __repr__(self):\n res = \"{\"\n for k in self.keys():\n res+=\" '\"+str(k)+\"':\"+str(self[k])+\",\"\n res=res[:-1]+\" }\"\n return res", "def print_data(d, indent=0):\n prefix = indent * ' '\n for k in sorted(d):\n v = d[k]\n k = prefix + str(k)\n if isinstance(v, dict):\n print(k)\n print_data(v, indent + 1)\n else:\n if k.endswith('cent'):\n v = ' '.join(\n str(tuple(int(j) if j.is_integer() else j for j in i))\n for i in v\n )\n elif isinstance(v, np.ndarray):\n v = str(v).replace('\\n', '')\n print(k, '=', v)", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return defaultdict.__repr__(self)", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def pretty_print(self):\n return self.tree.pretty_print()", 
"def __repr__(self):\r\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])", "def tree_str(self, depth_index=0, recursive_dict=None):\r\n if not hasattr(self,'iteritems'): return ''\r\n if recursive_dict is not None: self = TreeMap(recursive_dict)\r\n buff_str = ''\r\n \r\n for item in self.iteritems():\r\n # Starts working now.\r\n k = item[0]\r\n v = item[1]\r\n \r\n spacer = '\\n' + '| ' * depth_index\r\n \r\n if hasattr(v,'iteritems'):\r\n buff_str += spacer + '+--[ ' + k + ' ]'\r\n buff_str += self.tree_str(depth_index=depth_index + 1, recursive_dict=v)\r\n else:\r\n buff_str += spacer + '\\_.--[ ' + str(k) + ' = ' + str(v) + ' ]'\r\n \r\n return buff_str", "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string", "def indent(self, modifier=0):\n return (self.depth+modifier)*self.indentString", "def print_id_keyed_dict(d):\n newdoc_string=\"==========================================================\"\n for key,value in d.items():\n print(newdoc_string)\n if isinstance(key,ObjectId):\n print('ObjectId string of document=',str(key))\n else:\n print('WARNING: key is not object id as it shoudl be. It is->',\n key,' of type ',type(key))\n print(newdoc_string)\n if type(value)==dict:\n print(json_util.dumps(value,indent=2))\n else:\n print(value)", "def dump(self, indentation=0):\n\n dump = []\n\n dump.append('[%s]' % self.name)\n\n # Refer to the __set_format__ method for an explanation\n # of the following construct.\n for keys in self.__keys__:\n for key in keys:\n\n val = getattr(self, key)\n if isinstance(val, int) or isinstance(val, long):\n val_str = '0x%-8X' % (val)\n if key == 'TimeDateStamp' or key == 'dwTimeStamp':\n try:\n val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))\n except exceptions.ValueError, e:\n val_str += ' [INVALID TIME]'\n else:\n val_str = ''.join(filter(lambda c:c != '\\0', str(val)))\n\n dump.append('0x%-8X 0x%-3X %-30s %s' % (\n self.__field_offsets__[key] + self.__file_offset__,\n self.__field_offsets__[key], key+':', val_str))\n\n return dump", "def as_dict(self):\n return dict((key, value) for key, value, depth in self.entries.itervalues())", "def testPrettyPrintJSON(self):\n test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))", "def pprint(self,indent=0,node=None):\n if node == None:\n node = self.root\n if node == None:\n print_indent(indent)\n print \"[empty tree]\"\n return\n if node.type == 'v':\n print_indent(indent)\n print node.value\n elif node.type == 's':\n for (val,c) in node.children.iteritems():\n print_indent(indent)\n print \"-\",self.keys[node.feature],\"=\",val,\":\"\n self.pprint(indent+1,c)\n elif node.type == 'i':\n print_indent(indent)\n print self.keys[node.feature],\"<=\",node.value,\":\"\n self.pprint(indent+1,node.children[0])\n print_indent(indent)\n print self.keys[node.feature],\">\",node.value,\":\"\n self.pprint(indent+1,node.children[1])", "def printDict(self):\n print str(self)", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if 
isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr = tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def test_multi_nested_dict(self):\n self.assertLines(\n {\n 'foo': 'foo',\n 'bar': {\n 'a': 'apple',\n 'b': 'banana',\n },\n 'car': {\n 'a': 'apple',\n 'b': 'banana',\n },\n 'dog': [\n 1,2,'foo',\n ]\n },\n [\n 'foo: foo',\n 'foo: foo bar: a: apple b: banana',\n 'foo: foo car: a: apple b: banana',\n 'foo: foo dog: 1',\n 'foo: foo dog: 2',\n 'foo: foo dog: foo',\n ]\n )", "def json_dump_dict(dictionary):\n\n print(json.dumps(dictionary, indent=4, ensure_ascii=False).encode(\"utf8\").decode())\n\n return", "def print_dict(self):\n print(self.__dict__)", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pretty_print_drt(self):\n self.drt_manager.pretty_print_drt()", "def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. 
\",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")", "def serialize_dict(d):\n txt = '{'\n for k in d:\n txt += f'\"{k}\":'\n if isinstance(d[k], dict):\n txt += serialize_dict(d[k])\n if isinstance(d[k], str):\n txt += serialize_string(d[k])\n if isinstance(d[k], int):\n txt += serialize_number(d[k])\n txt += ','\n txt += '}'\n return txt", "def indent(node, file = None, deep = 0):\n\tif file == None:\n\t\toutput = StringIO()\n\telse:\n\t\toutput = file\n\t\n\tindentType = \" \"\n\tif isList(node):\n\t\toutput.write(\"%s[\\n\"%(indentType*deep))\n\t\tfor item in node:\n\t\t\tindent(item, output, deep + 1)\n\t\toutput.write(\"%s]\\n\"%(indentType*deep))\n\telif isTuple(node):\n\t\toutput.write(\"%s(\\n\"%(indentType*deep))\n\t\tfor item in node:\n\t\t\tindent(item, output, deep + 1)\n\t\toutput.write(\"%s)\\n\"%(indentType*deep))\n\telif isDict(node):\n\t\toutput.write(\"%s{\\n\"%(indentType*deep))\n\t\tfor key, value in node.items():\n\t\t\tif isTuple(value) or isList(value) or isDict(value):\n\t\t\t\toutput.write('%s\"%s\":\\n'%(indentType*(deep+1), key))\n\t\t\t\tindent(value, output, deep+1)\n\t\t\telse:\n\t\t\t\toutput.write('%s%-20s : %s\\n'%(indentType*(deep+1), '\"%s\"'%key, repr(value)))\n\t\toutput.write(\"%s}\\n\"%(indentType*deep))\n\telse:\n\t\toutput.write(\"%s%s\\n\"%(indentType*deep, repr(node)))\n\t\n\tif file == None:\n\t\treturn output.getvalue()\n\telse:\n\t\treturn None", "def __repr__(self):\n return repr(dict([(k, v) for k, v in self.iteritems()]))", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def indentation(self) -> str:\n return self._indent", "def _indent_spaces(self):\n if prettyprint:\n return self.indentspace * self._indent_level\n else:\n return ''", "def pretty(self):\n return yaml.dump(self.get_data(), encoding='utf-8',\n default_flow_style=False).rstrip()", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def __str__(self):\n string = super().__str__()\n string += \"\\n\" + str(self.get_dict())\n return string", "def print_mat_nested(self, d, indent=0, nkeys=0):\n\n # Subset dictionary to limit keys to print. 
Only works on first level\n self.d = d\n self.indent = indent\n self.nkeys = nkeys\n if self.nkeys > 0:\n self.d = {k: self.d[k] for k in self.d.keys()[:self.nkeys]} # Dictionary comprehension: limit to first nkeys keys.\n\n if isinstance(self.d, dict):\n for key, value in self.d.iteritems(): # iteritems loops through key, value pairs\n print '\\t' * self.indent + 'Key: ' + str(key)\n self.print_mat_nested(value, indent + 1)\n\n if isinstance(self.d, numpy.ndarray) and self.d.dtype.names is not None: # Note: and short-circuits by default\n for n in self.d.dtype.names: # This means it's a struct, it's bit of a kludge test.\n print '\\t' * self.indent + 'Field: ' + str(n)\n self.print_mat_nested(self.d[n], self.indent + 1)", "def pretty_print(data):\n print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))", "def __str__(self):\n if len(self.__keys) == 0:\n return '{}'\n output = '{'\n fmt = '{}: {}, '\n for key, val in zip(self.__keys, self.__vals):\n output += fmt.format(repr(key), repr(val))\n return output[:-2] + '}'", "def printTree(self, tree, str):\n\n\t\tif type(tree) == dict:\n\t\t\tfor item in list(tree.values())[0].keys():\n\t\t\t\t\tprint(\"%s %s = %s \" % (str, list(tree.keys())[0], item))\n\t\t\t\t\tself.printTree(list(tree.values())[0][item], str + \"\\t\")\n\t\telse:\n\t\t\tprint(\"%s -> %s = %s\" % (str, self.targetName, tree))", "def pretty_json_repr(data):\n return json.dumps(data, sort_keys=True, indent=2)", "def pprint(self):\n return pformat(repr(self))", "def json_format_dict(self, data, pretty=False):\n if pretty:\n return json.dumps(data, sort_keys=True, indent=2)\n else:\n return json.dumps(data)", "def dump(self, indent=None):\n indent = indent if indent else '.'\n\n print('-------------------------------------------------------------------------------------------------------')\n print('id =', id(self), '\\nnodes =', self)\n if self.nodes:\n def walk(_cfg, count):\n count += 1\n for key, value in _cfg.items():\n if isinstance(value, dict):\n item = '' if value else '{}'\n print(indent * count, key, item)\n walk(value, count)\n else:\n if isinstance(value, str):\n value = f'\"{value}\"'\n print(indent * count, key, f'value={value}')\n walk(self.nodes, 0)\n else:\n print(' (No Data)')\n\n print('-------------------------------------------------------------------------------------------------------')", "def nested_dict():\n return defaultdict(nested_dict)", "def ToJson(self,\n json_indent: int = 4) -> str:\n return json.dumps(self.ToDict(), indent=json_indent)", "def _format_instance(d, style=None):\n pt = PrettyTable(['Property', 'Value'], caching=False)\n pt.align = 'l'\n for k, v in sorted(d.items()):\n # convert dict to str to check length\n if isinstance(v, (dict, list)):\n v = json.dumps(v)\n # if value has a newline, add in multiple rows\n # e.g. 
fault with stacktrace\n if v and isinstance(v, six.string_types) and (r'\\n' in v or '\\r' in v):\n # '\\r' would break the table, so remove it.\n if '\\r' in v:\n v = v.replace('\\r', '')\n lines = v.strip().split(r'\\n')\n col1 = k\n for line in lines:\n pt.add_row([col1, line])\n col1 = ''\n else:\n if v is None:\n v = '-'\n pt.add_row([k, v])\n\n if style == 'html':\n output = '<b>Instance details</b>'\n output += pt.get_html_string(attributes={\n 'border': 1,\n 'style': 'border-width: 1px; border-collapse: collapse;'\n })\n else:\n output = 'Instance details:\\n'\n output += pt.get_string()\n return output", "def pformat_in_needed(obj, indent=4):\n if obj:\n formatted_string = pprint.pformat(obj, indent)\n indented_string = ''\n for line in formatted_string.split('\\n'):\n indented_string = indented_string + '\\n' + (' ' * indent * 2) + line\n return \"\\n{}\\n\".format(indented_string)", "def __repr__(self):\n return json.dumps(self, sort_keys=True, indent=2)" ]
[ "0.73564094", "0.7016583", "0.7004065", "0.69742304", "0.69219863", "0.6862406", "0.68234503", "0.6813462", "0.6663069", "0.6650337", "0.66487944", "0.6608814", "0.65994126", "0.65836185", "0.6566666", "0.6555802", "0.6501829", "0.6487838", "0.6477041", "0.6438375", "0.6435267", "0.6265773", "0.62417954", "0.62247425", "0.619704", "0.61966306", "0.6168665", "0.61645025", "0.6160097", "0.6127049", "0.60971767", "0.6096953", "0.6040852", "0.6028234", "0.60143024", "0.5984193", "0.5972268", "0.59430677", "0.5935998", "0.59244657", "0.5894568", "0.5893715", "0.5880631", "0.5880631", "0.5859452", "0.58570266", "0.58426774", "0.5800276", "0.5798642", "0.5791083", "0.5776648", "0.57436985", "0.5739041", "0.5729159", "0.5725975", "0.57250655", "0.57232034", "0.57197547", "0.5682992", "0.5679998", "0.56732774", "0.5672859", "0.56638765", "0.5633892", "0.563363", "0.5633157", "0.56134164", "0.5603293", "0.56010586", "0.5586759", "0.5586714", "0.55858535", "0.55775493", "0.5575063", "0.55688536", "0.55688536", "0.55606985", "0.55590886", "0.55358636", "0.5532776", "0.5528301", "0.5517281", "0.5516912", "0.55129457", "0.5502386", "0.550046", "0.5499281", "0.5493637", "0.54891455", "0.5488313", "0.54849833", "0.54742664", "0.54691535", "0.5464498", "0.54329044", "0.5421515", "0.54192424", "0.5413345", "0.5408712", "0.537891" ]
0.70187014
1
Create a new FrozenDict with additional or replaced entries.
def copy(
    self, add_or_replace: Mapping[K, V] = MappingProxyType({})
) -> 'FrozenDict[K, V]':
    return type(self)({**self, **unfreeze(add_or_replace)})  # type: ignore[arg-type]
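A minimal usage sketch of the `copy` method above. It assumes this is the `FrozenDict` implementation shipped with flax (`flax.core.frozen_dict`); that import path is an assumption for illustration and is not part of the record itself.

# Illustrative only: assumes flax.core.frozen_dict provides the FrozenDict
# shown in the document field above.
from flax.core.frozen_dict import FrozenDict

base = FrozenDict({'a': 1, 'b': 2})

# copy() returns a new FrozenDict with entries added or replaced;
# the original mapping is left untouched.
updated = base.copy({'b': 20, 'c': 3})

print(base)     # FrozenDict({'a': 1, 'b': 2})
print(updated)  # FrozenDict({'a': 1, 'b': 20, 'c': 3})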
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def impl_new_dict(key, value, n_keys=0):\n if any([\n not isinstance(key, Type),\n not isinstance(value, Type),\n ]):\n raise TypeError(\"expecting *key* and *value* to be a numba Type\")\n\n keyty, valty = key, value\n\n def imp(key, value, n_keys=0):\n if n_keys < 0:\n raise RuntimeError(\"expecting *n_keys* to be >= 0\")\n dp = _dict_new_sized(n_keys, keyty, valty)\n _dict_set_method_table(dp, keyty, valty)\n d = _make_dict(keyty, valty, dp)\n return d\n\n return imp", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def copy(self):\n return AttrDict(dict(self).copy())", "def make_globals(\n self, d: t.Optional[t.MutableMapping[str, t.Any]]\n ) -> t.MutableMapping[str, t.Any]:\n if d is None:\n d = {}\n\n return ChainMap(d, self.globals)", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def 
__init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def copy(self):\n return pdict(dict.copy(self))", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def fillDict(valDict, nowDate=datetime.now()):\n copyDict = copy.deepcopy(valDict)\n copyDict[names.year] = nowDate.year\n copyDict[names.month] = nowDate.month\n copyDict[names.day] = nowDate.day\n return copyDict", "def memodict(f):\r\n class memodict(defaultdict):\r\n def __missing__(self, key):\r\n ret = self[key] = f(key)\r\n return ret\r\n return memodict().__getitem__", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def extend(d, k, v):\n\tn = d.copy()\n\tn[k] = v\n\treturn n", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def __post_init__(self) -> None:\n setattr(self, _FROZEN, True)", "def frozenset(self) -> frozenset:\n return frozenset(self)", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def dict2frozenset(d):\n return frozenset(d.items())", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def makeFMData(from_dict, locked=False):\n\n class FMData(object):\n \"\"\"Datastructure where:\n\n - attr and dict access is equal (eg. 
FMData.value == FMData['value'])\n - only attributtes given during initialization are readable and writable\n - modified attributes are tracked\"\"\"\n __modified__ = set()\n __slots__, __init_dict__, __old2new__, __new2old__ = key_dict(from_dict)\n\n def __init__(self, locked=False):\n init_dict = self.__init_dict__\n for key in init_dict:\n value = init_dict[key]\n date, mo, da, ye, time, ho, mi, se = [None] * 8\n if type(value) in [str, str]:\n date, da, mo, ye, time, ho, mi, se = reDateTime.match(value).groups()\n if mo and int(mo) > 12:\n mo, da = da, mo\n\n if type(init_dict[key]) == dict:\n setattr(self, key, makeFMData(init_dict[key], locked=False)) # lock all substructures??\n elif type(init_dict[key]) == list:\n l = []\n for d in init_dict[key]:\n if type(d) == dict:\n l.append(makeFMData(d)) # lock ??\n else:\n l.append(d)\n setattr(self, key, l)\n elif date and time:\n setattr(self, key, DateTime(int(ye), int(mo), int(da), int(ho), int(mi), int(se)))\n elif date:\n setattr(self, key, Date(int(ye), int(mo), int(da)))\n elif time:\n setattr(self, key, Time(int(ho), int(mi), int(se)))\n else:\n setattr(self, key, init_dict[key])\n if locked:\n self.__modified__.add('__locked__')\n\n def __setattr__(self, key, value):\n if '__locked__' in self.__modified__:\n raise AttributeError(\"This substructure is read-only, so you cannot modify '%s' attribute.\" % key)\n oldvalue = None\n if hasattr(self, key):\n oldvalue = getattr(self, key)\n # if oldvalue != None and type(oldvalue) != type(value):\n #\t raise TypeError, \"Type of field '%s' is %s, you cannot insert %s\" % (key, type(oldvalue), type(value))\n object.__setattr__(self, key, value)\n if oldvalue != None and value != oldvalue:\n self.__modified__.add(key)\n\n def __getitem__(self, key):\n if type(key) == str or type(key) == str:\n spl = key.split('.')\n else:\n print(\"-\" * 20, key, type(key))\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return getattr(getattr(self, spl[0]), spl[1])\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return getattr(self, key)\n\n def __setitem__(self, key, value):\n spl = key.split('.')\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return setattr(getattr(self, spl[0]), spl[1], value)\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return setattr(self, key, value)\n\n def __str__(self):\n return object.__repr__(self)\n\n def __iter__(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n l.append(\"%s.%s\" % (key, subkey))\n else:\n l.append(key)\n l.sort()\n for x in l:\n yield x\n\n def _modified(self):\n \"\"\"Returns tuple (key, value) for modified keys inside of FMData tree (recursive without lists)\"\"\"\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))\n\n def __repr__(self):\n # from pformat import pformat\n # return \"<%s instance with %s records>\\n%s\" % (str(self.__class__), len(self.__slots__), pformat(dict([(value, getattr(self, value)) for value in self.__slots__])))\n # return pformat(dict([(value, getattr(self, value)) for value in 
self.__slots__]))\n l = []\n for key in self.__slots__:\n ukey = \"\"\n if key in self.__new2old__:\n ukey = \" (%s)\" % self.__new2old__[key]\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n value = getattr(getattr(self, key), subkey)\n if type(value) == str:\n value = value.decode('utf-8')\n l.append(\"%s.%s = '%s'\" % (key, subkey, value))\n elif type(getattr(self, key)) == list:\n l.append(\"%s%s = <list with %s records>\" % (key, ukey, len(getattr(self, key))))\n elif type(getattr(self, key)) == str:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key).decode('utf-8')))\n else:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key)))\n l.sort()\n return str(('\\n'.join(l)).encode('utf-8'))\n\n def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except AttributeError:\n return default\n\n return FMData(locked)", "def copy(self):\n return self.update({})", "def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict", "def extend(primary: Mapping, *others: Mapping, in_place=False):\n others = flatten(others)\n if not in_place:\n primary = dict(primary or {})\n for other in others:\n if other is None:\n continue\n for key, value in other.items():\n primary[key] = value\n return primary", "def fill_dict_defaults(d, required_keys=None, opt_keys=None, noleft=True):\n if required_keys is None:\n required_keys = []\n if opt_keys is None:\n opt_keys = {}\n if d is None:\n if not required_keys:\n if opt_keys is None:\n raise TypeError(\"`d` and òpt_keys` are both None.\")\n return opt_keys.copy()\n else:\n raise ValueError(\"`d` is None, but `required_keys` is not empty.\")\n\n d = d.copy()\n out = {}\n # Set required keys\n for key in required_keys:\n if key in d:\n out[key] = d.pop(key)\n else:\n raise KeyError(\"Dict is missing required key '{}'.\".format(key))\n # Set optional values, if key not given\n for key, val in opt_keys.items():\n out[key] = d.pop(key, val)\n # Complain when extra keys are left and noleft is True\n if d and noleft:\n raise KeyError(\"Leftover keys ['{}'].\".format(\n \"', '\".join(list(d.keys()))))\n return out", "def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def merge_default_from_dict(self, key, value, lists_only=False):\n pass", "def update_dict(new,old):", "def __iter__(self) -> 'Dictionary':\n return copy.deepcopy(self)", "def dict_with_attrs(*args):\n class CustomDict(dict):\n __slots__ = args\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n return CustomDict", "def pad_dict(indict):\n indict[\"xtfit\"].append(None)\n indict[\"ytfit\"].append(None)\n indict[\"wmask\"].append(None)\n return indict", "def insertable_dict(self):\n # .strip('_') is for type_\n return {\n 'f_' +\n p.key.strip('_'): getattr(\n self,\n p.key) for p in self.__mapper__.attrs}", "def Dictionary_create_from(nMarkers, markerSize, baseDictionary):\n pass", "def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 
'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def merge_dict(d: dict, overwrite=False, inplace=False, **kwargs):\n nd = dict([(k, v) for k, v in d.items()] + [(k, v) for k, v in kwargs.items() if overwrite or k not in d])\n if inplace:\n d.update(nd)\n return d\n return nd", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x", "def __init__(self, cutoff: float = 0.6):\r\n super(FuzzyDict, self).__init__()\r\n self.cutoff = cutoff\r\n\r\n # short wrapper around some super (dict) methods\r\n self._dict_contains = lambda key: super(FuzzyDict, self).__contains__(key)\r\n self._dict_getitem = lambda key: super(FuzzyDict, self).__getitem__(key)", "def update(x, **entries):\n if isinstance(x, dict):\n x.update(entries)\n else:\n x.__dict__.update(entries)\n return x", "def copy(self):\n return OrderedDict(self)", "def copy(self):\n return self.__class__(dict(self))", "def underride(d, **options):\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d", "def update_default_from_dict(self, key, value):\n pass", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def _as_dict(self):\r\n local = dict((key, value) for key, value in self)\r\n joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)\r\n if not k[0] == '_'])\r\n local.update(joined)\r\n return local", "def add_to_dict(source_dict=None, working_dict=None, new_key=None, new_dict=None):\n if source_dict is None or working_dict is None or new_key is None or new_dict is None:\n raise RuntimeError(\"Invalid arguments passed, one of is == None.\")\n\n if working_dict[new_key] is None:\n working_dict[new_key] = new_dict\n else:\n working_dict[new_key].update(new_dict)\n\n return source_dict.update(working_dict)", "def intern_dict(d, fields_to_intern=None):\n fields_to_intern = fields_to_intern or set()\n out = {}\n for k, v in d.iteritems():\n # 
We can't intern unicode strings, as returned by etcd but all our\n # keys should be ASCII anyway. Use the utf8 encoding just in case.\n k = intern(k.encode(\"utf8\"))\n if k in fields_to_intern:\n if isinstance(v, StringTypes):\n v = intern(v.encode(\"utf8\"))\n elif isinstance(v, list):\n v = intern_list(v)\n out[k] = v\n return out", "def copymod(dct, without=None, **kwargs):\r\n if without is None:\r\n without = []\r\n rval = copy(dct)\r\n for a in without:\r\n if a in rval:\r\n del rval[a]\r\n for kw, val in kwargs.items():\r\n rval[kw] = val\r\n return rval", "def setup_dict(self, keys=None):\n keys = keys or []\n return {key: True for key in keys}", "def update_dict(d, u, omit_new=False):\n\n for k, v in u.items():\n if k not in d and omit_new:\n continue\n\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict(d.get(k, {}), v, omit_new)\n elif isinstance(v, list):\n d[k] = [update_dict(i, j, omit_new) if None not in (i, j) else\n i if j is None else j\n for (i, j) in itertools.zip_longest(d.get(k, []), v)]\n else:\n d[k] = v\n return d", "def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst", "def _as_dict(self):\n local = dict((key, value) for key, value in self)\n joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])\n local.update(joined)\n return local", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def universal_dict(name, attributes, namespace=None):\n\n def wrapper(cls):\n \"\"\"Inner decorator\n\n Args:\n cls: Input class\n\n Returns:\n Decorated class\n \"\"\"\n\n if namespace:\n qualified_name = '.'.join([namespace, name])\n else:\n qualified_name = name\n\n class RetCls(UniversalDict):\n SERIALIZABLE_ATTRIBUTES = attributes\n JSON_OBJECT_IDENTIFIER = qualified_name\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n self.__dict__['_obj'] = cls(*args, **kwargs)\n\n def __getattr__(self, item):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return getattr(self.__dict__['_obj'], item)\n\n def __setattr__(self, key, value):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return setattr(self.__dict__['_obj'], key, value)\n\n def __str__(self):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return str(self.__dict__['_obj'])\n\n def __repr__(self):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return repr(self.__dict__['_obj'])\n\n assert qualified_name not in UniversalDict.SERIALIZERS\n UniversalDict.SERIALIZERS[qualified_name] = RetCls\n\n return RetCls\n\n return wrapper", "def custom_dictionary_from(nMarkers, markerSize, baseDictionary):\n pass", "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def _new_wos_dict():\n wos_dict = {\n 'DI': None,\n 'TI': None,\n 'PY': None,\n 'SO': None,\n 'UT': None,\n 'DE': None,\n }\n\n return wos_dict", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in 
org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def add_to_dict(from_dict, to_dict):\n for k, v in list(from_dict.items()):\n if hasattr(v, 'copy') and callable(getattr(v, 'copy')):\n to_dict[k] = v.copy()\n else:\n to_dict[k] = v", "def create_cache(\n size: int,\n) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], \"Template\"]]:\n if size == 0:\n return None\n\n if size < 0:\n return {}\n\n return LRUCache(size) # type: ignore", "def stubbornDict(*arg, **kwarg):\n result = {}\n for a in arg:\n result.update(StubbornDict.to_dict(a))\n result.update(kwarg)\n return StubbornDict(result)", "def make_dict(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:\n return _DictMaker(struct_class=cls, positional_args=args, keyword_args=kwargs).make_dict()", "def factory(f):\n def decorator(**data):\n d = f()\n assert type(d) is dict\n d.update(data)\n return d\n\n return decorator", "def copy_backward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._backward_mapping)", "def filter_dict(fdict, mask):\n\n if fdict is None:\n fdict = dict()\n\n if mask is None:\n mask = []\n\n return {k: v for (k, v) in fdict.items() if k in mask}", "def copy(self):\r\n # This way of initializing the copy means it works for subclasses, too.\r\n obj = self.__class__(self)\r\n obj.keyOrder = self.keyOrder[:]\r\n return obj", "def _initializeDoneDict(self):\n\t\tself.donedictfile = os.path.join(self.params['rundir'] , self.functionname+\".donedict\")\n\t\tif os.path.isfile(self.donedictfile) and self.params['continue'] == True:\n\t\t\t### unpickle previously done dictionary\n\t\t\tapDisplay.printMsg(\"Reading old done dictionary: \"+os.path.basename(self.donedictfile))\n\t\t\tf = open(self.donedictfile,'r')\n\t\t\tself.donedict = cPickle.load(f)\n\t\t\tf.close()\n\t\t\tif not 'commit' in self.donedict or self.donedict['commit'] == self.params['commit']:\n\t\t\t\t### all is well\n\t\t\t\tapDisplay.printMsg(\"Found \"+str(len(self.donedict))+\" done dictionary entries\")\n\t\t\t\treturn\n\t\t\telif self.donedict['commit'] is True and self.params['commit'] is not True:\n\t\t\t\t### die\n\t\t\t\tapDisplay.printError(\"Commit flag was enabled and is now disabled, create a new runname\")\n\t\t\telse:\n\t\t\t\t### set up fresh dictionary\n\t\t\t\tapDisplay.printWarning(\"'--commit' flag was changed, creating new done dictionary\")\n\n\t\t### set up fresh dictionary\n\t\tself.donedict = {}\n\t\tself.donedict['commit'] = self.params['commit']\n\t\tapDisplay.printMsg(\"Creating new done dictionary: \"+os.path.basename(self.donedictfile))\n\n\t\t### write donedict to file\n\t\tf = open(self.donedictfile, 'w', 0666)\n\t\tcPickle.dump(self.donedict, f)\n\t\tf.close()\n\t\treturn", "def deep_merge(origin: dict, renovator: Mapping) -> dict:\n\n for key, value in renovator.items():\n if isinstance(value, Mapping):\n node = origin.setdefault(key, {})\n deep_merge(node, value)\n else:\n origin[key] = value\n\n return origin", "def dict_none_to_new(base_dict, new_dict):\r\n for key, value in list(new_dict.items()):\r\n base_value = base_dict.get(key)\r\n if base_value is None:\r\n base_dict[key] = value", "def dict_none_to_new(base_dict, new_dict):\r\n for key, value in list(new_dict.items()):\r\n base_value = base_dict.get(key)\r\n if base_value is None:\r\n base_dict[key] = value", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in 
first.items():\n if name not in changeables:\n second[name] = field", "def frozen_time():\n # TODO LATER: Either freezegun should support the system clock, or find something else.\n with freezegun.freeze_time(\"2020-01-01 00:00:00\") as frozen:\n # Use freezegun-supported time instead of system clocks -- for testing purposes only.\n # NB: Patch strictly after the time is frozen -- to use fake_time(), not real time().\n with patch('time.monotonic', time.time), patch('time.perf_counter', time.time):\n yield frozen", "def __deepcopy__(self, memodict={}) -> 'FqeData':\n new_data = FqeData(nalpha=self.nalpha(),\n nbeta=self.nbeta(),\n norb=self._core.norb(),\n fcigraph=self._core,\n dtype=self._dtype)\n new_data._low_thresh = self._low_thresh\n new_data.coeff = self.coeff.copy()\n return new_data", "def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]", "def dict(self, exclude=None, keys=None):\n\t\tdictionary = self.__dict__\n\n\t\t# Return immediately if the user only wants certain keys\n\t\tif keys:\n\t\t\tdictionary = {i: dictionary[i] for i in keys if i in dictionary}\n\t\t\treturn dictionary\n\n\t\tif exclude:\n\t\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if key not in exclude}\n\n\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if not key.startswith('_')}\n\t\treturn dictionary", "def get_initial_data(self, removed=('billing_country_code', )):\n initial = getattr(self, 'initial_data', None) or {}\n for ea in removed:\n initial.pop(ea, None)\n if not initial:\n return initial\n test_data = MultiValueDict()\n test_data.update(initial)\n self.test_data = test_data\n return test_data", "def __setitem__(self, key, value):\n super(ReadOnlyDict, self).__setitem__(key, value)", "def copy(self):\n return udict(self)", "def dict() -> Dict:\n pass", "def infinitedict():\n return defaultdict(infinitedict)", "def buildcompletedict(oldfile, newfile):\n return adddifftodictionary(addtodictionary(builddictionary(open(oldfile).readlines()), open(newfile).readlines()))", "def create(name, add=None, remove=None, change=None, clear=None):\n\n class GDEvents:\n \"\"\"Defines events for GlobalDictionary\"\"\"\n\n def __init__(self):\n self.name = name\n\n def OnAdd(self, key, value, size):\n if add:\n add(self, key, _decode_value(value), size)\n\n def OnRemove(self, key, size):\n if remove:\n remove(self, key, size)\n\n def OnChange(self, key, value, size):\n if change:\n change(self, key, _decode_value(value), size)\n\n def OnClear(self):\n if clear:\n clear(self)\n\n return GlobalDictionary(name, GDEvents)", "def initialize_assignment(self):\n # Initialize empty frozensets for each agent\n init_assignment = 
frozendict({a:frozenset() for a in self.agents})\n \n # Add hard assignments\n if self.hard_assignment:\n init_dict = dict(init_assignment)\n for a, t in self.hard_assignment.items():\n init_dict[a] = init_dict[a] | t\n init_assignment = frozendict(init_dict)\n \n return init_assignment" ]
[ "0.7146128", "0.68393993", "0.65283275", "0.6507282", "0.5816068", "0.5484915", "0.5304193", "0.5286141", "0.5152492", "0.5124631", "0.5105132", "0.50970227", "0.5096764", "0.5051437", "0.5042281", "0.49775112", "0.49529138", "0.4934736", "0.49311805", "0.48632023", "0.48579502", "0.48420447", "0.48328725", "0.4832583", "0.48117724", "0.48107445", "0.4801841", "0.477287", "0.47656885", "0.4764035", "0.47503012", "0.47303838", "0.472882", "0.47151965", "0.47088215", "0.4691155", "0.46847296", "0.46763018", "0.4673732", "0.46252957", "0.4610507", "0.46090707", "0.46086088", "0.4606574", "0.46047378", "0.4603262", "0.4598905", "0.45976153", "0.4570252", "0.45345598", "0.45281914", "0.45270756", "0.45215434", "0.45178002", "0.45120463", "0.45024753", "0.45014915", "0.44970614", "0.44959235", "0.44882524", "0.44847715", "0.44841114", "0.44823384", "0.4475223", "0.446664", "0.446392", "0.44581994", "0.4449541", "0.44472167", "0.44429278", "0.4429974", "0.44269764", "0.44268218", "0.4425565", "0.44223624", "0.44115683", "0.44115043", "0.4411128", "0.44070894", "0.440335", "0.43935415", "0.43925944", "0.4390529", "0.43900338", "0.43890077", "0.43889853", "0.43889853", "0.43835124", "0.4375172", "0.4374822", "0.43724865", "0.43682", "0.43633065", "0.43573093", "0.43542948", "0.43521833", "0.43474084", "0.43472198", "0.43435445", "0.43348998" ]
0.70710015
1
Create a new FrozenDict where one entry is removed.
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
    value = self[key]
    new_dict = dict(self._dict)
    new_dict.pop(key)
    new_self = type(self)(new_dict)
    return new_self, value
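A hedged usage sketch for the `pop` method above, under the same assumption that this is the `FrozenDict` from `flax.core.frozen_dict`. Unlike `dict.pop`, it does not mutate the original mapping; it returns a `(new_dict, value)` pair.

# Illustrative only: assumes the FrozenDict implementation shown above
# (e.g. flax.core.frozen_dict.FrozenDict).
from flax.core.frozen_dict import FrozenDict

params = FrozenDict({'encoder': 1, 'decoder': 2})

# Returns a new FrozenDict without the key, plus the removed value.
rest, removed = params.pop('decoder')

print(removed)  # 2
print(rest)     # FrozenDict({'encoder': 1})
print(params)   # FrozenDict({'encoder': 1, 'decoder': 2})  (unchanged)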
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def remove_element( self, dictionary, key):\n\n _dict = dictionary.copy()\n _dict.pop(key, None)\n return _dict", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def __delitem__(self, key):\n if not self._set:\n raise TypeError('This dict is read-only')\n return self._set(key, None)", "def clear(self):\n super(ReadOnlyDict, self).clear() # pragma: no cover", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def clear_dict(d: dict) -> dict:\n # TODO delete if not used\n return {k: v for k, v in d.items() if v is not None}", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def frozenset(self) -> frozenset:\n return frozenset(self)", "def revive(self):\n field_name = self.get_delete_flag_field_name()\n return self.update(**{field_name: None})", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def copy(self):\n return AttrDict(dict(self).copy())", "def clean_local_cache(self):\n to_expire = []\n now = int(time())\n\n try:\n for k, (_, _, grace) in six.iteritems(self._local_cache):\n if now > grace:\n to_expire.append(k)\n except 
RuntimeError:\n # It's possible for the dictionary to be mutated in another thread\n # while iterating, but this case is rare, so instead of making a\n # copy and iterating that, it's more efficient to just let it fail\n # gracefully. It'll just get re-run later.\n return\n\n for k in to_expire:\n try:\n del self._local_cache[k]\n except KeyError:\n # This could only exist in a race condition\n # where another thread has already deleted this key,\n # but we'll guard ourselves against it Justin Case.\n pass", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0", "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def dict2frozenset(d):\n return frozenset(d.items())", "def remove_fc(state_dict):\n return {key: value for key, value in state_dict.items() if not key.startswith('fc.')}", "def nonull_dict(self):\n return {k: v for k, v in self.dict.items() if v and k != '_codes'}", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def purge(self):\n keys = [k for (k, v) in self.get_range()]\n\n [self.remove(k) for k in keys]", "def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def copy(self):\n return pdict(dict.copy(self))", "def removeDic(dic, key):\n pass", "def removeAll(self):\n self.pDict.clear()", "def __delitem__(self, key):\n super(ReadOnlyDict, self).__delitem__(key)", "def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def remove(self, name):\n\n w = self._wdict[name]\n del(self._wdict[name])\n \n \n return w", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def make_mutable_REMEMBER_CLEANUP_FIRST(self):\n # UNSET the flag to make object immutable and hashable - need to do it in a 
roundabout way,\n # because the immutability prevents simply \"self.immutable = False\" from working!\n self.__dict__['immutable'] = False\n # but if I put __slots__ in, self.__dict__ won't exist any more... TODO Options for then:\n # setattr(self, 'immutable', False) - doesn't seem to work?\n # object.__setattr__(self, 'immutable', False) - does that work?", "def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]", "def removeAllKeys(self) -> None:\n ...", "def clear_cache(self): # pragma: no cover\n # Overwite with an empty dictionary\n with open(self.cacheFile, \"wb\") as f:\n pkl.dump({}, f)\n return", "def removeDictItem(self, key):\n if key in self._dentsvertsdata:\n self._dentsvertsdata[key].free()\n del self._dentsvertsdata[key]", "def without(self, to_unset):\n modified = self.copy()\n assert type(to_unset) == list\n for env_var in to_unset:\n if env_var in modified:\n modified.pop(env_var)\n return modified", "def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]", "def popall(self, k, default=_MISSING):\n super_self = super(OrderedMultiDict, self)\n if super_self.__contains__(k):\n self._remove_all(k)\n if default is _MISSING:\n return super_self.pop(k)\n return super_self.pop(k, default)", "def mempty(self) -> 'Dictionary':\n return Dictionary()", "def delete_duplicate(x):\n return list(dict.fromkeys(x))", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def empty(cls, key_type, value_type):\n return cls(dcttype=DictType(key_type, value_type))", "def unique(self):\n return frozenset(self)", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def delete(self, key):\n self.map.pop(key, None)", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove_empty(d):\n for key in d.keys():\n if d[key] is None:\n del d[key]\n return d", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data", "def remove_item_from_map(): \n ITEM_LIST[ZERO_BASE_PLYR_POS] = int(len(ITEMTYPES) - 2) # Replaces item with the \"None\" item", "def copy(self):\n return udict(self)", "def truncate_dict(dictionary: Dict, n: int) -> Dict:\n return {k: v for (k, v) in list(dictionary.items())[:n]}", "def remove_min(self):\n p = self._find_min()\n item = self._data.delete(p)\n return (item._key, item._value)", "def clear(self):\n self._map = {}", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = 
Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def dict_pop(d, key):\n return d.pop(key)", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def test_fuzz_deletions():\n key_range = 2 ** 64\n value_range = 1024\n key_set = set()\n \n d = OrderedTreeDict()\n for value in range(0, value_range):\n key = randint(0, key_range)\n d.put(key, value)\n key_set.add(key)\n \n sorted_keys = list(sorted(key_set))\n sorted_keys_slice = sorted_keys[0:len(sorted_keys) // 2]\n \n for key in sorted_keys_slice:\n d.delete(key)\n assert len(d) > 0\n assert key not in d\n assert d.depth() <= int(2 * math.log(len(d), 2)), \"Should stay as balanced as a red black tree. \"\n \n keys = list(d.keys())\n assert len(keys) == len(sorted_keys_slice), \"Length should reflect number of items inserted.\"\n assert len(keys) == len(list(keys)), \"Iteration should find all items in tree.\"", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def copy(self):\n return self.update({})", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def copy(self):\n return OrderedDict(self)", "def cache_clean(self):\n\t\tnow = time.time()\n\t\tkeys_for_removal = collections.deque()\n\t\tfor key, (_, expiration) in self.__cache.items():\n\t\t\tif expiration < now:\n\t\t\t\tkeys_for_removal.append(key)\n\t\tfor key in keys_for_removal:\n\t\t\tdel self.__cache[key]", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}", "def Remove(self, version_number):\n self.dict.pop(str(version_number))", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def clear():\n global d\n for key in d.keys():\n del d[key]", "def remove(self, key):\n ndx = self._findPosition(key)\n assert ndx, 'Invalid map key'\n self._entryList.pop(key)", "def remove_unused_keys(cop):\n delete_these = [\n 'officer_atty',\n 'officer_atty_firm',\n 'case_id',\n 'cop_first_name',\n 'cop_middle_initial',\n 'cop_last_name',\n 'entered_by',\n 'entered_when',\n 'fact_checked_by',\n 'fact_checked_when',\n 'matched_by',\n 'matched_when'\n ]\n\n for key in delete_these:\n del cop[key]\n\n return cop", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def copy(self):\n return self.__class__(dict(self))", "def rem(self, 
key):\n if self.dexists('ttl', key):\n self.dpop('ttl', key)\n return super(MyCache, self).rem(key)", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def __iter__(self) -> 'Dictionary':\n return copy.deepcopy(self)", "def __evict_least_frequent_entry(self):\n least_frequent_group = self.head.next.frequency_cache\n self.size -= 1\n removed_node = least_frequent_group.evict_LRU_entry()\n del self.key_node_map[removed_node.key] # remove from self.key_node_map\n del self.key_to_frequency_node[removed_node.key] # remove from self.key_to_frequency_node\n if least_frequent_group.size == 0: # if frequency group now empty, remove it\n self.__remove_frequency_node(self.head.next)\n return removed_node", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def copy_backward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._backward_mapping)", "def clear_keymap(self):\n self.keymap = {}", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def clean_out_old_env():\n d = \"dict_racey.json\"\n if os.path.exists(d):\n print(\"Remove the old cached JSON before continuing.\")\n os.remove(d)", "def complement(self):\r\n\r\n for key in Options.defaults():\r\n if key not in self:\r\n self[key] = Options.defaults()[key]\r\n return self", "def remove_one(self):\n item = self.expiry.pop(0)\n if item.updated:\n self.new_expiry.append(item)\n return\n del self.index[item.target]\n return", "def args_frozen(self):\n return {k: v for k, v in self.args.items() if k not in self._traversable}", "def _remove_empty(self, data, many):\n if not many:\n for key in list(data):\n if key == 'versions':\n data.pop(key)\n\n return {\n key: value for key, value in data.items()\n if value is not None\n }\n for item in data:\n for key in list(item):\n if (key == 'versions') or (item[key] is None):\n item.pop(key)\n\n return data", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp" ]
[ "0.6839586", "0.6433712", "0.63349175", "0.630568", "0.60893214", "0.59897816", "0.5919276", "0.5696106", "0.56297266", "0.5533009", "0.5489603", "0.54769456", "0.54426163", "0.53937614", "0.5389322", "0.5372033", "0.5324981", "0.53037596", "0.53020614", "0.52977496", "0.5270413", "0.5267698", "0.5258565", "0.5258448", "0.5257699", "0.5219929", "0.5214302", "0.51955247", "0.51898754", "0.5187546", "0.5166382", "0.51255184", "0.511276", "0.510687", "0.50921047", "0.50904304", "0.5068631", "0.5058429", "0.50554204", "0.50518155", "0.5048803", "0.5047805", "0.5044374", "0.5014926", "0.5014566", "0.5013802", "0.4999455", "0.49986127", "0.49950835", "0.49840546", "0.4975407", "0.4966317", "0.49572384", "0.49548468", "0.4954227", "0.49431765", "0.49344617", "0.49197724", "0.4919698", "0.49157974", "0.49134383", "0.49011022", "0.48937026", "0.48921898", "0.4886875", "0.48839566", "0.48829725", "0.48731652", "0.48684844", "0.4868287", "0.48609367", "0.48575285", "0.4853223", "0.48437813", "0.48423967", "0.48380414", "0.4837057", "0.48339957", "0.48261473", "0.48250178", "0.48228538", "0.48184356", "0.4818434", "0.479618", "0.47957736", "0.47936022", "0.47897935", "0.47897327", "0.47884732", "0.4774748", "0.47654837", "0.4763874", "0.47623238", "0.4754613", "0.4748253", "0.47463262", "0.4745046", "0.4741097", "0.47408736", "0.4740202" ]
0.57714397
7
Deep copy unfrozen dicts to make the dictionary FrozenDict safe.
def _prepare_freeze(xs: Any) -> Any: if isinstance(xs, FrozenDict): # we can safely ref share the internal state of a FrozenDict # because it is immutable. return xs._dict # pylint: disable=protected-access if not isinstance(xs, dict): # return a leaf as is. return xs # recursively copy dictionary to avoid ref sharing return {key: _prepare_freeze(val) for key, val in xs.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def dict2frozenset(d):\n return frozenset(d.items())", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def copy(self):\n return pdict(dict.copy(self))", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def __copy__(self):\n d = dict()\n 
d.update(self.items())\n return d", "def copy_forward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._forward_mapping)", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def __deepcopy__(self, memo):\n from copy import deepcopy\n return self.__class__(deepcopy(self.items(), memo), self.strict)", "def args_frozen(self):\n return {k: v for k, v in self.args.items() if k not in self._traversable}", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def copy(self):\n return AttrDict(dict(self).copy())", "def deep_copy(old_dict, parent=None, depth=None, main=None):\n\n # Is this a copy starting from the top level?\n if isinstance(old_dict, configobj.ConfigObj):\n new_dict = configobj.ConfigObj('',\n encoding=old_dict.encoding,\n default_encoding=old_dict.default_encoding,\n interpolation=old_dict.interpolation)\n else:\n # No. It's a copy of something deeper down. If no parent or main is given, then\n # adopt the parent and main of the incoming dictionary.\n new_dict = configobj.Section(parent if parent is not None else old_dict.parent,\n depth if depth is not None else old_dict.depth,\n main if main is not None else old_dict.main)\n for entry in old_dict:\n # Avoid interpolation by using the version of __getitem__ from dict\n old_value = dict.__getitem__(old_dict, entry)\n if isinstance(old_value, configobj.Section):\n new_value = deep_copy(old_value, new_dict, new_dict.depth+1, new_dict.main)\n elif isinstance(old_value, list):\n # Make a copy\n new_value = list(old_value)\n elif isinstance(old_value, tuple):\n # Make a copy\n new_value = tuple(old_value)\n else:\n # It's a scalar\n new_value = old_value\n new_dict[entry] = new_value\n return new_dict", "def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat", "def get_pure_data_copy(self):\n import copy\n data=copy.copy(self)\n data.xp = data.xp.get_pure_data_copy()\n data.timetable = data.timetable.get_pure_data_copy() \n return data", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def deepupdate(self, other, copy=False):\n for k in other:\n if isinstance(other[k], self.__class__):\n if not k in self:\n self[k] = self.__class__()\n elif isinstance(self[k], self.__class__):\n pass\n elif isinstance(self[k], dict):\n self[k] = self.__class__(self[k]).rconvert()\n else:\n self[k] = self.__class__()\n self[k].deepupdate(other[k])\n else:\n if copy: self[k] = copymod.deepcopy(other[k])\n else: self[k] = other[k]\n return self", "def dictcopy(dic):\n keys = list(dic.keys())\n values = 
[list(i) for i in dic.values()]\n return dict(zip(keys,values))", "def _copy(query_dict):\n\n memo = { }\n\n result = query_dict.__class__('',\n encoding=query_dict.encoding,\n mutable=True)\n\n memo[id(query_dict)] = result\n\n for key, value in dict.items(query_dict):\n dict.__setitem__(result,\n copy.deepcopy(key, memo),\n copy.deepcopy(value, memo))\n\n return result", "def dict_copies(my_dict, num_copies):\n answer = []\n for idx in range(num_copies):\n answer.append(dict(my_dict))\n return answer", "def copy_backward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._backward_mapping)", "def __deepcopy__(self, memo):\n new = type(self)(None)\n for k in self.__dict__:\n if k in ('fh', 'fh_archive'):\n new.__dict__[k] = None\n else:\n new.__dict__[k] = copy.deepcopy(self.__dict__[k], memo)\n return new", "def simple_deepcopy(coll):\n if isinstance(coll, dict):\n return {k: simple_deepcopy(v) for k, v in coll.iteritems()}\n elif isinstance(coll, set):\n return {simple_deepcopy(v) for v in coll}\n elif hasattr(coll, \"__iter__\"):\n return [simple_deepcopy(v) for v in coll]\n else:\n return coll", "def copy(self):\n return udict(self)", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def __iter__(self) -> 'Dictionary':\n return copy.deepcopy(self)", "def fast_deep_copy(v: _T) -> _T:\n return cast(_T, pickle.loads(pickle.dumps(v)))", "def copy(self):\n return self.update({})", "def original_dict(self):\n return self.obj.__dict__", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def delete_keys_from_dict(self, orig_dict, keys_whitelist):\n for k in list(orig_dict.keys()):\n if k not in keys_whitelist:\n del orig_dict[k]\n\n for v in orig_dict.values():\n if isinstance(v, dict):\n self.delete_keys_from_dict(v, keys_whitelist)\n\n return orig_dict", "def freeze_transforms(self: TDataWTransform) -> TDataWTransform:\n tgroups = copy.copy(self._transform_groups)\n frozen_tgroups = copy.copy(self._frozen_transform_groups)\n datacopy = self._shallow_clone_dataset()\n datacopy._frozen_transform_groups = frozen_tgroups + tgroups\n datacopy._transform_groups = EmptyTransformGroups()\n dds: List[IDataset] = []\n for dd in datacopy._datasets:\n if isinstance(dd, _FlatDataWithTransform):\n dds.append(dd.freeze_transforms())\n else:\n dds.append(dd)\n datacopy._datasets = dds\n return datacopy", "def _deep_copy_arg_dict(input_arg_dict):\n output_arg_dict = {}\n for name, param in input_arg_dict.items():\n output_arg_dict[name] = param.copy()\n return output_arg_dict", "def copy(self):\n return self.__class__(dict(self))", "def test_deepcopy_removes_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __deepcopy__() method exists, so __reduce_ex__() is 
used.\n remote_field_copy = copy.deepcopy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Field.__deepcopy__() internally uses __copy__() on both the\n # ForeignObject and ForeignObjectRel, so all cached values are removed.\n foreign_object_copy = copy.deepcopy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"path_infos\", foreign_object_copy.remote_field.__dict__)", "def test_dumps_frozenset(self):\n try:\n _build_test_dirs()\n dicti = {\n 'set': frozenset([1, 2, 4, 4, 2]),\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()", "def copy_dict(in_dict):\n\n if in_dict is None:\n return None\n\n out_dict = {}\n\n for key, val in in_dict.items():\n if isinstance(val, np.ndarray):\n out_dict[key] = val.copy()\n elif isinstance(val, dict):\n out_dict[key] = copy_dict(val)\n\n else:\n out_dict[key] = val\n\n return out_dict", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def __deepcopy__(self, others={}):\n miniMe = self.__class__.__new__(self.__class__)\n others[id(self)] = miniMe\n for key, val in self.__dict__.items():\n if id(val) in others:\n setattr(miniMe, key, others[id(val)])\n else:\n new = deepcopy(val, others)\n others[id(val)] = new\n setattr(miniMe, key, new)\n if miniMe.package:\n miniMe._addOurselvesToPackage(self.path)\n return miniMe", "def rconvert(self):\n for k in self:\n if isinstance(self[k], dict):\n if not isinstance(self[k], AttrDict):\n self[k] = AttrDict(self[k])\n self[k].rconvert()\n return self", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def __deepcopy__(self, memodict=None):\n return self.__class__(self.m, self.n, deepcopy(self.data))", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k in ('_iter', '_result_cache'):\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def freeze(self):\r\n for store_attr in self.__store_attrs__:\r\n frozen = _freeze_mapping(getattr(self, store_attr))\r\n setattr(self, store_attr, frozen)", "def test_copy_removes_direct_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n 
self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __copy__() method exists, so __reduce_ex__() is used.\n remote_field_copy = copy.copy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Cached values are removed via __copy__() on ForeignObject for\n # consistency of behavior.\n foreign_object_copy = copy.copy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n # ForeignObjectRel's remains because it's part of a shallow copy.\n self.assertIn(\"path_infos\", foreign_object_copy.remote_field.__dict__)", "def __deepcopy__(self, memo):\n\t\tcls = self.__class__\n\t\tobj = cls.__new__(cls)\n\t\tfor k, v in self.__dict__.items():\n\t\t\tif k != 'policy':\n\t\t\t\tsetattr(obj, k, v)\n\t\treturn obj", "def __deepcopy__(self, memo: dict[Any, Any]) -> geom:\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n old = self.__dict__\n new = result.__dict__\n\n # don't make a deepcopy of data, or environment\n shallow = {\"data\", \"_kwargs\", \"environment\"}\n for key, item in old.items():\n if key in shallow:\n new[key] = old[key]\n memo[id(new[key])] = new[key]\n else:\n new[key] = deepcopy(old[key], memo)\n\n return result", "def frozenset(self) -> frozenset:\n return frozenset(self)", "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def getFullDict(self):\n temp = copy.copy(self)\n\n for key in temp.dict:\n if temp.dict[key]['type'] == 'func' or temp.dict[key]['type'] == 'lambda':\n childDict = temp.dict[key][varTable].getFullDict()\n temp.dict[key][varTable] = childDict\n\n return temp.dict", "def copy(self, order=\"C\"):\n new = super().copy(order=order)\n\n new.__dict__ = copy.deepcopy(self.__dict__)\n return new", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def copy(self):\n attrs = {k: self.__dict__[k].copy() for k in self.containers}\n attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared})\n return self.__class__(**attrs)", "def copy(self, deep=False):\n return _(copy.deepcopy(self._) if deep else copy.copy(self._))", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def copy(self, **kwargs):\n\n # Future versions may add new options here\n with KWArgs(kwargs) as k:\n deep = k.optional(\"deep\", True)\n\n if deep:\n return copy.deepcopy(self)\n else:\n return copy.copy(self)", "def copy_dictionary(self,dictionary):\r\n return dictionary.copy()", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def add_to_dict(from_dict, to_dict):\n for k, v in list(from_dict.items()):\n if hasattr(v, 'copy') and callable(getattr(v, 'copy')):\n 
to_dict[k] = v.copy()\n else:\n to_dict[k] = v", "def deepcopy(self):\n return copymod.deepcopy(self)", "def override_dict_values(d1, d2):\n new = d1.copy()\n for k, v in d2.items():\n if isinstance(v, dict):\n new[k] = override_dict_values(new[k], d2[k])\n else:\n new[k] = v\n\n return new", "def __removeDuplicateDictsFromList(self, listOfDicts: List[Dict[str, str]]) -> List[Dict[str, str]]:\n return list({frozenset(item.items()): item for item in listOfDicts}.values())", "def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n ignore_attrs = [\"cbc\"]\n for k, v in self.__dict__.items():\n if k not in ignore_attrs:\n setattr(result, k, copy.deepcopy(v, memo))\n\n # Set CellBudgetFile object attribute manually. This is object\n # read-only so should not be problems with pointers from\n # multiple objects.\n result.cbc = self.cbc\n return result", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def test_flatten_dict(self):\n order_dict = OrderDict()\n\n order_2 = StockOrderWrapper(self.order_2)\n order_3 = StockOrderWrapper(self.order_3)\n order_5 = StockOrderWrapper(self.order_5)\n order_7 = StockOrderWrapper(self.order_7)\n\n order_2.stock_order.order_status = DEFINITIVE\n order_3.stock_order.order_status = DEFINITIVE\n order_5.stock_order.order_status = DEFINITIVE\n order_7.stock_order.order_status = DEFINITIVE\n\n order_dict.add_order(1.125, order_2)\n order_dict.add_order(10.321, order_3)\n order_dict.add_order(1.4, order_5)\n order_dict.add_order(9.321, order_7)\n\n # =================================================================\n # test: flattened dict contains all orders\n # =================================================================\n\n order_list = order_dict.flatten_dict()\n exp_list = [order_2, order_3, order_5, order_7]\n self.assertItemsEqual(exp_list, order_list)\n\n # =================================================================\n # test: exclude keys\n # =================================================================\n\n exclude_list = [.125, .4]\n order_list = order_dict.flatten_dict(exclude_keys=exclude_list)\n exp_list = [order_3, order_7]\n self.assertItemsEqual(exp_list, order_list)\n\n exclude_list = [0.125, 0.4]\n order_list = order_dict.flatten_dict(exclude_keys=exclude_list)\n exp_list = [order_3, order_7]\n self.assertItemsEqual(exp_list, order_list)\n\n # =================================================================\n # test: exclude non-existing key\n # =================================================================\n\n exclude_list = [.543, .9753]\n order_list = order_dict.flatten_dict(exclude_keys=exclude_list)\n exp_list = [order_2, order_3, order_5, order_7]\n self.assertItemsEqual(exp_list, order_list)\n\n exclude_list = [1, 4]\n order_list = order_dict.flatten_dict(exclude_keys=exclude_list)\n exp_list = [order_2, order_3, order_5, order_7]\n self.assertItemsEqual(exp_list, order_list)\n\n exclude_list = [\".125\", \".4\"]\n order_list = order_dict.flatten_dict(exclude_keys=exclude_list)\n exp_list = [order_2, order_3, order_5, order_7]\n self.assertItemsEqual(exp_list, order_list)", "def clone(self):\n\n def deep_copy(value):\n try:\n if hasattr(value, 'clone'):\n value.clone()\n except Exception:\n pass\n\n try:\n 
json.loads(json.dumps(value))\n except Exception:\n pass\n\n return value\n\n out = Property()\n for k in KEYS:\n out.set(k, deep_copy(self.get(k)))\n return out", "def f_to_dict(self, copy=True):\n if copy:\n return self._data.copy()\n else:\n return self._data", "def get_locals_copy(self): # this function has none of its own testing because of its simplicity\r\n return copy.deepcopy(self.__locals) # a copy is made so no changes propagate after function call\r", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict", "def copy_qoi_dict(QoI_dict):\n new_dict = dict.fromkeys(QoI_dict)\n for keys in QoI_dict:\n new_dict[keys] = dict.fromkeys(QoI_dict[keys])\n new_dict[keys]['QoI_func'] = QoI_dict[keys]['QoI_func']\n new_dict[keys]['output_dimensions'] = copy.deepcopy(QoI_dict[keys]['output_dimensions'])\n # Copy the deriv_dict now\n if 'deriv_dict' in QoI_dict[keys]:\n new_dict[keys]['deriv_dict'] = dict.fromkeys(QoI_dict[keys]['deriv_dict'])\n for key2 in new_dict[keys]['deriv_dict']:\n new_dict[keys]['deriv_dict'][key2] = dict.fromkeys(QoI_dict[keys]['deriv_dict'][key2])\n new_dict[keys]['deriv_dict'][key2]['dQoI_func'] = QoI_dict[keys]['deriv_dict'][key2]['dQoI_func']\n new_dict[keys]['deriv_dict'][key2]['output_dimensions'] = copy.copy(QoI_dict[keys]['deriv_dict'][key2]['output_dimensions'])\n\n # Make sure that the dictionaries have been copied correctly\n assert new_dict == QoI_dict\n assert new_dict is not QoI_dict\n\n return new_dict", "def __deepcopy__(self, memo):\n copy = self.__class__()\n copy.wvalues = self.wvalues\n return copy", "def copy(obj):\n return loads(dumps(obj))", "def clone_state_dict(state_dict):\n return OrderedDict([(name, clone(param)) for name, param in state_dict.items()])", "def _copy_metadata_deep(value, old_value):\n if value is None or old_value is None or value is old_value: return\n\n if isinstance(value, dict):\n for k, v in value.iteritems():\n _copy_metadata_deep(v, old_value[k])\n elif isinstance(value, list):\n for v, old_v in zip(value, old_value):\n _copy_metadata_deep(v, old_v)\n else:\n try:\n value.__dict__.update(old_value.__dict__)\n except AttributeError:\n pass", "def copymod(dct, without=None, **kwargs):\r\n if without is None:\r\n without = []\r\n rval = copy(dct)\r\n for a in without:\r\n if a in rval:\r\n del rval[a]\r\n for kw, val in kwargs.items():\r\n rval[kw] = val\r\n return rval", "def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k not in ['viewer', 'automatic_rendering_callback']:\n setattr(result, k, copy.deepcopy(v, memo))\n else:\n setattr(result, k, None)\n return result", "def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k not in ['viewer', '_record_video_wrapper']:\n setattr(result, k, copy.deepcopy(v, memo))\n else:\n setattr(result, k, None)\n return result", "def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:\n dataset_copy = 
copy.copy(self)\n dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)\n dataset_copy._frozen_transform_groups = copy.copy(\n dataset_copy._frozen_transform_groups\n )\n return dataset_copy", "def deepcopy(self):\n return copy.deepcopy(self)", "def safe_dict(d):\r\n if isinstance(d, dict):\r\n return dict([(k.encode('utf-8'), safe_dict(v)) for k, v in d.iteritems()])\r\n elif isinstance(d, list):\r\n return [safe_dict(x) for x in d]\r\n else:\r\n return d", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def as_set(dict_inst):\n\n return frozenset(dict_inst.items()) if isinstance(dict_inst, dict) else None" ]
[ "0.71733785", "0.7037678", "0.65864396", "0.6474158", "0.62573695", "0.6152454", "0.61428803", "0.6087053", "0.6052837", "0.60174745", "0.5997487", "0.59899086", "0.5918377", "0.5899443", "0.5898595", "0.5892593", "0.5735094", "0.572419", "0.5684948", "0.56120795", "0.5609732", "0.55691975", "0.5562032", "0.555424", "0.5553709", "0.55536914", "0.55452317", "0.5534845", "0.5526414", "0.5512912", "0.5510737", "0.54844666", "0.5475723", "0.5442625", "0.5422002", "0.54212666", "0.5406314", "0.53947663", "0.5342799", "0.53395855", "0.53335077", "0.5309447", "0.5308923", "0.5284945", "0.52815443", "0.5278798", "0.52660847", "0.525737", "0.5249583", "0.5249364", "0.5244576", "0.52097523", "0.5197727", "0.5190097", "0.5185409", "0.51725435", "0.5171431", "0.5170278", "0.5165406", "0.5159609", "0.51545256", "0.5138096", "0.5134301", "0.5132066", "0.5119427", "0.5112133", "0.5099225", "0.5090477", "0.5088724", "0.5083528", "0.5081184", "0.5078574", "0.5078461", "0.5073923", "0.5062587", "0.50599843", "0.50499314", "0.5040509", "0.5037156", "0.50221103", "0.5021363", "0.50121695", "0.50107694", "0.5002577", "0.49981543", "0.49967575", "0.49920171", "0.49787772", "0.4974319", "0.4971836", "0.49648464", "0.4959618", "0.49570313", "0.49520746", "0.49396184", "0.49386805", "0.4938235", "0.49374568", "0.4937183", "0.49366614" ]
0.7742294
0
Freeze a nested dict. Makes a nested `dict` immutable by transforming it into `FrozenDict`.
def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]: return FrozenDict(xs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def makeFMData(from_dict, locked=False):\n\n class FMData(object):\n \"\"\"Datastructure where:\n\n - attr and dict access is equal (eg. FMData.value == FMData['value'])\n - only attributtes given during initialization are readable and writable\n - modified attributes are tracked\"\"\"\n __modified__ = set()\n __slots__, __init_dict__, __old2new__, __new2old__ = key_dict(from_dict)\n\n def __init__(self, locked=False):\n init_dict = self.__init_dict__\n for key in init_dict:\n value = init_dict[key]\n date, mo, da, ye, time, ho, mi, se = [None] * 8\n if type(value) in [str, str]:\n date, da, mo, ye, time, ho, mi, se = reDateTime.match(value).groups()\n if mo and int(mo) > 12:\n mo, da = da, mo\n\n if type(init_dict[key]) == dict:\n setattr(self, key, makeFMData(init_dict[key], locked=False)) # lock all substructures??\n elif type(init_dict[key]) == list:\n l = []\n for d in init_dict[key]:\n if type(d) == dict:\n l.append(makeFMData(d)) # lock ??\n else:\n l.append(d)\n setattr(self, key, l)\n elif date and time:\n setattr(self, key, DateTime(int(ye), int(mo), int(da), int(ho), int(mi), int(se)))\n elif date:\n setattr(self, key, Date(int(ye), int(mo), int(da)))\n elif time:\n setattr(self, key, Time(int(ho), int(mi), int(se)))\n else:\n setattr(self, key, init_dict[key])\n if locked:\n self.__modified__.add('__locked__')\n\n def __setattr__(self, key, value):\n if '__locked__' in self.__modified__:\n raise AttributeError(\"This substructure is read-only, so you cannot modify '%s' attribute.\" % key)\n oldvalue = None\n if hasattr(self, key):\n oldvalue = getattr(self, key)\n # if oldvalue != None and type(oldvalue) != type(value):\n #\t raise TypeError, \"Type of field '%s' is %s, you cannot insert %s\" % (key, type(oldvalue), type(value))\n object.__setattr__(self, key, value)\n if oldvalue != None and value != oldvalue:\n self.__modified__.add(key)\n\n def __getitem__(self, key):\n if type(key) == str or type(key) == str:\n spl = key.split('.')\n else:\n print(\"-\" * 20, key, type(key))\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return getattr(getattr(self, spl[0]), spl[1])\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return getattr(self, key)\n\n def __setitem__(self, key, value):\n spl = key.split('.')\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = 
self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return setattr(getattr(self, spl[0]), spl[1], value)\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return setattr(self, key, value)\n\n def __str__(self):\n return object.__repr__(self)\n\n def __iter__(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n l.append(\"%s.%s\" % (key, subkey))\n else:\n l.append(key)\n l.sort()\n for x in l:\n yield x\n\n def _modified(self):\n \"\"\"Returns tuple (key, value) for modified keys inside of FMData tree (recursive without lists)\"\"\"\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in self.__modified__:\n yield (key, getattr(self, key))\n\n def __repr__(self):\n # from pformat import pformat\n # return \"<%s instance with %s records>\\n%s\" % (str(self.__class__), len(self.__slots__), pformat(dict([(value, getattr(self, value)) for value in self.__slots__])))\n # return pformat(dict([(value, getattr(self, value)) for value in self.__slots__]))\n l = []\n for key in self.__slots__:\n ukey = \"\"\n if key in self.__new2old__:\n ukey = \" (%s)\" % self.__new2old__[key]\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n value = getattr(getattr(self, key), subkey)\n if type(value) == str:\n value = value.decode('utf-8')\n l.append(\"%s.%s = '%s'\" % (key, subkey, value))\n elif type(getattr(self, key)) == list:\n l.append(\"%s%s = <list with %s records>\" % (key, ukey, len(getattr(self, key))))\n elif type(getattr(self, key)) == str:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key).decode('utf-8')))\n else:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key)))\n l.sort()\n return str(('\\n'.join(l)).encode('utf-8'))\n\n def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except AttributeError:\n return default\n\n return FMData(locked)", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n 
setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def _cycle_safe_dict(dict_with_cycles):\n # type: (typing.Dict[typing.Any, typing.Any]) -> typing.Dict[typing.Any, typing.Any]\n seen = {} # type: typing.Dict[int, typing.Tuple[typing.Any, ...]]\n\n def process(d, path):\n # type: (typing.Any, typing.Tuple[typing.Any, ...]) -> typing.Any\n if id(d) in seen:\n return CycleSentinel(seen[id(d)])\n else:\n if isinstance(d, dict):\n seen[id(d)] = path\n return {\n key: process(value, path + (key,))\n for key, value in sorted(d.items())\n }\n else:\n return d\n\n ret = process(dict_with_cycles, ())\n assert isinstance(ret, dict) # mypy isn't smart enough to figure this out\n return ret", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def deep_copy(old_dict, parent=None, depth=None, main=None):\n\n # Is this a copy starting from the top level?\n if isinstance(old_dict, configobj.ConfigObj):\n new_dict = configobj.ConfigObj('',\n encoding=old_dict.encoding,\n default_encoding=old_dict.default_encoding,\n interpolation=old_dict.interpolation)\n else:\n # No. It's a copy of something deeper down. 
If no parent or main is given, then\n # adopt the parent and main of the incoming dictionary.\n new_dict = configobj.Section(parent if parent is not None else old_dict.parent,\n depth if depth is not None else old_dict.depth,\n main if main is not None else old_dict.main)\n for entry in old_dict:\n # Avoid interpolation by using the version of __getitem__ from dict\n old_value = dict.__getitem__(old_dict, entry)\n if isinstance(old_value, configobj.Section):\n new_value = deep_copy(old_value, new_dict, new_dict.depth+1, new_dict.main)\n elif isinstance(old_value, list):\n # Make a copy\n new_value = list(old_value)\n elif isinstance(old_value, tuple):\n # Make a copy\n new_value = tuple(old_value)\n else:\n # It's a scalar\n new_value = old_value\n new_dict[entry] = new_value\n return new_dict", "def nest_dict(dct, keys):\n nested_dict = dct\n for key in reversed(keys):\n nested_dict = RecursiveDict({key: nested_dict})\n return nested_dict", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def nested_dict():\n return defaultdict(nested_dict)", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def to_dict(self):\r\n new_dict = {}\r\n for key, val in self.items():\r\n if isinstance(val, NestedDict):\r\n new_dict[key] = val.to_dict()\r\n else:\r\n new_dict[key] = val\r\n return new_dict", "def _Freeze(self) -> None:\n self._SetNodes(_FROZEN_NODE_COUNT)", "def test_creation_dict_from_std_dict(self):\n semaphore = Semaphore()\n lock = Lock()\n\n std_dict = {'a': 1, 'b': 2, 'c': 3}\n\n d = SwapDict(std_dict)\n self.assertTrue(str(sorted(d)) == str(sorted(std_dict)), \"Error creation SwapDict from dict, info: \\nSwapDict: %s\\n dict: %s\" %\n (str(d), str(std_dict)))\n del d", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def getFullDict(self):\n temp = copy.copy(self)\n\n for key in temp.dict:\n if temp.dict[key]['type'] == 'func' or temp.dict[key]['type'] == 'lambda':\n childDict = temp.dict[key][varTable].getFullDict()\n temp.dict[key][varTable] = childDict\n\n return temp.dict", "def create_dict_deep_distortion_old(defect_dict: dict, \r\n fancy_defects: dict,\r\n ):\r\n dict_deep_distortion = {}\r\n defect_dict_copy = defect_dict.copy()\r\n for defect_type in fancy_defects.keys(): # for each defect type (vac, as , int)\r\n \r\n dict_deep_distortion[defect_type] = import_deep_distortion_by_type(defect_dict_copy[defect_type],\r\n fancy_defects[defect_type]) #defects for which we'll try the deep distortion found for one of the charge states \r\n return dict_deep_distortion", "def copy(self):\n return pdict(dict.copy(self))", "def flatten_dict(nested):\n flattened = {}\n for key, value in nested.items():\n if isinstance(value, Mapping):\n for subkey, subval in value.items():\n newkey = '.'.join([key, subkey])\n flattened[newkey] = subval\n flatten_dict(flattened)\n else:\n flattened[key] = value\n 
mappings = [isinstance(value, Mapping) for key, value in flattened.items()]\n if len(set(mappings)) == 1 and set(mappings).pop() is False:\n return flattened\n else:\n return flatten_dict(flattened)", "def __dict_to_BetterDict(self, attr):\n if type(self[attr]) == dict:\n self[attr] = BetterDict(self[attr])\n\n return self[attr]", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def recursive_squeeze(dictlike):\n out = {}\n for k, v in dictlike.items():\n if isinstance(v, dict):\n out[k] = recursive_squeeze(v)\n else:\n out[k] = np.squeeze(v)\n return out", "def pivot_nested_dict(nested_dict):\r\n\r\n reverse_nest_dict = {} #Create an empty dictionary\r\n for k, v in nested_dict.items(): #Iterate through each pair of elements\r\n for k2, v2 in v.items(): #Iterate through pair of values\r\n try:\r\n reverse_nest_dict[k2][k] = v2\r\n except KeyError:\r\n reverse_nest_dict[k2] = { k : v2 }\r\n return reverse_nest_dict\r\n \r\n #Create a dictionary that produces a different nested dictionary which\r\n #contains the same values\r", "def depth_wrap(self, value):\n return DictToObj(**value) if isinstance(value, dict) else value", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def to_dict(self):\n return {\n k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items()\n if not k.startswith(\"_\")\n }", "def flatten_dict(d, separator=':', _parent_key=''):\n items = []\n for k, v in d.items():\n new_key = _parent_key + separator + k if _parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, separator=separator, _parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def dict2frozenset(d):\n return frozenset(d.items())", "def pare_dict(d, ref, strict_b=False, **kw):\n strict_b = kw.get(\"strict\", strict_b)\n if strict_b:\n return {k: v for k, v in d.items() if k in ref and v != ref.get(k)}\n return {k: v for k, v in d.items() if k not in ref or v != ref.get(k)}", "def original_dict(self):\n return self.obj.__dict__", "def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n FrozenBatchNorm2d.convert_frozen_batchnorm(self)\n return self", "def to_dict(self):\n return {k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items() if not k.startswith('_')}", "def frozen_time():\n # TODO LATER: Either freezegun should support the system clock, or find something else.\n with freezegun.freeze_time(\"2020-01-01 00:00:00\") as frozen:\n # Use freezegun-supported time instead of system clocks -- for testing purposes only.\n # NB: Patch strictly after the time is frozen -- to use fake_time(), not real time().\n with patch('time.monotonic', 
time.time), patch('time.perf_counter', time.time):\n yield frozen", "def _get_nested_dict(dictionary, key, nested_config=None):\n if key not in dictionary:\n nested = {}\n if nested_config:\n _fill_zero_counters_dict(nested_config, nested)\n dictionary[key] = nested\n return nested\n return dictionary[key]", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def get_safe_dict(target: dict) -> dict:\n return_value = {}\n for k in target:\n return_value[k] = cleanse_value(k, target.get(k))\n return return_value", "def safe_dict(d):\r\n if isinstance(d, dict):\r\n return dict([(k.encode('utf-8'), safe_dict(v)) for k, v in d.iteritems()])\r\n elif isinstance(d, list):\r\n return [safe_dict(x) for x in d]\r\n else:\r\n return d", "def deepcopy(item):\n\n try:\n return copy.deepcopy(item)\n except:\n # Detach grad when necessary\n key_requires_grad = []\n for key, value in zip(item.__dict__.keys(), item.__dict__.values()):\n if requires_grad(value):\n value_detached = detach(value)\n setattr(item, key, value_detached)\n key_requires_grad.append(key)\n\n # Set requires_grad to True when necessary\n item_copy = copy.deepcopy(item)\n for key in key_requires_grad:\n value = getattr(item_copy, key)\n setattr(item_copy, key, state_requires_grad(value))\n return item_copy", "def test_references(self):\n a = DummyObject()\n d = {'a.a.a':1, 'a.b.a':3, 'b':a}\n # Check dict single level keys don't lose reference\n self.assertEqual( dottedDict(d).data['b'], d['b'] )\n self.assertEqual( dottedDict(d).data, dottedDict(dottedDict(d)).data )", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def __post_init__(self) -> None:\n setattr(self, _FROZEN, True)", "def test_roundtrip_nested_map():\n Person = Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"age\", UnsignedInt),\n \"Person\"\n )\n Family = Map(\n MapEntrySpec(1, \"mother\", Person),\n MapEntrySpec(2, \"father\", Person),\n \"Family\"\n )\n\n my_family = {\n \"mother\": {\n \"name\": \"Helen\",\n \"age\": 62\n },\n \"father\": {\n \"name\": \"Mark\",\n \"age\": 65\n }\n }\n\n roundtripped_family = Family.read(Family.to_bytes(my_family))\n assert my_family == roundtripped_family", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def test_AttrDict():\n obj1 = AttrDict()\n obj1.test = 'abc'\n print(obj1)\n\n obj2 = AttrDict({'foo': 'bar'})\n print(obj2)\n\n obj2.update(obj1)\n j = obj2.to_json()\n print('json:\\n', j)\n y = obj2.to_yaml()\n print('yaml:\\n', y)\n\n obj3 = AttrDict.from_json(j)\n print('from json:', obj3)\n obj3 = AttrDict.from_yaml(y)\n print('from yaml:', obj3)", "def update_double_dict(outer, inner):\n for k, v in outer.items():\n outer[k].update(inner[k])", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def copy(self):\n return AttrDict(dict(self).copy())", "def test_freeze(self):\n self.model.freeze()\n self.assertEqual(self.model.frozen(), 
True)", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def invert_dict(d):\r\n if isinstance(d, dict):\r\n temp = d\r\n else:\r\n temp = dict(d)\r\n result = {}\r\n for key, val in temp.iteritems():\r\n if val not in result:\r\n result[val] = []\r\n result[val].append(key)\r\n return result", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def flatten_dict(d, parent_key=\"\", sep=\"_\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def deep_merge(origin: dict, renovator: Mapping) -> dict:\n\n for key, value in renovator.items():\n if isinstance(value, Mapping):\n node = origin.setdefault(key, {})\n deep_merge(node, value)\n else:\n origin[key] = value\n\n return origin", "def ensure_json_serializable(dict_, normalize_containers=False, verbose=0):\n dict_ = copy.deepcopy(dict_)\n\n def _norm_container(c):\n if isinstance(c, dict):\n # Cast to a normal dictionary\n if isinstance(c, OrderedDict):\n if type(c) is not OrderedDict:\n c = OrderedDict(c)\n else:\n if type(c) is not dict:\n c = dict(c)\n return c\n\n walker = ub.IndexableWalker(dict_)\n for prefix, value in walker:\n if isinstance(value, tuple):\n new_value = list(value)\n walker[prefix] = new_value\n elif isinstance(value, np.ndarray):\n new_value = value.tolist()\n walker[prefix] = new_value\n elif isinstance(value, (np.integer)):\n new_value = int(value)\n walker[prefix] = new_value\n elif isinstance(value, (np.floating)):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, (np.complexfloating)):\n new_value = complex(value)\n walker[prefix] = new_value\n elif isinstance(value, decimal.Decimal):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, fractions.Fraction):\n new_value = float(value)\n walker[prefix] = new_value\n elif isinstance(value, pathlib.Path):\n new_value = str(value)\n walker[prefix] = new_value\n elif hasattr(value, '__json__'):\n new_value = value.__json__()\n walker[prefix] = new_value\n elif normalize_containers:\n if isinstance(value, dict):\n new_value = _norm_container(value)\n walker[prefix] = new_value\n\n if normalize_containers:\n # normalize the outer layer\n dict_ = _norm_container(dict_)\n return dict_", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to 
that last level, and key to\n # the final key in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def pivot_nested_dict(nested_dict):\n # declare res as the return object which should be a dict\n res = dict()\n # traverse the pollsters\n for pollster in nested_dict:\n \t# travserse the states\n \tfor state in nested_dict[pollster]:\n \t\t# if first meet a state, we need to create a new dict\n \t\tif state not in res:\n \t\t\tres[state] = dict()\n \t\t# put the pollster value in the state dict\n \t\tres[state][pollster] = nested_dict[pollster][state]\n return res", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. It won't pickle well.\n s[\"_parent\"] = None\n return s", "def flatten_dict(d):\n\n def expand(key, value):\n if isinstance(value, dict):\n return [(key + '.' + k, v) for k, v in flatten_dict(value).items()]\n else:\n return [(key, value)]\n\n items = [item for k, v in d.items() for item in expand(k, v)]\n return dict(items)", "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def test_to_dict(self):\n\n class Person(Model):\n name = StringField()\n age = IntegralField(bounds = (0, None))\n siblings = ListField(of = StringField())\n\n data1 = {\n \"name\": \"Joe Shmoe\",\n \"age\": 21,\n \"siblings\": [\"Dick Shmoe\", \"Jane Shmoe\"]\n }\n person1 = Person(**data1)\n assert person1.to_dict() == data1\n\n # The defined but unset fields should still be present, but set to none\n data2 = {\"notaname\": 2, \"age\": \"lots\"}\n person2 = Person.from_dict(data2)\n assert person2.to_dict() == {\n \"notaname\": 2,\n \"age\": \"lots\",\n \"name\": None,\n \"siblings\": None\n }", "def flatten_nested_dict(prefix, nested_dict):\n\n cleaned_nested_dict = {}\n cleaned_nested_dict = {\n f'{prefix}_{key}': val for key, val in nested_dict.items()}\n\n return cleaned_nested_dict", "def sub_dict(d):\n r = {}\n for k in d:\n if type(d[k]) in prims:\n r[k] = d[k]\n elif type(d[k]) is list:\n r[k] = sub_list(d[k])\n elif type(d[k]) is dict:\n r[k] = sub_dict(d[k])\n else:\n print \"Unknown Type: {}\".format(type(d[k]))\n return r", "def make_dicts(self):\n self._dicts = [tree.to_dict() for tree in self.reaction_trees]\n self._update_route_dict(self._dicts, \"dict\")", "def make_mutable_REMEMBER_CLEANUP_FIRST(self):\n # UNSET the flag to make object immutable and hashable - need to do it in a roundabout way,\n # because the immutability prevents simply \"self.immutable = False\" from working!\n self.__dict__['immutable'] = False\n # but if I put __slots__ in, self.__dict__ won't exist any more... 
TODO Options for then:\n # setattr(self, 'immutable', False) - doesn't seem to work?\n # object.__setattr__(self, 'immutable', False) - does that work?", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return f'FrozenDict({pretty_dict(self._dict)})'", "def nested_to_flat(self, data: dict, target: str, **kwargs):\n data.update(data.pop(target, {}))\n return data", "def clone_state_dict(state_dict):\n return OrderedDict([(name, clone(param)) for name, param in state_dict.items()])", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def dict() -> Dict:\n pass", "def intern_dict(d, fields_to_intern=None):\n fields_to_intern = fields_to_intern or set()\n out = {}\n for k, v in d.iteritems():\n # We can't intern unicode strings, as returned by etcd but all our\n # keys should be ASCII anyway. Use the utf8 encoding just in case.\n k = intern(k.encode(\"utf8\"))\n if k in fields_to_intern:\n if isinstance(v, StringTypes):\n v = intern(v.encode(\"utf8\"))\n elif isinstance(v, list):\n v = intern_list(v)\n out[k] = v\n return out", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def flatten_dict(nested, prefix=''):\n result = dict()\n\n for key, value in nested.items():\n prefix_key = '__'.join([prefix, str(key)]) if len(prefix) else key\n\n if key in IGNORED_DICT_KEYS and not isinstance(value, (dict, list)):\n continue\n\n elif isinstance(value, dict):\n result.update(flatten_dict(value, prefix_key))\n\n elif isinstance(value, (np.ndarray, list)):\n result.update(flatten_array(value, prefix_key))\n\n else:\n result[prefix_key] = value\n\n return result", "def convert_dotKeyToNestedDict(self, tree, key, value):\n\n t = tree\n if \".\" in key:\n key, rest = key.split(\".\", 1)\n if key not in tree:\n t[key] = {}\n self.convert_dotKeyToNestedDict(t[key], rest, value)\n else:\n t[key] = value\n\n return t", "def flatten_dict(d, sep=' ', parent_key=''):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def universal_dict(name, attributes, namespace=None):\n\n def wrapper(cls):\n \"\"\"Inner decorator\n\n Args:\n cls: Input class\n\n Returns:\n Decorated class\n \"\"\"\n\n if namespace:\n qualified_name = '.'.join([namespace, name])\n else:\n qualified_name = name\n\n class RetCls(UniversalDict):\n SERIALIZABLE_ATTRIBUTES = attributes\n JSON_OBJECT_IDENTIFIER = qualified_name\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n self.__dict__['_obj'] = cls(*args, **kwargs)\n\n def __getattr__(self, item):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return getattr(self.__dict__['_obj'], item)\n\n def __setattr__(self, key, value):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return setattr(self.__dict__['_obj'], key, value)\n\n def __str__(self):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n 
return str(self.__dict__['_obj'])\n\n def __repr__(self):\n \"\"\"Pass-through to underlying class attributes\n \"\"\"\n return repr(self.__dict__['_obj'])\n\n assert qualified_name not in UniversalDict.SERIALIZERS\n UniversalDict.SERIALIZERS[qualified_name] = RetCls\n\n return RetCls\n\n return wrapper", "def simplify_dict(d: Dict[str, Any]) -> Dict[str, Any]:\n return {\n k: [ast_to_testing_string(n) for n in v] if k == \"children\" else v\n for k, v in d.items()\n }", "def _flatten_dict(x: Dict) ->Dict:\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict", "def swapdict(d):\n x = {}\n for k, v in d.iteritems():\n x[v] = k\n return x", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def rconvert(self):\n for k in self:\n if isinstance(self[k], dict):\n if not isinstance(self[k], AttrDict):\n self[k] = AttrDict(self[k])\n self[k].rconvert()\n return self", "def dict2obj(dictionary):\r\n class Obj(object):\r\n def __init__(self, dictionary):\r\n self.__dict__.update(dictionary)\r\n return Obj(dictionary)" ]
[ "0.7681707", "0.73136884", "0.6781864", "0.5990238", "0.5973389", "0.59243876", "0.57386374", "0.56837773", "0.5637036", "0.5456011", "0.5356702", "0.5356166", "0.5309256", "0.5258965", "0.52475625", "0.5228192", "0.5178104", "0.51654106", "0.5152976", "0.5144742", "0.51241285", "0.5107595", "0.5090213", "0.50683635", "0.49966976", "0.49966702", "0.49613225", "0.49478617", "0.49434373", "0.4929446", "0.4927069", "0.49238735", "0.4920888", "0.4910363", "0.4899213", "0.48960927", "0.4884121", "0.4880495", "0.4874191", "0.4856728", "0.4841366", "0.48407876", "0.48269665", "0.48254696", "0.48174152", "0.48098713", "0.48059714", "0.48044518", "0.47923964", "0.478179", "0.47753558", "0.47720796", "0.47692764", "0.4765507", "0.47641185", "0.4757799", "0.4757222", "0.47543457", "0.4749226", "0.47486746", "0.47467554", "0.47413036", "0.47345206", "0.4729983", "0.4722985", "0.47106135", "0.47016394", "0.46921828", "0.46809977", "0.4663667", "0.46622762", "0.4656699", "0.4655455", "0.46553272", "0.46553272", "0.46415773", "0.4639779", "0.4631717", "0.4627243", "0.4621289", "0.46149975", "0.4610406", "0.46056655", "0.46039182", "0.46012238", "0.45977607", "0.45961002", "0.45958632", "0.45957574", "0.45866704", "0.45829153", "0.45823696", "0.45823008", "0.45803505", "0.45791748", "0.45718688", "0.45697895", "0.45525002", "0.4551059", "0.45408195" ]
0.6944025
2
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` by transforming it into a (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:
  if isinstance(x, FrozenDict):
    # deep copy internal state of a FrozenDict
    # the dict branch would also work here but
    # it is much less performant because jax.tree_util.tree_map
    # uses an optimized C implementation.
    return jax.tree_util.tree_map(lambda y: y, x._dict)  # type: ignore
  elif isinstance(x, dict):
    ys = {}
    for key, value in x.items():
      ys[key] = unfreeze(value)
    return ys
  else:
    return x
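For context, a minimal usage sketch of the unfreeze helper above; the import path is an assumption that the companion freeze/unfreeze helpers live in Flax's flax.core.frozen_dict module, as they typically do:

# Illustrative only: assumes Flax's flax.core.frozen_dict provides
# the freeze/unfreeze pair used here.
from flax.core.frozen_dict import freeze, unfreeze

params = freeze({'layer': {'kernel': [1.0, 2.0]}})  # immutable FrozenDict
mutable = unfreeze(params)                          # plain nested dict
mutable['layer']['bias'] = [0.0, 0.0]               # safe to mutate now
params = freeze(mutable)                            # back to a FrozenDict

The freeze/unfreeze round trip keeps the canonical container immutable between edits instead of mutating it in place.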
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def unfreeze(obj, ignore_types=[]):\n if obj is None:\n return obj\n\n to_process = [obj]\n while len(to_process) > 0:\n _obj = to_process.pop()\n\n for attr in dir(_obj):\n if attr.startswith(\"__\"):\n continue\n value = getattr(_obj, attr)\n if isinstance(value, FrozenDict):\n value = {k: v for k, v in value.items()}\n to_process.extend(value.values())\n elif isinstance(value, FrozenList):\n value = [x for x in value]\n to_process.extend(value)\n elif not callable(value) and not isinstance(value, tuple(ignore_types)):\n to_process.append(value)\n\n try:\n setattr(_obj, attr, value)\n except BaseException:\n pass\n\n return obj", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def pop(\n x: Union[FrozenDict, Dict[str, Any]], key: str\n) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]:\n\n if isinstance(x, FrozenDict):\n return x.pop(key)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n value = new_dict.pop(key)\n return new_dict, value\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def dict2frozenset(d):\n return frozenset(d.items())", "def freeze(value):\n if isinstance(value, list):\n value = tuple(freeze(e) for e in value)\n elif isinstance(value, set):\n value = frozenset(freeze(e) for e in value)\n elif isinstance(value, dict):\n value = frozendict({k: freeze(v) for k, v in value.items()})\n elif isinstance(value, tuple):\n value = tuple(freeze(e) for e in value)\n else:\n pass\n return value", "def fl_unfreeze_form(ptr_flform):\n _fl_unfreeze_form = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_form\", \\\n None, [cty.POINTER(xfdata.FL_FORM)], \\\n \"\"\"void fl_unfreeze_form(FL_FORM * form) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n _fl_unfreeze_form(ptr_flform)", "def unflatten_dict(flat):\n 
unflattened = dict()\n\n for key, value in sorted(flat.items(), key=_key_order):\n if '__' in key:\n key, subkey = key.split('__', 1)\n subkey, name = subkey.rsplit('__', 1)\n\n if name.isdigit():\n column_index = int(name)\n row_index = int(subkey)\n\n array = unflattened.setdefault(key, list())\n\n if len(array) == row_index:\n row = list()\n array.append(row)\n elif len(array) == row_index + 1:\n row = array[row_index]\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n if len(row) == column_index:\n row.append(value)\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n else:\n subdict = unflattened.setdefault(key, dict())\n if subkey.isdigit():\n subkey = int(subkey)\n\n inner = subdict.setdefault(subkey, dict())\n inner[name] = value\n\n else:\n unflattened[key] = value\n\n return unflattened", "def _unparse_dict(d, strategies=None):\n\n def _unparse_val(val):\n for instance_type, func in strategies:\n if isinstance(val, instance_type):\n return func(val)\n else:\n return val\n\n strategies = strategies or []\n out = dict()\n for k, v in d.items():\n if isinstance(v, dict):\n v = _unparse_dict(v, strategies=strategies)\n elif isinstance(v, list):\n v = [_unparse_val(val) for val in v]\n elif isinstance(v, tuple):\n v = tuple(_unparse_val(val) for val in v)\n else:\n v = _unparse_val(v)\n out[k] = v\n return out", "def invert_dict(d):\r\n if isinstance(d, dict):\r\n temp = d\r\n else:\r\n temp = dict(d)\r\n result = {}\r\n for key, val in temp.iteritems():\r\n if val not in result:\r\n result[val] = []\r\n result[val].append(key)\r\n return result", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def fl_unfreeze_all_forms():\n _fl_unfreeze_all_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_unfreeze_all_forms\", \\\n None, [], \\\n \"\"\"void fl_unfreeze_all_forms() \"\"\")\n library.check_if_flinitialized()\n _fl_unfreeze_all_forms()", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key 
in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def rconvert(self):\n for k in self:\n if isinstance(self[k], dict):\n if not isinstance(self[k], AttrDict):\n self[k] = AttrDict(self[k])\n self[k].rconvert()\n return self", "def InvertDict(dict_in):\n return dict(zip(dict_in.values(), dict_in.keys()))", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def _rev_dict(d):\n return {v: k for k, v in d.items()}", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def clear_dict(d: dict) -> dict:\n # TODO delete if not used\n return {k: v for k, v in d.items() if v is not None}", "def invert_dict(d):\n inv_d = {}\n for k, v in d.items():\n inv_d[v] = inv_d.get(v, [])\n inv_d[v].append(k)\n return inv_d", "def update(d, u):\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def clean_dict(d):\n\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_dict(v) for v in d) if v]\n return OrderedDict([(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v])", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def invert_dict(d):\n newd={}\n for k in d:\n newd[d[k]]=k\n return newd", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def _flatten_dict(x: Dict) ->Dict:\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def unflatten(dictionary, sep=\".\"):\n unflattened_dictionary = {}\n for key, value in dictionary.items():\n parts = key.split(sep)\n sub_dictionary = unflattened_dictionary\n for part in parts[:-1]:\n if part not in sub_dictionary:\n sub_dictionary[part] = {}\n sub_dictionary = sub_dictionary[part]\n sub_dictionary[parts[-1]] = value\n return unflattened_dictionary", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def unflatten(arg):\n if hasattr(arg, \"iteritems\"):\n items = arg.iteritems()\n elif hasattr(arg, \"items\"):\n items = arg.items()\n else:\n items = arg\n\n data = {}\n holders = []\n 
for flat_key, val in items:\n parsed_key = _parse_key(flat_key)\n obj = data\n for depth, (key, next_key) in enumerate(zip(parsed_key, parsed_key[1:]), 1):\n if isinstance(next_key, string_type):\n holder_type = _dict_holder\n else:\n holder_type = _list_holder\n\n if key not in obj:\n obj[key] = holder_type(_unparse_key(parsed_key[:depth]))\n holders.append((obj, key))\n elif not isinstance(obj[key], holder_type):\n raise ValueError(\n \"conflicting types %s and %s for key %r\"\n % (\n _node_type(obj[key]),\n holder_type.node_type,\n _unparse_key(parsed_key[:depth]),\n )\n )\n obj = obj[key]\n\n last_key = parsed_key[-1]\n if isinstance(obj.get(last_key), _holder):\n raise ValueError(\n \"conflicting types %s and terminal for key %r\"\n % (_node_type(obj[last_key]), flat_key)\n )\n obj[last_key] = val\n\n for obj, key in reversed(holders):\n obj[key] = obj[key].getvalue()\n\n return data", "def flatten_dict(d, separator=':', _parent_key=''):\n items = []\n for k, v in d.items():\n new_key = _parent_key + separator + k if _parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, separator=separator, _parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def invert_dict(invertible_dict):\n inverted = {}\n for k, v in six.iteritems(invertible_dict):\n if not isinstance(v, Hashable):\n raise TypeError(u'Expected an invertible dict, but value at key {} has type {}'.format(\n k, type(v).__name__))\n if v in inverted:\n raise TypeError(u'Expected an invertible dict, but keys '\n u'{} and {} map to the same value'.format(\n inverted[v], k))\n inverted[v] = k\n return inverted", "def recursive_squeeze(dictlike):\n out = {}\n for k, v in dictlike.items():\n if isinstance(v, dict):\n out[k] = recursive_squeeze(v)\n else:\n out[k] = np.squeeze(v)\n return out", "def invert_dict(d):\n inverse = dict()\n for key in d:\n val = d[key]\n # If val is in inverse, setdefault(val,[]) will just return\n # inverse[val], so this is like saying inverse[val].append(key).\n # If val is *not* in inverse, setdefault will create the key-value\n # pair {val: []}, then return inverse[val] (which is now []).\n # Then we call append(key) on this new inverse[val], which will yield\n # inverse[val]=[key]\n inverse.setdefault(val,[]).append(key)\n return inverse", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def unfrozen(self):\n archive_name = self.get_archive_name()\n\n # Decompilation for all .pyc files (inside of archive or binary)\n for pyc_file in self.unpack_archive(archive_name):\n self.decompilation(pyc_file)\n\n os.chdir(self.current_path)\n\n print(\"\\nWork is done.\")", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def flatten(dictionary, sep=\".\"):\n\n def _flatten(dictionary):\n if dictionary == {}:\n return dictionary\n\n key, value = dictionary.popitem()\n if not isinstance(value, dict) or not value:\n new_dictionary = {key: value}\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n flat_sub_dictionary = flatten(value, sep=sep)\n for flat_sub_key in 
list(flat_sub_dictionary.keys()):\n flat_key = key + sep + flat_sub_key\n flat_sub_dictionary[flat_key] = flat_sub_dictionary.pop(flat_sub_key)\n\n new_dictionary = flat_sub_dictionary\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n return _flatten(copy.deepcopy(dictionary))", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def dictflip(dictionary):\n\n return {v: k for k, v in dictionary.items()}", "def del_dict_attrs(d, key):\n key_parts = key.split('.')\n if len(key_parts) > 1:\n d[key_parts[:1][0]] = del_dict_attrs(d[key_parts[:1][0]], '.'.join(key_parts[1:]))\n else:\n del d[key_parts[:1][0]]\n return d", "def remove_fc(state_dict):\n return {key: value for key, value in state_dict.items() if not key.startswith('fc.')}", "def dict_pop(d, key):\n return d.pop(key)", "def recursive_drop_falsy(d):\r\n if isinstance(d, dict):\r\n return dict((k, recursive_drop_falsy(v)) for k, v in d.items() if v)\r\n elif isinstance(d, list):\r\n return map(recursive_drop_falsy, d)\r\n elif isinstance(d, basestring):\r\n return force_bytes(d)\r\n else:\r\n return d", "def invertDictionary(input_dict):\n inverse_dict = {v: k for k, v in input_dict.items()}\n\n return inverse_dict", "def truncate_dict(dictionary: Dict, n: int) -> Dict:\n return {k: v for (k, v) in list(dictionary.items())[:n]}", "def dictkeyclean(d):\r\n new_d = {}\r\n for k, v in d.iteritems():\r\n new_d[str(k)] = v\r\n return new_d", "def flatten_dict(d):\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in flatten_dict(value).items():\n yield subkey, subvalue\n else:\n yield key, value\n\n return dict(items())", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def flatten_dict(d):\n\n def expand(key, value):\n if isinstance(value, dict):\n return [(key + '.' 
+ k, v) for k, v in flatten_dict(value).items()]\n else:\n return [(key, value)]\n\n items = [item for k, v in d.items() for item in expand(k, v)]\n return dict(items)", "def unfreeze(name, path=None, use_vt=None):\n _ensure_exists(name, path=path)\n if state(name, path=path) == \"stopped\":\n raise CommandExecutionError(f\"Container '{name}' is stopped\")\n cmd = \"lxc-unfreeze\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n return _change_state(cmd, name, \"running\", path=path, use_vt=use_vt)", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def update_dict(d, u, omit_new=False):\n\n for k, v in u.items():\n if k not in d and omit_new:\n continue\n\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict(d.get(k, {}), v, omit_new)\n elif isinstance(v, list):\n d[k] = [update_dict(i, j, omit_new) if None not in (i, j) else\n i if j is None else j\n for (i, j) in itertools.zip_longest(d.get(k, []), v)]\n else:\n d[k] = v\n return d", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def to_dict(self):\n return {k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items() if not k.startswith('_')}", "def make_dict_unstructure_fn(\n cl: type[T],\n converter: BaseConverter,\n _cattrs_use_linecache: bool = True,\n **kwargs: AttributeOverride,\n) -> Callable[[T], dict[str, Any]]:\n origin = get_origin(cl)\n attrs = _adapted_fields(origin or cl) # type: ignore\n req_keys = _required_keys(origin or cl)\n\n mapping = {}\n if is_generic(cl):\n mapping = generate_mapping(cl, mapping)\n\n for base in getattr(origin, \"__orig_bases__\", ()):\n if is_generic(base) and not str(base).startswith(\"typing.Generic\"):\n mapping = generate_mapping(base, mapping)\n break\n\n # It's possible for origin to be None if this is a subclass\n # of a generic class.\n if origin is not None:\n cl = origin\n\n cl_name = cl.__name__\n fn_name = \"unstructure_typeddict_\" + cl_name\n globs = {}\n lines = []\n internal_arg_parts = {}\n\n # We keep track of what we're generating to help with recursive\n # class graphs.\n try:\n working_set = already_generating.working_set\n except AttributeError:\n working_set = set()\n already_generating.working_set = working_set\n if cl in working_set:\n raise RecursionError()\n working_set.add(cl)\n\n try:\n # We want to short-circuit in certain cases and return the identity\n # function.\n # We short-circuit if all of these are true:\n # * no attributes have been overridden\n # * all attributes resolve to `converter._unstructure_identity`\n for a in attrs:\n attr_name = 
a.name\n override = kwargs.get(attr_name, neutral)\n if override != neutral:\n break\n handler = None\n t = a.type\n nrb = get_notrequired_base(t)\n if nrb is not NOTHING:\n t = nrb\n\n if isinstance(t, TypeVar):\n if t.__name__ in mapping:\n t = mapping[t.__name__]\n else:\n handler = converter.unstructure\n elif is_generic(t) and not is_bare(t) and not is_annotated(t):\n t = deep_copy_with(t, mapping)\n\n if handler is None:\n try:\n handler = converter._unstructure_func.dispatch(t)\n except RecursionError:\n # There's a circular reference somewhere down the line\n handler = converter.unstructure\n is_identity = handler == converter._unstructure_identity\n if not is_identity:\n break\n else:\n # We've not broken the loop.\n return converter._unstructure_identity\n\n for ix, a in enumerate(attrs):\n attr_name = a.name\n override = kwargs.get(attr_name, neutral)\n if override.omit:\n lines.append(f\" res.pop('{attr_name}', None)\")\n continue\n if override.rename is not None:\n # We also need to pop when renaming, since we're copying\n # the original.\n lines.append(f\" res.pop('{attr_name}', None)\")\n kn = attr_name if override.rename is None else override.rename\n attr_required = attr_name in req_keys\n\n # For each attribute, we try resolving the type here and now.\n # If a type is manually overwritten, this function should be\n # regenerated.\n handler = None\n if override.unstruct_hook is not None:\n handler = override.unstruct_hook\n else:\n t = a.type\n nrb = get_notrequired_base(t)\n if nrb is not NOTHING:\n t = nrb\n\n if isinstance(t, TypeVar):\n if t.__name__ in mapping:\n t = mapping[t.__name__]\n else:\n handler = converter.unstructure\n elif is_generic(t) and not is_bare(t) and not is_annotated(t):\n t = deep_copy_with(t, mapping)\n\n if handler is None:\n try:\n handler = converter._unstructure_func.dispatch(t)\n except RecursionError:\n # There's a circular reference somewhere down the line\n handler = converter.unstructure\n\n is_identity = handler == converter._unstructure_identity\n\n if not is_identity:\n unstruct_handler_name = f\"__c_unstr_{ix}\"\n globs[unstruct_handler_name] = handler\n internal_arg_parts[unstruct_handler_name] = handler\n invoke = f\"{unstruct_handler_name}(instance['{attr_name}'])\"\n elif override.rename is None:\n # We're not doing anything to this attribute, so\n # it'll already be present in the input dict.\n continue\n else:\n # Probably renamed, we just fetch it.\n invoke = f\"instance['{attr_name}']\"\n\n if attr_required:\n # No default or no override.\n lines.append(f\" res['{kn}'] = {invoke}\")\n else:\n lines.append(f\" if '{kn}' in instance: res['{kn}'] = {invoke}\")\n\n internal_arg_line = \", \".join([f\"{i}={i}\" for i in internal_arg_parts])\n if internal_arg_line:\n internal_arg_line = f\", {internal_arg_line}\"\n for k, v in internal_arg_parts.items():\n globs[k] = v\n\n total_lines = [\n f\"def {fn_name}(instance{internal_arg_line}):\",\n \" res = instance.copy()\",\n *lines,\n \" return res\",\n ]\n script = \"\\n\".join(total_lines)\n\n fname = generate_unique_filename(\n cl, \"unstructure\", reserve=_cattrs_use_linecache\n )\n\n eval(compile(script, fname, \"exec\"), globs)\n\n fn = globs[fn_name]\n if _cattrs_use_linecache:\n linecache.cache[fname] = len(script), None, total_lines, fname\n finally:\n working_set.remove(cl)\n if not working_set:\n del already_generating.working_set\n\n return fn", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif 
isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def inverse(dict_):\n idict = dict([(value,key) for key,value in dict_.iteritems()])\n if len(idict)!=len(dict_):\n raise ValueError(\"Dictionary has no inverse (values not unique).\")\n return idict", "def swapdict(d):\n x = {}\n for k, v in d.iteritems():\n x[v] = k\n return x", "def to_dict(self):\n return {\n k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items()\n if not k.startswith(\"_\")\n }", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def repackage_state(self, state):\n state['hxs'] = state['hxs'].detach()\n state['cxs'] = state['cxs'].detach()\n return state", "def unflatten(self): \n self.assign(self.get_unflattened_circuit())\n self._expr_map = None", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def dict_deep_update(d, u, handlers=None):\n if handlers is None:\n handlers = {}\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = dict_deep_update(d.get(k, {}), v, handlers)\n d[k] = r\n elif k in d:\n h = handlers.get(type(v), None)\n if h is not None:\n d[k] = h(d[k], u[k])\n else:\n d[k] = u[k]\n else:\n d[k] = u[k]\n return d", "def to_dict(self):\r\n new_dict = {}\r\n for key, val in self.items():\r\n if isinstance(val, NestedDict):\r\n new_dict[key] = val.to_dict()\r\n else:\r\n new_dict[key] = val\r\n return new_dict", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def flatten_dict(d, parent_key=\"\", sep=\"_\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def subtract(dict_a, dict_b, strict=False):\n if not strict:\n return subtract_by_key(dict_a, dict_b)\n\n difference_dict = {}\n for key in dict_a:\n if key not in dict_b or dict_b[key] != dict_a[key]:\n difference_dict[key] = dict_a[key]\n\n return difference_dict", "def safe_dict(d):\r\n if isinstance(d, dict):\r\n return dict([(k.encode('utf-8'), safe_dict(v)) for k, v in d.iteritems()])\r\n elif isinstance(d, list):\r\n return [safe_dict(x) for x in d]\r\n else:\r\n return d", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def pivot_nested_dict(nested_dict):\r\n\r\n reverse_nest_dict = {} #Create an empty dictionary\r\n for k, v in nested_dict.items(): #Iterate through each pair of elements\r\n for k2, v2 in v.items(): #Iterate through pair of values\r\n try:\r\n reverse_nest_dict[k2][k] = v2\r\n except KeyError:\r\n reverse_nest_dict[k2] = { k : v2 }\r\n return reverse_nest_dict\r\n \r\n #Create a dictionary that produces a different nested dictionary which\r\n #contains the same values\r", "def flatten_dict(dict_input):\n flattened_dict = dict()\n\n for key, value in dict_input.items():\n if isinstance(value, dict):\n new_keys = sorted(value.keys())\n for new_key in new_keys:\n entry = {key + '_' + new_key: value[new_key]}\n flattened_dict.update(entry)\n else:\n 
entry = {key: value}\n flattened_dict.update(entry)\n\n return flattened_dict", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def flatten_dict(nested):\n flattened = {}\n for key, value in nested.items():\n if isinstance(value, Mapping):\n for subkey, subval in value.items():\n newkey = '.'.join([key, subkey])\n flattened[newkey] = subval\n flatten_dict(flattened)\n else:\n flattened[key] = value\n mappings = [isinstance(value, Mapping) for key, value in flattened.items()]\n if len(set(mappings)) == 1 and set(mappings).pop() is False:\n return flattened\n else:\n return flatten_dict(flattened)", "def nest_dict(dct, keys):\n nested_dict = dct\n for key in reversed(keys):\n nested_dict = RecursiveDict({key: nested_dict})\n return nested_dict", "def flatten_dict(d, sep=' ', parent_key=''):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def dictcopy(dic):\n keys = list(dic.keys())\n values = [list(i) for i in dic.values()]\n return dict(zip(keys,values))", "def delete_keys_from_dict(self, orig_dict, keys_whitelist):\n for k in list(orig_dict.keys()):\n if k not in keys_whitelist:\n del orig_dict[k]\n\n for v in orig_dict.values():\n if isinstance(v, dict):\n self.delete_keys_from_dict(v, keys_whitelist)\n\n return orig_dict", "def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat", "def decode_dict(d):\n\n new = {}\n for key, value in d.items():\n try:\n new_value = value.decode()\n except: new_value = value\n if isinstance(new_value, str) and new_value and new_value[0] == \"\\x00\":\n new_value = new_value.encode()\n if isinstance(new_value, bytes):\n new_value = parse_binary_field(new_value)\n if isinstance(new_value, list) and new_value:\n if isinstance(new_value[0], dict):\n new_value = [decode_dict(x) for x in new_value]\n elif isinstance(new_value[0], bytes):\n new_value = [x.decode() for x in new_value]\n new[key.decode() if isinstance(key, bytes) else key] = new_value\n return new", "def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts", "def filter_dict(fdict, mask):\n\n if fdict is None:\n fdict = dict()\n\n if mask is None:\n mask = []\n\n return {k: v for (k, v) in fdict.items() if k in mask}", "def ordered_dict_to_dict(d: OrderedDict) -> dict:\n return loads(dumps(d))" ]
[ "0.678866", "0.63871247", "0.63075334", "0.6243246", "0.61387753", "0.61324066", "0.5833921", "0.56678116", "0.5610028", "0.5581237", "0.547987", "0.528664", "0.5166779", "0.51488274", "0.5131935", "0.513004", "0.5127286", "0.51143354", "0.5108335", "0.5104038", "0.50658894", "0.5040167", "0.50181496", "0.4999344", "0.49814385", "0.49768424", "0.49741423", "0.4973419", "0.49233168", "0.491484", "0.49138203", "0.48874775", "0.4845291", "0.48338786", "0.48071483", "0.48070642", "0.4794826", "0.47861212", "0.47770166", "0.47766498", "0.47748724", "0.47518197", "0.47490755", "0.47308335", "0.472897", "0.47069323", "0.46998808", "0.46948013", "0.46934924", "0.46824077", "0.4673574", "0.46612746", "0.46547586", "0.46471268", "0.46331736", "0.46262282", "0.46069288", "0.46064824", "0.45996463", "0.4591553", "0.45708337", "0.45656824", "0.45608354", "0.45595428", "0.45564926", "0.45507422", "0.45342037", "0.4513698", "0.4507084", "0.44986528", "0.4494859", "0.44940928", "0.44844913", "0.4481367", "0.44805962", "0.44750804", "0.44679326", "0.44634515", "0.44545266", "0.4453122", "0.44472495", "0.44472328", "0.4442233", "0.4441484", "0.44407386", "0.44305772", "0.44304478", "0.44259986", "0.44226974", "0.44086206", "0.44000208", "0.4397135", "0.43933767", "0.43907094", "0.4386281", "0.4384898", "0.43798634", "0.43703642", "0.43494862", "0.43472135" ]
0.80311483
0
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.copy`.
def copy(
    x: Union[FrozenDict, Dict[str, Any]],
    add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict({}),
) -> Union[FrozenDict, Dict[str, Any]]:
  if isinstance(x, FrozenDict):
    return x.copy(add_or_replace)
  elif isinstance(x, dict):
    new_dict = jax.tree_map(lambda x: x, x)  # make a deep copy of dict x
    new_dict.update(add_or_replace)
    return new_dict
  raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
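A minimal usage sketch of the copy utility above, shown for both a FrozenDict and a plain dict; the import path is an assumption (recent Flax releases expose this helper alongside FrozenDict in flax.core.frozen_dict):

# Illustrative only: assumes a recent Flax where flax.core.frozen_dict
# exposes FrozenDict and this copy utility.
from flax.core.frozen_dict import FrozenDict, copy

frozen = FrozenDict({'a': 1, 'b': 2})
updated = copy(frozen, {'b': 20, 'c': 3})  # new FrozenDict; 'b' replaced, 'c' added
assert frozen['b'] == 2 and updated['b'] == 20

plain = {'a': 1}
updated_plain = copy(plain, {'b': 2})      # deep-copied plain dict: {'a': 1, 'b': 2}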
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def extend_dict(source_dict, diff=None, deep=False):\n if deep:\n new_dict = deepcopy(source_dict)\n else:\n new_dict = copy(source_dict)\n\n if diff:\n new_dict.update(diff)\n return new_dict", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def updated_with(orig_dict, *new_values):\n newdict = dict(orig_dict)\n for vals in new_values:\n if vals:\n newdict.update(vals)\n return newdict", "def dict_merge(base, upd, inplace=False):\n assert quacks_like_dict(base), quacks_like_dict(upd)\n dst = base if inplace else deepcopy(base)\n\n stack = [(dst, upd)]\n while stack:\n current_dst, current_src = stack.pop()\n for key in current_src:\n if key not in current_dst:\n current_dst[key] = current_src[key]\n else:\n if quacks_like_dict(current_src[key]) and quacks_like_dict(current_dst[key]) :\n stack.append((current_dst[key], current_src[key]))\n else:\n current_dst[key] = current_src[key]\n return dst", "def override_dict_values(d1, d2):\n new = d1.copy()\n for k, v in d2.items():\n if isinstance(v, dict):\n new[k] = override_dict_values(new[k], d2[k])\n else:\n new[k] = v\n\n return new", "def add_to_dict(from_dict, to_dict):\n for k, v in list(from_dict.items()):\n if hasattr(v, 'copy') and callable(getattr(v, 'copy')):\n to_dict[k] = v.copy()\n else:\n to_dict[k] = v", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def merge_dict(d: dict, overwrite=False, inplace=False, **kwargs):\n nd = dict([(k, v) for k, v in d.items()] + [(k, v) for k, v in kwargs.items() if overwrite or k not in d])\n if inplace:\n d.update(nd)\n return d\n return nd", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def extend(primary: Mapping, *others: Mapping, in_place=False):\n others = flatten(others)\n if not in_place:\n primary = dict(primary or {})\n for other in others:\n if other is None:\n continue\n for key, value in other.items():\n primary[key] = value\n return primary", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do 
not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]", "def deepish_copy(org):\n out = dict().fromkeys(org)\n for k,v in org.items():\n try:\n out[k] = v.copy() # dicts, sets\n except AttributeError:\n try:\n out[k] = v[:] # lists, tuples, strings, unicode\n except TypeError:\n out[k] = v # ints\n \n return out", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def update_dict(original_dict, new_dict):\n if new_dict == None: return original_dict\n for k in new_dict:\n if k not in original_dict:\n original_dict[k] = []\n original_dict[k].append(new_dict[k])\n else: original_dict[k].append(new_dict[k])\n return original_dict", "def mergedict(x, y):\n z = x.copy()\n z.update(y)\n return z", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def _merge(old_dict, new_dict):\n dict3 = old_dict.copy()\n for k, v in new_dict.items():\n if k in dict3:\n dict3[k].append(v)\n else:\n dict3[k] = [v]\n return dict3", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def copy(self):\n return pdict(dict.copy(self))", "def update_dict(new,old):", "def add_to_dict(source_dict=None, working_dict=None, new_key=None, new_dict=None):\n if source_dict is None or working_dict is None or new_key is None or new_dict is None:\n raise RuntimeError(\"Invalid arguments passed, one of is == None.\")\n\n if working_dict[new_key] is None:\n working_dict[new_key] = new_dict\n else:\n working_dict[new_key].update(new_dict)\n\n return source_dict.update(working_dict)", "def safe_update(dict_to, dict_from):\n for key, val in dict(dict_from).iteritems():\n if key in dict_to:\n raise KeyError(key)\n dict_to[key] = val\n return dict_to", "def merge_dict(own: dict, other: dict) -> dict:\n for element in other:\n if own.get(element, None) is None:\n own[element] = other[element]\n else:\n raise ValueError('Conflicting kwargs')\n return own", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def apply_dict_overrides(dictionary: dict, **overrides) -> dict:\n # I'm not entirely sure the treatment of None is the right thing. 
Need to look into that.\n # Then again, if None were stored, then apply_dict_overrides(d, var1=1, var2=2, var3=None)\n # would be no different than (dict(d, var1=1, var2=2, var3=None). It might be more useful\n # and/or interesting if it would actually remove the key instead. -kmp 18-Jul-2020\n for k, v in overrides.items():\n if v is not None:\n dictionary[k] = v\n # This function works by side effect, but getting back the changed dict may be sometimes useful.\n return dictionary", "def extend_dict(org_dict, new_dict, allow_overwrite=None):\n if not new_dict:\n return org_dict\n if not org_dict:\n return new_dict\n for key, value in new_dict.iteritems():\n if value:\n if not org_dict.get(key):\n # orginal dict doesn't has this key (or no value), just overwrite\n org_dict[key] = value\n else:\n # original dict already has this key, append results\n if isinstance(value, list):\n # make sure that our original value also is a list\n if isinstance(org_dict[key], list):\n for item in value:\n if item not in org_dict[key]:\n org_dict[key].append(item)\n # previous value was str, combine both in list\n elif isinstance(org_dict[key], (str, unicode)):\n org_dict[key] = org_dict[key].split(\" / \")\n for item in value:\n if item not in org_dict[key]:\n org_dict[key].append(item)\n elif isinstance(value, dict):\n org_dict[key] = extend_dict(org_dict[key], value, allow_overwrite)\n elif allow_overwrite and key in allow_overwrite:\n # value may be overwritten\n org_dict[key] = value\n else:\n # conflict, leave alone\n pass\n return org_dict", "def extend(d, k, v):\n\tn = d.copy()\n\tn[k] = v\n\treturn n", "def dictcopy(dic):\n keys = list(dic.keys())\n values = [list(i) for i in dic.values()]\n return dict(zip(keys,values))", "def copymod(dct, without=None, **kwargs):\r\n if without is None:\r\n without = []\r\n rval = copy(dct)\r\n for a in without:\r\n if a in rval:\r\n del rval[a]\r\n for kw, val in kwargs.items():\r\n rval[kw] = val\r\n return rval", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def filt_dict(d: dict, incl: np.ndarray,\n copy=True, ignore_diff_len=True) -> dict:\n assert np.issubdtype(incl.dtype, bool)\n\n if copy:\n if ignore_diff_len:\n return {\n k: (deepcopy(v[incl]) if (\n (isinstance(v, np.ndarray)\n or isinstance(v, torch.Tensor))\n and v.shape[0] == incl.shape[0]\n ) else deepcopy(v))\n for k, v in d.items()\n }\n else:\n return {k: deepcopy(v[incl]) for k, v in d.items()}\n else:\n if ignore_diff_len:\n return {\n k: (v[incl] if (\n (isinstance(v, np.ndarray)\n or isinstance(v, torch.Tensor))\n and v.shape[0] == incl.shape[0]\n ) else v)\n for k, v in d.items()\n }\n else:\n return {k: v[incl] for k, v in d.items()}", "def _append_dicts(x, y):\n z = x.copy() # start with x's keys and values\n z.update(y) # modifies z with y's keys and values & returns None\n return z", "def dict_none_to_new(base_dict, new_dict):\r\n for key, value in list(new_dict.items()):\r\n base_value = base_dict.get(key)\r\n if base_value is None:\r\n base_dict[key] = value", "def dict_none_to_new(base_dict, new_dict):\r\n for key, value in list(new_dict.items()):\r\n base_value = base_dict.get(key)\r\n if base_value is None:\r\n base_dict[key] = value", "def merge_dict(lhs, rhs, override=True):\n if not isinstance(lhs, dict) or not isinstance(rhs, dict):\n if override:\n return rhs\n else:\n return lhs\n\n for key, value in rhs.items():\n 
if key not in lhs:\n lhs[key] = rhs[key]\n else:\n lhs[key] = merge_dict(lhs[key], value, override)\n\n return lhs", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def deep_merge(origin: dict, renovator: Mapping) -> dict:\n\n for key, value in renovator.items():\n if isinstance(value, Mapping):\n node = origin.setdefault(key, {})\n deep_merge(node, value)\n else:\n origin[key] = value\n\n return origin", "def merge_dict(change_dict: Dict[str, Any], orig_dict: MutableMapping[str, Any]):\n for k, v in change_dict.items():\n if not orig_dict.get(k):\n orig_dict[k] = v\n else:\n if isinstance(orig_dict[k], dict) and isinstance(v, dict):\n merge_dict(v, orig_dict[k])\n else:\n orig_dict[k] = v", "def deep_copy(old_dict, parent=None, depth=None, main=None):\n\n # Is this a copy starting from the top level?\n if isinstance(old_dict, configobj.ConfigObj):\n new_dict = configobj.ConfigObj('',\n encoding=old_dict.encoding,\n default_encoding=old_dict.default_encoding,\n interpolation=old_dict.interpolation)\n else:\n # No. It's a copy of something deeper down. If no parent or main is given, then\n # adopt the parent and main of the incoming dictionary.\n new_dict = configobj.Section(parent if parent is not None else old_dict.parent,\n depth if depth is not None else old_dict.depth,\n main if main is not None else old_dict.main)\n for entry in old_dict:\n # Avoid interpolation by using the version of __getitem__ from dict\n old_value = dict.__getitem__(old_dict, entry)\n if isinstance(old_value, configobj.Section):\n new_value = deep_copy(old_value, new_dict, new_dict.depth+1, new_dict.main)\n elif isinstance(old_value, list):\n # Make a copy\n new_value = list(old_value)\n elif isinstance(old_value, tuple):\n # Make a copy\n new_value = tuple(old_value)\n else:\n # It's a scalar\n new_value = old_value\n new_dict[entry] = new_value\n return new_dict", "def update_config(original, new):\n for k, v in new.items():\n if isinstance(v, abc.Mapping):\n original[k] = update_config(original.get(k, {}), v)\n else:\n original[k] = v\n return original", "def dict_update(original, temp):\n is_success = False\n result = {}\n original_temp = original.copy()\n for key in temp.keys():\n global_key_value = original_temp.get(key)\n local_key_value = temp.get(key)\n if key not in original_temp.keys():\n result.update({key: local_key_value})\n else:\n result.update({key: local_key_value + global_key_value})\n del original_temp[key]\n result.update(original_temp)\n return result, is_success", "def update_copy(d, _new=None, **kw):\n\n d = d.copy()\n if _new:\n d.update(_new)\n d.update(**kw)\n return d", "def merge_dicts(dict_a, dict_b):\n dict_c = dict_a.copy()\n dict_c.update(dict_b)\n return dict_c", "def merge_dict(base, new, extend=True):\n\n if isinstance(new, dict):\n for key, value in new.items():\n if key not in base:\n base[key] = value\n elif extend and isinstance(value, dict):\n base[key] = merge_dict(\n base=base.get(key, {}), new=value, extend=extend\n )\n elif extend and isinstance(value, list):\n base[key].extend(value)\n elif extend and isinstance(value, (tuple, set)):\n if isinstance(base.get(key), tuple):\n base[key] += tuple(value)\n elif isinstance(base.get(key), list):\n base[key].extend(list(value))\n else:\n base[key] = new[key]\n elif isinstance(new, list):\n if extend:\n base.extend(new)\n else:\n base = new\n\n return base", "def merge_two_dicts(x, y):\r\n z = x.copy()\r\n z.update(y)\r\n return z", "def _update_default_dict(main, other):\r\n for k, 
v in other.items():\r\n main[k] += v", "def update_dict(d, u, omit_new=False):\n\n for k, v in u.items():\n if k not in d and omit_new:\n continue\n\n if isinstance(v, collections.abc.Mapping):\n d[k] = update_dict(d.get(k, {}), v, omit_new)\n elif isinstance(v, list):\n d[k] = [update_dict(i, j, omit_new) if None not in (i, j) else\n i if j is None else j\n for (i, j) in itertools.zip_longest(d.get(k, []), v)]\n else:\n d[k] = v\n return d", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def merge_fields(d, new):\n if not new:\n return\n\n for k, v in new.iteritems():\n if k not in d:\n d[k] = v\n elif isinstance(v, list) and isinstance(d[k], list):\n d[k].extend(v)\n elif isinstance(v, dict) and isinstance(d[k], dict):\n d[k].update(v)\n else:\n d[k] = v", "def update(x, **entries):\n if isinstance(x, dict):\n x.update(entries)\n else:\n x.__dict__.update(entries)\n return x", "def update(d, u):\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def new_dict(key, value, n_keys=0):\n # With JIT disabled, ignore all arguments and return a Python dict.\n return dict()", "def fillDict(valDict, nowDate=datetime.now()):\n copyDict = copy.deepcopy(valDict)\n copyDict[names.year] = nowDate.year\n copyDict[names.month] = nowDate.month\n copyDict[names.day] = nowDate.day\n return copyDict", "def merge_kwargs(base_dict: T, **kwargs: Any) -> T:\n return cast(T, {**cast(Dict, base_dict), **kwargs})", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_two_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def merge_dict(dict1, dict2):\n merged_dict = dict1.copy()\n merged_dict.update(dict2)\n return merged_dict", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def copy(self):\n return AttrDict(dict(self).copy())", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def compute_dict_delta(old_dict, new_dict) -> Tuple[dict, dict, dict]:\n added_keys, removed_keys, updated_keys = compute_iterable_delta(\n old_dict.keys(), new_dict.keys()\n )\n return (\n {k: new_dict[k] for k in added_keys},\n {k: old_dict[k] for k in removed_keys},\n {k: new_dict[k] for k in updated_keys},\n )", "def copy_dict(in_dict):\n\n if in_dict is None:\n return None\n\n out_dict = {}\n\n for key, val in in_dict.items():\n if isinstance(val, np.ndarray):\n out_dict[key] = val.copy()\n elif isinstance(val, dict):\n out_dict[key] = 
copy_dict(val)\n\n else:\n out_dict[key] = val\n\n return out_dict", "def merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def merge_dicts(x, y):\n z = x.copy()\n z.update(y)\n return z", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def merge_dicts(primary, secondary, deepcopy=False):\n # Objective: assemble `out` from\n # (1) `primary` <has a higher priority>\n # (2) `secondary`\n\n out = {}\n if deepcopy:\n two = _copy.deepcopy(secondary)\n else:\n two = secondary.copy()\n out.update(primary)\n\n # Remove those same keys from `secondary`:\n for key in primary.iterkeys():\n two.pop(key, None)\n\n # Then append any remaining values in `secondary` into `out`. However\n # first deepcopy those values, if we've been asked to:\n if deepcopy:\n out.update(_copy.deepcopy(two))\n else:\n out.update(two)\n return out", "def merge_two_dicts(x, y):\n\tz = x.copy()\n\tz.update(y)\n\treturn z", "def underride(d, **options):\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d", "def test_update(inp):\n atty = AttyDict(a={'aa': 1, 'ab': 2})\n regular = dict(a={'aa': 1, 'ab': 2})\n\n atty.update(**inp)\n assert valid_values(atty)\n\n regular.update(**inp)\n assert dict(atty) == regular", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def dict_merge(dict1, dict2):\n combined = dict(dict1)\n combined.update(dict2)\n return combined", "def merge(a: dict, b: dict) -> dict:\n return __merge(a, b)", "def _app_cache_deepcopy(obj):\n if isinstance(obj, dict):\n return dict((_app_cache_deepcopy(key), _app_cache_deepcopy(val))\n for key, val in obj.items())\n elif isinstance(obj, list):\n return list(_app_cache_deepcopy(val) for val in obj)\n elif isinstance(obj, SortedDict):\n return deepcopy(obj)\n return obj", "def merge_two_dicts(self, x, y):\n z = x.copy()\n z.update(y)\n return z", "def add_dict(dest, src):\n for key in src.keys():\n if key in dest.keys():\n dest[key] += src[key]\n else:\n dest[key] = src[key]", "def merge(first: Dict[Any, Any], second: Dict[Any, Any]) -> Dict[Any, Any]:\n if not isinstance(second, dict):\n return second\n result = deepcopy(first)\n for key, value in second.items():\n if key in result and isinstance(result[key], dict):\n result[key] = merge(result[key], value)\n else:\n result[key] = deepcopy(value)\n return result", "def deepcopy(obj):\n if isinstance(obj, dict):\n return {deepcopy(key): deepcopy(value) for key, value in obj.items()}\n if hasattr(obj, '__iter__'):\n return type(obj)(deepcopy(item) for item in obj)\n return obj", "def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)", "def custom_extend_dict(dict1, dict2):\n common_keys = set([*dict1]).intersection([*dict2])\n for key in common_keys:\n if 
dict1[key] == dict2[key]:\n continue\n if not dict1[key]:\n dict1[key] = dict2[key]\n else:\n if isinstance(dict2[key], dict) and isinstance(dict1[key], dict):\n dict2[key] = custom_extend_dict(dict1[key], dict2[key])\n elif isinstance(dict1[key], dict):\n dict2[key] = dict1[key]\n elif not isinstance(dict1[key], list):\n if dict1[key]:\n dict2[key] = dict1[key]\n else:\n dict1[key].extend(dict2[key])\n dict2.pop(key)\n\n dict1.update(dict2)\n return dict1", "def test_merge_overwrite_missing_source_key(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"D\"] = \"new\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"D\": \"new\"})\n self.assertEqual(mdict, ret)", "def override_default_dic(dic, default_dic):\n\n for k in dic:\n default_dic[k] = dic.get(k)\n\n return dic", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def conditional_copy(self, other, key, altkey=None):\n if hasattr(self, key):\n possible = getattr(self, key)\n if possible:\n usekey = {True: altkey, False: key}[altkey is not None]\n if hasattr(other, usekey):\n exists = getattr(other, usekey)\n if exists:\n return\n if isinstance(possible, list):\n setattr(other, usekey, [deepcopy(i) for i in possible])\n else:\n setattr(other, usekey, deepcopy(possible))" ]
[ "0.7882248", "0.69423527", "0.6703827", "0.66988987", "0.65337425", "0.6381466", "0.6357913", "0.63407815", "0.63111323", "0.6289166", "0.6279252", "0.626923", "0.62294686", "0.61992794", "0.6063225", "0.60579437", "0.6037078", "0.6023079", "0.5997253", "0.59942675", "0.5949621", "0.5936017", "0.5930166", "0.5910017", "0.5903298", "0.59001744", "0.5896106", "0.5849969", "0.5841193", "0.58295816", "0.5827028", "0.5810055", "0.5804411", "0.5792149", "0.57713234", "0.57690245", "0.5766882", "0.5758334", "0.57269645", "0.57242495", "0.57242495", "0.5670771", "0.5669507", "0.56553274", "0.56369597", "0.56342745", "0.5632116", "0.56220734", "0.56147516", "0.56049806", "0.5585884", "0.55774945", "0.5573147", "0.5556226", "0.554021", "0.55358994", "0.55356455", "0.55329704", "0.55272824", "0.55244297", "0.55199385", "0.5512144", "0.5509652", "0.5509652", "0.5509652", "0.5509652", "0.5509652", "0.5509652", "0.5509652", "0.5509652", "0.5501862", "0.55002856", "0.54998827", "0.54929805", "0.5491479", "0.5490695", "0.5484956", "0.548268", "0.548268", "0.548268", "0.54825246", "0.5482315", "0.54730195", "0.54685163", "0.5465305", "0.54582834", "0.54573023", "0.54573023", "0.5452675", "0.5451443", "0.54314834", "0.54272735", "0.54261357", "0.5413992", "0.5412799", "0.54084134", "0.54077035", "0.54069644", "0.53716815", "0.5361657" ]
0.8017656
0
Create a new dict where one entry is removed. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pop`.
def pop( x: Union[FrozenDict, Dict[str, Any]], key: str ) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]: if isinstance(x, FrozenDict): return x.pop(key) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x value = new_dict.pop(key) return new_dict, value raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
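A brief usage sketch of the `pop` utility above; the `flax.core` import path for `freeze` and `pop` is assumed rather than confirmed by this row.

from flax.core import freeze, pop  # assumed import path

variables = freeze({'params': {'kernel': 1}, 'batch_stats': {'mean': 0}})
state, params = pop(variables, 'params')
# state is a new FrozenDict holding only 'batch_stats';
# params is the FrozenDict that was stored under 'params'.

plain = {'a': 1, 'b': 2}
rest, b = pop(plain, 'b')          # rest == {'a': 1}, b == 2
assert plain == {'a': 1, 'b': 2}   # the original dict is left untouched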
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def dict_pop(d, key):\n return d.pop(key)", "def remove_element( self, dictionary, key):\n\n _dict = dictionary.copy()\n _dict.pop(key, None)\n return _dict", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore\n elif isinstance(x, dict):\n ys = {}\n for key, value in x.items():\n ys[key] = unfreeze(value)\n return ys\n else:\n return x", "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def freeze(xs: Mapping[Any, Any]) -> FrozenDict[Any, Any]:\n return FrozenDict(xs)", "def clear_dict(d: dict) -> dict:\n # TODO delete if not used\n return {k: v for k, v in d.items() if v is not None}", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def task_2_remove_dict_fields(data: DT, redundant_keys: List[str]) -> DT:\n dict2 = copy.deepcopy(data)\n for item in dict2:\n for key in redundant_keys:\n item.pop(key)\n return dict2", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def pop(self, key, d=None):\n if self._can_del(key):\n r = dict.pop(self, key, d)\n self._post_del(key)\n return r\n else:\n raise Exception('Cannot `pop`, deletion of key \"{}\" failed.'.format(key))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, FrozenDict):\n return x.copy(add_or_replace)\n elif isinstance(x, dict):\n new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x\n new_dict.update(add_or_replace)\n return new_dict\n raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')", "def _dpop(dictionary, key, default=None):\n try:\n ret = dictionary[key]\n del dictionary[key]\n except KeyError:\n ret = default\n\n return ret", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursively copy dictionary to avoid ref sharing\n return {key: _prepare_freeze(val) for key, val in xs.items()}", "def __delitem__(self, key):\n if not self._set:\n raise TypeError('This dict is read-only')\n return self._set(key, None)", "def cut(d, k):\n\tif isinstance(d, dict):\n\t\tn = d.copy()\n\t\tif k in n:\n\t\t\tdel n[k]\n\t\treturn n\n\treturn [v for v in d if v != k]", "def detachDict(dict, key1, key2):\n\n for key in dict.keys():\n if key == key1 or key == key2:\n del dict[key]\n else:\n for subkey in dict[key].keys():\n if subkey == key1 or subkey == key2:\n del dict[key][subkey]", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n 
new_dct[key] = dct[key]\n\n return new_dct", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def discard(m: MutableMapping[KT, VT], key: KT) -> None:\n try:\n del m[key]\n except KeyError:\n pass", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove_from_multidict(d: MultiDict, key: str, item: typing.Any):\n # works by popping all, removing, then re-adding into\n i = d.popall(key, [])\n if item in i:\n i.remove(item)\n\n for n in i:\n d.add(key, n)\n\n return d", "def removeDic(dic, key):\n pass", "def unfreeze(cls, frozen):\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)", "def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0", "def delete_dict_entries(dictionary, entries):\n\n for key in entries:\n if key in dictionary:\n del dictionary[key]\n\n return dictionary\n # parameters = {key: parameters[key] for key in parameters if key not in del_parameter}", "def popall(self, k, default=_MISSING):\n super_self = super(OrderedMultiDict, self)\n if super_self.__contains__(k):\n self._remove_all(k)\n if default is _MISSING:\n return super_self.pop(k)\n return super_self.pop(k, default)", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)", "def popitem(self):\n if self.used == 0:\n raise KeyError(\"empty dictionary\")\n entry0 = self.table[0]\n entry = entry0\n i = 0\n if entry0.value is None:\n # The first entry in the table's hash is abused to hold the index to\n # the next place to look for a value to pop.\n i = entry0.hash\n if i >= self.size or i < i:\n i = 1\n entry = self.table[i]\n while entry.value is None:\n i += 1\n if i >= self.size:\n i = 1\n entry = self.table[i]\n res = entry.key, entry.value\n self._del(entry)\n # Set the next place to start.\n entry0.hash = i + 1\n return res", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def _map_pop(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise 
KeyError('key not found')\n ret = self[key]\n del self[key]\n return ret", "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def truncate_dict(dictionary: Dict, n: int) -> Dict:\n return {k: v for (k, v) in list(dictionary.items())[:n]}", "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def pop(self, key, default=None):\n with self.lock:\n try:\n item = dict.__getitem__(self, key)\n del self[key]\n return item[0]\n except KeyError:\n return default", "def shallow(dict, exclude=None):\n\tnew_dict = {}\n\tfor key in dict:\n\t\tif not exclude or key not in exclude:\n\t\t\tnew_dict[key] = dict[key]\n\treturn new_dict", "def compact_dict(source_dict):\n return {k: v for k, v in source_dict.items() if v is not None}", "def dict_subtract(a, b):\n return {k: a[k] for k in set(a) - set(b)}", "def pop(self, key):\n return self.__data_dict.pop(key)", "def filter_dict(dictionary, pred):\n return dict((k, v) for k, v in dictionary.items() if pred(k, v))", "def clean_dict(dictionary):\n return {k: v for k, v in dictionary.items() if v}", "def remove_outlier(dict_object, keys):\n for key in keys:\n dict_object.pop(key, 0)", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value", "def pop(self, k, d=None):\n try:\n answer = self[k]\n del self[k]\n return answer\n except KeyError:\n return d", "def prune(bushy: dict) -> dict:\n pruned = dict()\n for key in bushy:\n if bushy[key]:\n pruned[key] = bushy[key]\n return pruned", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def remove_outlier(dict_object, keys):\r\n for key in keys:\r\n dict_object.pop(key, 0)", "def popitem(self):\n return self.__data_dict.popitem()", "def discard(self, key):\r\n if key in self.map: \r\n key, prev, next = self.map.pop(key)\r\n prev[NEXT] = next\r\n next[PREV] = prev", "def copy(self):\n return pdict(dict.copy(self))", "def popitem(self):\n try:\n return self._maps[0].popitem()\n except KeyError:\n raise KeyError('No keys found in the last mapping.')", "def _map_popitem(self):\n if len(self) == 0:\n raise KeyError('key not found')\n key = self.keys()[0]\n return (key, self.pop(key))", "def subtract(d1, d2):\n res = {}\n for key in d1:\n if key not in d2:\n res[key] = None\n return res", "def remove(self, data, key, value):\n if key in data:\n if not value: # value is empty or false, just remove it\n data.pop(key, None) # delete\n elif isinstance(value, type(data[key])): # if same type\n if isinstance(value, list): # if it's a list, 
like modules\n data[key] = list(set(data[key]) - set(value))\n elif isinstance(\n value, dict\n ): # if it's a dict, difference of the keys and rebuild dict\n for k, v in value.items():\n data[key][k] = self.remove(data[key], k, v)\n else:\n raise TypeError(\n f\"Value of {key} is {type(value)} and\"\n f\" the imported {key} is {type(data[key])}. Type mismatch.\"\n )\n return data[key]", "def pop(self, key, default=NOT_GIVEN):\n if key in self:\n ret = self[key]\n del self[key]\n return ret\n elif default is NOT_GIVEN:\n raise KeyError(key)\n else:\n return default", "def revive(self):\n field_name = self.get_delete_flag_field_name()\n return self.update(**{field_name: None})", "def delete(self, key):\n self.map.pop(key, None)", "def subtract(d1, d2):\n res = {}\n \n for key in d1:\n if key not in d2:\n res[key]=None\n\n return res", "def removeDictItem(self, key):\n if key in self._dentsvertsdata:\n self._dentsvertsdata[key].free()\n del self._dentsvertsdata[key]", "def sweep_record(r):\n return dict_sweep(r, vals=[None])", "def subtract_by_key(dict_a, dict_b):\n difference_dict = {}\n for key in dict_a:\n if key not in dict_b:\n difference_dict[key] = dict_a[key]\n\n return difference_dict", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def _remove_empty_values(data: T) -> T:\n if not isinstance(data, dict):\n return data\n return {k: _remove_empty_values(v) for k, v in data.items() if v is not None}", "def clean_dict_values(d: dict, rogue_values: list) -> dict:\n return {key: value for key, value in d.items() if not value in rogue_values}", "def test_splittable_popitem(self):\n a, b = self.make_shared_key_dict(2)\n\n orig_size = sys.getsizeof(a)\n\n item = a.popitem() # split table is combined\n self.assertEqual(item, ('z', 3))\n with self.assertRaises(KeyError):\n del a['z']\n\n self.assertGreater(sys.getsizeof(a), orig_size)\n self.assertEqual(list(a), ['x', 'y'])\n self.assertEqual(list(b), ['x', 'y', 'z'])", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def dilute_dict(d, num_to_keep=None):\n if num_to_keep is not None:\n if num_to_keep < 0:\n num_to_keep = len(d) + num_to_keep\n keys_to_keep = random.sample(d, num_to_keep)\n d = {k: v for k, v in d.items() if k in keys_to_keep}\n return d", "def exclude_keys(dictionary: Mapping, keys: Sequence[Hashable]) -> dict:\n return {k: v for k, v in dictionary.items() if k not in keys}", "def remove_fc(state_dict):\n return {key: value for key, value in state_dict.items() if not key.startswith('fc.')}", "def popitem(self):\n with self.__plock:\n if len(self._keys) == 0:\n raise KeyError('Empty')\n\n key = self._keys[-1]\n val = self[key]\n del self[key]\n\n return (key, val)", "def remove_empty(d):\n for key in d.keys():\n if d[key] is None:\n del d[key]\n return d", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}", "def customization(record):\n for popkey in pop_list:\n if popkey in record:\n record.pop(popkey)\n return record", "def remove(self, name):\n\n w = self._wdict[name]\n del(self._wdict[name])\n \n \n return w", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def remove(self, key):\n ndx = self._findPosition(key)\n assert ndx, 'Invalid map key'\n self._entryList.pop(key)", "def __delitem__(self, key):\n super(ReadOnlyDict, 
self).__delitem__(key)", "def revert_dictionary(dictionary):\n return {v: k for k, v in dictionary.items()}", "def remove_one(self):\n item = self.expiry.pop(0)\n if item.updated:\n self.new_expiry.append(item)\n return\n del self.index[item.target]\n return", "def copymod(dct, without=None, **kwargs):\r\n if without is None:\r\n without = []\r\n rval = copy(dct)\r\n for a in without:\r\n if a in rval:\r\n del rval[a]\r\n for kw, val in kwargs.items():\r\n rval[kw] = val\r\n return rval", "def subtract(dict_a, dict_b, strict=False):\n if not strict:\n return subtract_by_key(dict_a, dict_b)\n\n difference_dict = {}\n for key in dict_a:\n if key not in dict_b or dict_b[key] != dict_a[key]:\n difference_dict[key] = dict_a[key]\n\n return difference_dict", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def pop_and_restore(hsh, key, default=None):\n if key in hsh:\n value = hsh.pop(key)\n was_there = True\n else:\n value = default\n was_there = False\n\n yield value\n\n if was_there:\n hsh[key] = value\n else:\n hsh.pop(key, None)", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove(self, key: str) -> None:\n thekey = self._gethash(key)\n if self.HashMap[thekey] is not None:\n if len(self.HashMap[thekey]) == 2:\n self.HashMap[\n self._gethash(key)\n ] = None # Keep the location but set the value to None\n else:\n hashkey = self._gethash(key)\n idx = self._find_if_hashclash(key, hashkey, \"i\")\n self.HashMap[hashkey].pop(idx)\n self.HashMap[hashkey].pop(idx)\n self.length -= 1", "def relinquish(self):\n return self.pop(self.current_key, None)", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def pop(name, key, default=None):\n\n return get_component(CachingPackage.COMPONENT_NAME).pop(name, key, default=default)", "def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def pop(self, key, *args): # pylint: disable=arguments-differ\n try:\n return self._maps[0].pop(key, *args)\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))" ]
[ "0.70482343", "0.67293966", "0.6490357", "0.6465873", "0.61706996", "0.6144822", "0.6001672", "0.59998095", "0.5967065", "0.59639794", "0.5955744", "0.59308225", "0.588004", "0.58719283", "0.58055025", "0.57446265", "0.57119524", "0.56699175", "0.5650597", "0.56066054", "0.55868727", "0.55837643", "0.5558249", "0.5529116", "0.55180746", "0.55026454", "0.5492859", "0.5492513", "0.54797816", "0.5472006", "0.54537475", "0.5433725", "0.5428994", "0.54250485", "0.54137313", "0.5399789", "0.53955823", "0.53863", "0.5382324", "0.53768104", "0.5354969", "0.53494215", "0.5310567", "0.53081393", "0.5264375", "0.52517575", "0.52351135", "0.5217379", "0.52146554", "0.52061075", "0.52058554", "0.5195847", "0.5183404", "0.517487", "0.517225", "0.51552594", "0.514537", "0.51344085", "0.51342314", "0.5128078", "0.51234335", "0.51088786", "0.5104472", "0.5089377", "0.50725317", "0.50619173", "0.50584346", "0.5055684", "0.50516754", "0.5051189", "0.5044166", "0.5042008", "0.50402725", "0.50298375", "0.5021259", "0.5020662", "0.50203794", "0.5019133", "0.5015016", "0.50103617", "0.49998665", "0.49973816", "0.49930978", "0.49919164", "0.4987867", "0.49805462", "0.49780273", "0.49742666", "0.49724424", "0.49713314", "0.49441153", "0.49402842", "0.49360198", "0.49278352", "0.49274012", "0.49175242", "0.49157792", "0.49140424", "0.49111754", "0.4907053" ]
0.7112404
0
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or a regular dict and mimics the behavior of `FrozenDict.pretty_repr`. If `x` is of any other type, this function returns `repr(x)`.
def pretty_repr(x: Any, num_spaces: int = 4) -> str: if isinstance(x, FrozenDict): return x.pretty_repr() else: def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return pretty_dict(x)
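A brief, self-contained sketch of the plain-dict branch above, showing how the output is indented; `_indent` here is a local stand-in for the library helper of the same name.

def _indent(text: str, num_spaces: int) -> str:
    # Prefix every non-empty line with `num_spaces` spaces.
    return ''.join(
        ' ' * num_spaces + line if line.strip() else line
        for line in text.splitlines(keepends=True)
    )

def pretty_dict(x, num_spaces: int = 4) -> str:
    # Recursively render nested dicts, one indentation level per depth.
    if not isinstance(x, dict):
        return repr(x)
    rep = ''.join(f'{key}: {pretty_dict(val, num_spaces)},\n' for key, val in x.items())
    return '{\n' + _indent(rep, num_spaces) + '}' if rep else '{}'

print(pretty_dict({'params': {'dense': {'kernel': [1, 2], 'bias': 0.0}}}))
# {
#     params: {
#         dense: {
#             kernel: [1, 2],
#             bias: 0.0,
#         },
#     },
# }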
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return f'FrozenDict({pretty_dict(self._dict)})'", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def dict_pretty_print(D: dict, indent_lvl=0):\n print(\"Using 3 decimal places.\")\n base_indent = indent_lvl * \" \"\n indent = (indent_lvl+2)*\" \"\n print(f\"{base_indent}\" + \"{\")\n for key, value in D.items():\n print(f\"{indent}{key}: \", end=\"\")\n if type(value) is dict:\n print(\"\")\n dict_pretty_print(value, indent_lvl + 2)\n else:\n print(f\"{value:.3f}\")\n print(f\"{base_indent}\" + \"}\")", "def pretty_print(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key) + \":\")\n if isinstance(value, dict):\n pretty_print(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def prettyPrintDictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(\"{ }\")\r\n return\r\n\r\n # Recursive case\r\n stream.write(\"{\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n keys.sort()\r\n for key in keys : # Sorted order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(\"}\")", "def print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)", "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output", "def prettyPrintODictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n global OTabRepr\r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(OTabEmpty[OTabRepr]) # \"o{ }\"\r\n return\r\n\r\n # Recursive case\r\n stream.write(OTabLeft[OTabRepr]) # \"o{\"\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n for key in keys : # Insertion order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n if OTabRepr == 0 :\r\n 
stream.write(\"(\"+repr(key)+\", \")\r\n else :\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\")\")\r\n \r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(OTabRight[OTabRepr]) # \"}\"\r", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def pretty(d, indent=0):\n sp = \" \"\n t = \"\"\n \n if isinstance(d, dict):\n l = len(d)\n c = 0\n t += \"<type 'dict'>:{\\n\"\n for key, value in d.items():\n t += sp * (indent + 1) + \"'\" + str(key) + \"':\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"}\"\n elif isinstance(d, list):\n l = len(d)\n c = 0\n t += \"<type 'list'>:[\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"]\"\n elif isinstance(d, tuple):\n l = len(d)\n c = 0\n t += \"<type 'tuple'>:(\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \")\"\n else:\n t += str(type(d)) + \":'\" + str(d) + \"'\"\n \n return t", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)", "def _pretty_print(value, indent=''):\n keys = list(value.keys())\n keys.sort()\n for k in keys:\n v = value[k]\n if type(v) == dict:\n print(\"%s%s:\"%(indent, k))\n _pretty_print(v, indent+' ')\n elif type(v) == str:\n if '\\n' in v:\n print(indent+'%s: |'%k)\n for l in v.split('\\n'):\n print(indent+' '+l)\n else:\n print(\"%s%s: %s\"%(indent, k, v))\n else:\n dump = yaml.dump(v)\n # #1617\n # newer versions of python-yaml append the '...' document end\n # syntax. 
as YAML functions fine w/o it, and as it is\n # confusing to users who are just getting a single scalar, we\n # strip it\n if dump.endswith('\\n...\\n'):\n dump = dump[:-4]\n \n sys.stdout.write(\"%s%s: %s\"%(indent, k, dump))", "def print_recursive(value, indent=0):\n tabs = lambda count: '' + str(' ' * (indent + count))\n if isinstance(value, dict):\n to_print = '{}{}'.format(tabs(1), '{')\n for key, item in value.iteritems():\n to_print += '\\n{}{}:\\n{}'.format(tabs(2), key, print_recursive(item, indent + 2))\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', '}')\n if isinstance(value, list):\n to_print = '{}['.format(tabs(1))\n for item in value:\n to_print += '\\n' + print_recursive(item, indent + 1)\n return to_print + '{}{}'.format('\\n' + tabs(1) if len(value) > 0 else ' ', ']')\n if isinstance(value, str) or isinstance(value, unicode):\n return tabs(1) + '\\'' + value + '\\''\n if len(str(value)) > 0:\n return tabs(1) + str(value) + ''\n return ''", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)", "def pretty_print(name, input, val_width=40, key_width=0):\n\n # root\n pretty_str = name + ': {\\n'\n\n # determine key width\n for key in input.keys(): key_width = max(key_width, len(str(key)) + 4)\n\n # cycle keys\n for key in input.keys():\n\n val = input[key]\n\n # round values to 3 decimals..\n if type(val) == np.ndarray: val = np.round(val, 3).tolist()\n\n # difficult formatting\n val_str = str(val)\n if len(val_str) > val_width:\n val_str = pprint.pformat(val, width=val_width, compact=True)\n val_str = val_str.replace('\\n', '\\n{tab}')\n tab = ('{0:' + str(4 + key_width) + '}').format('')\n val_str = val_str.replace('{tab}', tab)\n\n # more difficult formatting\n format_str = '{0:' + str(4) + '}{1:' + str(key_width) + '} {2:' + str(val_width) + '}\\n'\n pretty_str += format_str.format('', key + ':', val_str)\n\n # close root object\n pretty_str += '}'\n\n return pretty_str", "def pretty_print(data, indent=4):\n if type(data) == dict:\n print(json.dumps(data, indent=indent, sort_keys=True))\n else:\n print(data)", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def dumps(data):\n def _dump(d, indent=0):\n for key, value in six.iteritems(d):\n if isinstance(value, dict):\n yield '%s%s {\\n' % (' ' * indent, _escape(key))\n for subs in _dump(value, indent + 2):\n yield subs\n yield '%s}\\n' % (' ' * indent)\n elif isinstance(value, list):\n yield '%s%s = {\\n' % (' ' * indent, _escape(key))\n for subvalue in value:\n if type(subvalue) == dict:\n yield '%s{\\n' % (' ' * (indent + 2))\n for subs in _dump(subvalue, indent + 4):\n yield subs\n yield '%s}\\n' % (' ' * (indent + 2))\n else:\n yield '%s%s\\n' % (' ' * (indent + 2),\n _escape(subvalue))\n\n yield '%s}\\n' % (' ' * indent)\n elif type(value) == bool:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value).lower()))\n else:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value)))\n return ''.join(list(_dump(data)))", "def _pretty_json_dump(d):\n return json.dumps(d, sort_keys=True, indent=3)", "def print_data(d, indent=0):\n prefix = indent * 
' '\n for k in sorted(d):\n v = d[k]\n k = prefix + str(k)\n if isinstance(v, dict):\n print(k)\n print_data(v, indent + 1)\n else:\n if k.endswith('cent'):\n v = ' '.join(\n str(tuple(int(j) if j.is_integer() else j for j in i))\n for i in v\n )\n elif isinstance(v, np.ndarray):\n v = str(v).replace('\\n', '')\n print(k, '=', v)", "def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))", "def pretty_repr(src, indent=0, no_indent_start=False, max_indent=20):\n if _simple(src) or indent >= max_indent:\n indent = 0 if no_indent_start else indent\n if isinstance(src, (six.binary_type, six.text_type)):\n if isinstance(src, six.binary_type):\n string = src.decode(\n encoding='utf-8',\n errors='backslashreplace'\n )\n prefix = 'b'\n else:\n string = src\n prefix = 'u'\n return _formatters['text'](\n spc='',\n indent=indent,\n prefix=prefix,\n string=string\n )\n return _formatters['simple'](\n spc='',\n indent=indent,\n val=src\n )\n if isinstance(src, dict):\n prefix, suffix = '{', '}'\n result = ''\n max_len = len(max([repr(key) for key in src])) if src else 0\n for key, val in src.items():\n result += _formatters['dict'](\n spc='',\n indent=indent + 4,\n size=max_len,\n key=key,\n val=pretty_repr(val, indent + 8, no_indent_start=True)\n )\n return (\n '\\n{start:>{indent}}'.format(\n start=prefix,\n indent=indent + 1\n ) +\n result +\n '\\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)\n )\n if isinstance(src, list):\n prefix, suffix = '[', ']'\n elif isinstance(src, tuple):\n prefix, suffix = '(', ')'\n else:\n prefix, suffix = '{', '}'\n result = ''\n for elem in src:\n if _simple(elem):\n result += '\\n'\n result += pretty_repr(elem, indent + 4) + ','\n return (\n '\\n{start:>{indent}}'.format(\n start=prefix,\n indent=indent + 1) +\n result +\n '\\n{end:>{indent}}'.format(end=suffix, indent=indent + 1)\n )", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def format_dictionary(dct, indent=4):\n return json.dumps(dct, indent=indent, sort_keys=True)", "def tree_view(dictionary, level=0, sep=\"| \"):\n return \"\".join([\"{0}{1}\\n{2}\".format(sep * level, k,\n tree_view(v, level + 1, sep=sep) if isinstance(v, dict)\n else \"\") for k, v in dictionary.items()])", "def _pretty_print(self, json_dict):\n if self.prettyprint:\n return \"\\n\" + json.dumps(json_dict, indent=self.indent)\n return json.dumps(json_dict)", "def ppdict(d):\n print '{'\n keys=d.keys()\n keys.sort()\n for k in keys:\n spacing=\" \" * (16-(len(repr(k))+1))\n print \"%s:%s%s,\" % (repr(k),spacing,repr(d[k]))\n print '}'", "def nice_string_output(d, extra_spacing=5, decimals=3):\n\n names = d.keys()\n max_names = len_of_longest_string(names)\n\n values = values_to_string(d.values(), decimals=decimals)\n max_values = len_of_longest_string(values)\n\n string = \"\"\n for name, value in zip(names, values):\n spacing = extra_spacing + max_values + max_names - len(name) - 1\n string += \"{name:s} {value:>{spacing}} \\n\".format(name=name, value=value, spacing=spacing)\n return string[:-2]", "def pretty_json_repr(data):\n return json.dumps(data, sort_keys=True, indent=2)", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = 
delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)", "def prettyprint(\n D,\n indent=0,\n width=0,\n maxdepth=None,\n step=4,\n only_keys=None,\n output=sys.stdout,\n _key_prefix='',\n _exclude=None):\n # be sure we do not try to recursively dump `D`\n if _exclude is None:\n _exclude = set()\n _exclude.add(id(D))\n for k, v in sorted(D.iteritems()):\n leading_spaces = indent * ' '\n full_name = \"%s%s\" % (_key_prefix, k)\n if only_keys is not None:\n try:\n # is `only_keys` a filter function?\n if not only_keys(str(full_name)):\n continue\n except TypeError:\n # no, then it must be a list of key names, check for\n # keys having the same number of dots as in the prefix\n level = _key_prefix.count('.')\n found = False\n for name in only_keys:\n # take only the initial segment, up to a \"level\" dots\n dots = min(name.count('.'), level) + 1\n prefix = str.join('.', name.split('.')[:dots])\n if str(full_name) == prefix:\n found = True\n break\n if not found:\n continue\n # ignore excluded items\n if id(v) in _exclude:\n continue\n # To make a 'key' valid in YAML it must not start with one of the following chars\n sk = str(k)\n sk = sk if sk[0] not in u'\\0 \\t\\r\\n\\x85\\u2028\\u2029-?:,[]{}#&*!|>\\'\\\"%@`' else \"'%s'\" % sk\n first = str.join('', [leading_spaces, sk, ': '])\n if isinstance(\n v, (dict, UserDict.DictMixin, UserDict.UserDict, OrderedDict)):\n if maxdepth is None or maxdepth > 0:\n if maxdepth is None:\n depth = None\n else:\n depth = maxdepth - 1\n sstream = StringIO.StringIO()\n prettyprint(v, indent + step, width, depth, step,\n only_keys, sstream, full_name + '.', _exclude)\n second = sstream.getvalue()\n sstream.close()\n elif maxdepth == 0:\n second = \"...\"\n elif isinstance(v, (list, tuple)):\n second = str.join(', ', [str(item) for item in v])\n else:\n second = str(v)\n # wrap overlong lines, and always wrap if the second part is multi-line\n if (width > 0 and len(first) + len(second)\n > width) or ('\\n' in second):\n first += '\\n'\n # indent a multi-line block by indent+step spaces\n if '\\n' in second:\n lines = second.splitlines()\n # keep indentation relative to first line\n dedent = 0\n line0 = lines[0].expandtabs(step)\n while line0[dedent].isspace():\n dedent += 1\n # rebuild `second`, indenting each line by (indent+step) spaces\n second = ''\n for line in lines:\n second = str.join('', [\n second,\n ' ' * (indent + step),\n line.rstrip().expandtabs(step)[dedent:],\n '\\n'\n ])\n # there can be multiple trailing '\\n's, which we remove here\n second = second.rstrip()\n # finally print line(s)\n output.write(first)\n output.write(second)\n output.write('\\n')", "def prettify(tree, indent=0):\n for key, value in six.iteritems(tree):\n if key == FILE_MARKER:\n if value:\n print((' ' * indent + str(value)))\n else:\n print((' ' * indent + str(key)))\n if isinstance(value, dict):\n prettify(value, indent+1)\n else:\n print((' ' * (indent+1) + str(value)))", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return defaultdict.__repr__(self)", "def _to_string(self) -> str:\n\n string_list = []\n for key, value in self.__dict__.items():\n if isinstance(value, dict):\n string_list.append(key)\n string_list.extend('\\n'.join([\"Key: {:24}\\tValue: {}\".format(_key, _value) for _key, _value in value.items()]))\n else:\n 
string_list.append(\"Key: {:24}\\tValue: {}\\n\".format(key, value))\n return ''.join(string_list)", "def _pretty(item):\n ivs = \"IVs: \\n\"\n for stat, value in item.get(\"ivs\", {}).items():\n ivs += f\" {value} {stat}\\n\"\n\n evs = \"EVs: \\n\"\n for stat, value in item.get(\"evs\", {}).items():\n evs += f\" {value} {stat}\\n\"\n\n moves = \"\\n\"\n for move, acquired in item.get(\"moves\", {}).items():\n moves += f\" [{'x' if acquired else ' '}] {move}\\n\"\n\n return f\"\"\"\n-------------------------------------------------------------------------------\n{item['Pokemon']} - {item.get('Index')}\n{item.get('nickname', item['Pokemon'])}\n\nAbility: {item.get('ability')}\nNature: {item.get('nature')}\n{ivs}\n{evs}\n{moves}\n\"\"\"", "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string", "def testPrettyPrintJSON(self):\n test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))", "def _prettify_attributes(self, config_entry, indentation_level):\n def get_string_representation(singular):\n return \"{0}: {1}{2}\".format(singular['@name'], str(singular['@value']), os.linesep)\n \n indent_level = indentation_level * 2\n string_representation = \"\"\n \n if 'attribute' in config_entry:\n if type(config_entry['attribute']) == list:\n for entry in config_entry['attribute']:\n string_representation = \"{0}{1}{2}\".format(string_representation, \" \"*indent_level, get_string_representation(entry))\n else:\n string_representation = \"{0}{1}\".format(\" \"*indent_level, get_string_representation(config_entry['attribute']))\n \n if len(string_representation) > 0 and string_representation[-1] == os.linesep:\n return string_representation[:-1]\n \n return string_representation", "def get_dict_str(d: dict) -> str:\n\treturn str({str(u): str(v) for u, v in d.items()})", "def _DictToString(self, value_dict, str_length=5):\n\n def FormatValue(v, value_format, str_length):\n if isinstance(v, (int, float)):\n return value_format % v\n else:\n return str(v).rjust(str_length)\n\n text = []\n blank = '--'.rjust(str_length)\n\n if self._show_label:\n text.append(' '.join(k.rjust(str_length) for k in self._node_labels))\n\n if not self._precision:\n value_format = '%% %dd' % str_length\n else:\n value_format = '%% %d.%df' % (str_length, self._precision)\n\n text.append(' '.join(\n [FormatValue(value_dict[k], value_format, str_length)\n if k in value_dict else blank for k in self._node_labels]))\n\n return '\\n'.join(text)", "def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(*d):\n i = 0\n while i < len(d):\n print(pretty(d[i]))\n i += 1", "def pretty_tree(x, kids, show):\n (MID, END, CONT, LAST, ROOT) = (\"|-- \", \"`-- \", \"| \", \" \", \"\")\n\n def rec(obj, indent, sym):\n line = indent + sym + show(obj)\n obj_kids = kids(obj)\n if len(obj_kids) == 0:\n return line\n else:\n if sym == MID:\n next_indent = indent + 
CONT\n elif sym == ROOT:\n next_indent = indent + ROOT\n else:\n next_indent = indent + LAST\n chars = [MID] * (len(obj_kids) - 1) + [END]\n lines = [rec(kid, next_indent, sym) for kid, sym in zip(obj_kids, chars)]\n return \"\\n\".join([line] + lines)\n\n return rec(x, \"\", ROOT)", "def pprint(self, indent: str = \"\"):\n\n from os import linesep\n\n res = self.__str__() + linesep\n child_indent = f\"{indent} \"\n\n pos = -1\n for x in self.children:\n pos += 1\n if pos == len(self.children) - 1:\n res += f\"{child_indent}└── {x.pprint(child_indent)}\"\n else:\n res += f\"{child_indent}├── {x.pprint(child_indent)}\"\n return res", "def pprint(self):\n return pformat(repr(self))", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def __repr__(self):\n return repr(dict([(k, v) for k, v in self.iteritems()]))", "def tree_str(self, depth_index=0, recursive_dict=None):\r\n if not hasattr(self,'iteritems'): return ''\r\n if recursive_dict is not None: self = TreeMap(recursive_dict)\r\n buff_str = ''\r\n \r\n for item in self.iteritems():\r\n # Starts working now.\r\n k = item[0]\r\n v = item[1]\r\n \r\n spacer = '\\n' + '| ' * depth_index\r\n \r\n if hasattr(v,'iteritems'):\r\n buff_str += spacer + '+--[ ' + k + ' ]'\r\n buff_str += self.tree_str(depth_index=depth_index + 1, recursive_dict=v)\r\n else:\r\n buff_str += spacer + '\\_.--[ ' + str(k) + ' = ' + str(v) + ' ]'\r\n \r\n return buff_str", "def pretty_str(self) -> str:\n return _yaml_dump(self.to_ordered_dict())", "def dump(self, indentation=0):\n\n dump = []\n\n dump.append('[%s]' % self.name)\n\n # Refer to the __set_format__ method for an explanation\n # of the following construct.\n for keys in self.__keys__:\n for key in keys:\n\n val = getattr(self, key)\n if isinstance(val, int) or isinstance(val, long):\n val_str = '0x%-8X' % (val)\n if key == 'TimeDateStamp' or key == 'dwTimeStamp':\n try:\n val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))\n except exceptions.ValueError, e:\n val_str += ' [INVALID TIME]'\n else:\n val_str = ''.join(filter(lambda c:c != '\\0', str(val)))\n\n dump.append('0x%-8X 0x%-3X %-30s %s' % (\n self.__field_offsets__[key] + self.__file_offset__,\n self.__field_offsets__[key], key+':', val_str))\n\n return dump", "def pretty_print(data):\n print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))", "def pretty_print(self):\n return self.tree.pretty_print()", "def pprint(self,indent=0,node=None):\n if node == None:\n node = self.root\n if node == None:\n print_indent(indent)\n print \"[empty tree]\"\n return\n if node.type == 'v':\n print_indent(indent)\n print node.value\n elif node.type == 's':\n for (val,c) in node.children.iteritems():\n print_indent(indent)\n print \"-\",self.keys[node.feature],\"=\",val,\":\"\n self.pprint(indent+1,c)\n elif node.type == 'i':\n print_indent(indent)\n print self.keys[node.feature],\"<=\",node.value,\":\"\n self.pprint(indent+1,node.children[0])\n print_indent(indent)\n print self.keys[node.feature],\">\",node.value,\":\"\n self.pprint(indent+1,node.children[1])", "def dictree(in_dict, verbose=False, spaces=None, levels=True, attrs=False, **kwargs):\n try:\n assert hasattr(in_dict, 'keys')\n except AssertionError:\n try:\n assert hasattr(in_dict, 'attrs')\n except:\n raise TypeError('dictree: Input must be dictionary-like')\n\n if not spaces:\n spaces = ''\n print('+')\n\n if 'toplev' in kwargs:\n toplev = kwargs['toplev']\n else:\n toplev = True\n try:\n if toplev and attrs:\n dictree(in_dict.attrs, spaces = ':', verbose = 
verbose, levels = levels, attrs=attrs, toplev=True)\n toplev = False\n except:\n pass\n\n # TODO, if levels is True why check again?\n if levels:\n try:\n assert levels is True\n except AssertionError:\n levels -= 1\n if levels == 0:\n levels = None\n\n try:\n for key in sorted(in_dict.keys()):\n bar = '|____' + str(key)\n if verbose:\n typestr = str(type(in_dict[key])).split(\"'\")[1]\n #check entry for dict-like OR .attrs dict\n try:\n dimstr = in_dict[key].shape\n dimstr = ' ' + str(dimstr)\n except AttributeError:\n try:\n dimstr = len(in_dict[key])\n dimstr = ' [' + str(dimstr) + ']'\n except:\n dimstr = ''\n print(spaces + bar + ' ('+ typestr + dimstr + ')')\n else:\n print(spaces + bar)\n if hasattr(in_dict[key], 'attrs') and attrs:\n dictree(in_dict[key].attrs, spaces = spaces + ' :', verbose = verbose, levels = levels, attrs=attrs, toplev=False)\n if hasattr(in_dict[key], 'keys') and levels:\n dictree(in_dict[key], spaces = spaces + ' ', verbose = verbose, levels = levels, attrs=attrs, toplev=False)\n except:\n pass\n return None", "def dump_pretty(thing):\n print(json.dumps(thing, indent=1, default=convert_for_json))", "def pformat_in_needed(obj, indent=4):\n if obj:\n formatted_string = pprint.pformat(obj, indent)\n indented_string = ''\n for line in formatted_string.split('\\n'):\n indented_string = indented_string + '\\n' + (' ' * indent * 2) + line\n return \"\\n{}\\n\".format(indented_string)", "def dumps(self, indent=1):\n str_keys_dict = OrderedDict({str(k): v for k, v in self.items()})\n for k, v in str_keys_dict.items():\n if isinstance(v, dict):\n str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()})\n for k1, v1 in str_keys_dict[k].items():\n if isinstance(v1, dict):\n str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()})\n return json.dumps(str_keys_dict, indent=indent)", "def pprint(x):\n if is_theano_object(x):\n return _gettheano().printing.pprint(x)\n else:\n return str(x)", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr = tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def dict_2_string(d):\n buff = io.StringIO()\n print_dictionary(d, output=buff)\n return buff.getvalue()", "def pretty_print(self, indent=8):\n formatted_lines = []\n for line in self.coefficients:\n formatted_items = []\n for item in line:\n formatted_items.append(str(item).ljust(indent, \" \"))\n formatted_lines.append(u\"(\" + \", \".join(formatted_items) + u\")\")\n return u\"(\" + u\",\\n \".join(formatted_lines) + u\")\"", "def pretty_print_drt(self):\n self.drt_manager.pretty_print_drt()", "def json_encode_pretty(data):\n # type: (Union[Dict,List]) -> str\n return json.dumps(data, indent=2, separators=(\",\", \": \"))", "def dict_to_beautified_json(d):\n return jsbeautifier.beautify(json.dumps(d))", "def pprintable(self):\n\n def as_pprintable(v):\n return v.pprintable() if isinstance(v, NQExprProvenance) else v\n\n return dict([(k, as_pprintable(v)) for k, v in self.__dict__.items() if v])", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + 
pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def prettify_details(data):\n new = []\n if \"terminaltables\" in sys.modules:\n for key, value in data.items():\n if key.startswith(\"__\"):\n continue\n if isinstance(value, (int, float)) and not isinstance(value, bool):\n new.append((key, \"{:15,.2f}\".format(value)))\n else:\n new.append((key, value))\n table = terminaltables.DoubleTable(new)\n table.inner_heading_row_border = False\n table.justify_columns[1] = 'right'\n return table.table.replace(\"\\n\", \"<br />\")\n else:\n formatted = json.dumps({k: v for k, v in data.items()\n if not k.startswith(\"__\")}, indent=4)\n new = formatted[2:-2].replace(\"\\n\", \"<br />\")\n return new", "def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output", "def dict_json_print_beauty(json_dict=dict, encode='utf-8'):\n # type: (dict, str)->None\n print(json.dumps(json_dict, encoding=encode, ensure_ascii=False, indent=4))", "def serialize_dict(d):\n txt = '{'\n for k in d:\n txt += f'\"{k}\":'\n if isinstance(d[k], dict):\n txt += serialize_dict(d[k])\n if isinstance(d[k], str):\n txt += serialize_string(d[k])\n if isinstance(d[k], int):\n txt += serialize_number(d[k])\n txt += ','\n txt += '}'\n return txt", "def print_dict(dictionary, format_=None):\n\n format_ = format_ or DEFAULT\n\n if format_ == TEXT:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s = %s\" % (key, value))\n elif format_ == DOCKERENV:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s=%s\" % (key, value))\n elif 
format_ == BASH:\n for key, value in iter(sorted(dictionary.items())):\n print(\"export %s=%s\" % (key, value))\n elif format_ == JSON:\n print(json.dumps(dictionary))\n elif format_ == NAME_VALUE_DICT:\n print(\"[\")\n for key, value in iter(sorted(dictionary.items())):\n print('{\"name\": \"%s\", \"value\": \"%s\"},' % (key, value))\n print(\"]\")", "def print_object(dict_to_print, *, name='', uppercase=False):\n string = '' if name == '' else name.ljust(10)\n for key, value in dict_to_print.items():\n string += f'{key.upper() if uppercase else key}: {\"\" if value < 0 else \" \"}{float(value):.4}'.ljust(\n len(key) + 10)\n\n print(string)", "def encode_pretty_printed_json(json_object):\n\n return _pretty_encoder.encode(json_object).encode(\"ascii\")", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def key_repr(key):\n levels = []\n while key:\n levels.insert(0, '%s %s' % (key.kind(), key.id() or repr(key.name())))\n key = key.parent()\n return '<Key: %s>' % '/'.join(levels)", "def __str__(self):\n if len(self.keys()):\n return '{' + repr(self.keys()[0]) + ':' + repr(self[self.keys()[0]]) + ', ...'\n else:\n return super(FSDict, self).__str__()", "def __repr__(self) -> str:\n return '{}({})'.format(self.__class__.__name__,\n ', '.join('{}={}'.format(key, repr(value))\n for key, value in iter(self.items())))", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def stringify_dict(d: dict) -> dict:\n return {str(key): str(value) for key, value in d.items()}", "def pprint(self,obj):\n return(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def json_format_dict(self, data, pretty=False):\n if pretty:\n return json.dumps(data, sort_keys=True, indent=2)\n else:\n return json.dumps(data)", "def dump(node, annotate_fields=True, include_attributes=False, indent=' '):\n def _format(node, level=0):\n if isinstance(node, AST):\n fields = [(a, _format(b, level)) for a, b in iter_fields(node)]\n if include_attributes and node._attributes:\n fields.extend([(a, _format(getattr(node, a), level))\n for a in node._attributes])\n return ''.join([\n node.__class__.__name__,\n '(',\n ', '.join(('%s=%s' % field for field in fields)\n if annotate_fields else\n (b for a, b in fields)),\n ')'])\n elif isinstance(node, list):\n lines = ['[']\n lines.extend((indent * (level + 2) + _format(x, level + 2) + ','\n for x in node))\n if len(lines) > 1:\n lines.append(indent * (level + 1) + ']')\n else:\n lines[-1] += ']'\n return '\\n'.join(lines)\n return repr(node)\n\n if not isinstance(node, AST):\n raise TypeError('expected AST, got %r' % node.__class__.__name__)\n return _format(node)", "def simple_formatter(entry, fp, indent=0):\n for key, value in six.iteritems(entry):\n if isinstance(value, dict):\n print('{}{}:'.format(' ' * indent, key))\n simple_formatter(value, fp, indent + 1)\n else:\n print('{}{}: {}'.format(' ' * indent, key, value), file=fp)", "def stringify_keys(d):\n di = copy.deepcopy(d)\n for key in di.keys():\n # check inner dict\n if isinstance(d[key], dict):\n value = stringify_keys(d[key])\n else:\n value = d[key]\n\n # convert nonstring to string if needed\n if not isinstance(key, str):\n try:\n d[str(key)] = value\n except Exception:\n try:\n d[repr(key)] = value\n except Exception:\n raise\n\n # delete old key\n del d[key]\n return d", 
"def __repr__(self):\r\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])", "def dict_to_str(self, param_dict: Dict[str, Any], num_tabs: int) -> str:\n if not isinstance(param_dict, dict):\n return str(param_dict)\n else:\n append_newline = \"\\n\" if num_tabs > 0 else \"\"\n return append_newline + \"\\n\".join(\n [\n \"\\t\"\n + \" \" * num_tabs\n + \"{0}:\\t{1}\".format(\n x, self.dict_to_str(param_dict[x], num_tabs + 1)\n )\n for x in param_dict\n ]\n )", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def Repr(obj, as_ref=True):\n if hasattr(obj, 'Repr'):\n return obj.Repr(as_ref=as_ref)\n # Since we cannot implement Repr for existing container types, we\n # handle them here.\n if isinstance(obj, list):\n if not obj:\n return '[]'\n return ('[\\n%s\\n]' %\n (',\\n'.join(' %s' % Repr(elem, as_ref).replace('\\n', '\\n ')\n for elem in obj)))\n if isinstance(obj, dict):\n if not obj:\n return '{}'\n return ('{\\n%s\\n}' % (',\\n'.join(' %s: %s' %\n (Repr(key, as_ref).replace('\\n', '\\n '),\n Repr(val, as_ref).replace('\\n', '\\n '))\n for key, val in obj.items())))\n return repr(obj)", "def prettify_jsonc(jsonc_obj, indentation=2):\n\n return simplejson.dumps(_convert_to_object(jsonc_obj), indent=indentation)", "def to_dump(self):\n s = []\n for k in self.keys():\n if isinstance(self[k], int) or isinstance(self[k], long):\n s.append(\"%s=%d\" % (k, self[k]))\n elif isinstance(self[k], float):\n s.append(\"%s=%f\" % (k, self[k]))\n else:\n for v2 in self.list(k):\n if isinstance(v2, str):\n s.append(\"%s=%s\" % (k, v2))\n else:\n s.append(\"%s=%s\" % (k, util.encode(v2)))\n s.append(\"~format=%s\" % self.format)\n s.append(\"\")\n return \"\\n\".join(s)", "def pretty(self):\n return self._pretty", "def format(self, obj, indent=0):\r\n return pformat(obj, indent=indent, depth=self.depth)", "def __str__(self, indent: int=0) -> str:\n root_str = indent * \" \" + str(self.value)\n mid = len(self.non_none_kids()) // 2\n left_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][: mid]\n right_str = [c.__str__(indent + 3)\n for c in self.non_none_kids()][mid:]\n return '\\n'.join(right_str + [root_str] + left_str)" ]
[ "0.7736835", "0.7247312", "0.7021937", "0.69802797", "0.69656223", "0.6918987", "0.6832074", "0.68211126", "0.67912984", "0.678275", "0.67453086", "0.6687921", "0.66458195", "0.654063", "0.6504709", "0.6453231", "0.64472985", "0.63632375", "0.6358067", "0.6356899", "0.63254225", "0.62625194", "0.625399", "0.6248269", "0.61995083", "0.61536145", "0.61233455", "0.6115827", "0.6093083", "0.6032953", "0.60148734", "0.60055476", "0.59647894", "0.59555656", "0.5930941", "0.5903099", "0.5902058", "0.5873935", "0.58272016", "0.58178914", "0.5797254", "0.5791013", "0.5785912", "0.57540846", "0.5749488", "0.5721405", "0.5721405", "0.57160944", "0.5686643", "0.568598", "0.5678948", "0.56780314", "0.5675657", "0.56695676", "0.56394637", "0.5619014", "0.56117713", "0.559124", "0.5550933", "0.5547444", "0.5522202", "0.5518479", "0.5518424", "0.5511335", "0.55049103", "0.5502727", "0.54747534", "0.5467324", "0.544169", "0.54245585", "0.54221916", "0.54217666", "0.5420331", "0.5407784", "0.54074097", "0.5400549", "0.5369846", "0.53697145", "0.5367871", "0.5367699", "0.5362965", "0.5348399", "0.53388876", "0.5310783", "0.53057534", "0.530328", "0.5300817", "0.5299649", "0.52925223", "0.52917176", "0.5285291", "0.5282669", "0.5278911", "0.5278429", "0.5275434", "0.52741075", "0.5266103", "0.5265437", "0.52559745", "0.5250991" ]
0.83444524
0
Generate instance masks for an image.
def load_mask(self, image_id, coco_offset=0): info = self.image_info[image_id] mask = np.zeros([info["height"], info["width"], len(self.json_data[info["id"]])], dtype=np.uint8) lbls = np.zeros(len(self.json_data[info["id"]]), dtype=np.int32) for idx, (mask_path, mask_info) in enumerate(self.json_data[info["id"]].items()): mask_class = mask_info["class"] mask[:,:,idx] = np.array(PIL.Image.open(mask_path), dtype=np.uint8) lbls[idx] = common.activity_classes_names.index(mask_class) + 1 + coco_offset # Return mask, and array of class IDs of each instance. Since we have # one class ID only, we return an array of 1s return mask.astype(np.bool), lbls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]", "def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n rr, cc = skimage.draw.polygon(p[1], p[0])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"self_annotation\":\n super(CarsAndVehiclesDataset, self).load_mask(image_id)\n\n instance_masks = []\n class_ids = []\n annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n for annotation in annotations:\n class_id = self.map_source_class_id(\"self_annotation.{}\".format(annotation[\"category_id\"]))\n\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"], image_info[\"width\"])\n\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() < 1:\n continue\n # Is it a crowd? If so, use a negative class ID\n if annotation[\"iscrowd\"]:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. 
If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones(image_info[\"height\"], image_info[\"width\"], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if class_ids:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(CarsAndVehiclesDataset, self).load_mask(image_id)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return mask, class_ids", "def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def bg_mask(query_imgs, method):\n print(\"Obtaining masks\")\n segmentation_method = get_method(method)\n return [segmentation_method(img) for img in query_imgs]", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n image_name = str(info['id']).zfill(6) + \"_10.png\"\n gt_image = imageio.imread(\"./training/instance/\" + image_name)\n instance_gt = np.array(gt_image) % 256\n semantic_gt = np.array(gt_image) // 256\n instance_gt = cv2.resize(instance_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n semantic_gt = cv2.resize(semantic_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n labels = [26, 24]\n masks = []\n class_ids = []\n for l in labels:\n mask_sem = (semantic_gt == [l]).astype(np.int_) * 255\n mask_ins = instance_gt & mask_sem\n num_ins = np.max(mask_ins)\n if(num_ins > 30):\n print(\"WARNING: num ins %d for label l %d\" % (num_ins, l))\n\n for i in range(1, num_ins + 1):\n mask_obj = (mask_ins == [i]).astype(np.int_) * 255\n masks.append(mask_obj)\n if l == 24:\n class_ids.append(2)\n else:\n class_ids.append(1)\n masks = np.array(masks)\n masks = np.moveaxis(masks, 0, -1)\n class_ids = np.array(class_ids)\n return masks, class_ids", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. 
Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n # TODO: build dict **self.image_info** in this form\n # self.image_info.keys() = ['objects', 'imgWidth', 'imgHeight']\n # objects is a list which contains label and polygon (same as annotations form below)\n # imgHeight and imgWidth are numbers (usually 1024, 2048)\n annotations = self.image_info[image_id][\"objects\"]\n # annotations form: [{'label': label, 'polygon': [[x1,y1], [x2,y2] ...]}, ...]\n height = self.image_info[image_id]['imgHeight']\n width = self.image_info[image_id]['imgWidth']\n instance_masks = []\n class_ids = []\n for ann in annotations:\n m = self.annToMask(ann, height, width)\n \n label_tmp = ann['label']\n if ( not label_tmp in list(self.class_labels.keys()) ) and label_tmp.endswith('group'):\n label_tmp = label_tmp[:-len('group')]\n \n class_id = self.class_labels[label_tmp]\n instance_masks.append(m)\n class_ids.append(class_id)\n \n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids)\n \n return mask, class_ids", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n num_cards = info['cards']\n # count = len(num_cards)\n count = 1 # there will only ever be 1 card per image (for simplicity) TODO: do multiple documents?\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n # for i, _ in enumerate(info['cards']):\n mask[:, :, 0] = self.draw_quadrilateral(mask[:, :, 0].copy(), info['cornerpoints'], 1)\n\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n # class_ids = np.array([self.class_names.index(s[0]) for s in num_categories])\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n global iter_num\n print(\"image_id\",image_id)\n info = self.image_info[image_id]\n count = 1 # number of object\n img = Image.open(info['mask_path'])\n num_obj = self.get_obj_index(img)\n mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img,image_id)\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n \n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n labels = []\n labels = self.from_yaml_get_class(image_id)\n labels_form = []\n for i in range(len(labels)):\n if labels[i].find(\"bird\") != -1:\n # print \"bird\"\n labels_form.append(\"bird\")\n class_ids = np.array([self.class_names.index(s) for s in labels_form])\n return mask, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(\n os.path.dirname(os.path.dirname(info['path'])), 'masks')\n\n # Read mask files from .png image\n masks = []\n for file in next(os.walk(mask_dir))[2]:\n if file.endswith('.png'):\n mask = imread(os.path.join(mask_dir, file),\n as_gray=True).astype(np.bool)\n masks.append(mask)\n masks = np.stack(masks, axis=-1)\n # Return masks, and array of class IDs of each instance. 
Since we have\n # one class ID, we return an array of ones\n return masks, np.ones([masks.shape[-1]], dtype=np.int32)", "def get_object_mask(self, image_id):\n image_info = self.image_meta[image_id]\n active_class_info = image_info['active_class_info']\n object_cnt = len(active_class_info)\n mask = np.zeros([image_info['height'], image_info['width'], object_cnt], dtype=np.uint8)\n for i, (object_, _, dims) in enumerate(active_class_info):\n mask[:, :, i:i + 1] = self.draw_object_shape(mask[:, :, i:i + 1].copy(), object_, 1, dims)\n \n # Handle occlusions, when two objects intersect, we should ensure that the intersection mask is\n # given to only only object.\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n # print(occlusion)\n \n for i in range(object_cnt - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n return mask.astype(np.bool)", "def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def model_masks(self, prunable=None):\n # TODO Also accept a dataloader\n pass\n # return masks", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n\n shapes = info['polygons']\n\n for i, p in enumerate(info['polygons']):\n shape = p['shape_attributes']['name']\n mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),\n shape, p, 1)\n\n # Map class names to class IDs.\n if (self.config.MODE == \"Combined\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['element_type'])\n if 'element_type' in s['region_attributes'].keys() else self.class_names.index('door') for s in shapes])\n elif (self.config.MODE == \"Separate\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['Class']) if 'Class' in s['region_attributes'].keys(\n ) else self.class_names.index('Door (Curve)') for s in shapes])\n\n return mask, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids", "def 
load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert 16 bit mask to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)\n bin_mask = get_bin_mask(mask)\n n_instance = bin_mask.shape[-1]\n return bin_mask, np.ones([n_instance], dtype=np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n ships = info['ships']\n count = len(ships)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (ship, dims) in enumerate(info['ships']):\n mask[:, :, i:i + 1] = self.draw_mask(mask[:, :, i:i + 1].copy(),\n ship, dims)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(\n occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in ships])\n return mask, class_ids.astype(np.int32)", "def _generate_masks(self, data, batch_size):\n\n height, width = data.shape[2], data.shape[3]\n\n mask_size = (self._down_sample_size, self._down_sample_size)\n\n up_size = (height + mask_size[0], width + mask_size[1])\n mask = np.random.random((batch_size, 1) + mask_size) < self._mask_probability\n upsample = resize(op.Tensor(mask, data.dtype), up_size,\n self._resize_mode).asnumpy()\n shift_x = np.random.randint(0, mask_size[0] + 1, size=batch_size)\n shift_y = np.random.randint(0, mask_size[1] + 1, size=batch_size)\n\n masks = [sample[:, x_i: x_i + height, y_i: y_i + width] for sample, x_i, y_i\n in zip(upsample, shift_x, shift_y)]\n masks = Tensor(np.array(masks), data.dtype)\n return masks", "def load_mask(self, image_id):\n # If not a COCO image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"coco\":\n return super(ExtendedCocoDataset, self).load_mask(image_id, common.COCO_NUM_CLASSES) # NOTE: this calls ActivityDataset.load_mask()\n\n instance_masks = []\n class_ids = []\n annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n for annotation in annotations:\n class_id = self.map_source_class_id(\n \"coco.{}\".format(annotation['category_id']))\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"],\n image_info[\"width\"])\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() < 1:\n continue\n # Is it a crowd? If so, use a negative class ID.\n if annotation['iscrowd']:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. 
If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones([image_info[\"height\"], image_info[\"width\"]], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if class_ids:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(CocoDataset, self).load_mask(image_id)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n# logger.info(\"mask {}\".format(image_id))\n if info[\"mask\"] is None:\n craters = info['craters']\n count = len(craters)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, dims in enumerate(craters):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n \"circle\", dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s) for s in info[\"shapes\"]])\n info[\"mask\"] = mask.astype(np.bool)\n info[\"class_ids\"] = class_ids.astype(np.int32)\n else:\n mask, class_ids = info[\"mask\"], info[\"class_ids\"]\n return mask, class_ids", "def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids", "def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 
639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')", "def get_mask(self, img):\n raise NotImplementedError()", "def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask", "def create_masks(img_path, frame_num):\n #import the images\n key_frame = cv2.imread(img_path + \"_\" + str(frame_num) + \".png\")\n beam_mask = filter_beam(key_frame)\n key_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,key_frame), cv2.COLOR_BGR2GRAY)\n cv2.imwrite(img_path + \"_\" + str(frame_num) + \"_beamed.png\",key_frame)\n key_frame = change_contrast(key_frame, 4.0)\n\n #key_mask = cv2.imread(img_path + \"_mask_\" + str(frame_num) + \".png\",0)\n #masked_key = cv2.bitwise_and(key_frame,key_mask)\n new_frame = cv2.imread(img_path + \"_\" + str(frame_num + 1) + \".png\")\n new_frame = cv2.cvtColor(cv2.bitwise_and(beam_mask,new_frame), cv2.COLOR_BGR2GRAY)\n new_frame = change_contrast(new_frame, 4.0)\n\n #trying with a couple methods here:\n #SIFT method\n sift = cv2.SIFT_create()\n keypoints_1, descriptors_1 = sift.detectAndCompute(key_frame,None)\n keypoints_2, descriptors_2 = sift.detectAndCompute(new_frame,None)\n bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)\n\n matches = bf.match(descriptors_1,descriptors_2)\n matches = sorted(matches, key = lambda x:x.distance)\n for x in keypoints_1:\n print(x.pt)\n\n img3 = cv2.drawMatches(key_frame, keypoints_1, new_frame, keypoints_2, matches, new_frame, flags=2)\n cv2.imshow(\"matched\",img3)\n cv2.waitKey(0)\n\n #use the SIFT paradigm but do it semi-manually\n\n #active contouring method", "def generate_subclass_mask(mask_image,\n subclasses=(1, 3)):\n mask_shape = mask_image.shape\n sub_mask = generate_image(mask_shape[0], mask_shape[1], color=0)\n if mask_image.ndim == 2:\n gray_mask_image = mask_image[:, :]\n else:\n gray_mask_image = mask_image[:, :, 0]\n \n if isinstance(subclasses, (list, tuple)):\n if len(subclasses) == 2:\n keep_bool = np.logical_or(gray_mask_image == subclasses[0], gray_mask_image == subclasses[1])\n else:\n 
keep = []\n for subclass in subclasses:\n keep.append(gray_mask_image == subclass)\n\n keep_bool = np.logical_or.reduce(tuple(keep))\n else:\n keep_bool = (gray_mask_image == subclasses)\n\n sub_mask[keep_bool] = 1\n\n return sub_mask", "def layer_masks(self, module):\n pass\n # return masks", "def visualize(\n cls,\n image: np.array,\n masks: typing.List,\n filename: str = None,\n use_image: bool = False,\n ) -> np.ndarray:\n\n common_mask = cls._unite_masks(masks)\n\n if use_image:\n common_mask = np.array(\n image * common_mask[:, :, np.newaxis], dtype=np.uint8\n )\n\n assert len(np.unique(common_mask)) < 3\n\n if filename:\n # *255 to correct grayscale\n cv2.imwrite(filename, common_mask * int(255))\n\n plt.imshow(common_mask)\n plt.close()", "def object_mask(self):\n\n # Region file directory files\n if isinstance(self._region_file_dir, list):\n reg_files = {self._keyfunct(f): f for f in chain.from_iterable(glob.glob(f'{reg_dir}/*.reg')\n for reg_dir in self._region_file_dir)}\n else:\n reg_files = {self._keyfunct(f): f for f in glob.glob(f'{self._region_file_dir}/*.reg')}\n\n # Select out the IDs of the clusters needing additional masking\n clusters_to_mask = set(reg_files).intersection(self._catalog_dictionary)\n\n for cluster_id in clusters_to_mask:\n cluster_info = self._catalog_dictionary.get(cluster_id, None)\n region_file = reg_files.get(cluster_id, None)\n\n pixel_map_path = cluster_info['cov_mask_path']\n\n # Read in the coverage mask data and header.\n good_pix_mask, header = fits.getdata(pixel_map_path, header=True, ignore_missing_end=True, memmap=False)\n\n # Read in the WCS from the coverage mask we made earlier.\n w = WCS(header)\n\n try:\n assert w.pixel_scale_matrix[0, 1] == 0.\n pix_scale = (w.pixel_scale_matrix[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n except AssertionError:\n cd = w.pixel_scale_matrix\n _, eig_vec = np.linalg.eig(cd)\n cd_diag = np.linalg.multi_dot([np.linalg.inv(eig_vec), cd, eig_vec])\n pix_scale = (cd_diag[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n\n # Open the regions file and get the lines containing the shapes.\n with open(region_file, 'r') as region:\n objs = [ln.strip() for ln in region\n if ln.startswith('circle') or ln.startswith('box') or ln.startswith('ellipse')]\n\n # For each shape extract the defining parameters and define a path region.\n shapes_to_mask = []\n for mask in objs:\n\n # For circle shapes we need the center coordinate and the radius.\n if mask.startswith('circle'):\n # Parameters of circle shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region radius in arcseconds\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n # \"0\" is to correct the pixel coordinates to the right origin for the data.\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape.\n shape = Path.circle(center=cent_xy, radius=params[2] / pix_scale)\n\n # For the box we'll need...\n elif mask.startswith('box'):\n # Parameters for box shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region width in arcseconds\n # params[3] : region height in arcseconds\n # params[4] : rotation of region about the center in degrees\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n cent_x, cent_y = 
w.wcs_world2pix(params[0], params[1], 0)\n\n # Vertices of the box are needed for the path object to work.\n verts = [[cent_x - 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)],\n [cent_x - 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)]]\n\n # For rotations of the box.\n rot = Affine2D().rotate_deg_around(cent_x, cent_y, degrees=params[4])\n\n # Generate the mask shape.\n shape = Path(verts).transformed(rot)\n\n elif mask.startswith('ellipse'):\n # Parameters for ellipse shape are as follows\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region semi-major axis in arcseconds\n # params[3] : region semi-minor axis in arcseconds\n # params[4] : rotation of region about the center in degrees\n # Note: For consistency, the semi-major axis should always be aligned along the horizontal axis\n # before rotation\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape\n shape = Ellipse(cent_xy, width=params[2] / pix_scale, height=params[3] / pix_scale, angle=params[4])\n shape = shape.get_path()\n\n # Return error if mask shape isn't known.\n else:\n raise KeyError(\n f'Mask shape is unknown, please check the region file of cluster: {region_file} {mask}')\n\n shapes_to_mask.append(shape)\n\n # Check if the pixel values are within the shape we defined earlier.\n # If true, set the pixel value to 0.\n pts = list(product(range(w.pixel_shape[0]), range(w.pixel_shape[1])))\n\n shape_masks = np.array(\n [shape.contains_points(pts).reshape(good_pix_mask.shape) for shape in shapes_to_mask])\n\n # Combine all the shape masks into a final object mask, inverting the boolean values so we can multiply\n # our mask with our existing good pixel mask\n total_obj_mask = ~np.logical_or.reduce(shape_masks)\n\n # Apply the object mask to the existing good pixel mask\n good_pix_mask *= total_obj_mask.astype(int)\n\n # Write the new mask to disk overwriting the old mask.\n new_mask_hdu = fits.PrimaryHDU(good_pix_mask, header=header)\n new_mask_hdu.writeto(pixel_map_path, overwrite=True)", "def setUp(self):\n img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')\n self.results = {\n 'img_path':\n img_path,\n 'img_shape': (300, 400),\n 'instances': [{\n 'bbox': [0, 0, 10, 20],\n 'bbox_label': 1,\n 'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n 'ignore_flag': 0\n }, {\n 'bbox': [10, 10, 110, 120],\n 'bbox_label': 2,\n 'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n 'ignore_flag': 0\n }, {\n 'bbox': [50, 50, 60, 80],\n 'bbox_label': 2,\n 'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n 'ignore_flag': 1\n }]\n }", "def process_mask(self, image):\n image = np.array(image)\n image[image == 5] = 1 # set un-classified to undestroyed\n return Image.fromarray(image)", "def instance_submasks(gti):\n rc_locs = np.where(gti > 0)\n grouped_cc_rcs = util.group_items(\n np.ascontiguousarray(np.vstack(rc_locs).T),\n gti[rc_locs], axis=0\n )\n\n def bounding_box(rcs):\n rc1 = rcs.min(axis=0)\n rc2 = rcs.max(axis=0)\n return rc1, rc2\n\n for label, rcs in grouped_cc_rcs.items():\n rc1, rc2 = bounding_box(rcs)\n r_slice = slice(rc1[0], rc2[0] + 1)\n c_slice = slice(rc1[1], rc2[1] + 1)\n rc_sl = (r_slice, 
c_slice)\n subimg = gti[rc_sl]\n submask = (subimg == label).astype(np.uint8)\n\n rc_off = rc1\n yield label, submask, rc_off, rc_sl", "def mask_image(image):\n pass", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # get masks, all pixels\n np_mask = np.array(np.ones(image.shape[0:2]), dtype=bool)\n\n return np_mask", "def apply_mask(query_imgs, masks, method):\n resulting_imgs = []\n for img, mask in zip(query_imgs, masks):\n positions = np.where(mask == 255)\n if method == CBHS: # Special treatment for cell-based bg segmentation to mantain \n x_min, x_max, y_min, y_max = positions[0][0], positions[0][-1], positions[1][0], positions[1][-1]\n img = img[x_min:x_max, y_min:y_max]\n else:\n mask = mask == 255\n img = img[mask].reshape(-1, 3)\n\n resulting_imgs.append(img)\n \n if isDebug():\n addDebugImage(img)\n if isDebug():\n showDebugImage()\n print(\"Finished to apply masks\")\n \n return resulting_imgs", "def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)", "def load_mask_custom(self, image_id, image_shape):\n info = self.image_info[image_id]\n filePaths = info['maskPaths']\n classes = info['maskClasses']\n \n masks = []\n class_ids = []\n if(len(image_shape)==3):\n image_shape = image_shape[:2]\n \n # 1 filePath -- 1 class \n for i, filePath in enumerate(filePaths):\n \n if filePath.endswith(\".png\"):\n mask = cv2.imread(filePath, 0)\n mask = np.asarray(mask, dtype = \"uint8\")\n \n masks.append(mask)\n class_ids.append(classes[i])\n \n if len(masks)==0 :\n masks.append(np.zeros(image_shape, dtype = \"uint8\"))\n class_ids.append(0)\n \n image = np.stack(masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return image, class_ids", "def _produce_individual_star_masks(self, dilationWidth=4):\n # TODO: REWRITE THIS METHOD USING THE ASTROPY SEGMENTATION METHODS???\n # Yes, I THINK so...\n\n # Grab binning\n binX, binY = self.imageList[0].binning\n\n # Compute kernel shape\n medianKernShape = (np.int(np.ceil(9.0/binX)), np.int(np.ceil(9.0/binY)))\n\n # Grab the number of images (for user updates)\n numImg = self.numberOfImages\n\n # Construct a blank array to populate with masks\n starMasks = np.zeros(self.shape, dtype=int)\n\n # Loop through the images and compute individual star masks\n for imgNum, img in enumerate(self.imageList):\n print('Building star mask for image {0:g} of {1:g}'.format(imgNum + 1, numImg), end='\\r')\n # Grab the image array\n thisData = img.data.copy()\n\n # Replace bad values with zeros\n badInds = np.where(np.logical_not(np.isfinite(thisData)))\n thisData[badInds] = -1e6\n\n # Filter the image\n medImg = ndimage.median_filter(thisData, size = medianKernShape)\n\n # get stddev of image background\n mean, median, stddev = img.sigma_clipped_stats()\n\n # Look for deviates from the filter (positive values only)\n # starMask1 = np.logical_and(np.abs(thisData - medImg) > 2.0*stddev,\n # thisData > 0)\n starMask1 = (np.abs(thisData - medImg) > 2.0*stddev)\n\n # Use the scipy ndimage opening and closing to clean the mask\n starMask1 = ndimage.binary_opening(starMask1)\n starMask1 = ndimage.binary_closing(starMask1)\n\n # Clean out some edge effects.\n starMask1[:, 
-4:-1] = 0\n\n #\n # NOTE: This doesn't work when there are nebulae and galaxies in the image!\n #\n # starMask1 = make_source_mask(\n # thisData,\n # snr=2,\n # npixels=5,\n # dilate_size=11,\n # mask_value=-1e6\n # )\n\n # Try using guassian kernel convolution instead\n from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel\n\n # Initalize a dilatingKernel\n gaussian_2D_kernel = Gaussian2DKernel(10.0)\n\n # Normalize the kernel\n gaussian_2D_kernel.normalize()\n\n # If the dialation kernel is larger than 10 pixels, then use FFT\n # convolution.\n starMask11 = convolve_fft(\n starMask1.astype(float),\n gaussian_2D_kernel\n )\n\n # Mask any pixels with values greater than 0.04 (which seems to\n # produce a reasonable result.)\n peakValue = 1/(200*np.pi)\n maskThreshold = 10 * peakValue * np.exp(-0.5*((dilationWidth+0.5)/10.0)**2)\n\n starMask1 = (starMask11 > maskThreshold).astype(np.int8)\n\n # TODO: delete this code if convolution works out\n #\n # # Finally, liberally EXPAND the mask with four dilations\n # starMask1 = ndimage.binary_dilation(\n # starMask1,\n # iterations=starMaskIters\n # ).astype(np.int8)\n\n # TODO: delete this code once I verify everything is working\n #\n # # Count the number of masked neighbors for each pixel\n # neighborCount = np.zeros(thisData.shape, dtype=int)\n # for dx in range(-1,2,1):\n # for dy in range(-1,2,1):\n # neighborCount += np.roll(np.roll(starMask1, dy, axis=0),\n # dx, axis=1).astype(np.int8)\n #\n # # Find pixels with more than two masked neighbor (including self)\n # # starMask1 = np.logical_and(starMask1, neighborCount > 2)\n # starMask1 = (neighborCount > 2).astype(np.int8)\n\n # Place the final mask into its respective slice of the 3D array\n starMasks[imgNum, :, :] = starMask1\n\n # Print a newline character to preserve star mask updates\n print('')\n\n # Once ALL of the star masks have been computed, return them to the user\n return starMasks", "def mask_rcnn_inference(pred_mask_logits, pred_instances):\n cls_agnostic_mask = pred_mask_logits.size(1) == 1\n\n if cls_agnostic_mask:\n mask_probs_pred = pred_mask_logits.sigmoid()\n else:\n # Select masks corresponding to the predicted classes\n num_masks = pred_mask_logits.shape[0]\n class_pred = cat([i.pred_classes for i in pred_instances])\n indices = torch.arange(num_masks, device=class_pred.device)\n mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()\n # mask_probs_pred.shape: (B, 1, Hmask, Wmask)\n\n num_boxes_per_image = [len(i) for i in pred_instances]\n mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)\n\n for prob, instances in zip(mask_probs_pred, pred_instances):\n instances.pred_masks = prob # (1, Hmask, Wmask)", "def random_scale(im, inst_masks, mask, boxes, classes, scale):\n # scale = np.random.uniform(down, upper)\n h, w, c = im.shape\n if scale > 1:\n \"\"\"\"\"\"\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) 
* h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n im = im[offy: (offy + h), offx: (offx + w)]\n mask = mask[offy: (offy + h), offx: (offx + w)]\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n inst_masks = inst_masks[offy: (offy + h), offx: (offx + w)]\n try:\n if inst_masks.ndim > 2:\n inst_masks = np.transpose(inst_masks, (2, 0, 1)) # to (n, h, w)\n else:\n inst_masks = inst_masks.reshape((1, h, w))\n except ValueError:\n print (inst_masks.ndim, inst_masks.shape)\n raise\n else:\n inst_masks = np.zeros((0, h, w), inst_masks.dtype)\n else:\n \"\"\"\"\"\"\n canvas = np.zeros(im.shape, im.dtype) + np.array([103, 116, 123], im.dtype)\n canvas_mask = np.zeros(mask.shape, mask.dtype)\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) * h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n h_, w_, _ = im.shape\n canvas[-offy: (-offy + h_), -offx: (-offx + w_)] = im\n canvas_mask[-offy: (-offy + h_), -offx: (-offx + w_)] = mask\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n canvas_instmask = np.zeros(inst_masks.shape, inst_masks.dtype)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n if inst_masks.ndim == 2:\n inst_masks = inst_masks[:,:, np.newaxis]\n canvas_instmask[-offy: (-offy + h_), -offx: (-offx + w_)] = inst_masks\n canvas_instmask = np.transpose(canvas_instmask, (2, 0, 1)) # to (n, h, w)\n else:\n canvas_instmask = np.zeros((0, h, w), inst_masks.dtype)\n\n im, mask, inst_masks = canvas, canvas_mask, canvas_instmask\n\n boxes = _offset_boxes(boxes, im.shape, scale, [offx, offy], False)\n boxes, classes, inst_masks = _filter_invalid_boxes(boxes, classes, inst_masks, min_size=3)\n\n return im, inst_masks, mask, boxes, classes", "def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = tifffile.imread(self.mask_path[self.ids[image_id]])\r\n\r\n if np.unique(mask).__len__() > 1:\r\n count = np.unique(mask).__len__()-1 # one less because of 0\r\n\r\n mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8) # one more for background\r\n running = 0\r\n for i in np.unique(mask): #range(1, count):\r\n if ((i > 0) & ((mask == i).sum() > 0)):\r\n mask_new[:, :, running] = (mask == i)\r\n running = running + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count)\r\n else:\r\n mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)\r\n class_ids = np.zeros([1])\r\n return mask_new, class_ids.astype(np.int32)", "def makeMaskedImageFromArrays(image, mask=None, variance=None):\n cls = globals()[\"MaskedImage%s\" % suffixes[str(image.dtype.type)]]\n return cls(makeImageFromArray(image), makeMaskFromArray(mask), makeImageFromArray(variance))", "def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = self.masks[image_id]\r\n count = int(mask.max())\r\n mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background\r\n for i in range(count+1):\r\n #mask_new[:, :, i:i+1] = (mask 
== i).transpose(1, 2, 0)\r\n mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)\r\n # mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count+1) # one more fore background\r\n\r\n #add Background\r\n #class_ids[count] = 0 # add Background\r\n #mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)\r\n #class_ids[count] = 0 # add Background\r\n class_ids[0] = 0 # add Background\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)", "def load_mask(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['height'], info['width'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count) # one more fore background\r\n #add Background\r\n class_ids[0] = 0; # Background\r\n mask_new[:, :, 0] = np.invert(mask_total.astype(np.bool))\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)", "def addMaskImage(img):\r\n [h, w, c] = img.shape\r\n h_start = np.random.randint(h/2,h-1)\r\n w_start = np.random.randint(w/2, w-1)\r\n img[h_start:h-1, :,0]= np.random.randint(0,120)\r\n img[h_start:h-1, :,1]= np.random.randint(0,120) \r\n img[h_start:h-1, :,2]= np.random.randint(0,120) \r\n img[:,w_start:w-1,0]= np.random.randint(0,120)\r\n img[:,w_start:w-1,1]= np.random.randint(0,120) \r\n img[:,w_start:w-1,2]= np.random.randint(0,120) \r\n img = np.uint8(img)\r\n return img, h_start, w_start", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def get_seg_masks(self, preds, img_metas, pad_shape):\n\n mask_pred, reg_pred = preds\n h_pad, w_pad = pad_shape\n cell_region_mask, gp_mask_hor, gp_mask_ver = [], [], []\n for i, meta in enumerate(img_metas):\n h_img, w_img, _ = meta['img_shape']\n h_ori, w_ori, _ = meta['ori_shape']\n if isinstance(mask_pred, torch.Tensor):\n mask_pred = mask_pred.sigmoid().cpu().numpy()\n if isinstance(reg_pred, torch.Tensor):\n reg_pred = reg_pred.cpu().numpy()\n\n mask_pred_ = mask_pred[i, 0, :, :]\n mask_pred_resize = mmcv.imresize(mask_pred_, (w_pad, h_pad))\n mask_pred_resize = mmcv.imresize(mask_pred_resize[:h_img, :w_img], (w_ori, h_ori))\n mask_pred_resize = (mask_pred_resize > 0.5)\n cell_region_mask.append(mask_pred_resize)\n\n reg_pred1_ = reg_pred[i, 0, :, :]\n reg_pred2_ = reg_pred[i, 1, :, :]\n reg_pred1_resize = mmcv.imresize(reg_pred1_, (w_pad, h_pad))\n reg_pred2_resize = mmcv.imresize(reg_pred2_, (w_pad, h_pad))\n reg_pred1_resize = mmcv.imresize(reg_pred1_resize[:h_img, :w_img], (w_ori, h_ori))\n reg_pred2_resize = mmcv.imresize(reg_pred2_resize[:h_img, :w_img], (w_ori, h_ori))\n gp_mask_hor.append(reg_pred1_resize)\n gp_mask_ver.append(reg_pred2_resize)\n\n return list(zip(cell_region_mask, gp_mask_hor, gp_mask_ver))", "def create_all_mask(mask, num, stride):\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 
0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)", "def get_regions_mask(self, input):", "def mask(self):\n return list(self._mask_generator())", "def _reshape_instance_masks(self, keys_to_tensors):\n height = keys_to_tensors['image/height']\n width = keys_to_tensors['image/width']\n to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)\n masks = keys_to_tensors['image/object/mask']\n if isinstance(masks, tf.SparseTensor):\n masks = tf.sparse_tensor_to_dense(masks)\n masks = tf.reshape(tf.to_float(tf.greater(masks, 0.0)), to_shape)\n return tf.cast(masks, tf.float32)", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask", "def createMaskDictionary(self):\n try:\n self.maskMap = dict(list(zip(self.inds,list(range(len(self.inds))))))\n self.maskSet = set(self.inds)\n except Exception as error:\n print(\"failed in createMaskDictionary\", error)", "def load_mask_pre(self, image_id, mask_path):\n img = Image.open(mask_path)\n colors = img.getcolors()\n n_dim = np.shape(colors)\n num_obj = n_dim[0]-1 #not include the background\n\n mask = np.zeros([np.shape(img)[0], np.shape(img)[1], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img, colors)\n\n # Map class names to class IDs.\n class_ids = []\n for i in range(num_obj):\n class_ids.append(colors[i+1][1])\n\n return mask.astype(np.bool), np.array(class_ids, dtype=np.int32) #mask.astype(np.bool)", "def generate_segmentation_from_masks(masks,\n detected_boxes,\n image_height,\n image_width,\n is_image_mask=False):\n\n def expand_boxes(boxes, scale):\n \"\"\"Expands an array of boxes by a given scale.\"\"\"\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227\n # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,\n # whereas `boxes` here is in [x1, y1, w, h] form\n w_half = boxes[:, 2] * .5\n h_half = boxes[:, 3] * .5\n x_c = boxes[:, 0] + w_half\n y_c = boxes[:, 1] + h_half\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp\n\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812\n # To work around an issue with cv2.resize (it seems to automatically pad\n # with repeated border values), we manually zero-pad the masks by 1 pixel\n # prior to resizing back to the original image resolution. This prevents\n # \"top hat\" artifacts. 
We therefore need to expand the reference boxes by an\n # appropriate factor.\n\n _, mask_height, mask_width = masks.shape\n scale = max((mask_width + 2.0) / mask_width,\n (mask_height + 2.0) / mask_height)\n\n ref_boxes = expand_boxes(detected_boxes, scale)\n ref_boxes = ref_boxes.astype(np.int32)\n padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)\n segms = []\n for mask_ind, mask in enumerate(masks):\n im_mask = np.zeros((image_height, image_width), dtype=np.uint8)\n if is_image_mask:\n # Process whole-image masks.\n im_mask[:, :] = mask[:, :]\n else:\n # Process mask inside bounding boxes.\n padded_mask[1:-1, 1:-1] = mask[:, :]\n\n ref_box = ref_boxes[mask_ind, :]\n w = ref_box[2] - ref_box[0] + 1\n h = ref_box[3] - ref_box[1] + 1\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)\n\n mask = cv2.resize(padded_mask, (w, h))\n mask = np.array(mask > 0.5, dtype=np.uint8)\n\n x_0 = max(ref_box[0], 0)\n x_1 = min(ref_box[2] + 1, image_width)\n y_0 = max(ref_box[1], 0)\n y_1 = min(ref_box[3] + 1, image_height)\n\n im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (\n x_0 - ref_box[\n 0]):(x_1 - ref_box[\n 0])]\n segms.append(im_mask)\n\n segms = np.array(segms)\n assert masks.shape[0] == segms.shape[0]\n return segms", "def create_model_input_blacked(self, rgb_images, all_boxes, all_masks):\n images = []\n for i in range(all_boxes.size(0)):\n for j in range(all_boxes.size(1)):\n box = all_boxes[i][j]\n if is_dummy_box(box):\n continue\n\n image = rgb_images[i].cpu()\n\n # Apply object mask to the image\n if self.use_masks:\n mask = all_masks[i][j].cpu()\n image = image * mask\n\n image = TF.to_pil_image(image)\n image = blacken_image(image, box)\n image = TF.resize(image, (self.reduced_size, self.reduced_size))\n image = TF.to_tensor(image)\n images.append(image)\n return torch.stack(images)", "def create_mask_list(self, seg_img, K):\n all_ids = np.unique(seg_img)\n chosen_ids = np.random.choice(all_ids, K)\n\n return [(seg_img == ID).astype(np.float32) for ID in chosen_ids]", "def masks(self, **kwargs):\n\n\t\tif \"protectionFactor\" not in kwargs: \n\t\t\traise ValueError(\"must supply protectionFactor\")\n\n\t\tself.addAction(\"Masks\",kwargs)\n\n\t\treturn self", "def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)", "def random_masks(self):\n # initialize mask\n mask = np.ones((3, self.dim, self.dim))\n\n # generate one of 4 random masks\n choose = 1 # np.random.randint(0, 1)\n if choose == 0:\n mask[:, :self.dim // 2] = 0\n elif choose == 1:\n mask[:, :, :self.dim // 2] = 0\n elif choose == 2:\n mask[:, :, self.dim // 2:] = 0\n elif choose == 3:\n mask[:, self.dim // 2:] = 0\n\n return mask", "def 
_generate_mask(self):\r\n mask = np.zeros((self.width, self.height), np.uint8)\r\n size = int((self.width + self.height) * 0.01)\r\n if self.width < 32 or self.height < 32:\r\n raise Exception(\"Width and Height of mask must be at least 64!\")\r\n for _ in range(randint(1,int(0.5*self.width))):\r\n x1 = randint(0, self.width-1)\r\n thickness = 1\r\n cv2.line(mask, (0, x1),(self.height-1, x1), 1, thickness)\r\n return 1 - mask", "def test_make_mask_w_ref_image(self):\n output_mask = instance_mask(\n os.path.join(data_dir, 'geotiff_labels.geojson'),\n reference_im=os.path.join(data_dir, 'sample_geotiff.tif'),\n do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_inst_mask.tif'))\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))\n assert np.array_equal(output_mask, truth_mask)", "def get_test_pattern(img_size=(2048, 2048)):\n ny, nx = img_size\n # mask = np.zeros((ny, nx))\n\n # patterns with variable spacing\n periods = range(2, 20, 2)\n # vcounter = 0\n for ii, p in enumerate(periods):\n cell = np.zeros((p, nx))\n on_pix = int(np.ceil(p / 2))\n cell[:on_pix, :] = 1\n cell = np.tile(cell, [4, 1])\n\n if ii == 0:\n mask = cell\n else:\n mask = np.concatenate((mask, cell), axis=0)\n\n mask = mask[:, :mask.shape[0]]\n\n mask_block = np.concatenate((mask, np.rot90(mask)), axis=1)\n mask_block2 = np.concatenate((np.rot90(mask), mask), axis=1)\n\n mask_superblock = np.concatenate((mask_block, mask_block2))\n\n ny_reps = int(np.ceil(ny / mask_superblock.shape[0]))\n nx_reps = int(np.ceil(nx / mask_superblock.shape[1]))\n mask = np.tile(mask_superblock, [ny_reps, nx_reps])\n mask = mask[0:ny, 0:nx]\n\n return mask", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)", "def mask_images(im_dir, wt_dir, im_masked_dir, wt_masked_dir, imtype='intbgsub', wttype='rrhr'):\n int_suff, rrhr_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)\n int_images = sorted(glob.glob(os.path.join(im_dir, int_suff)))\n rrhr_images = sorted(glob.glob(os.path.join(wt_dir, rrhr_suff)))\n\n for i in range(len(int_images)):\n image_infile = int_images[i]\n wt_infile = rrhr_images[i]\n\n image_outfile = os.path.join(im_masked_dir, os.path.basename(image_infile))\n wt_outfile = os.path.join(wt_masked_dir, os.path.basename(wt_infile))\n\n mask_galex(image_infile, wt_infile, image_outfile, wt_outfile)", "def create_mask(masking_positions, img, cells):\n left, right, top, bottom = masking_positions\n left += 1\n right += 1\n top += 1\n bottom += 1\n mask = np.ones((img.shape[0], 
img.shape[1]))*255\n\n # Compute corresponding positions and put zeros in the background part\n left = (img.shape[1]//cells[0])*left\n mask[:, :left] = 0\n right = img.shape[1]-(img.shape[1]//cells[0])*right\n mask[:, right:] = 0\n top = (img.shape[0]//cells[1])*top\n mask[:top, :] = 0\n bottom = img.shape[0]-(img.shape[0]//cells[0])*bottom\n mask[bottom:, :] = 0\n\n masks = mask.astype(np.uint8)\n return mask", "def mask_images(self, folder_name, mask_image_name):\n\n photo_list = self.get_photo_list(folder_name)\n masked_folder_name = folder_name + '_background'\n\n try:\n print(\"Making dir \" + str(masked_folder_name) + \" for masking\")\n os.mkdir(masked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this masking??\")\n return\n\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(i)\n print (folder_name + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n masked_image = img\n\n size = img.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if full_mask_image[row_pixel, column_pixel] != 0:\n masked_image[row_pixel, column_pixel] = img[row_pixel, column_pixel]\n\n else:\n masked_image[row_pixel, column_pixel] = 0\n\n cv2.imwrite(masked_folder_name + '/' + image_name, masked_image.astype(np.uint16))", "def create_brainmask(registered_images, truncate_intensity=(.01, .99), verbose=True, antsxnet_cache_directory=None):\n\n preprocessed_image = ants.image_clone(registered_images)\n if antsxnet_cache_directory is None:\n antsxnet_cache_directory = \"ANTsXNet\"\n\n # Truncate intensity\n if truncate_intensity is not None:\n quantiles = (preprocessed_image.quantile(truncate_intensity[0]),\n preprocessed_image.quantile(truncate_intensity[1]))\n if verbose:\n print(\"Preprocessing: truncate intensities ( low =\", quantiles[0], \", high =\", quantiles[1], \").\")\n\n preprocessed_image[preprocessed_image < quantiles[0]] = quantiles[0]\n preprocessed_image[preprocessed_image > quantiles[1]] = quantiles[1]\n\n # Brain extraction\n if verbose:\n print(\"Preprocessing: brain extraction.\")\n probability_mask = antspynet.brain_extraction(preprocessed_image,\n antsxnet_cache_directory=antsxnet_cache_directory,\n verbose=verbose)\n mask = ants.threshold_image(probability_mask, 0.5, 1, 1, 0)\n\n return preprocessed_image, mask", "def _create_observation_mask(self):\n\n\n if self.BLUE_PARTIAL:\n centers, radii = [], []\n for agent in self._team_blue:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._blue_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.blue_memory = np.logical_and(self.blue_memory, self._blue_mask)\n else:\n self._blue_mask = np.zeros_like(self._static_map, dtype=bool)\n\n if self.RED_PARTIAL:\n centers, radii = [], []\n for agent in self._team_red:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._red_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.red_memory = np.logical_and(self.red_memory, self._red_mask)\n else:\n self._red_mask = np.zeros_like(self._static_map, dtype=bool)", "def do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):\n # On GPU, paste all masks together (up to chunk size)\n # by using the entire image to sample the masks\n # Compared to pasting them one by one,\n # this has more operations but is faster on 
COCO-scale dataset.\n device = masks.device\n if skip_empty:\n x0_int, y0_int = F.clip(F.floor(boxes.min(axis=0))[:2] - 1, lower=0).astype('int32')\n x1_int = F.clip(F.ceil(boxes[:, 2].max()) + 1, upper=img_w).astype('int32')\n y1_int = F.clip(F.ceil(boxes[:, 3].max()) + 1, upper=img_h).astype('int32')\n else:\n x0_int, y0_int = 0, 0\n x1_int, y1_int = img_w, img_h\n x0, y0, x1, y1 = F.split(boxes, 4, axis=1) # each is Nx1\n\n N = masks.shape[0]\n\n img_y = F.arange(y0_int, y1_int, device=device).astype('float32') + 0.5\n img_x = F.arange(x0_int, x1_int, device=device).astype('float32') + 0.5\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n # img_x, img_y have shapes (N, w), (N, h)\n # IsInf op is not supported with ONNX<=1.7.0\n\n if F.isinf(img_x).sum() > 0:\n img_x = F.where(F.isinf(img_x), F.zeros(img_x.shape[0]), img_x)\n if F.isinf(img_y).sum() > 0:\n img_y = F.where(F.isinf(img_y), F.zeros(img_y.shape[0]), img_y)\n\n\n gx = F.broadcast_to(F.expand_dims(img_x, 1), N, img_y.shape[1], img_x.shape[1])\n gy = F.broadcast_to(F.expand_dims(img_y, 2), N, img_y.shape[1], img_x.shape[1])\n\n grid = F.stack([gx, gy], axis=3)\n\n\n img_masks = F.remap(masks.astype('float32'), grid, border_mode='constant')\n\n # img_masks = F.grid_sample(masks.astype('float32'), grid, align_corners=False)\n\n if skip_empty:\n return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))\n else:\n return img_masks[:, 0], ()", "def configure_masking(self, masks):\n self.masks = masks", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5", "def getHitmask(self,image):\n\t\tmask = []\n\t\tfor x in range(image.get_width()):\n\t\t\tmask.append([])\n\t\t\tfor y in range(image.get_height()):\n\t\t\t\tmask[x].append(bool(image.get_at((x,y))[3]))\n\t\treturn mask", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def load_mask(self, image_id):\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for 
filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)", "def gen(self):\n for path, bg_idx, bbox in zip(self.img_paths, self.bgs, self.bbox):\n img = cv2.imread(self.background[bg_idx])\n for alpha, obj, box in zip(self.alphas, self.objects, bbox):\n img, mask = self.alpha_blend(img, obj, box, alpha)\n yield path, img, mask", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def read_masks(self):\n structure_mask = self.read_image(\n self.filenames[\"structure_mask\"], grayscale=True\n ).astype(np.bool)\n unknown_mask = self.read_image(self.filenames[\"unknown_mask\"], grayscale=True).astype(\n np.bool\n )\n return structure_mask, unknown_mask", "def create_facemask_label(is_training):\n facemask_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'val'\n facemask_dir = os.path.join(facemask_dir, sub_dir)\n if not os.path.isdir(facemask_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = facemask_dir\n if os.path.isdir(os.path.join(facemask_dir, 'images')):\n image_dir = os.path.join(facemask_dir, 'images')\n if os.path.isdir(os.path.join(facemask_dir, 'annotations')):\n anno_dir = os.path.join(facemask_dir, 'annotations')\n\n if not is_training:\n data_dir = config.facemask_root\n json_file = os.path.join(data_dir, config.instances_set.format(sub_dir))\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n file_name = file_name.split('.')[0] + '.jpg'\n img_id = get_imageId_from_fackmask(file_name)\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.coco_classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(float(bnd_box.find('xmin').text)) - 1\n y_min = int(float(bnd_box.find('ymin').text)) - 1\n x_max = int(float(bnd_box.find('xmax').text)) - 1\n y_max = int(float(bnd_box.find('ymax').text)) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n 
o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict", "def create_model_input_default(self, rgb_images, all_boxes, all_masks):\n box_regions = []\n for i in range(all_boxes.size(0)):\n for j in range(all_boxes.size(1)):\n box = all_boxes[i][j]\n if is_dummy_box(box):\n continue\n image = rgb_images[i].cpu()\n\n # Apply object mask to the image\n if self.use_masks:\n image = image.clone()\n mask = all_masks[i][j].cpu()\n image = image * mask\n\n box_region = get_patch_from_image(box, image)\n box_regions.append(box_region)\n\n t = T.Compose([T.ToPILImage(), T.Resize((self.reduced_size, self.reduced_size)), T.ToTensor()])\n box_regions = [t(box.cpu()) for box in box_regions]\n return torch.stack(box_regions)", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: 
# skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def signal_masks(simulatedata_cbma):\n _, (ground_truth_foci, dataset) = simulatedata_cbma\n ground_truth_foci_ijks = [\n tuple(mm2vox(focus, dataset.masker.mask_img.affine)) for focus in ground_truth_foci\n ]\n return _create_signal_mask(np.array(ground_truth_foci_ijks), dataset.masker.mask_img)", "def build_semantic_masks(self):\n with tf.variable_scope(\"build_semantic_masks\"):\n dynamic_tgt_mask = self.__priors\n static_tgt_mask = 1.0 - dynamic_tgt_mask\n return dynamic_tgt_mask, static_tgt_mask", "def get_mask_dictionary(train_names):\n masks={}\n for name in train_names:\n masks[name]=cv.imread(\"../dataset/masks/\"+name+\".png\",cv.IMREAD_GRAYSCALE)\n \n return masks", "def load_masks(self, y, encode_classes=False, one_hot=False, classes=None, open_fn=None):\n masks = self.load_images(y, open_fn=open_fn)\n if encode_classes and not one_hot: # not need for encoding a class if one_hot is requested\n mapping = {cls: i for i, cls in enumerate(classes)}\n masks = [self.encode_mask(mask, mapping) for mask in masks]\n if one_hot:\n masks = [self.one_hot_encode(mask, classes=classes) for mask in masks]\n return masks", "def im_detect_mask(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M_HEIGHT = cfg.MRCNN.RESOLUTION_H\n M_WIDTH = cfg.MRCNN.RESOLUTION_W\n if boxes.shape[0] == 0:\n pred_masks = np.zeros((0, M, M), np.float32)\n return pred_masks\n\n inputs = {'mask_rois': _get_rois_blob(boxes, im_scales)}\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'mask_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.mask_net.Proto().name)\n\n # Fetch masks\n pred_global_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_global_probs')\n ).squeeze()\n pred_char_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_char_probs')\n ).squeeze()\n # pred_char_boxes = workspace.FetchBlob(\n # core.ScopedName('mask_fcn_charbox_pred')\n # ).squeeze()\n pred_global_masks = pred_global_masks.reshape([-1, 1, M_HEIGHT, M_WIDTH])\n pred_char_masks = pred_char_masks.reshape([-1, M_HEIGHT, M_WIDTH, 37])\n pred_char_masks = pred_char_masks.transpose([0,3,1,2])\n # pred_char_boxes = pred_char_boxes.reshape([-1, 4, M_HEIGHT, M_WIDTH])\n\n return pred_global_masks, pred_char_masks, None", "def include_wcs_in_masks(input_images):\n img_list = [astroim.Astroim(im_name, memmap=True) for im_name in input_images]\n mask_names = [im.primary_header.get(\"MASK\") for im in img_list]\n output = []\n for im_object, mask_name in zip(img_list, mask_names):\n with fits.open(mask_name, 'readonly') as mask:\n mask_header = im_object.chips[0].header.hdr\n mask_data = mask[0].data.copy()\n mask_data[mask_data>0] = 1\n _, path = 
tempfile.mkstemp(suffix=\".fits\")\n fits.writeto(path, mask_data * 1., mask_header, clobber=True)\n output.append(path)\n return output", "def image_mask(kmeans_labels, img_gray_orig):\n\n\tmask_img = np.zeros((img_gray_orig.shape[0], img_gray_orig.shape[1]))\n\n\tkmeans_labels_arr = kmeans_labels.reshape(img_gray_orig.shape[0],\n\t\t\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\n\tsort_labels = sorted(pd.Series(kmeans_labels).unique(),\n\t\t\t\t\t\t\t\t\t\t\t\t\treverse = True)\n\tjust_bone = ()\n\n\tif (np.sum(kmeans_labels_arr==sort_labels[0])) > 8000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[0])\n\t mask_img[just_bone] = 1\n\t\t \n\tif (np.sum(kmeans_labels_arr==sort_labels[1])) > 8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[1])) < 60000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[1])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[2]))>8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[2])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[2])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[3]))>8000 and\\\n\t\t\t\t(np.sum(kmeans_labels_arr==sort_labels[3])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[3])\n\t mask_img[just_bone] = 1\n\t\n\tif not just_bone:\n\t\tjust_bone = np.where(kmeans_labels_arr==sort_labels[1]) \n\t\tmask_img[just_bone] = 1\n\n\treturn just_bone, mask_img", "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x", "def mask_show(image, mask, groups, name=\"image\"):\n img = cv2.addWeighted(image, 0.4, mask, 0.6, 0)\n img = sg.mark_boundaries(img, groups, color=(1,1,1))\n cv2.imshow(name, img)\n cv2.waitKey(0)" ]
[ "0.69253653", "0.6923163", "0.69147986", "0.68673575", "0.68427163", "0.6760219", "0.6754343", "0.6707345", "0.6699128", "0.66717273", "0.66254276", "0.660529", "0.6602349", "0.6544177", "0.6543895", "0.638877", "0.63362086", "0.6334344", "0.6327872", "0.630387", "0.6286461", "0.62843966", "0.6283221", "0.6261614", "0.624343", "0.62417763", "0.621866", "0.6168096", "0.6162009", "0.61533314", "0.6139637", "0.61384237", "0.61093825", "0.610778", "0.61056167", "0.60950917", "0.6094527", "0.6082216", "0.60811925", "0.6066784", "0.60664296", "0.60612595", "0.605856", "0.6035605", "0.60309595", "0.6017009", "0.6006963", "0.60062605", "0.6002839", "0.5997014", "0.59822637", "0.59756833", "0.5968956", "0.59386003", "0.5934588", "0.59028894", "0.5869483", "0.58655787", "0.5857012", "0.5850168", "0.58400416", "0.583357", "0.5831852", "0.5830849", "0.5819072", "0.5810302", "0.5793864", "0.5777842", "0.5777785", "0.5777389", "0.5775687", "0.576527", "0.5749713", "0.57415414", "0.573821", "0.5734056", "0.57325274", "0.5731437", "0.57235074", "0.57202274", "0.5717605", "0.57147753", "0.5714487", "0.571415", "0.57133967", "0.5693695", "0.56918526", "0.56902456", "0.5684035", "0.5683457", "0.5679065", "0.56677365", "0.56558585", "0.56399125", "0.5628503", "0.56270015", "0.5626128", "0.5624727", "0.5618649", "0.5615267" ]
0.59280497
55
Load a subset of the COCO dataset.
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None, class_map=None, return_coco=False, auto_download=False): if auto_download is True: self.auto_download(dataset_dir, subset, year) coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year)) if subset == "minival" or subset == "valminusminival": subset = "val" image_dir = "{}/{}{}".format(dataset_dir, subset, year) # Select class_ids from class_names: if class_names: class_ids = sorted(coco.getCatIds(catNms=class_names)) # Load all classes or a subset? if not class_ids: # All classes class_ids = sorted(coco.getCatIds()) # All images or a subset? if class_ids: image_ids = [] for id in class_ids: imgs = [] # list of images to add to image_ids # Select at most COCO_IMAGES_PER_OBJECT and select only the images # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them: for imgid in list(coco.getImgIds(catIds=[id])): if len(imgs) >= COCO_IMAGES_PER_OBJECT: break if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE: imgs.append(imgid) image_ids.extend(imgs) #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT]) # Remove duplicates image_ids = list(set(image_ids)) else: # All images image_ids = list(coco.imgs.keys()) # Add classes for i in class_ids: self.add_class("coco", i, coco.loadCats(i)[0]["name"]) # Add images for i in image_ids: #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))) self.add_image( "coco", image_id=i, path=os.path.join(image_dir, coco.imgs[i]['file_name']), width=coco.imgs[i]["width"], height=coco.imgs[i]["height"], annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))) if return_coco: return coco
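A minimal usage sketch for the load_coco method above, under stated assumptions: the method belongs to a Mask R-CNN-style CocoDataset class, pycocotools is installed, and a local COCO copy sits under /path/to/coco with annotations/instances_train2017.json and a train2017/ image folder. The CocoDataset class name, the paths, and the prepare() follow-up call are illustrative assumptions, not confirmed by the snippet; only the load_coco parameters (subset, year, class_names, return_coco) come from the code itself.

# Hypothetical usage of load_coco(); class name, paths, and prepare() are assumptions.
dataset_train = CocoDataset()                      # assumed wrapper class exposing load_coco()
coco_api = dataset_train.load_coco(
    "/path/to/coco",                               # dataset_dir containing annotations/ and train2017/
    "train",
    year="2017",
    class_names=["person", "dog"],                 # restrict loading to these categories
    return_coco=True,                              # also return the pycocotools COCO object
)
dataset_train.prepare()                            # typical follow-up in Mask R-CNN-style dataset classes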
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def load_cifar100(data_path=None, data_home=None, subsets=None,\n label_mode='fine'):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-100-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n\n label_mode = _utils.validate_option(label_mode, ['fine', 'coarse'],\n name='label_mode')\n \n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n name = [x for x in ar.member_names if x.endswith('train')]\n elif subset == 'test':\n name = [x for x in ar.member_names if x.endswith('test')]\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n assert len(name) == 1\n name = name[0]\n tmp = _load_cifar_batch(ar.open_member(name, 'rb'),\n label_key=label_mode + '_labels')\n X.append(tmp[0])\n Y.append(tmp[1])\n return np.concatenate(X), np.concatenate(Y)", "def load_occupancy_dataset(trainsize=500, testsize=1000):\n filename = 'datasets/numericsequence.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def generate_coco_dataset(args):\n\targs.data_root = Path(args.data_root)\n\targs.save_root = Path(args.save_root)\n\targs.save_root.mkdir()\n\n\tgenerate_coco_dataset_sub(args, 'train', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'train', 'B', args.cat2)\n\tgenerate_coco_dataset_sub(args, 'val', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'val', 'B', args.cat2)", "def load_dataset(self, subset):\n assert subset in ('train', 'val')\n\n # Add classes\n for id, name in self.class_mapper.items():\n self.add_class('nodule', id, name)\n\n # Add images\n self.df = self.df_all[self.df_all['subset'] == subset]\n\n image_ids = set()\n for row in self.df.itertuples():\n image_id = (row.seriesuid, row.coordZ)\n path = os.path.join(cur_dir, 'data', 'train', '{}_{}.npy'.format(row.seriesuid, row.coordZ))\n if image_id in image_ids:\n continue\n self.add_image(\"nodule\", image_id=image_id, path=path)\n image_ids.add(image_id)", "def load_data(filen, model):\n mass_sel = select_bin(model.fit_var, *model.fit_range)\n selections = [mass_sel]\n for var, bounds in model.get_load_vars():\n selections.append(\n select_bin(var, *[float(v) for v in bounds.split(',')]))\n\n load_vars = ['{costh,phi}_HX_fold'] + collect_requirements(selections)\n\n return apply_selections(get_dataframe(filen, columns=load_vars),\n selections)", "def loadSubset(self, loadsubset):\n libxml2mod.xmlParserSetLoadSubset(self._o, loadsubset)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 
'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_cifar10(data_path=None, data_home=None, subsets=None):\n if data_path is None:\n data_path = _utils.validate_data_home(data_home)\n data_path /= 'cifar-10-python.tar.gz'\n url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n _ds_utils.get_file(data_path, url)\n \n if subsets is None:\n subsets = ['training', 'test']\n subsets = _ds_utils.validate_tvt(subsets, return_list=True)\n X, Y = [], []\n with arlib.open(data_path) as ar:\n for subset in subsets:\n if subset == 'training':\n for i in range(1, 6):\n mname = [x for x in ar.member_names\n if x.endswith('data_batch_'+str(i))]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname,'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n elif subset == 'test':\n mname = [x for x in ar.member_names if x.endswith('test_batch')]\n assert len(mname) == 1\n mname = mname[0]\n tmp = _load_cifar_batch(ar.open_member(mname, 'rb'))\n X.append(tmp[0])\n Y.append(tmp[1])\n else:\n raise ValueError('Subset:', subset, ' not supported.')\n return np.concatenate(X), np.concatenate(Y)", "def __init__(self, image_set, root_path, data_path, category='all', task='detection'):\n super(coco, self).__init__('COCO', image_set, root_path, data_path)\n self.root_path = root_path\n self.data_path = data_path\n self.category = category\n self.task = task\n self.name = self.name + '_' + category\n # deal with data name\n view_map = {'minival2014': 'val2014',\n 'valminusminival2014': 'val2014'}\n self.data_name = view_map[image_set] if image_set in view_map else image_set", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", 
\"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def load_subset_data(data_path, subset_name, timesteps):\n\n selected_subset_paths = subset_paths(os.path.join(data_path, subset_name))\n selected_subset_arrays = subset_arrays(selected_subset_paths)\n\n load_selected_timesteps = lambda x: np.load(x)\n\n if timesteps is not None:\n selected_subset_timesteps = load_selected_timesteps(timesteps)\n else:\n selected_subset_timesteps = np.array(range(int(np.sum(selected_subset_arrays[\"seq_lens\"]))))\n\n return selected_subset_arrays, selected_subset_timesteps", "def load_susy(trainsize=500, testsize=1000):\n filename = 'datasets/susysubset.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset", "def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_coco_dataset():\n ds = AttrDict()\n # classes = [\n # '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n # 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n # 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n # 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n # 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n # 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n # 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n # 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n # 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n # 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n # 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n # 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n # 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n # ]\n # classes = ['__background__', 'lane']\n #\n base_classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 
'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n classes = ['__background__',\n 'guard rail',\n # 'car',\n 'dashed',\n 'solid',\n 'solid solid',\n 'dashed dashed',\n 'dashed-solid',\n 'solid-dashed',\n 'yellow dashed',\n 'yellow solid',\n 'yellow solid solid',\n 'yellow dashed dashed',\n 'yellow dashed-solid',\n 'yellow solid-dashed',\n 'boundary',\n 'fork_line',\n 'fork_edge',\n 'arrow_s',\n 'arrow_r',\n 'arrow_l',\n 'arrow_lr',\n 'arrow_inclined_r',\n 'arrow_r_s',\n 'arrow_l_s',\n 'sidewalk',\n 'handrail'\n ]\n base_classes.extend(classes[1:])\n classes = base_classes\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise", "def set_data_subset(self, subset):\n self.data_subset = subset", "def generate_coco_dataset_sub(args, idx1, idx2, cat):\n\tdata_path = args.data_root / '{}2017'.format(idx1)\n\tanno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)\t# eg. anno_path is \"datasets/COCO/annotations/instances_train2017.json\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# or \"datasets/COCO/annotations/instances_val2017.json\"\n\tcoco = COCO(anno_path) # COCO API\n\n\n\timg_path = args.save_root / '{}{}'.format(idx1, idx2)\t\t# eg. img_path is \"datasets/shp2gir_coco/trainA\" or \"datasets/shp2gir_coco/trainB\"\n\tseg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)\t# eg. img_path is \"datasets/shp2gir_coco/trainA_seg\" or \"datasets/shp2gir_coco/trainB_seg\"\n\timg_path.mkdir()\t\t\t\t\t\t\t\t\t\t\t# they are empty, therefore mkdir()s\n\tseg_path.mkdir()\n\n\tcat_id = coco.getCatIds(catNms=cat)\t\t# cat is \"sheep\" or \"giraffe\",get the category's id\n\timg_id = coco.getImgIds(catIds=cat_id)\t# get the ids of sheep/giraffe images,获得所有绵羊的图片id,或者所有长颈鹿的图片id\n\timgs = coco.loadImgs(img_id)\t\t\t# 获得所有绵羊的图片(很多张),或者所有长颈鹿的图片\n\n\t# tqdm表示进度条,progress\n\t# refer:https://tqdm.github.io/\n\tpb = tqdm(total=len(imgs))\n\tpb.set_description('{}{}'.format(idx1, idx2))\n\tfor img in imgs:\n\t\tann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_id)\t# get annotation'id\n\t\tanns = coco.loadAnns(ann_ids)\t\t\t\t\t\t\t\t# get the annotation(many)\n\n\t\tcount = 0\n\t\tfor i in range(len(anns)):\t\t\t\t# 真正从标签生成mask的地方。\n\t\t\tseg = coco.annToMask(anns[i])\t\t# annotation to mask, the type is array now\n\t\t\tseg = Image.fromarray(seg * 255)\t# turn the seg array to seg image,each pix multi 255. 
why?\n\t\t\tseg = resize(seg, args.image_size)\t# resize the seg image\n\t\t\t# np.sum\n\t\t\tif np.sum(np.asarray(seg)) > 0:\t\t\t\t\t\t\t\t# 保存seg\n\t\t\t\tseg.save(seg_path / '{}_{}.png'.format(pb.n, count))\t# pb.n 表示?\n\t\t\t\tcount += 1\n\n\t\tif count > 0: # at least one instance exists\n\t\t\timg = Image.open(data_path / img['file_name'])\n\t\t\timg = resize(img, args.image_size)\n\t\t\timg.save(img_path / '{}.png'.format(pb.n))\n\n\t\tpb.update(1)\n\tpb.close()", "def load_data(subset: str):\n df_train = pd.read_csv(f\"{DATA_PATH}/train_durations_per_speaker.csv\")\n df_test = pd.read_csv(f\"{DATA_PATH}/val_durations_per_speaker.csv\")\n df_global = pd.read_csv(f\"{DATA_PATH}/global_durations_per_speaker.csv\")\n if (subset == \"train\"):\n df = df_train\n elif (subset == \"val\"):\n df = df_test\n else:\n df = df_global\n return df", "def load_cityscapes(self, dataset_dir, subset):\n self.class_labels = {\n 'unlabeled':0,\n 'ego vehicle':1, \n 'rectification border':2,\n 'out of roi':3, \n 'static':4, \n 'dynamic':5, \n 'ground':6, \n 'road':7, \n 'sidewalk':8, \n 'parking':9, \n 'rail track':10, \n 'building':11, \n 'wall':12, \n 'fence':13, \n 'guard rail':14, \n 'bridge':15, \n 'tunnel':16, \n 'pole':17, \n 'polegroup':18, \n 'traffic light':19, \n 'traffic sign':20, \n 'vegetation':21, \n 'terrain':22, \n 'sky':23, \n 'person':24, \n 'rider':25, \n 'car':26, \n 'truck':27, \n 'bus':28, \n 'caravan':29, \n 'trailer':30, \n 'train':31, \n 'motorcycle':32, \n 'bicycle':33, \n 'license plate':34}\n \n annotation_dir = dataset_dir + 'gtFine_trainvaltest/' + subset + '_all.json'\n self.image_info = json.load(open(annotation_dir, 'r'))\n \n # Add classes\n for i in range(len(self.class_labels)):\n self.add_class(\"cityscape\", i, list(self.class_labels.keys())[i])", "def load_subset(self, vocab):\n if self.reserve_zero:\n vocab.insert(0, '__ZERO__')\n if self.allow_oov:\n vocab.insert(self.oov_index, '__OUT_OF_VOCAB__')\n indices = []\n for word in vocab:\n try:\n indices.append(self._index_dict[word])\n except KeyError:\n indices.append(self.oov_index)\n else:\n indices = [self._index_dict[word] for word in vocab]\n matrix = self.matrix[indices]\n return Vectors(matrix=matrix, vocab=vocab)", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if 
ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def loadData():\n datfile = glob.glob(DATA_PATH + 'consolidated.npy')\n return np.load(datfile[0])", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def F_subset_S5PNO2(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_RPRO_L2__NO2____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/cloud_fraction_crb_nitrogendioxide_window',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n 
'/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo_nitrogendioxide_window',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time_utc',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time_utc',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_coco_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell 
phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_dataset_cifar10():\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000: i * 10000, :, :, :],\n y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n return (x_train, y_train), (x_test, y_test)", "def load_cifar():\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader", "def load_data(path=None, num_words=None, skip_top=0,\n maxlen=None, seed=113,\n start_char=1, oov_char=2, index_from=3, **kwargs):\n # Legacy support\n if 'nb_words' in kwargs:\n warnings.warn('The `nb_words` argument in `load_data` '\n 'has been renamed `num_words`.')\n num_words = kwargs.pop('nb_words')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n if path is None:\n path = '/home/lupeng/neural-network/data/codeforces_full.pkl'\n f = load_pickle(path)\n xs = f['datas']\n ys = f['labels']\n \n if start_char is not None:\n xs = [[start_char] + [w + index_from for w in x] for x in xs]\n elif index_from:\n xs = [[w + index_from for w in x] for x in xs]\n\n if maxlen:\n new_xs = []\n new_ys = []\n for x, y in zip(xs, ys):\n if len(x) < maxlen:\n new_xs.append(x)\n new_ys.append(y)\n xs = new_xs\n ys = new_ys\n if not xs:\n raise ValueError('After filtering for sequences shorter than maxlen=' +\n str(maxlen) + ', no sequence was kept. 
'\n 'Increase maxlen.')\n if not num_words:\n num_words = max([max(x) for x in xs])\n\n # by convention, use 2 as OOV word\n # reserve 'index_from' (=3 by default) characters:\n # 0 (padding), 1 (start), 2 (OOV)\n if oov_char is not None:\n xs = [[oov_char if (w >= num_words or w < skip_top) else w for w in x] for x in xs]\n else:\n new_xs = []\n for x in xs:\n nx = []\n for w in x:\n if w >= num_words or w < skip_top:\n nx.append(w)\n new_xs.append(nx)\n xs = new_xs\n\n train_data,train_label,test_data,test_label = get_balanced_data(xs, ys)\n\n np.random.seed(seed)\n np.random.shuffle(train_data)\n np.random.seed(seed)\n np.random.shuffle(train_label)\n \n np.random.seed(2*seed)\n np.random.shuffle(test_data)\n np.random.seed(2*seed)\n np.random.shuffle(test_label)\n \n \n x_train = np.array(train_data)\n y_train = np.array(train_label)\n\n x_test = np.array(test_data)\n y_test = np.array(test_label)\n\n return (x_train, y_train), (x_test, y_test)", "def load_cup_data(train=True):\n type = \"TR\" if train else \"TS\"\n csv_file = path_data / Path(f\"ML_CUP/ML-CUP20-{type}.csv\")\n return pd.read_csv(csv_file, skiprows=7, header=None, index_col=0)", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_dataset(path: str, threshold1: float, threshold2, quantiles: int, clear_cache: bool = False):\n if clear_cache:\n generate_global_ranks(path)\n\n cc = CorrelationClustering(quantiles, threshold1, threshold2)\n for root, dirs, files in os.walk(os.path.join(path)):\n for file in files:\n cc.add_data(pd.read_csv(root + \"/\" + file, 
index_col=False).fillna(0), str(file.split(\".\")[0]))\n return cc", "def load_nc(file,var):\n\tf = netCDF4.Dataset(file,'r+')\n\tdara = f.variables[var][:]\n\tf.close()\n\treturn data", "def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' ', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset", "def load_oat1_3_big(self):\n source_df = pd.read_csv('./datasets/metabolites/OAT1OAT3Big.csv')\n source_df['SLC'] = source_df['SLC'].astype('category').cat.codes\n\n to_drop = [0, 2, 3, 4, ]\n\n df = source_df.drop(source_df.columns[to_drop], axis=1)\n\n print('Loaded in data, null values found: ', end=' ')\n print(df[pd.isnull(df).any(axis=1)])\n\n label_index = 1 # this is from source\n print(\"Data shape: \", df.shape[0])\n\n X = np.array([np.array(df.iloc[x, :]) for x in range(df.shape[0])])\n Y = np.array(source_df.iloc[:, label_index])\n\n header = np.array(df.columns)\n\n if self.scale:\n feature_scaler = StandardScaler()\n X = feature_scaler.transform(X)\n\n return X, Y, header", "def load_cifar10_data(self, data_path='data/cifar-10-batches-py',\n 
n_train_samples=50000, n_test_samples=10000):\n train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_path + '/data_batch_{}'.format(i))\n if i == 1:\n train_data = data_dic['data']\n else:\n train_data = np.vstack((train_data, data_dic['data']))\n\n train_labels += data_dic['labels']\n\n test_data_dic = unpickle(data_path + '/test_batch')\n test_data = test_data_dic['data']\n test_labels = test_data_dic['labels']\n\n train_data = train_data.reshape((len(train_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n train_data = np.rollaxis(train_data, 1, 4)\n train_labels = np.array(train_labels)\n\n test_data = test_data.reshape((len(test_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n test_data = np.rollaxis(test_data, 1, 4)\n test_labels = np.array(test_labels)\n\n self.train_dataset = {'data': train_data[0:n_train_samples],\n 'labels': train_labels[0:n_train_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_train_samples):\n self.train_dataset['cls'][i][self.train_dataset['labels'][i]] = 1.\n\n self.test_dataset = {'data': test_data[0:n_test_samples],\n 'labels': test_labels[0:n_test_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_test_samples):\n self.test_dataset['cls'][i][self.test_dataset['labels'][i]] = 1.\n\n self.train_dataset['data_array'] = np.array(\n [item.flatten() for item in self.train_dataset['data']])\n\n self.train_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.train_dataset['labels']])\n\n self.train_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.train_dataset['cls']])\n\n self.test_dataset['data_array'] = np.array(\n [item.flatten() for item in self.test_dataset['data']])\n\n self.test_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.test_dataset['labels']])\n\n self.test_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.test_dataset['cls']])\n\n return None", "def read_netcdf(self,filename):", "def load_data(self):\n raw_data = np.genfromtxt(self.data_file, delimiter=',')\n self.n_clusters = int(raw_data[-1][-1] + 1)\n self.n_points = len(raw_data) // self.n_clusters\n \n # group data according to label\n data = [raw_data[raw_data[:,-1] == i][:,:-1] \\\n for i in range(self.n_clusters)]\n\n # take only a subset of the data\n if self.split:\n assert 0 <= self.split <= 1, \"Split must be in [0, 1)\"\n\n # update dataset info and print to stdout\n self.n_points = int(self.split * len(data[0]))\n subsampled = self.__len__() - int(self.ood is not None) * self.n_points\n print(f\"INFO: Subsampled {subsampled}/{len(raw_data)} points\")\n \n return [cluster[:self.n_points] for cluster in data]\n return data", "def load_data(self,split='train'):\n raise NotImplementedError", "def load_oat1_3_small(self):\n source_df = pd.read_csv('./datasets/metabolites/OAT1OAT3Small.csv')\n source_df['SLC'] = source_df['SLC'].astype('category').cat.codes\n\n to_drop = [0, 2, 3, 4, ]\n\n df = source_df.drop(source_df.columns[to_drop], axis=1)\n\n print('Loaded in data, null values found: ', end=' ')\n print(df[pd.isnull(df).any(axis=1)])\n\n label_index = 1 # this is from source\n print(\"Data shape: \", df.shape[0])\n\n X = np.array([np.array(df.iloc[x, :]) for x in range(df.shape[0])])\n Y = np.array(source_df.iloc[:, label_index])\n\n header = np.array(df.columns)\n\n if self.scale:\n feature_scaler = 
StandardScaler()\n X = feature_scaler.transform(X)\n\n return X, Y, header", "def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])", "def test_load_selections3(self, selection):\n self.image_set.create_subset()\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n )", "def load_cifar_data():\n train_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_train, shuffle=True, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n torchvision.datasets.CIFAR10('cifarfiles/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])),\n batch_size=batch_size_test, shuffle=True, pin_memory=True)\n return train_loader, test_loader", "def __call__(self):\n\n dataset = TextOnlyCocoAnnotation()\n\n with open(self.path) as read_file:\n\n json_loaded = json.load(read_file)\n\n for i, value in tqdm(json_loaded['imgs'].items()):\n image_path = os.path.join(os.path.dirname(self.path), 'train2014',\n value['file_name'])\n dataset_type = value['set']\n\n if dataset_type not in self.sets:\n print(dataset_type)\n continue\n\n for annotation_id in json_loaded['imgToAnns'][i]:\n annotation_value = json_loaded['anns'][str(annotation_id)]\n word_annotation = self.parse_annotation_instance(annotation_value)\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def load_cifar() -> Tuple[torchvision.datasets.CIFAR10, torchvision.datasets.CIFAR10]:\n \n # Define the transform for the data.\n transform = transforms.Compose(\n [transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n )\n \n # Initialize Datasets. 
CIFAR-10 will automatically download if not present\n trainset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=True, download=True, transform=transform\n )\n testset = torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=False, download=True, transform=transform\n )\n \n # Return the datasets\n return trainset, testset", "def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels", "def test_Conll2003Loader(self):\n dataset_path = \"test/data_for_tests/conll_2003_example.txt\"\n loader = Conll2003Loader()\n dataset_2003 = loader.load(dataset_path)", "def __init__(self, subset=-1, verbose=False, month='02', year='2019'):\n\n if not os.path.exists(f'/tmp/gosat{year}{month}.tsv'):\n if verbose:\n print(\"Cache not found, downloading data...\")\n try:\n with open(f'/tmp/gosat{year}{month}.tsv', 'w') as data:\n r = requests.get(f'https://www.eorc.jaxa.jp/GOSAT/GPCG/download/data-g2-{year}{month}.txt')\n if verbose:\n print(\"Downloaded data\")\n lines = r.text.split('\\n')[11:subset]\n for l in lines:\n l = '\\t'.join(l.split()) + \"\\n\"\n data.write(l)\n except:\n os.remove(f'/tmp/gosat{year}{month}.tsv')\n raise ConnectionError(\"You need an internet connection to download the data\")\n \n df = pd.read_csv(f'/tmp/gosat{year}{month}.tsv', '\\t')\n if verbose:\n print(\"Dataset loaded\")\n self.df = df", "def test_select_roi():\n _c = io.create_sample_Dataset(n_frames=5, rows=10, cols=10)\n _c = _c.sel(x=slice(35, 70), y=slice(30, 90))\n assert _c.u.shape == (7, 2, 5) # note the last dimension is preserved", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n 
f\"{self.dir}/data/coco/val2014\")", "def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3", "def load_data(self, dataset='cifar10', label_mode='fine'):\n if dataset == 'cifar10':\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, f'data_batch_{i}') for i in range(1, 6)])\n x_test, y_test = self.load_from_path(\n [os.path.join(self.root, 'test_batch')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar10.load_data()\n elif dataset in ['cifar20', 'cifar100']:\n if self.root:\n x_train, y_train = self.load_from_path(\n [os.path.join(self.root, 'train')], label_key=label_mode)\n x_test, y_test = self.load_from_path([os.path.join(self.root, 'test')])\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n return (x_train, y_train), (x_test, y_test)\n else:\n return tf.keras.datasets.cifar100.load_data(label_mode=label_mode)", "def load_data(path=\"../data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.sparse.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n# labels = encode_onehot(idx_features_labels[:, -1])\n values = np.unique(idx_features_labels[:, -1])\n values.sort()\n labels = np.zeros(idx_features_labels.shape[0])\n for i in range(labels.shape[0]):\n labels[i] = np.where(values == idx_features_labels[i, -1])[0][0]\n labels = torch.tensor(labels).long()\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.sparse.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n# features = normalize(features)\n adj = normalize(adj + sp.sparse.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n# labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = 
torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test", "def __init__(self, dataset: SizedDataset, predicate: Callable):\n\t\tindices = [i for i in range(len(dataset)) if predicate(dataset[i])]\n\t\tsuper().__init__(dataset)\n\t\tself._subset = Subset(dataset, indices)", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def load_data(path=\"./data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n features = normalize_features(features)\n adj = normalize_adj(adj + sp.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n adj = torch.FloatTensor(np.array(adj.todense()))\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1])\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test", "def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)", "def load_data(self,split='train'):\n raise ValueError('Please implement me!')", "def Cifar10_preload_and_split(path=None, splits=[0.4, 0.1, 0.25, 0.25], transform=None):\n\n if path is None:\n path = DATASETS_DIR\n index_file = os.path.join(path, 'cifar10.index.csv')\n\n indices = None\n if os.path.exists(index_file):\n index_csv = np.loadtxt(index_file)\n indices = torch.tensor(index_csv)\n print('Found predefined indexing file {}'.format(index_file))\n \n trainset = torchvision.datasets.CIFAR10(path, train=True, transform=transform[0], download=False)\n testset = torchvision.datasets.CIFAR10(path, train=False, transform=transform[0], download=False)\n fullset = ConcatDataset([trainset, testset])\n print('Initializing CIFAR10Dataset splits')\n \n # Currently five equal splits\n dset_size = fullset.cumulative_sizes[-1]\n int_splits = []\n for i in range(len(splits)):\n int_splits.append(int(dset_size * splits[i]))\n if sum(int_splits) < dset_size:\n rem = dset_size - sum(int_splits)\n int_splits[-1] += rem\n\n indices, splitsets = dataset_split(fullset, int_splits, indices=indices)\n\n if not os.path.exists(index_file):\n print('No predefined indexing file found, so index permutations saving to {}'.format(index_file))\n np.savetxt(index_file, indices.numpy(), fmt='%i', delimiter=',')\n\n print('Finished splitting data.')\n\n return splitsets", "def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)", "def load_data(self):\n self.tif_file = 
self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def _load_a_couple0(self, path):\n assert(self._initialisation)\n X = pd.read_hdf(path[0], key='s')\n Y = np.load(path[1])\n return X , Y", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n anns = resFile\n annsImgIds = [ann['image_id'] for ann in anns]\n\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n\n if 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n ann['area'] = bb[2] * bb[3]\n ann['id'] = id + 1\n ann['iscrowd'] = 0\n else:\n return res\n\n res.dataset['annotations'] = anns\n createIndex(res)\n return res", "def test_full_dataset_from_file(full_dataset):\n train_dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. Proin sed\"\n val_dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. Mauris magna.\"\n\n assert full_dataset.train[0][0] == train_dummy\n assert full_dataset.train[0][1] == '6'\n\n assert full_dataset.val[0][0] == val_dummy\n assert full_dataset.val[0][1] == '8'\n\n assert full_dataset[0][0] == train_dummy\n assert full_dataset[100][0] == val_dummy", "def load_dataset(name, cnn, load_train=True, fold=0):\n loc = paths.dataset_dir[name]\n\n splits = []\n if load_train:\n splits = ['train', 'dev']\n else:\n splits = ['dev', 'test']\n\n\n dataset = {}\n\n for split in splits:\n dataset[split] = {}\n caps = []\n splitName = 'val' if (name == 'coco' or name == 'flickr30k') and split == 'dev' else split\n with open('%s/%s.txt' % (loc, splitName), 'rb') as f:\n for line in f:\n caps.append(line.strip())\n dataset[split]['caps'] = caps\n\n dataset[split]['ims'] = numpy.load('%s/images/%s/%s.npy' % (loc, cnn, splitName))\n dataset[split]['cap_tps'] = numpy.load('%s/topics/tmp/doc-topic_%s_line_t100.npy' % (loc, splitName))\n #dataset[split]['im_tps'] = numpy.load('%s/topics/t100/doc-topic_%s_t100.npy' % (loc, splitName))\n dataset[split]['im_tps'] = numpy.load('%s/topics/tmp/im_pred_%s.npy' % (loc, splitName))\n \n # norm topic vectors\n dataset[split]['cap_tps'] = (dataset[split]['cap_tps'].T / (dataset[split]['cap_tps'].max(axis=1) + 1e-30)).T\n dataset[split]['im_tps'] = (dataset[split]['im_tps'].T / (dataset[split]['im_tps'].max(axis=1) + 1e-30)).T\n # handle coco specially by only taking 1k or 5k captions/images\n if split in ['dev', 'test'] and fold >= 0:\n dataset[split]['ims'] = dataset[split]['ims'][fold*1000:(fold+1)*1000]\n dataset[split]['im_tps'] = dataset[split]['im_tps'][fold*1000:(fold+1)*1000]\n dataset[split]['caps'] = dataset[split]['caps'][fold*5000:(fold+1)*5000]\n dataset[split]['cap_tps'] = dataset[split]['cap_tps'][fold*5000:(fold+1)*5000]\n\n return dataset", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def load_data_pickle(self, 
load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def coco_raw_data(data_path=None):\n train= _read_chars(os.path.join(data_path, \"train_caps.txt\"))\n val = _read_chars(os.path.join(data_path, \"dev_caps.txt\"))\n test = _read_chars(os.path.join(data_path, \"test_caps.txt\"))\n chars = set(train)\n id_2_word = dict(enumerate(chars))\n word_to_id = {i: w for w, i in id_2_word.items()}\n train_data = _file_to_word_ids(train, word_to_id)\n valid_data = _file_to_word_ids(val, word_to_id)\n test_data = _file_to_word_ids(test, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word", "def _load_subset(cls_file, subset):\n with open(cls_file) as cls_yml:\n cls_dict = yaml.load(cls_yml)\n\n if not subset:\n return cls_dict\n\n try:\n return {x:cls_dict[x] for x in subset}\n except KeyError as err:\n keys = ', '.join(cls_dict.keys())\n raise ValueError('{} not in {}'.format(err.args[0], keys))", "def readFiles(opt, path, pathCopyData,minlat, maxlat, minlon, maxlon , variables, estaciones):\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n dirr = pathCopyData\n patron2 = re.compile(date)\n print(dirr + 'tfile.txt')\n tempfile = df.read_csv(dirr + 'tfile.txt')\n tempbase = df.read_csv(dirr + 'tbase.txt')\n tfile = list(tempfile.values.flatten())\n tbase = list(tempbase.values.flatten())\n tfileCopy = list(tempfile.values.flatten())\n tbaseCopy = list(tempbase.values.flatten())\n l = len(tfile)\n for i in range(l):\n tfil = tfile[i]\n tbas = tbase[i]\n ls = tbas + '/' + tfil\n f = patron2.findall(tfil)\n cadena = clearString(tfil)\n print(cadena)\n try:\n #net = open_netcdf(ls, tfil, cadena, pathCopyData)\n net = Dataset(ls)\n for xs in range(len(estaciones)):\n minlat1 = minlat[xs]\n maxlat1 = maxlat[xs]\n minlon1 = minlon[xs]\n maxlon1 = maxlon[xs]\n estacion = estaciones[xs]\n #checkFile(net, tfil, f[0], opt, path, minlat1, maxlat1, minlon1, maxlon1, variables, estacion)\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat1):int(maxlat1),int(minlon1):int(maxlon1)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], f[0], opt, path, estacion)\n tfileCopy.remove(tfil)\n tbaseCopy.remove(tbas)\n except (OSError, EOFError) as e:\n print(e)\n fdata = df.DataFrame(tfileCopy, columns=['nameFile'])\n fbas = df.DataFrame(tbaseCopy, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbas.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)\n if os.path.exists(pathCopyData + cadena):\n os.remove(pathCopyData + cadena)\n sys.exit()\n # readFiles(1);\n except tarfile.ReadError:\n print('error2')\n # fdata = df.DataFrame(tfile,columns=['nameFile']);\n # fbas = df.DataFrame(tbase,columns=['nameBase']);\n # fdata.to_csv(dirr+'tfile.txt',encoding='utf-8',index=False);\n # fbas.to_csv(dirr+'tbase.txt',encoding='utf-8',index=False);\n # readFiles(1);\n except (KeyError, FileNotFoundError):\n print('ERROR DE LECTURA')", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = 
fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n 
train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, val_subcat", "def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n 
for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res", "def load_CIFAR10(path):\r\n sampleList = []\r\n labelList = []\r\n # load all the data, as there only five training samples name as data_batch_id\r\n for i in range(1, 6):\r\n # get full filename\r\n filename = os.path.join(path, 'data_batch_%d' % (i, ))\r\n x, y = load_CIFAR_batch(filename)\r\n\r\n sampleList.append(x)\r\n labelList.append(y)\r\n\r\n # combine elements as one array\r\n Xtr = np.concatenate(sampleList)\r\n Ytr = np.concatenate(labelList)\r\n del x, y\r\n print(\"Training data loaded, total size : %d\", len(Xtr))\r\n # load test data\r\n Xte, Yte = load_CIFAR_batch(os.path.join(path, 'test_batch'))\r\n return Xtr, Ytr, Xte, Yte", "def load_data(self) -> None:", "def test_load_selections(self, selection):\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )\n for pixel in self.subset._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [0.0, 100.0, 0.0, 255.]\n )", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def load_cows(filename):\n with open(filename) as f:\n cow_set={}\n for line in f:\n cow_info = line.strip().split(\",\")\n cow_set[cow_info[0]] = int(cow_info[1])\n \n return cow_set", "def load_from_netcdf(filename):\n filename = os.path.join(datadir, filename + '.nc')\n return xr.open_dataarray(filename)", "def load_CIFAR_batch(filename):\n with open(filename, 
'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def load_data(year, begin=0, end=None):\r\n\r\n fname = os.path.join('..', 'Data', 'Morphometrics for Bogue ' + str(year) + '.csv')\r\n data = pd.read_csv(fname, header=0, delimiter=',')\r\n\r\n data = data.loc[data['Profile No.'] >= begin]\r\n\r\n if end is not None:\r\n data = data.loc[data['Profile No.'] <= end]\r\n\r\n return data", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_data(self):", "def load_coworkers(args):\n if os.path.isfile(args.coworkers):\n with open(args.coworkers, 'r') as c:\n list_coworkers = json.load(c)\n else:\n list_coworkers = []\n coworkers = []\n for coworker_set in list_coworkers:\n for pair in itertools.combinations(coworker_set, 2):\n # print(\"pair is {}\".format(pair))\n coworkers.append(set(pair))\n return coworkers", "def load_data(filename):\n return InferenceData.from_netcdf(filename)", "def main(inv1, inv2, countries, profile_path_in, profile_path_out):\n\n for path_1, path_2 in zip(profile_path_in, profile_path_out):\n shutil.copy(path_1,path_2)\n\n for done,path in enumerate(profile_path_out):\n with Dataset(path,'a') as prof:\n for v in prof.variables.copy():\n if v == 'country':\n continue\n var = prof[v]\n\n nc_vars = []\n for inv in [inv1, inv2]:\n nc_var = prof.createVariable(v+'_'+inv, var.dtype, var.dimensions)\n nc_var.long_name = var.long_name + \" for inventory %s\" % (inv)\n nc_var.units = \"1\"\n nc_var.comment = var.comment\n nc_var[:] = var[:]\n nc_vars.append(nc_var)\n\n if not done:\n for i,c in enumerate(prof['country'][:]):\n country_name = [name for name, code in cc.items() if (code == c)]\n deleted = False\n for todel in countries:\n if todel in country_name:\n print(nc_vars[0])\n nc_vars[0][:,i] = np.zeros(SHAPE)\n deleted = True\n if not deleted:\n print(nc_vars[1])\n nc_vars[1][:,i] = np.zeros(SHAPE)", "def load_partition(idx: int):\r\n assert idx in range(10)\r\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\r\n return (\r\n x_train[idx * 5000 : (idx + 1) * 5000],\r\n y_train[idx * 5000 : (idx + 1) * 5000],\r\n ), (\r\n x_test[idx * 1000 : (idx + 1) * 1000],\r\n y_test[idx * 1000 : (idx + 1) * 1000],\r\n )", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_ccs10():\n ccs10 = pd.read_csv(pkg_resources.resource_filename(__name__,'ccs_dx_icd10cm_2019_1.csv'))\n ccs10.columns=[i.strip('\\'') for i in ccs10.columns]\n for col in ccs10.columns:\n ccs10.loc[:,col]=ccs10[col].str.strip('\\'')\n ccs10 = ccs10.replace(r'^\\s*$', np.nan, regex=True)\n ccs10.loc[:,'ICD-10-CM CODE'] = ccs10['ICD-10-CM CODE'].str.replace(' ','')\n ccs10=ccs10.iloc[:,0:4]\n return ccs10", "def coco_load_split_from_tfds(batch_size,\n *,\n train,\n preprocess_fn,\n decode_fn,\n cache=False,\n max_size=1333,\n max_boxes=100,\n shuffle_buffer_size=1000,\n shuffle_seed=0):\n split = 'train' if train else 'validation'\n builder = tfds.builder('coco/2017')\n\n # Each host is responsible for a fixed subset of data.\n base_split_name, host_start, host_end = dataset_utils.get_data_range(\n builder, split, jax.process_index(), jax.process_count())\n data_range = tfds.core.ReadInstruction(\n base_split_name, unit='abs', from_=host_start, to=host_end)\n ds 
= builder.as_dataset(split=data_range, shuffle_files=False)\n options = tf.data.Options()\n options.threading.private_threadpool_size = 48\n ds = ds.with_options(options)\n ds = ds.map(decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if cache:\n ds = ds.cache()\n\n # TLDR: make sure max_boxes is set >=64.\n # NOTE: the number of boxes/labels always needs to be strictly larger than 63\n # to ensure that there is at least one dummy target corresponding\n # to an empty bounding box, and that the last target box is such a dummy\n # empty target. This is needed for matching functions that in principle only\n # produce matches with non-empty target boxes, and produce dummy matches\n # with an empty target for the rest of the unmatched predicted boxes. The\n # latter behaviour is necessary to ensure that the number of matches per\n # datapoint is the same for all datapoints and shapes are static and jit\n # compatible.\n padded_shapes = {\n 'inputs': [max_size, max_size, 3],\n 'padding_mask': [max_size, max_size],\n 'label': {\n 'area': [max_boxes,],\n 'boxes': [max_boxes, 4],\n 'objects/id': [max_boxes,],\n 'is_crowd': [max_boxes,],\n 'labels': [max_boxes,],\n 'image/id': [],\n 'orig_size': [2,],\n 'size': [2,]\n },\n }\n\n if train:\n # First repeat then batch.\n ds = ds.shuffle(shuffle_buffer_size, seed=shuffle_seed)\n ds = ds.repeat()\n # Augmentation should be done after repeat for true randomness.\n ds = ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n ds = ds.padded_batch(batch_size, padded_shapes=padded_shapes,\n drop_remainder=True)\n\n else:\n ds = ds.map(preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # First batch then repeat.\n ds = ds.padded_batch(batch_size, padded_shapes=padded_shapes,\n drop_remainder=False)\n ds = ds.repeat()\n\n ds = ds.prefetch(tf.data.experimental.AUTOTUNE)\n return ds, builder.info", "def set_subset(self):\r\n if self._random_subset:\r\n perm = torch.randperm(len(self._indices))\r\n self._subset = self._indices[perm][:self._subset_size]\r\n else:\r\n self._subset = torch.Tensor(self._indices[:self._subset_size])", "def test_load_selections2(self, selection):\n self.image_set._subsets = []\n selection.load_selections([SAMPLE_ROI])\n rows, cols = np.column_stack(self.roi_coords)\n for pixel in self.image_set._roi_data[rows, cols]:\n assert np.array_equal(\n pixel, [255.0, 0.0, 0.0, 255.]\n )", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n if self.subset == 'train':\n image_paths, annotation_paths = self.collect_train_paths()\n elif self.subset == 'val':\n image_paths, annotation_paths = self.collect_val_paths()\n\n for image_path, annotation_path in tqdm(zip(image_paths, annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def load_cifar10(data_path=\".\", test_size=0.2, random_state=1337):\n test_path = os.path.join(data_path, \"cifar-10-batches-py/test_batch\")\n train_paths = [os.path.join(data_path, 
\"cifar-10-batches-py/data_batch_%i\" % i) for i in range(1, 6)]\n\n if not os.path.exists(test_path) or not all(list(map(os.path.exists, train_paths))):\n print (\"Dataset not found. Downloading...\")\n download_cifar(data_path,\n url='https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tarname='cifar-10-python.tar.gz')\n\n train_batches = list(map(unpickle, train_paths))\n test_batch = unpickle(test_path)\n\n X = np.concatenate([batch[\"data\"] for batch in train_batches]).reshape([-1, 3, 32, 32]).astype('float32') / 255\n y = np.concatenate([batch[\"labels\"] for batch in train_batches]).astype('int32')\n X_train, X_val, y_train, y_val = train_test_split(X, y,\n test_size=test_size,\n random_state=random_state)\n\n X_test = test_batch[\"data\"].reshape([-1, 3, 32, 32]).astype('float32') / 255\n y_test = np.array(test_batch[\"labels\"]).astype('int32')\n\n return X_train, y_train, X_val, y_val, X_test, y_test", "def F_subset_S5PCH4(self,path,if_trop_xch4=False,s5p_product='RPRO'): \n from scipy.interpolate import interp1d\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_'+s5p_product+'_L2__CH4____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n \n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n #maxsza = self.maxsza \n #maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. 
should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n if if_trop_xch4:\n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/dry_air_subcolumns',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_pressure',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/pressure_interval',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/methane_profile_apriori',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','dry_air_subcolumns','surface_pressure','pressure_interval',\n 'methane_profile_apriori','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n if if_trop_xch4:\n sounding_interp = F_interp_geos_mat(outp_nc['lonc'],outp_nc['latc'],outp_nc['UTC_matlab_datenum'],\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT'])\n outp_nc['TROPPT'] = sounding_interp['TROPPT']\n #f1 = outp_nc['SolarZenithAngle'] <= maxsza\n #f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n if np.sum(validmask) == 0:\n continue\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n if if_trop_xch4:\n # calculate trop xch4 using l2g_data0\n l2g_data0['air_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['air_column_total'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['methane_ap_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n for il2 in range(len(l2g_data0['latc'])):\n cum_air = np.concatenate(([0.],np.cumsum(l2g_data0['dry_air_subcolumns'][il2,].squeeze())))\n cum_methane = np.concatenate(([0.],np.cumsum(l2g_data0['methane_profile_apriori'][il2,].squeeze())))\n # model top is 10 Pa, 12 layers, 13 levels\n plevel = 10.+np.arange(0,13)*l2g_data0['pressure_interval'][il2]\n tropp = l2g_data0['TROPPT'][il2]\n l2g_data0['air_column_total'][il2] = np.sum(l2g_data0['dry_air_subcolumns'][il2,])\n f = interp1d(plevel,cum_air)\n l2g_data0['air_column_strat'][il2] = f(tropp)\n f = interp1d(plevel,cum_methane)\n l2g_data0['methane_ap_column_strat'][il2] = f(tropp)\n del l2g_data0['dry_air_subcolumns']\n del l2g_data0['methane_profile_apriori'] \n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def index_subset(subset, numTestClasses=3, randomSeed=123):\n print(\"datasets.py is ingesting the DF dataset from data/DC.npz\")\n npzfile = np.load(\"data/DC.npz\", allow_pickle=True)\n data = npzfile[\"data\"]\n labels = npzfile[\"labels\"]\n npzfile.close()\n distinctLabelCount= len(set(labels))\n \n #print(\"data\", data.shape)\n #print(\"labels\", labels.shape)\n #print(\"distinctLabelCount\", distinctLabelCount)\n \n #random.seed(randomSeed)\n testClasses = random.sample(range(0,distinctLabelCount),numTestClasses)\n testClasses = [np.float64(i) for i in testClasses]\n \n \n mask = np.isin(labels,testClasses)\n\n X_test = data[np.where(mask)]\n X_train = data[np.where(~mask)]\n y_test = labels[np.where(mask)]\n y_train = labels[np.where(~mask)]\n \n if subset==\"background\":\n print(\"Current working directory is \", 
os.getcwd())\n X = X_train\n y = y_train\n print(\"DC background data shape\", X.shape)\n elif subset==\"evaluation\":\n X = X_test\n y = y_test\n print(\"DC evaluation data shape\", X.shape)\n images = []\n \n #print(\"y shape\", y.shape)\n \n \n for i in range(y.shape[0]):\n images.append({\n 'subset': subset,\n 'class_name': y[i],\n 'data': X[i].reshape(1,X[1].shape[0]) # This is the shape needed for Matching Networks\n #'data': X[i] #.reshape(X[1].shape[0]) # This is the shape needed for MAML\n })\n \n return images", "def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data", "def _init_img_dataset(self, dataset_path):\n\n # ==\n # Define the classes used in the various states\n # form: (state class : cifar label class)\n class_dict = {\n 'initial': 'automobile',\n 'choice_1': 'dog',\n 'choice_2': 'cat',\n 'corridor': 'bird',\n }\n\n # ==\n # Download / initialize dataset\n ds = CIFAR10(dataset_path, train=self.training,\n download=True)\n\n # Get the CIFAR class index for each of the state classes\n cifar_class_dict = {\n k: ds.class_to_idx[class_dict[k]] for k in class_dict\n }\n\n # Iterate over the CIFAR dataset and get the idxs to each class\n cifar_indexes = {k: [] for k in class_dict}\n for i in range(len(ds)):\n cur_cifar_class = ds[i][1]\n for k in class_dict:\n if cur_cifar_class == cifar_class_dict[k]:\n cifar_indexes[k].append(i)\n\n # Manually sub-sample choice classes\n for k in ['choice_1', 'choice_2']:\n n_imgs = min(self.num_ds_imgs, len(cifar_indexes[k]))\n rng = np.random.default_rng()\n choice_imgs = rng.choice(cifar_indexes[k], size=n_imgs,\n replace=False)\n cifar_indexes[k] = choice_imgs\n\n # Manually shuffle the corridor class\n rng = np.random.default_rng()\n corri_img_shufIdxs = rng.choice(cifar_indexes['corridor'],\n size=len(cifar_indexes['corridor']),\n replace=False)\n cifar_indexes['corridor'] = corri_img_shufIdxs\n\n # ==\n # Construct the data subset dictionary\n ds_dict = {}\n for k in class_dict:\n ds_dict[k] = Subset(ds, cifar_indexes[k])\n\n return ds_dict" ]
[ "0.62512195", "0.6004767", "0.5874558", "0.5784675", "0.57171506", "0.56779504", "0.5658244", "0.5657759", "0.56274396", "0.5618919", "0.5617201", "0.55943984", "0.55781955", "0.55633026", "0.55596197", "0.5551278", "0.54980606", "0.54674935", "0.5462153", "0.54568654", "0.5433031", "0.54185146", "0.54123944", "0.54052603", "0.54035753", "0.5396853", "0.5372947", "0.53500956", "0.53428036", "0.534053", "0.5338753", "0.53359586", "0.5320726", "0.5317437", "0.5304645", "0.5297008", "0.52912724", "0.5290957", "0.5285514", "0.52780676", "0.5273162", "0.52693963", "0.52603585", "0.5222573", "0.5219514", "0.51972806", "0.5181303", "0.5179186", "0.5178229", "0.517305", "0.5158575", "0.51580805", "0.51572496", "0.51490116", "0.5140349", "0.51250345", "0.5122827", "0.5122754", "0.51223165", "0.51161146", "0.51154226", "0.5108666", "0.51070356", "0.5105997", "0.51031137", "0.51005256", "0.50980556", "0.5092329", "0.50880575", "0.508687", "0.50816774", "0.50778973", "0.50652325", "0.506247", "0.5056847", "0.5051913", "0.50410104", "0.50363135", "0.5033477", "0.50326306", "0.5030693", "0.5024479", "0.5013837", "0.5013737", "0.5006719", "0.50057137", "0.50034404", "0.50021935", "0.4999956", "0.49965605", "0.49797633", "0.49750993", "0.49672142", "0.49663332", "0.49642366", "0.49619246", "0.49617514", "0.49516353", "0.49410495", "0.49408978" ]
0.64328516
0
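Illustrative only, and not part of the dataset itself: a minimal Python sketch of how one row with the field layout seen above and below (query, document, negatives, negative_scores, document_score, document_rank, and the "triplet" objective named in the metadata) could be turned into (anchor, positive, negative) triplets. The row literal is a shortened, hypothetical stand-in; the field names mirror the rows in this file, while the helper name and everything else is an assumption.

# Hypothetical, abbreviated row in the same shape as the entries above.
row = {
    "query": "Download the COCO dataset/annotations if requested.",
    "document": "def auto_download(self, dataDir, dataType, dataYear): ...",
    "negatives": ["def download_coco_dataset(): ...", "def download_dataset(self): ..."],
    "negative_scores": [0.62512195, 0.6004767],
    "document_score": 0.64328516,
    "document_rank": 0,
}

def make_triplets(row, max_negatives=4):
    """Pair the query (anchor) and gold document (positive) with the hardest negatives."""
    ranked = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: pair[1],
        reverse=True,  # highest-scoring (hardest) negatives first
    )
    return [
        (row["query"], row["document"], negative)
        for negative, _score in ranked[:max_negatives]
    ]

triplets = make_triplets(row)
print(len(triplets), "triplets; hardest negative starts with:", triplets[0][2][:30])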
Download the COCO dataset/annotations if requested.
def auto_download(self, dataDir, dataType, dataYear): # Setup paths and file names if dataType == "minival" or dataType == "valminusminival": imgDir = "{}/{}{}".format(dataDir, "val", dataYear) imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear) imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear) else: imgDir = "{}/{}{}".format(dataDir, dataType, dataYear) imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear) imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear) # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL) # Create main folder if it doesn't exist yet if not os.path.exists(dataDir): os.makedirs(dataDir) # Download images if not available locally if not os.path.exists(imgDir): os.makedirs(imgDir) print("Downloading images to " + imgZipFile + " ...") with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out: shutil.copyfileobj(resp, out) print("... done downloading.") print("Unzipping " + imgZipFile) with zipfile.ZipFile(imgZipFile, "r") as zip_ref: zip_ref.extractall(dataDir) print("... done unzipping") print("Will use images in " + imgDir) # Setup annotations data paths annDir = "{}/annotations".format(dataDir) if dataType == "minival": annZipFile = "{}/instances_minival2014.json.zip".format(dataDir) annFile = "{}/instances_minival2014.json".format(annDir) annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0" unZipDir = annDir elif dataType == "valminusminival": annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir) annFile = "{}/instances_valminusminival2014.json".format(annDir) annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0" unZipDir = annDir else: annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear) annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear) annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear) unZipDir = dataDir # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL) # Download annotations if not available locally if not os.path.exists(annDir): os.makedirs(annDir) if not os.path.exists(annFile): if not os.path.exists(annZipFile): print("Downloading zipped annotations to " + annZipFile + " ...") with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out: shutil.copyfileobj(resp, out) print("... done downloading.") print("Unzipping " + annZipFile) with zipfile.ZipFile(annZipFile, "r") as zip_ref: zip_ref.extractall(unZipDir) print("... done unzipping") print("Will use annotations in " + annFile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the 
downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def get_coco_dataset():\n ds = AttrDict()\n # classes = [\n # '__background__', 'person', 'bicycle', 'car', 
'motorcycle', 'airplane',\n # 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n # 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n # 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n # 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n # 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n # 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n # 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n # 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n # 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n # 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n # 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n # 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n # ]\n # classes = ['__background__', 'lane']\n #\n base_classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n classes = ['__background__',\n 'guard rail',\n # 'car',\n 'dashed',\n 'solid',\n 'solid solid',\n 'dashed dashed',\n 'dashed-solid',\n 'solid-dashed',\n 'yellow dashed',\n 'yellow solid',\n 'yellow solid solid',\n 'yellow dashed dashed',\n 'yellow dashed-solid',\n 'yellow solid-dashed',\n 'boundary',\n 'fork_line',\n 'fork_edge',\n 'arrow_s',\n 'arrow_r',\n 'arrow_l',\n 'arrow_lr',\n 'arrow_inclined_r',\n 'arrow_r_s',\n 'arrow_l_s',\n 'sidewalk',\n 'handrail'\n ]\n base_classes.extend(classes[1:])\n classes = base_classes\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = 
df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def download_dataset(self):\n raise NotImplementedError", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")", "def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None,\n class_map=None, return_coco=False, auto_download=False):\n\n if auto_download is True:\n self.auto_download(dataset_dir, subset, year)\n\n coco = COCO(\"{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n if subset == \"minival\" or subset == \"valminusminival\":\n subset = \"val\"\n image_dir = \"{}/{}{}\".format(dataset_dir, subset, year)\n\n # Select class_ids from class_names:\n if class_names:\n class_ids = sorted(coco.getCatIds(catNms=class_names))\n\n # Load all classes or a subset?\n if not class_ids:\n # All classes\n class_ids = sorted(coco.getCatIds())\n\n # All images or a subset?\n if class_ids:\n image_ids = []\n for id in class_ids:\n imgs = [] # list of images to add to image_ids\n # Select at most COCO_IMAGES_PER_OBJECT and select only the images\n # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them:\n for imgid in list(coco.getImgIds(catIds=[id])):\n if len(imgs) >= COCO_IMAGES_PER_OBJECT:\n break\n if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE:\n imgs.append(imgid)\n image_ids.extend(imgs)\n #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT])\n # Remove duplicates\n image_ids = list(set(image_ids))\n else:\n # All images\n image_ids = list(coco.imgs.keys())\n\n # Add classes\n for i in class_ids:\n self.add_class(\"coco\", i, coco.loadCats(i)[0][\"name\"])\n\n # Add images\n for i in image_ids:\n #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))))\n self.add_image(\n \"coco\", image_id=i,\n path=os.path.join(image_dir, coco.imgs[i]['file_name']),\n width=coco.imgs[i][\"width\"],\n height=coco.imgs[i][\"height\"],\n annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))\n if return_coco:\n return coco", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, 
args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def get_coco_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def download(self):\n cloud_path = f\"gs://{const.GCS_BUCKET}/{self.GCS_PATH}\"\n # download label file\n label_zip = download_file_from_gcs(\n cloud_path, self.root, self.LABEL_ZIP\n )\n with zipfile.ZipFile(label_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)\n\n # download tfexamples for a dataset split\n tfexamples_zip = download_file_from_gcs(\n cloud_path, self.root, self.SPLITS_ZIP.get(self.split)\n )\n with zipfile.ZipFile(tfexamples_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def get_pronto_data():\n download_if_needed(\"https://s3.amazonaws.com/pronto-data/open_data_year_one.zip\",\n \"open_data_year_one.zip\")", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, 
progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def download_and_load(self, data_path=None):\n if data_path is None:\n data_path = 'data'\n\n if not self.check_files(data_path + '/cifar-10-batches-py'):\n self.download_and_extract(data_path=data_path)\n\n self.load_cifar10_data(data_path=data_path + '/cifar-10-batches-py')", "def __call__(self):\n\n dataset = TextOnlyCocoAnnotation()\n\n with open(self.path) as read_file:\n\n json_loaded = json.load(read_file)\n\n for i, value in tqdm(json_loaded['imgs'].items()):\n image_path = os.path.join(os.path.dirname(self.path), 'train2014',\n value['file_name'])\n dataset_type = value['set']\n\n if dataset_type not in self.sets:\n print(dataset_type)\n continue\n\n for annotation_id in json_loaded['imgToAnns'][i]:\n annotation_value = json_loaded['anns'][str(annotation_id)]\n word_annotation = self.parse_annotation_instance(annotation_value)\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])", "def generate_coco_dataset(args):\n\targs.data_root = Path(args.data_root)\n\targs.save_root = Path(args.save_root)\n\targs.save_root.mkdir()\n\n\tgenerate_coco_dataset_sub(args, 'train', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'train', 'B', args.cat2)\n\tgenerate_coco_dataset_sub(args, 'val', 'A', args.cat1)\n\tgenerate_coco_dataset_sub(args, 'val', 'B', args.cat2)", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def load(cfg, train_mode, split, shot, query,\n bs, test_bs, num_workers, pin_memory,\n ret_name=False):\n if train_mode == \"train\":\n dataset = COCOTrain(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=bs,\n shuffle=True,\n 
num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n else:\n dataset = COCOTest(cfg, split, shot, query, ret_name=ret_name)\n data_loader = DataLoader(dataset,\n batch_size=test_bs, # Large batch for evaluation\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=False)\n num_classes = 80\n return dataset, data_loader, num_classes", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def auto_download(dataDir, dataType, dataYear):\n\n # Setup paths and file names\n if dataType == \"minival\" or dataType == \"valminusminival\":\n imgDir = \"{}/{}{}\".format(dataDir, \"val\", dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, \"val\", dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(\"val\", dataYear)\n else:\n imgDir = 
\"{}/{}{}\".format(dataDir, dataType, dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, dataType, dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(dataType, dataYear)\n # print(\"Image paths:\"); print(imgDir); print(imgZipFile); print(imgURL)\n\n # Create main folder if it doesn't exist yet\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n\n # Download images if not available locally\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n print(\"Downloading images to \" + imgZipFile + \" ...\")\n with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + imgZipFile)\n with zipfile.ZipFile(imgZipFile, \"r\") as zip_ref:\n zip_ref.extractall(dataDir)\n print(\"... done unzipping\")\n print(\"Will use images in \" + imgDir)\n\n # Setup annotations data paths\n annDir = \"{}/annotations\".format(dataDir)\n if dataType == \"minival\":\n annZipFile = \"{}/instances_minival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_minival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0\"\n unZipDir = annDir\n elif dataType == \"valminusminival\":\n annZipFile = \"{}/instances_valminusminival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_valminusminival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0\"\n unZipDir = annDir\n else:\n annZipFile = \"{}/annotations_trainval{}.zip\".format(dataDir, dataYear)\n annFile = \"{}/instances_{}{}.json\".format(annDir, dataType, dataYear)\n annURL = \"http://images.cocodataset.org/annotations/annotations_trainval{}.zip\".format(dataYear)\n unZipDir = dataDir\n # print(\"Annotations paths:\"); print(annDir); print(annFile); print(annZipFile); print(annURL)\n\n # Download annotations if not available locally\n if not os.path.exists(annDir):\n os.makedirs(annDir)\n if not os.path.exists(annFile):\n if not os.path.exists(annZipFile):\n print(\"Downloading zipped annotations to \" + annZipFile + \" ...\")\n with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + annZipFile)\n with zipfile.ZipFile(annZipFile, \"r\") as zip_ref:\n zip_ref.extractall(unZipDir)\n print(\"... done unzipping\")\n print(\"Will use annotations in \" + annFile)", "def get_cora_filepath(download_if_not_exist=True):\n # type: (bool) -> Tuple[str, str]\n feat_cache_path, edge_cache_path = _get_cora_filepath()\n if not os.path.exists(feat_cache_path):\n if download_if_not_exist:\n is_successful = download_and_extract_cora(\n save_dirpath=os.path.dirname(feat_cache_path))\n if not is_successful:\n logger = getLogger(__name__)\n logger.warning('Download failed.')\n return feat_cache_path, edge_cache_path", "def generate_coco_dataset_sub(args, idx1, idx2, cat):\n\tdata_path = args.data_root / '{}2017'.format(idx1)\n\tanno_path = args.data_root / 'annotations/instances_{}2017.json'.format(idx1)\t# eg. anno_path is \"datasets/COCO/annotations/instances_train2017.json\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# or \"datasets/COCO/annotations/instances_val2017.json\"\n\tcoco = COCO(anno_path) # COCO API\n\n\n\timg_path = args.save_root / '{}{}'.format(idx1, idx2)\t\t# eg. 
img_path is \"datasets/shp2gir_coco/trainA\" or \"datasets/shp2gir_coco/trainB\"\n\tseg_path = args.save_root / '{}{}_seg'.format(idx1, idx2)\t# eg. img_path is \"datasets/shp2gir_coco/trainA_seg\" or \"datasets/shp2gir_coco/trainB_seg\"\n\timg_path.mkdir()\t\t\t\t\t\t\t\t\t\t\t# they are empty, therefore mkdir()s\n\tseg_path.mkdir()\n\n\tcat_id = coco.getCatIds(catNms=cat)\t\t# cat is \"sheep\" or \"giraffe\",get the category's id\n\timg_id = coco.getImgIds(catIds=cat_id)\t# get the ids of sheep/giraffe images,获得所有绵羊的图片id,或者所有长颈鹿的图片id\n\timgs = coco.loadImgs(img_id)\t\t\t# 获得所有绵羊的图片(很多张),或者所有长颈鹿的图片\n\n\t# tqdm表示进度条,progress\n\t# refer:https://tqdm.github.io/\n\tpb = tqdm(total=len(imgs))\n\tpb.set_description('{}{}'.format(idx1, idx2))\n\tfor img in imgs:\n\t\tann_ids = coco.getAnnIds(imgIds=img['id'], catIds=cat_id)\t# get annotation'id\n\t\tanns = coco.loadAnns(ann_ids)\t\t\t\t\t\t\t\t# get the annotation(many)\n\n\t\tcount = 0\n\t\tfor i in range(len(anns)):\t\t\t\t# 真正从标签生成mask的地方。\n\t\t\tseg = coco.annToMask(anns[i])\t\t# annotation to mask, the type is array now\n\t\t\tseg = Image.fromarray(seg * 255)\t# turn the seg array to seg image,each pix multi 255. why?\n\t\t\tseg = resize(seg, args.image_size)\t# resize the seg image\n\t\t\t# np.sum\n\t\t\tif np.sum(np.asarray(seg)) > 0:\t\t\t\t\t\t\t\t# 保存seg\n\t\t\t\tseg.save(seg_path / '{}_{}.png'.format(pb.n, count))\t# pb.n 表示?\n\t\t\t\tcount += 1\n\n\t\tif count > 0: # at least one instance exists\n\t\t\timg = Image.open(data_path / img['file_name'])\n\t\t\timg = resize(img, args.image_size)\n\t\t\timg.save(img_path / '{}.png'.format(pb.n))\n\n\t\tpb.update(1)\n\tpb.close()", "def _download_cxr_model(self):\n file_id = \"1KIsLmVv8jKTVG_LxchMZAvR7rugHy7uB\"\n download_from_google_drive(file_id=file_id, folder=\"data/\", name=\"covid_cxr.zip\")", "def _download_obm_data_from_cos(self, cos_client: 'resource') -> 'DataFrame':\n\n # note: fetch all OBM file part names\n cos_summary = cos_client.Bucket(self.location.bucket).objects.filter(Prefix=self._obm_cos_path)\n file_names = [file_name.key for file_name in cos_summary]\n\n # note: if path does not exist, try to find in different one\n if not file_names:\n cos_summary = cos_client.Bucket(self.location.bucket).objects.filter(\n Prefix=self._obm_cos_path.split('./')[-1])\n file_names = [file_name.key for file_name in cos_summary]\n # --- end note\n # --- end note\n\n # TODO: this can be done simultaneously (multithreading / multiprocessing)\n # note: download all data parts and concatenate them into one output\n parts = []\n for file_name in file_names:\n file = cos_client.Object(self.location.bucket, file_name).get()\n buffer = io.BytesIO(file['Body'].read())\n parts.append(try_load_dataset(buffer=buffer))\n\n data = concat(parts)\n # --- end note\n return data", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been 
saved.')\n else:\n log.info('USDA data file already exists.')", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def downloadCoreNLP():\n\tdeleteOldCoreNLP()\n\n\tglobal downloadDirectory\n\tcorenlpDir = kindred.utils._findDir(currentCoreNLPInfo['directory'],downloadDirectory)\n\tif corenlpDir is None:\n\t\tfiles = []\n\t\tfiles.append((currentCoreNLPInfo['url'],currentCoreNLPInfo['archive'],currentCoreNLPInfo['sha256']))\n\t\t\n\t\tprint(\"Downloading CoreNLP to %s\" % downloadDirectory)\n\t\tsys.stdout.flush()\n\t\tkindred.utils._downloadFiles(files,downloadDirectory)\n\t\tcorenlpDir = kindred.utils._findDir(currentCoreNLPInfo['directory'],downloadDirectory)\n\t\tassert not corenlpDir is None, \"Error after downloading, could not find corenlp directory\"\n\t\tprint (\"Download complete.\")\n\telse:\n\t\tprint (\"CoreNLP is already downloaded. 
No need to download\")", "def download(args):\n with_dataset(args, Dataset._download)", "def data_fetch_netcdf(self):\n self.client = boto3.client('s3', aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n year = self.month_year[0]\n month = self.month_year[1]\n # change output folder to desired location from TRMM website\n # folder structure to partitioned the data year_month\n output_temp = self.output_folder + year + '_' + month\n url_data = \"http://trmm.atmos.washington.edu/{}interp_data/{}/{}\".format(self.output_folder, year, month)\n print(url_data)\n start_time_year_month = time.time()\n r = requests.get(url_data, auth=self.auth_data)\n # check if url exists then extract netcdf links to download and upload to s3.\n if r.status_code == 200:\n soup = BeautifulSoup(r.text, features='lxml')\n for link in soup.findAll('a'):\n link_url = link.get('href')\n write_path = os.path.join(output_temp, link_url)\n if link_url.endswith('.nc4'):\n file_url = url_data + '/' + link_url\n r = requests.get(file_url, auth=self.auth_data, stream=True)\n if r.status_code == 200:\n self.client.put_object(Body=r.content, Bucket='himatdata', Key='Trmm/' + write_path)\n logging.info(\"Done with Year Month: %s\", month_year)\n print(\"--- %s seconds ---\" % (time.time() - start_time_year_month))\n\n else:\n print('No data/authentication for'.format(month_year))", "def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. 
(0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)", "def _get_mscoco(directory):\n for url in _MSCOCO_URLS:\n filename = os.path.basename(url)\n download_url = os.path.join(_MSCOCO_ROOT_URL, url)\n path = generator_utils.maybe_download(directory, filename, download_url)\n unzip_dir = os.path.join(directory, filename.strip(\".zip\"))\n if not tf.gfile.Exists(unzip_dir):\n zipfile.ZipFile(path, \"r\").extractall(directory)", "def download_country_data(\n url=default_url,\n filename=default_data_file,\n force=False\n):\n if not os.path.isfile(filename) or force:\n text = requests.get(url).text\n with open(filename, 'w') as fp:\n fp.write(text)", "def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + 
[train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict", "def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")", "def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n r = requests.Session().get(DATA_URL)\n with open(filepath, 'wb') as fd:\n for chunk in r.iter_content(500):\n fd.write(chunk)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def write_coco_json(filepath, dataset_dicts, name_to_id, **kwargs):\n info = {\n \"description\": kwargs.get(\"description\", \"\"),\n \"url\": kwargs.get(\"url\", \"\"),\n \"version\": kwargs.get(\"version\", \"0.0\"),\n \"year\": kwargs.get(\"year\", \"2017\"),\n \"contributor\": kwargs.get(\"contributor\", \"\"),\n \"date_created\": kwargs.get(\"date_created\", \"2017/01/01\"),\n }\n\n licenses = {\n \"url\": \"closed\",\n \"id\": 0,\n \"name\": \"closed\",\n }\n\n images, annotations = [], []\n annotation_id = 1\n for record in dataset_dicts:\n images.append({\n \"id\": record[\"image_id\"],\n \"width\": record[\"width\"],\n \"height\": record[\"height\"],\n \"file_name\": record[\"file_name\"]\n })\n\n for annotation in record[\"annotations\"]:\n x0, y0, x1, y1 = annotation[\"bbox\"]\n annotations.append({\n \"id\": annotation_id,\n \"category_id\": annotation[\"category_id\"],\n \"bbox\": [x0, y0, x1 - x0, y1 - y0],\n \"iscrowd\": annotation[\"iscrowd\"],\n \"image_id\": record[\"image_id\"],\n \"area\": (x1 - x0) * (y1 - y0),\n })\n annotation_id += 1\n\n categories = [{\n \"id\": category_id,\n \"name\": \"{}\".format(category_name),\n \"supercategory\": \"\"\n } for category_name, category_id in name_to_id.items()]\n\n coco_dict = {\n \"info\": info,\n \"licenses\": licenses,\n \"images\": images,\n \"annotations\": annotations,\n \"categories\": categories,\n }\n\n with filepath.open(mode=\"w\") as file_handle:\n json.dump(coco_dict, file_handle)", "def download_and_extract_data(tmp_dir, dataset):\n url = dataset[0]\n print(dataset)\n compressed_filename = os.path.basename(url)\n compressed_file = generator_utils.maybe_download(\n tmp_dir, 
compressed_filename, url)\n\n for file in dataset[1]:\n tf.logging.info(\"Reading file: %s\" % file)\n filepath = os.path.join(tmp_dir, file)\n\n # Extract from tar if needed.\n if not tf.gfile.Exists(filepath):\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n\n documents_filename, labels_filename = dataset[1]\n documents_filepath = os.path.join(tmp_dir, documents_filename)\n labels_filepath = os.path.join(tmp_dir, labels_filename)\n return documents_filepath, labels_filepath", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def prepare_dibco(data_dir=DEFAULT_DATA_DIR,\n out_dir=None,\n force=False):\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n if out_dir is None:\n out_dir = data_dir\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n train_record = 'train.record'\n test_record = 'test.record'\n num_train = 'num_train'\n num_test = 'num_test'\n\n if not (os.path.exists(os.path.join(data_dir, train_record)) and\n os.path.exists(os.path.join(data_dir, test_record)) and\n os.path.exists(os.path.join(data_dir, num_train)) and\n os.path.exists(os.path.join(data_dir, num_test))) or force:\n maybe_download(get_dibco_meta_data(data_dir)['url'], data_dir, force=force)\n\n extracted_dir = os.path.join(data_dir, DATA_EXTRACTED_DIR)\n\n with open(os.path.join(extracted_dir, 'train.txt')) as i_f:\n with open(os.path.join(data_dir, num_train), mode='w') as o_f:\n o_f.write(str(len(i_f.readlines())))\n train_writer = tf.python_io.TFRecordWriter(\n os.path.join(out_dir, train_record))\n for data in get_label_map_dict(\n extracted_dir, os.path.join(extracted_dir, 'train.txt')):\n example = dict_to_example(data)\n train_writer.write(example.SerializeToString())\n train_writer.close()\n\n with open(os.path.join(extracted_dir, 'test.txt')) as i_f:\n with open(os.path.join(data_dir, num_test), mode='w') as o_f:\n o_f.write(str(len(i_f.readlines())))\n val_writer = tf.python_io.TFRecordWriter(os.path.join(out_dir, test_record))\n for data in get_label_map_dict(\n extracted_dir, os.path.join(extracted_dir, 'test.txt')):\n example = dict_to_example(data)\n val_writer.write(example.SerializeToString())\n val_writer.close()\n print()\n\n return get_dibco_meta_data(data_dir)", "def download_compressed_dataset(url):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n n_images = 1000 if self.is_train else 500\n for i in 
tqdm(range(1, n_images + 1)):\n image_path = os.path.join(self.images_folder, 'img_{}.jpg'.format(i))\n annotation_path = os.path.join(self.annotations_folder, 'gt_img_{}.txt'.format(i))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path), self.parse_line(line))\n\n return dataset", "def prepare_val_coco_data(args):\n image_dir, annotation_file = args.val_coco_image_dir, args.val_coco_annotation_file\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name']))\n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n\n print(\"Building the validation dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return coco, dataset", "def uci_gotrack(redownload: bool = False) -> Dataset:\n return Dataset.get(\"uci_gotrack\", redownload=redownload)", "def convert_labelme_to_coco(path_to_data):\r\n # convert labelme annotations to coco\r\n labelme2coco.convert(path_to_data, path_to_data + r'\\coco_annotation.json')\r\n\r\n # Open the coco format data\r\n with open(path_to_data + r'\\coco_annotation.json') as f:\r\n coco_d = json.load(f)\r\n\r\n # Get the category IDs for each category and create a new \"categories\" section.\r\n categories = []\r\n # for category in coco_d['categories']:\r\n # if category['name'] == 'Bad':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": category['id'],\r\n # \"supercategory\": category['id'],\r\n # \"isthing\": 1,\r\n # \"color\": [222, 23, 1]\r\n # })\r\n # elif category['name'] == 'Good':\r\n # categories.append({\"id\": category['id'],\r\n # \"name\": \"Good\",\r\n # \"supercategory\": \"Good\",\r\n # \"isthing\": 1,\r\n # \"color\": [133, 23, 1]\r\n # })\r\n\r\n # Update the \"catogories\" section of the coco format data with the correct category IDs.\r\n # coco_d['categories'] = categories\r\n\r\n categories = []\r\n for cat in coco_d['categories']:\r\n cat['isthing'] = 1\r\n categories.append(cat['name'])\r\n\r\n # Fix the segmentation and bbox.\r\n for annot in coco_d['annotations']:\r\n annot['bbox_mode'] = 0\r\n seg = annot['segmentation'][0]\r\n annot['bbox'] = seg\r\n annot['segmentation'] = [[seg[0], seg[1], seg[0], seg[3], seg[2], seg[3], seg[2], seg[1]]]\r\n\r\n # Save the modified coco format data.\r\n with open(path_to_data + r'\\coco_annotation.json', 'w') as j:\r\n json.dump(coco_d, j, sort_keys=True, indent=4)\r\n\r\n # Show the images to the user to validate the annotations.\r\n # Register the image information.\r\n register_coco_instances(\"coco_visualise\", {}, path_to_data + r\"/coco_annotation.json\",\r\n path_to_data)\r\n MetadataCatalog.get(\"meta_visualise\").set(thing_classes=categories)\r\n # MetadataCatalog.get(\"meta_train\").set(thing_classes=[\"Bad\", \"Good\"], thing_colors=[(172, 0, 0), (229, 0, 0)])\r\n train_metadata = MetadataCatalog.get(\"meta_visualise\")\r\n coco_train_dataset = DatasetCatalog.get(\"coco_visualise\")\r\n\r\n st.write('Showing the randomly picked 5 images. 
Check if the annotation is correctly embedded.')\r\n # Randomly pick 5 images to show to the user to validate the annotations.\r\n for d in random.sample(coco_train_dataset, 5):\r\n im = Image.open(d['file_name'])\r\n im_array = np.asarray(im)\r\n v = Visualizer(im_array, metadata=train_metadata, instance_mode=ColorMode.SEGMENTATION, scale=0.5)\r\n v = v.draw_dataset_dict(d)\r\n pil_image = Image.fromarray(v.get_image())\r\n st.image(pil_image)\r\n # window = tk.Toplevel()\r\n # window.tkimage = ImageTk.PhotoImage(pil_image)\r\n # window.attributes('-topmost', True)\r\n # label = tk.Label(window, image=window.tkimage)\r\n # label.pack()\r\n # button_close = tk.Button(window, text=\"Close\", command=window.destroy)\r\n # button_close.pack(fill='x')\r\n\r\n # Confirm the annotations with user. If the annotations are correct, it will proceed further.\r\n # If not, it terminates the program.\r\n # if messagebox.askyesno(title=\"Validate Annotations\", message=\"Were all annotations correct?\"):\r\n # pass\r\n DatasetCatalog.clear()\r\n MetadataCatalog.clear()", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def fetch_pwc():\n for url in [\n \"https://production-media.paperswithcode.com/about/papers-with-abstracts.json.gz\",\n \"https://production-media.paperswithcode.com/about/links-between-papers-and-code.json.gz\",\n \"https://production-media.paperswithcode.com/about/evaluation-tables.json.gz\",\n \"https://production-media.paperswithcode.com/about/methods.json.gz\",\n \"https://production-media.paperswithcode.com/about/datasets.json.gz\",\n ]:\n logging.info(f\"Fetching and saving url {url}\")\n fetch_save(url)", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n if self.subset == 'train':\n image_paths, annotation_paths = self.collect_train_paths()\n elif self.subset == 'val':\n image_paths, annotation_paths = self.collect_val_paths()\n\n for image_path, annotation_path in tqdm(zip(image_paths, annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def _download_data_from_cos(self, cos_client: 'resource') -> 'DataFrame':\n\n try:\n file = cos_client.Object(self.location.bucket,\n self.location.path).get()\n except:\n file = list(cos_client.Bucket(self.location.bucket).objects.filter(\n Prefix=self.location.path))[0].get()\n\n buffer = io.BytesIO(file['Body'].read())\n data = try_load_dataset(buffer=buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n\n return data", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n image_paths, annotation_paths = self.collect_train_paths()\n\n for image_path, annotation_path 
in tqdm(zip(image_paths, annotation_paths)):\n word_annotations = []\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n word_annotations.append(self.parse_line(line))\n should_add = not self.is_latin_required\n if self.is_latin_required:\n for word_annotation in word_annotations:\n if word_annotation['attributes']['language'].lower() == 'latin':\n should_add = True\n break\n if should_add:\n for word_annotation in word_annotations:\n dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)\n\n return dataset", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def geolife(redownload: bool = False) -> Dataset:\n return Dataset.get(\"geolife\", redownload=redownload)", "def download_model_from_gcs(self):\n # download model\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.model_gcs_path,\n self.config.model_local_path)\n\n # download lable columns\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.labels_gcs_path,\n self.config.labels_local_path)", "def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)", "def process_coco(coco_file_path: str) -> (list, dict):\n coco_dict = load_json(coco_file_path)\n\n # rearrange coco file for better annotation reach\n images = list()\n for image in coco_dict[\"images\"]:\n image_annotations = list()\n for annotation in coco_dict[\"annotations\"]:\n if image[\"id\"] == annotation[\"image_id\"]:\n image_annotations.append(annotation)\n image[\"annotations\"] = image_annotations\n images.append(image)\n\n return images, coco_dict[\"categories\"]", "def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )", "def get_data(datadir):\n return sklearn.datasets.fetch_california_housing(\n datadir,\n return_X_y=True)", "def _load_jsons(self):\n items = []\n labels = []\n segms = []\n for split in self._splits:\n anno = os.path.join(self._root, 'annotations', split) + '.json'\n _coco = 
COCO(anno)\n self._coco.append(_coco)\n classes = [c['name'] for c in _coco.loadCats(_coco.getCatIds())]\n if not classes == self.classes:\n raise ValueError(\"Incompatible category names with COCO: \")\n assert classes == self.classes\n json_id_to_contiguous = {\n v: k for k, v in enumerate(_coco.getCatIds())}\n if self.json_id_to_contiguous is None:\n self.json_id_to_contiguous = json_id_to_contiguous\n self.contiguous_id_to_json = {\n v: k for k, v in self.json_id_to_contiguous.items()}\n else:\n assert self.json_id_to_contiguous == json_id_to_contiguous\n\n # iterate through the annotations\n image_ids = sorted(_coco.getImgIds())\n for entry in _coco.loadImgs(image_ids):\n filename = entry['file_name']\n dirname = split.split('_')[-1] # \"train\" or \"val\"\n abs_path = os.path.join(self._root, dirname, filename)\n if not os.path.exists(abs_path):\n raise IOError('Image: {} not exists.'.format(abs_path))\n label, segm = self._check_load_bbox(_coco, entry)\n # skip images without objects\n if self._skip_empty and label is None:\n continue\n items.append(abs_path)\n labels.append(label)\n segms.append(segm)\n return items, labels, segms", "def from_coco_folder(\n cls,\n data_dir: str,\n max_num_images: Optional[int] = None,\n cache_dir: Optional[str] = None,\n ) -> 'Dataset':\n cache_files = dataset_util.get_cache_files_coco(data_dir, cache_dir)\n if not dataset_util.is_cached(cache_files):\n label_map = dataset_util.get_label_map_coco(data_dir)\n cache_writer = dataset_util.COCOCacheFilesWriter(\n label_map=label_map, max_num_images=max_num_images\n )\n cache_writer.write_files(cache_files, data_dir)\n return cls.from_cache(cache_files.cache_prefix)", "def __download_nips4b_plus_annotations(self) -> None:\n\n nips4bplus_annotations_path = os.path.join(self.nips4bplus_folder, \"nips4bplus_annotations.zip\")\n\n logger.info(\"Download NIPS4BPlus label files...\")\n self.download_file(NIPS4BPlusDownloader.nips4bplus_annotations_url, nips4bplus_annotations_path,\n cache_subdir=\"nips4bplus\")\n\n with zipfile.ZipFile(nips4bplus_annotations_path, 'r') as zip_file:\n logger.info(\"Unzip NIPS4BPlus label files...\")\n zip_file.extractall(self.nips4bplus_folder)\n\n os.remove(nips4bplus_annotations_path)", "def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)", "def download_training_data(data_dir, task):\n\n COMMENTS_FILE = \"%s_annotated_comments.tsv\" % task\n LABELS_FILE = \"%s_annotations.tsv\" % task\n\n if task == \"attack\":\n download_file(ATTACK_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(ATTACK_ANNOTATIONS_URL, os.path.join(data_dir,\n LABELS_FILE))\n elif task == \"recipient_attack\":\n download_file(ATTACK_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(ATTACK_ANNOTATIONS_URL, os.path.join(data_dir,\n LABELS_FILE))\n elif task == \"aggression\":\n download_file(AGGRESSION_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(AGGRESSION_ANNOTATIONS_URL,\n os.path.join(data_dir, LABELS_FILE))\n elif task == \"toxicity\":\n 
download_file(TOXICITY_ANNOTATED_COMMENTS_URL,\n os.path.join(data_dir, COMMENTS_FILE))\n download_file(TOXICITY_ANNOTATIONS_URL,\n os.path.join(data_dir, LABELS_FILE))\n else:\n print(\"No training data for task: \", task)", "def __call__(self, *args, **kwargs):\n\n dataset = TextOnlyCocoAnnotation()\n\n for image_name in tqdm(sorted(os.listdir(self.folder))):\n if image_name.endswith('JPG'):\n image_path = os.path.join(self.folder, image_name)\n annotation_path = os.path.join(self.folder, image_name.replace('.JPG', '.gt'))\n\n with open(annotation_path, encoding='utf-8-sig') as read_file:\n content = [line.strip() for line in read_file.readlines()]\n for line in content:\n dataset.add_bbox(image_path, imagesize.get(image_path),\n self.parse_line(line))\n\n return dataset", "def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)", "def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts", "def main():\n get_obofoundry(force_download=True)", "def getMpcorb(url='https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT.gz', fname='MPCORB.DAT.gz', verbose=True):\n\n #filename = wget.download(url)\n try:\n r = requests.get(url, allow_redirects=True)\n open(fname, 'wb').write(r.content)\n if (verbose):\n print('Download complete:', url)\n except:\n print(\"Error in getMpcorb: could not download \", fname, \" at \", url)\n raise\n return", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def download_caffe_model(model_name, meta_info, dst_dir='./model'):\n if not os.path.isdir(dst_dir):\n os.mkdir(dst_dir)\n model_name = os.path.join(dst_dir, model_name)\n assert 'prototxt' in meta_info, \"missing prototxt url\"\n prototxt = mx.test_utils.download(meta_info['prototxt'], model_name+'_deploy.prototxt')\n assert 'caffemodel' in meta_info, \"mssing caffemodel url\"\n caffemodel = mx.test_utils.download(meta_info['caffemodel'], model_name+'.caffemodel')\n assert 'mean' in meta_info, 'no mean info'\n mean = 
meta_info['mean']\n if isinstance(mean, str):\n mean = mx.test_utils.download(mean, model_name+'_mean.binaryproto')\n return (prototxt, caffemodel, mean)", "def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' ', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not 
unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def fetch(data_dir, dest=\"aida\"):\n\n # Get CoNLL03\n conll_dir = conll03.fetch(data_dir)\n\n # Create folder\n aida_dir = os.path.join(data_dir, dest)\n utils.create_folder(aida_dir)\n\n # Download AIDA\n aida_file = os.path.join(aida_dir, AIDA_FILE)\n if not os.path.exists(aida_file):\n utils.urlretrieve(AIDA_URL, aida_file)\n\n # Extract annotations\n final_dir = os.path.join(aida_dir, AIDA_NAME)\n if not os.path.exists(final_dir):\n with zipfile.ZipFile(aida_file, \"r\") as aida:\n aida.extractall(aida_dir)\n\n # Run AIDA script\n final_file = os.path.join(final_dir, AIDA_FINAL_FILE)\n if not os.path.exists(final_file):\n os.chdir(final_dir)\n subprocess.call(AIDA_SCRIPT.format(conll_dir), shell=True)\n\n return final_dir", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def _get_cla_raw_data(cla_document_url):\n data = ''\n error = None\n try:\n r = requests.get(cla_document_url)\n except Exception as error:\n pass\n\n if error is None and r.status_code in [200]:\n data = r.text\n\n return data", "def download_style_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTER_STYLE\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENT_STYLE\"], \"nextcloud\")", "def download_potholes():\n\n\tlink = \"https://data.cityofchicago.org/api/views/7as2-ds3y/rows.csv?accessType=DOWNLOAD\"\n\tdf = pd.read_csv(link)\n\tdf = df[(df.STATUS == \"Open\") | (df.STATUS == \"Open - Dup\")]\n\tdf = df[[\"LATITUDE\", \"LONGITUDE\"]]\n\tdf = df.dropna(axis =0, subset=[\"LATITUDE\", \"LONGITUDE\"])\n\treturn df", "def download_chicago_graph():\n\n\tG = ox.graph_from_place(\"Chicago,IL, United States\", network_type='drive')\n\treturn G", "def get_ecg_data_annotations(database_name, is_debug=False):\n\t\n\tdata_annotations_set = []\n\tfile_name_set = None\n\tno_apn = None\n\t\n\tif database_name[0] == \"apnea-ecg\":\n\t\troot_file_path = APNEA_ECG_DATABASE_PATH\n\t\tif database_name[1] == \"train\":\n\t\t\tfile_name_set = APNEA_ECG_TRAIN_FILENAME\n\t\t\tno_apn = False\n\t\telif database_name[1] == \"test\":\n\t\t\tfile_name_set = APNEA_ECG_TEST_FILENAME\n\t\t\tno_apn = True\n\t\n\t# if database name is test, we first read label file\n\ttest_label_set = []\n\tif no_apn is True:\n\t\t# read event-2.txt, which is test label downloading from PhysioNet\n\t\ttest_annotation_path = root_file_path + \"event-2.txt\"\n\t\twith open(test_annotation_path) as f:\n\t\t\tlines = f.readlines()\n\t\t\tfor line in lines:\n\t\t\t\tline = line.replace(\"\\n\", \"\")\n\t\t\t\tfor index_str in range(len(line)):\n\t\t\t\t\tif line[index_str] == \"A\" or line[index_str] == \"N\":\n\t\t\t\t\t\ttest_label_set.append(line[index_str])\n\t\n\tfile_count = 0 # use when the 
database name is test.\n\ttest_label_index = 0 # use when the database name is test.\n\tfor name in file_name_set:\n\t\tif is_debug:\n\t\t\tprint(\"process file \" + name + \"...\")\n\t\t\n\t\tfile_path = root_file_path + name\n\t\tecg_data = wfdb.rdrecord(file_path) # use wfdb.rdrecord to read data\n\t\t\n\t\tif no_apn is False:\n\t\t\t# use wfdb.rdann to read annotation\n\t\t\tannotation = wfdb.rdann(file_path, \"apn\")\n\t\t\t# annotation range\n\t\t\tannotation_range_list = annotation.sample\n\t\t\t# annotation\n\t\t\tannotation_list = annotation.symbol\n\t\telse:\n\t\t\tannotation_range_list = []\n\t\t\tannotation_list = []\n\t\t\tfor index_label in range(TEST_LABEL_AMOUNT[file_count]):\n\t\t\t\tannotation_range_list.append(np.array(index_label * 6000))\n\t\t\t\tannotation_list.append(test_label_set[test_label_index])\n\t\t\t\ttest_label_index += 1\n\t\t\tfile_count += 1\n\t\t\tannotation_range_list = np.array(annotation_range_list)\n\t\t\n\t\tdata_annotations_set.append([ecg_data, annotation_range_list, annotation_list, name])\n\t\n\treturn data_annotations_set", "def __init__(self, image_set, root_path, data_path, category='all', task='detection'):\n super(coco, self).__init__('COCO', image_set, root_path, data_path)\n self.root_path = root_path\n self.data_path = data_path\n self.category = category\n self.task = task\n self.name = self.name + '_' + category\n # deal with data name\n view_map = {'minival2014': 'val2014',\n 'valminusminival2014': 'val2014'}\n self.data_name = view_map[image_set] if image_set in view_map else image_set", "def download_dataset(base_dir, scene):\n\n # setup depends on dataset\n if len(scene.split('_')) == 1: # default\n modality, part = None, None # declaration necessary for instatiation check\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene]['gt']['name'])\n \n elif len(scene.split('_')) == 3: # AeroRIT\n scene, modality, part = scene.split('_')\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['gt']['name'])\n else :\n raise RuntimeError('Given scene unknown!')\n\n base_dir.mkdir(parents=True, exist_ok=True)\n\n # download data and load from file\n if filepath_data.suffix == '.mat': # datasets from ehu.es\n if not filepath_data.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_data)) as t:\n url = DATASETS_CONFIG[scene]['img']['url']\n urlretrieve(url, filename=filepath_data, reporthook=t.update_to)\n\n if not filepath_labels.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_labels)) as t:\n url = DATASETS_CONFIG[scene]['gt']['url']\n urlretrieve(url, filename=filepath_labels, reporthook=t.update_to)\n \n data = loadmat(filepath_data)[DATASETS_CONFIG[scene]['img']['key']]\n labels = loadmat(filepath_labels)[DATASETS_CONFIG[scene]['gt']['key']]\n\n elif filepath_data.suffix == '.tif': # aerorit\n if not filepath_data.is_file(): # download image if necessary\n print(\"Downloading {}\".format(filepath_data))\n url = DATASETS_CONFIG[scene][modality]['img']['url']\n gdown.download(url=url, output=str(filepath_data), quiet=False)\n\n if not filepath_labels.is_file(): # download labels if necessary\n print(\"Downloading 
{}\".format(filepath_labels))\n url = DATASETS_CONFIG[scene][modality]['gt']['url']\n gdown.download(url=url, output=str(filepath_labels), quiet=False)\n \n # extract part of image as defined in Rangnekar et al.\n base_dir = base_dir.joinpath(modality).joinpath(part)\n base_dir.mkdir(parents=True, exist_ok=True)\n \n # check early if data exists already to avoid unecessarily loading and encoding data\n filepath_hdf = base_dir.joinpath(f'aerorit_{modality}_{part}.h5')\n if filepath_hdf.is_file():\n return filepath_hdf\n\n # extract defined part of dataset\n start_col = DATASETS_CONFIG[scene][part]['start_col']\n end_col = DATASETS_CONFIG[scene][part]['end_col']\n \n data = np.transpose(io.imread(filepath_data), (1,2,0))[53:,7:,:]\n data = data[:, start_col:end_col, :]\n\n labels = encode_labelmap(io.imread(filepath_labels), AERORIT_COLOURLABELMAP)[53:,7:]\n labels = labels[:, start_col:end_col]\n filepath_data = filepath_hdf\n\n filepath_hdf = filepath_data.with_suffix('.h5')\n \n # export data and labels to hdf\n if not filepath_hdf.is_file():\n with h5py.File(filepath_hdf, \"w\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"labels\", data=labels)\n f.attrs['scene'] = scene\n if not modality is None:\n f.attrs['modality'] = modality\n if not part is None:\n f.attrs['part'] = part\n return filepath_hdf\n\n return filepath_hdf", "def download_cola(dir_path: str, dest_folder: str='data') -> None:\n\n dest_path = os.path.join(dir_path, f'../{dest_folder}')\n if not os.path.isdir(dest_path):\n os.mkdir(dest_path)\n\n url = 'https://nyu-mll.github.io/CoLA/cola_public_1.1.zip'\n\n if not os.path.exists('./cola_public_1.1.zip'):\n wget.download(url, './cola_public_1.1.zip')\n\n try:\n with zipfile.ZipFile('cola_public_1.1.zip') as z:\n z.extractall(dest_path)\n os.remove('./cola_public_1.1.zip')\n except:\n logger.info('zip extraction failed')", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def load_data(nlp, cue_verbs, poly):\n train_dicts, _ = load_quote_authors(nlp)\n author_prediction_dataset = AuthorPredictionDataset(train_dicts, cue_verbs, poly)\n return np.array(train_dicts), author_prediction_dataset", "def load_data(self, bpod_only=False, download_data=True):\n self.extractor = TaskQCExtractor(\n self.session_path, one=self.one, download_data=download_data, bpod_only=bpod_only)", "def fetch_community_crime_data(dpath='/tmp/glm-tools'):\n if os.path.exists(dpath):\n shutil.rmtree(dpath)\n os.mkdir(dpath)\n\n fname = os.path.join(dpath, 
'communities.csv')\n base_url = (\"http://archive.ics.uci.edu/ml/machine-learning-databases\")\n url = os.path.join(base_url, \"communities/communities.data\")\n urllib.urlretrieve(url, fname)\n\n # Read in the file\n df = pd.read_csv('/tmp/glm-tools/communities.csv', header=None)\n\n # Remove missing values\n df.replace('?', np.nan, inplace=True)\n df.dropna(inplace=True, axis=1)\n df.dropna(inplace=True, axis=0)\n df.reset_index(inplace=True, drop=True)\n\n # Extract predictors and target from data frame\n X = np.array(df[df.keys()[range(3, 102)]])\n y = np.array(df[127])\n\n return X, y", "def download():\n raise NotImplementedError", "def download_dataset(dataset):\n\n if dataset not in URLS:\n print(f\"unknown dataset {dataset}\")\n sys.exit(0)\n\n filename = f'{dataset}.tar.gz'\n url = URLS[dataset]\n\n if not os.path.exists(filename):\n print(f'downloading dataset \"{dataset}\"')\n os.system(f'curl \"{url}\" -o {filename}')\n else:\n print(f'zipfile \"{filename}\" already exists, remove it if you want to re-download.')\n\n if not os.path.exists(dataset):\n print(f'extracting \"{filename}\"')\n os.system(f'tar -xvf {filename}')\n else:\n print(f'folder \"{dataset}\" already exists, remove it if you want to re-create.')\n\n image_chips = f'{dataset}/image-chips'\n label_chips = f'{dataset}/label-chips'\n if not os.path.exists(image_chips) and not os.path.exists(label_chips):\n print(\"creating chips\")\n libs.images2chips.run(dataset)\n else:\n print(f'chip folders \"{image_chips}\" and \"{label_chips}\" already exist, remove them to recreate chips.')", "def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")", "def cli(ctx, dataset_collection_id, file_path):\n return ctx.gi.dataset_collections.download_dataset_collection(dataset_collection_id, file_path)", "def fetch_scil_b0():\n zipname = 'datasets_multi-site_all_companies'\n url = 'http://scil.dinf.usherbrooke.ca/wp-content/data/'\n uraw = url + zipname + '.zip'\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, zipname)\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading SCIL b=0 datasets from multiple sites and multiple companies (9.2MB)...')\n opener = urlopen(uraw)\n open(folder+'.zip', 'wb').write(opener.read())\n\n print('Unziping '+folder+'.zip ...')\n zip = zipfile.ZipFile(folder+'.zip', 'r')\n zip.extractall(dipy_home)\n\n print('Done.')\n print('Files copied in folder %s' % dipy_home)\n else:\n print('Dataset already in place. 
If you want to fetch again please first remove folder %s ' % dipy_home)", "def get_criteo(root):\n\n url = 'https://s3-eu-west-1.amazonaws.com/kaggle-display-advertising-challenge-dataset/dac.tar.gz'\n\n raw_folder = os.path.join(root, 'criteo', 'raw')\n processed_folder = os.path.join(root, 'criteo', 'processed')\n makedir_exist_ok(raw_folder)\n makedir_exist_ok(processed_folder)\n\n # download files and extract\n filename = url.rpartition('/')[2]\n print('Downloading...')\n download_url(url, root=raw_folder, filename=filename, md5=None)\n print('Extracting...')\n extract_file(os.path.join(raw_folder, filename), processed_folder)\n print('Done!')\n return Path(processed_folder)", "def load_corpus(name, download=True):\n\n # Get the path from the datasets\n path = corpora[name]\n\n # Check if the data exists, otherwise download or raise\n if not os.path.exists(path):\n raise ValueError((\n \"'{}' dataset has not been downloaded, \"\n \"use the download.py module to fetch datasets\"\n ).format(name))\n\n # Read the directories in the directory as the categories.\n categories = [\n cat for cat in os.listdir(path)\n if os.path.isdir(os.path.join(path, cat))\n ]\n\n files = [] # holds the file names relative to the root\n data = [] # holds the text read from the file\n target = [] # holds the string of the category\n\n # Load the data from the files in the corpus\n for cat in categories:\n for name in os.listdir(os.path.join(path, cat)):\n files.append(os.path.join(path, cat, name))\n target.append(cat)\n\n with open(os.path.join(path, cat, name), 'r') as f:\n data.append(f.read())\n\n # Return the data bunch for use similar to the newsgroups example\n return Bunch(\n categories=categories,\n files=files,\n data=data,\n target=target,\n )" ]
[ "0.72907704", "0.71969664", "0.64465344", "0.633483", "0.61967856", "0.61923695", "0.59949607", "0.59754235", "0.5947754", "0.5928412", "0.5897793", "0.5720691", "0.5706716", "0.56930333", "0.56358683", "0.5610616", "0.5585786", "0.55818254", "0.553139", "0.55226374", "0.5510653", "0.55095816", "0.54824907", "0.5469402", "0.54686177", "0.54675907", "0.5464698", "0.5413597", "0.5409973", "0.5400305", "0.5383455", "0.535352", "0.53407115", "0.5337841", "0.53377503", "0.53314686", "0.53274435", "0.5325736", "0.53135026", "0.52970976", "0.5294845", "0.5268901", "0.52564293", "0.5242378", "0.52411807", "0.52399486", "0.52318794", "0.5230229", "0.52268696", "0.5220075", "0.5190809", "0.5190769", "0.51866853", "0.5186039", "0.518067", "0.5160958", "0.5154563", "0.51542425", "0.5152947", "0.51522565", "0.5144056", "0.51415336", "0.51404566", "0.51374155", "0.51370597", "0.5134659", "0.51301986", "0.5128014", "0.51212674", "0.5118509", "0.5118477", "0.5117328", "0.50793725", "0.50785166", "0.5070758", "0.50681955", "0.5064408", "0.50508577", "0.50456464", "0.50456035", "0.5037059", "0.5027787", "0.50178367", "0.5014288", "0.5010816", "0.50088537", "0.50069934", "0.50064456", "0.49916708", "0.49906924", "0.4984941", "0.49808872", "0.49767005", "0.49637598", "0.49552423", "0.49491748", "0.49451163", "0.49426264", "0.49347878", "0.49289507" ]
0.51952803
50
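A hedged reading of the three score fields above (the dump itself does not define them): document_rank appears to count how many entries in negative_scores the retriever scores above the paired document; for this record, 50 of the listed negative_scores exceed the document_score of 0.51952803, which matches the rank of 50. A minimal sketch of that computation, using a few scores copied from the record:

def rank_of_document(document_score, negative_scores):
    # Rank = number of negatives scored higher than the paired document (assumed convention).
    return sum(score > document_score for score in negative_scores)

# Illustrative call with three scores taken from the list above.
print(rank_of_document(0.51952803, [0.72907704, 0.71969664, 0.50178367]))  # -> 2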
Load instance masks for the given image. Different datasets use different ways to store masks. This function converts the different mask formats to one format in the form of a bitmap [height, width, instances].
def load_mask(self, image_id): # If not a COCO image, delegate to parent class. image_info = self.image_info[image_id] if image_info["source"] != "coco": return super(ExtendedCocoDataset, self).load_mask(image_id, common.COCO_NUM_CLASSES) # NOTE: this calls ActivityDataset.load_mask() instance_masks = [] class_ids = [] annotations = self.image_info[image_id]["annotations"] # Build mask of shape [height, width, instance_count] and list # of class IDs that correspond to each channel of the mask. for annotation in annotations: class_id = self.map_source_class_id( "coco.{}".format(annotation['category_id'])) if class_id: m = self.annToMask(annotation, image_info["height"], image_info["width"]) # Some objects are so small that they're less than 1 pixel area # and end up rounded out. Skip those objects. if m.max() < 1: continue # Is it a crowd? If so, use a negative class ID. if annotation['iscrowd']: # Use negative class ID for crowds class_id *= -1 # For crowd masks, annToMask() sometimes returns a mask # smaller than the given dimensions. If so, resize it. if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]: m = np.ones([image_info["height"], image_info["width"]], dtype=bool) instance_masks.append(m) class_ids.append(class_id) # Pack instance masks into an array if class_ids: mask = np.stack(instance_masks, axis=2).astype(np.bool) class_ids = np.array(class_ids, dtype=np.int32) return mask, class_ids else: # Call super class to return an empty mask return super(CocoDataset, self).load_mask(image_id)
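The document above returns a boolean bitmap of shape [height, width, instance_count] plus a parallel class_ids array. The following standalone sketch (toy data, not taken from the dataset record) shows how that output format is typically consumed, for example to derive per-instance bounding boxes:

import numpy as np

def instance_bboxes(mask):
    # mask: [H, W, N] boolean bitmap, one channel per instance.
    boxes = np.zeros((mask.shape[-1], 4), dtype=np.int32)
    for i in range(mask.shape[-1]):
        ys, xs = np.where(mask[:, :, i])
        if ys.size:  # leave all-zero boxes for empty masks
            boxes[i] = [ys.min(), xs.min(), ys.max() + 1, xs.max() + 1]
    return boxes

# Toy example with two 8x8 instances.
mask = np.zeros((8, 8, 2), dtype=bool)
mask[1:3, 1:4, 0] = True
mask[4:7, 5:8, 1] = True
class_ids = np.array([1, 2], dtype=np.int32)
print(instance_bboxes(mask))  # [[1 1 3 4] [4 5 7 8]]
print(class_ids)              # [1 2]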
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
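The metadata's triplet objective names (query, document, negatives) suggest the record is intended for contrastive training of a retriever. As an illustration only, and not the training code behind this dump, a margin ranking loss over embedding similarities could be sketched as follows:

import numpy as np

def triplet_margin_loss(q, pos, negs, margin=0.2):
    # q: [d] query embedding, pos: [d] positive-document embedding, negs: [n, d] negative embeddings.
    pos_sim = float(q @ pos)
    neg_sims = negs @ q                              # [n] similarities to negatives
    losses = np.maximum(0.0, margin - pos_sim + neg_sims)
    return float(losses.mean())

rng = np.random.default_rng(0)
q, pos = rng.normal(size=8), rng.normal(size=8)
negs = rng.normal(size=(5, 8))
print(triplet_margin_loss(q, pos, negs))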
[ "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(\n os.path.dirname(os.path.dirname(info['path'])), 'masks')\n\n # Read mask files from .png image\n masks = []\n for file in next(os.walk(mask_dir))[2]:\n if file.endswith('.png'):\n mask = imread(os.path.join(mask_dir, file),\n as_gray=True).astype(np.bool)\n masks.append(mask)\n masks = np.stack(masks, axis=-1)\n # Return masks, and array of class IDs of each instance. Since we have\n # one class ID, we return an array of ones\n return masks, np.ones([masks.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n rr, cc = skimage.draw.polygon(p[1], p[0])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n image_name = str(info['id']).zfill(6) + \"_10.png\"\n gt_image = imageio.imread(\"./training/instance/\" + image_name)\n instance_gt = np.array(gt_image) % 256\n semantic_gt = np.array(gt_image) // 256\n instance_gt = cv2.resize(instance_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n semantic_gt = cv2.resize(semantic_gt, (self.width, self.height), interpolation=cv2.INTER_NEAREST)\n labels = [26, 24]\n masks = []\n class_ids = []\n for l in labels:\n mask_sem = (semantic_gt == [l]).astype(np.int_) * 255\n mask_ins = instance_gt & mask_sem\n num_ins = np.max(mask_ins)\n if(num_ins > 30):\n print(\"WARNING: num ins %d for label l %d\" % (num_ins, l))\n\n for i in range(1, num_ins + 1):\n mask_obj = (mask_ins == [i]).astype(np.int_) * 255\n masks.append(mask_obj)\n if l == 24:\n class_ids.append(2)\n else:\n class_ids.append(1)\n masks = np.array(masks)\n masks = np.moveaxis(masks, 0, -1)\n class_ids = np.array(class_ids)\n return masks, class_ids", "def load_mask(self, image_id):\n # If not a vesicle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"vesicle\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert 16 bit mask to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask_path = info['mask_path']\n mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE + cv.IMREAD_ANYDEPTH)\n bin_mask = get_bin_mask(mask)\n n_instance = bin_mask.shape[-1]\n return bin_mask, np.ones([n_instance], dtype=np.int32)", "def load_mask(self, image_id):\n # TODO: build dict **self.image_info** in this form\n # self.image_info.keys() = ['objects', 'imgWidth', 'imgHeight']\n # objects is a list which contains label and polygon (same as annotations form below)\n # imgHeight and imgWidth are numbers (usually 1024, 2048)\n annotations = self.image_info[image_id][\"objects\"]\n # annotations form: [{'label': label, 'polygon': [[x1,y1], [x2,y2] ...]}, ...]\n height = self.image_info[image_id]['imgHeight']\n width = self.image_info[image_id]['imgWidth']\n instance_masks = []\n class_ids = []\n for ann in annotations:\n m = self.annToMask(ann, height, width)\n \n label_tmp = ann['label']\n if ( not label_tmp in list(self.class_labels.keys()) ) and label_tmp.endswith('group'):\n label_tmp = label_tmp[:-len('group')]\n \n class_id = self.class_labels[label_tmp]\n instance_masks.append(m)\n class_ids.append(class_id)\n \n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids)\n \n return mask, class_ids", 
"def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return mask, class_ids", "def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids", "def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"self_annotation\":\n super(CarsAndVehiclesDataset, self).load_mask(image_id)\n\n instance_masks = []\n class_ids = []\n annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n for annotation in annotations:\n class_id = self.map_source_class_id(\"self_annotation.{}\".format(annotation[\"category_id\"]))\n\n if class_id:\n m = self.annToMask(annotation, image_info[\"height\"], image_info[\"width\"])\n\n # Some objects are so small that they're less than 1 pixel area\n # and end up rounded out. Skip those objects.\n if m.max() < 1:\n continue\n # Is it a crowd? If so, use a negative class ID\n if annotation[\"iscrowd\"]:\n # Use negative class ID for crowds\n class_id *= -1\n # For crowd masks, annToMask() sometimes returns a mask\n # smaller than the given dimensions. If so, resize it.\n if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n m = np.ones(image_info[\"height\"], image_info[\"width\"], dtype=bool)\n instance_masks.append(m)\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if class_ids:\n mask = np.stack(instance_masks, axis=2).astype(np.bool)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n return super(CarsAndVehiclesDataset, self).load_mask(image_id)", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n global iter_num\n print(\"image_id\",image_id)\n info = self.image_info[image_id]\n count = 1 # number of object\n img = Image.open(info['mask_path'])\n num_obj = self.get_obj_index(img)\n mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img,image_id)\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n \n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n labels = []\n labels = self.from_yaml_get_class(image_id)\n labels_form = []\n for i in range(len(labels)):\n if labels[i].find(\"bird\") != -1:\n # print \"bird\"\n labels_form.append(\"bird\")\n class_ids = np.array([self.class_names.index(s) for s in labels_form])\n return mask, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids", "def load_mask(self, image_id, coco_offset=0):\n\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(self.json_data[info[\"id\"]])], dtype=np.uint8)\n lbls = np.zeros(len(self.json_data[info[\"id\"]]), dtype=np.int32)\n\n for idx, (mask_path, mask_info) in enumerate(self.json_data[info[\"id\"]].items()):\n mask_class = mask_info[\"class\"]\n mask[:,:,idx] = np.array(PIL.Image.open(mask_path), dtype=np.uint8)\n lbls[idx] = common.activity_classes_names.index(mask_class) + 1 + coco_offset\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), lbls", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n num_cards = info['cards']\n # count = len(num_cards)\n count = 1 # there will only ever be 1 card per image (for simplicity) TODO: do multiple documents?\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n # for i, _ in enumerate(info['cards']):\n mask[:, :, 0] = self.draw_quadrilateral(mask[:, :, 0].copy(), info['cornerpoints'], 1)\n\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n # class_ids = np.array([self.class_names.index(s[0]) for s in num_categories])\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = tifffile.imread(self.mask_path[self.ids[image_id]])\r\n\r\n if np.unique(mask).__len__() > 1:\r\n count = np.unique(mask).__len__()-1 # one less because of 0\r\n\r\n mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8) # one more for background\r\n running = 0\r\n for i in np.unique(mask): #range(1, count):\r\n if ((i > 0) & ((mask == i).sum() > 0)):\r\n mask_new[:, :, running] = (mask == i)\r\n running = running + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count)\r\n else:\r\n mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)\r\n class_ids = np.zeros([1])\r\n return mask_new, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)", "def load_mask(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['height'], info['width'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count) # one more fore background\r\n #add Background\r\n class_ids[0] = 0; # Background\r\n mask_new[:, :, 0] = np.invert(mask_total.astype(np.bool))\r\n # End add Background\r\n\r\n return mask_new, class_ids.astype(np.int32)", "def load_mask(self, image_id):\r\n info = self.image_info[image_id]\r\n mask = self.masks[image_id]\r\n count = int(mask.max())\r\n mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background\r\n for i in range(count+1):\r\n #mask_new[:, :, i:i+1] = (mask == i).transpose(1, 2, 0)\r\n mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)\r\n # mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)\r\n # Map class names to class IDs.\r\n class_ids = np.ones(count+1) # one more fore background\r\n\r\n #add Background\r\n #class_ids[count] = 0 # add Background\r\n #mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)\r\n #class_ids[count] = 0 # add Background\r\n class_ids[0] = 0 # add Background\r\n # End add Background\r\n\r\n return 
mask_new, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n ships = info['ships']\n count = len(ships)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (ship, dims) in enumerate(info['ships']):\n mask[:, :, i:i + 1] = self.draw_mask(mask[:, :, i:i + 1].copy(),\n ship, dims)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(\n occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in ships])\n return mask, class_ids.astype(np.int32)", "def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n\n shapes = info['polygons']\n\n for i, p in enumerate(info['polygons']):\n shape = p['shape_attributes']['name']\n mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),\n shape, p, 1)\n\n # Map class names to class IDs.\n if (self.config.MODE == \"Combined\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['element_type'])\n if 'element_type' in s['region_attributes'].keys() else self.class_names.index('door') for s in shapes])\n elif (self.config.MODE == \"Separate\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['Class']) if 'Class' in s['region_attributes'].keys(\n ) else self.class_names.index('Door (Curve)') for s in shapes])\n\n return mask, class_ids.astype(np.int32)", "def load_ela(self, image_id):\n # info = self.image_info[image_id]\n # # Get mask directory from image path\n # ela_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"elas\")\n\n # # Read mask files from .png image\n # ela = []\n # # for f in next(os.walk(mask_dir))[2]:\n # e = skimage.io.imread(os.path.join(ela_dir, info['id']+'.jpg')).astype(np.float32)\n # ela.append(e)\n # # print(mask)\n # ela = np.stack(ela, axis=-1)\n # # Return mask, and array of class IDs of each instance. Since we have\n # # one class ID, we return an array of ones\n # return ela\n info = self.image_info[image_id]\n ela_dir=os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"elas\")\n image = skimage.io.imread(os.path.join(ela_dir, info['id']+\".jpg\"))\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def load_mask_custom(self, image_id, image_shape):\n info = self.image_info[image_id]\n filePaths = info['maskPaths']\n classes = info['maskClasses']\n \n masks = []\n class_ids = []\n if(len(image_shape)==3):\n image_shape = image_shape[:2]\n \n # 1 filePath -- 1 class \n for i, filePath in enumerate(filePaths):\n \n if filePath.endswith(\".png\"):\n mask = cv2.imread(filePath, 0)\n mask = np.asarray(mask, dtype = \"uint8\")\n \n masks.append(mask)\n class_ids.append(classes[i])\n \n if len(masks)==0 :\n masks.append(np.zeros(image_shape, dtype = \"uint8\"))\n class_ids.append(0)\n \n image = np.stack(masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return image, class_ids", "def load_mask(self, image_id):\n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n height = info['height']\n width = info['width']\n mag_path = os.path.join(patch_path,\"mag\")\n tissue_path = os.path.join(patch_path,\"tissue\")\n \n # collect mask names\n \n mag_mask_list = os.listdir(mag_path)\n tissue_mask_list = os.listdir(tissue_path)\n \n classes = []\n masks = []\n \n # append masks and ids in list\n \n if mag_mask_list:\n for filename in mag_mask_list:\n a = os.path.join(mag_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(2)\n \n if tissue_mask_list:\n for filename in tissue_mask_list:\n a = os.path.join(tissue_path,filename)\n masks.append(skimage.io.imread(a).astype(bool))\n classes.append(1)\n \n return np.stack(masks,axis=2), np.asarray(classes).astype(int)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n# logger.info(\"mask {}\".format(image_id))\n if info[\"mask\"] is None:\n craters = info['craters']\n count = len(craters)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, dims in enumerate(craters):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n \"circle\", dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s) for s in info[\"shapes\"]])\n info[\"mask\"] = mask.astype(np.bool)\n info[\"class_ids\"] = class_ids.astype(np.int32)\n else:\n mask, class_ids = info[\"mask\"], info[\"class_ids\"]\n return mask, class_ids", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_image_path = info['path'].replace(\"images\", \"masks\")\n mask = cv2.imread(mask_image_path)\n mask = (np.max(mask, axis=2) if len(mask.shape) > 2 else mask).reshape((128,128,1))\n \n return mask, np.array([1,])", "def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)", "def load_mask_pre(self, image_id, mask_path):\n img = Image.open(mask_path)\n colors = img.getcolors()\n n_dim = np.shape(colors)\n num_obj = n_dim[0]-1 #not include the background\n\n mask = np.zeros([np.shape(img)[0], np.shape(img)[1], num_obj], dtype=np.uint8)\n mask = self.draw_mask(num_obj, mask, img, colors)\n\n # Map class names to class IDs.\n class_ids = []\n for i in range(num_obj):\n 
class_ids.append(colors[i+1][1])\n\n return mask.astype(np.bool), np.array(class_ids, dtype=np.int32) #mask.astype(np.bool)", "def load_masks(self, y, encode_classes=False, one_hot=False, classes=None, open_fn=None):\n masks = self.load_images(y, open_fn=open_fn)\n if encode_classes and not one_hot: # not need for encoding a class if one_hot is requested\n mapping = {cls: i for i, cls in enumerate(classes)}\n masks = [self.encode_mask(mask, mapping) for mask in masks]\n if one_hot:\n masks = [self.one_hot_encode(mask, classes=classes) for mask in masks]\n return masks", "def _load_masks_3d(self, results):\n pts_instance_mask_path = results[\"ann_info\"][\"pts_instance_mask_path\"]\n\n if self.file_client is None:\n self.file_client = mmcv.FileClient(**self.file_client_args)\n try:\n mask_bytes = self.file_client.get(pts_instance_mask_path)\n pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int)\n except ConnectionError:\n mmcv.check_file_exist(pts_instance_mask_path)\n pts_instance_mask = np.fromfile(pts_instance_mask_path, dtype=np.long)\n\n results[\"pts_instance_mask\"] = pts_instance_mask\n results[\"pts_mask_fields\"].append(\"pts_instance_mask\")\n return results", "def load_mask(self, image_id):\n info = self.image_info[image_id]['mask_info']\n mask_, id_ = info\n\n return mask_, id_ #mask.astype(np.bool)", "def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask", "def load_annaation(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')) / 255\n\n return m", "def load_img_and_mask(img, mask):\n img = cv2.imread(img)\n mask = cv2.imread(mask)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n return img, mask", "def load_mask(path, image, mask_name='module_unet', center=True):\n with open(path, 'r') as file:\n data = json.load(file)\n # if len(data[\"objects\"]) == 0:\n # return None\n # code = data[\"objects\"][0][\"bitmap\"][\"data\"]\n # origin = data[\"objects\"][0][\"bitmap\"][\"origin\"]\n # else:\n # flag = True\n # for obj in data[\"objects\"]:\n # if obj['classTitle'] == mask_name:\n inx = has_mask(mask_name, data=data)\n if inx is not False:\n obj = data[\"objects\"][inx]\n code = obj[\"bitmap\"][\"data\"]\n origin = obj[\"bitmap\"][\"origin\"]\n else:\n mask = np.zeros((image.shape[0], image.shape[1]))\n mask = mask.astype('uint8')\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n if center:\n return mask, mask_center\n else:\n return mask\n mask = base64_2_mask(code)\n mask_center = np.array([mask.shape[1]/2, mask.shape[0]/2])\n mask_center += origin\n\n up = np.zeros((origin[1], mask.shape[1]))\n mask2 = np.vstack((up, mask))\n left = np.zeros((mask2.shape[0], origin[0]))\n mask3 = np.hstack((left, mask2))\n down = np.zeros((image.shape[0] - mask3.shape[0], mask3.shape[1]))\n mask4 = np.vstack((mask3, down))\n right = np.zeros((mask4.shape[0], image.shape[1] - mask4.shape[1]))\n mask5 = 
np.hstack((mask4, right))\n\n if center:\n return mask5.astype('uint8'), mask_center.astype(int)\n else:\n return mask5.astype('uint8')", "def visualize(\n cls,\n image: np.array,\n masks: typing.List,\n filename: str = None,\n use_image: bool = False,\n ) -> np.ndarray:\n\n common_mask = cls._unite_masks(masks)\n\n if use_image:\n common_mask = np.array(\n image * common_mask[:, :, np.newaxis], dtype=np.uint8\n )\n\n assert len(np.unique(common_mask)) < 3\n\n if filename:\n # *255 to correct grayscale\n cv2.imwrite(filename, common_mask * int(255))\n\n plt.imshow(common_mask)\n plt.close()", "def load_Data(img_path, mask_path):\n image_files = glob(os.path.join(img_path, '*.*'))\n mask_files = glob(os.path.join(mask_path, '*.*'))\n image_files.sort()\n mask_files.sort()\n images_list = []\n masks_list = []\n\n for _ in range(len(image_files)):\n image = cv2.imread(image_files[_])\n mask = cv2.imread(mask_files[_])\n images_list.append(image)\n masks_list.append(mask)\n\n return images_list, masks_list", "def bg_mask(query_imgs, method):\n print(\"Obtaining masks\")\n segmentation_method = get_method(method)\n return [segmentation_method(img) for img in query_imgs]", "def read_masks(self):\n structure_mask = self.read_image(\n self.filenames[\"structure_mask\"], grayscale=True\n ).astype(np.bool)\n unknown_mask = self.read_image(self.filenames[\"unknown_mask\"], grayscale=True).astype(\n np.bool\n )\n return structure_mask, unknown_mask", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_image_path = info['path'].replace(\"_resized.png\", \"_gt_chambers_resized.png\")\n mask = cv2.imread(mask_image_path)\n mask = np.max(mask, axis=2).reshape((128,128,1))\n # If grayscale. Convert to RGB for consistency.\n #if mask.ndim != 3:\n # mask = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if mask.shape[-1] == 4:\n mask = mask[..., :3]\n \n return mask,\\\n np.array([1,])\n #np.array([1, 2, 3])", "def apply_mask(query_imgs, masks, method):\n resulting_imgs = []\n for img, mask in zip(query_imgs, masks):\n positions = np.where(mask == 255)\n if method == CBHS: # Special treatment for cell-based bg segmentation to mantain \n x_min, x_max, y_min, y_max = positions[0][0], positions[0][-1], positions[1][0], positions[1][-1]\n img = img[x_min:x_max, y_min:y_max]\n else:\n mask = mask == 255\n img = img[mask].reshape(-1, 3)\n\n resulting_imgs.append(img)\n \n if isDebug():\n addDebugImage(img)\n if isDebug():\n showDebugImage()\n print(\"Finished to apply masks\")\n \n return resulting_imgs", "def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = 
np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def load_mask_one_layer(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['width'], info['height'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total * (mask == 0)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n return mask_total", "def load_mask(self, window_id):\n streams = self.load_streams(window_id)\n info=self.window_info[window_id]\n shape=info[\"shape\"]\n\n mask = np.zeros([shape[0], shape[1], 1], dtype=np.uint8)\n\n for stream_id,stream in enumerate(streams):\n\n for trace in stream:\n if trace.stats.channel==\"U\":\n start=int(round(trace.stats.sac[\"a\"]*100))\n end=int(round(trace.stats.sac[\"t0\"]*100))\n else:\n continue\n\n mask[stream_id,start:end+1,0]= 1\n\n class_ids = np.ones([1])\n\n if self.shuffle:\n random.seed(window_id)\n random_index=random.sample(range(shape[0]),shape[0])\n mask[:,:,0]=mask[:,:,0][random_index]\n\n streams=[streams[i] for i in random_index]\n\n\n\n station=np.zeros([shape[0],shape[0],2])\n for i,j in itertools.product(range(shape[0]),range(shape[0])):\n station[i,j]=[streams[j][0].stats.sac[\"stla\"]/streams[i][0].stats.sac[\"stla\"],streams[j][0].stats.sac[\"stlo\"]/streams[i][0].stats.sac[\"stlo\"]]\n\n\n return mask.astype(np.bool), class_ids.astype(np.int32),station.astype(np.float32)", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def _load_images_and_masks(imlist, masklist=None, **kwargs):\n # Load images\n images = [readImage(im, **kwargs) for im in imlist]\n\n # Load masks\n if masklist is not None:\n masks = [None if m is None else readImage(m, dtype=bool) \\\n for m in masklist]\n else:\n masks = [None] * len(images)\n\n # Return\n return(images, masks)", "def mask_rcnn_inference(pred_mask_logits, pred_instances):\n cls_agnostic_mask = pred_mask_logits.size(1) == 1\n\n if cls_agnostic_mask:\n mask_probs_pred = pred_mask_logits.sigmoid()\n else:\n # Select masks corresponding to the predicted classes\n num_masks = pred_mask_logits.shape[0]\n class_pred = cat([i.pred_classes for i in pred_instances])\n indices 
= torch.arange(num_masks, device=class_pred.device)\n mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()\n # mask_probs_pred.shape: (B, 1, Hmask, Wmask)\n\n num_boxes_per_image = [len(i) for i in pred_instances]\n mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)\n\n for prob, instances in zip(mask_probs_pred, pred_instances):\n instances.pred_masks = prob # (1, Hmask, Wmask)", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def load_mask(filename):\n nib_image = nib.load(filename)\n mask_affine = nib_image.affine\n\n return preprocess_nib(nib_image, is_mask=True), mask_affine", "def load_mask_from_file(filename):\n mask = imread(filename)\n\n return mask", "def model_masks(self, prunable=None):\n # TODO Also accept a dataloader\n pass\n # return masks", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def create_facemask_label(is_training):\n facemask_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'val'\n facemask_dir = os.path.join(facemask_dir, sub_dir)\n if not os.path.isdir(facemask_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = facemask_dir\n if os.path.isdir(os.path.join(facemask_dir, 'images')):\n image_dir = os.path.join(facemask_dir, 'images')\n if os.path.isdir(os.path.join(facemask_dir, 'annotations')):\n anno_dir = os.path.join(facemask_dir, 'annotations')\n\n if not is_training:\n data_dir = config.facemask_root\n json_file = os.path.join(data_dir, config.instances_set.format(sub_dir))\n 
file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n file_name = file_name.split('.')[0] + '.jpg'\n img_id = get_imageId_from_fackmask(file_name)\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.coco_classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(float(bnd_box.find('xmin').text)) - 1\n y_min = int(float(bnd_box.find('ymin').text)) - 1\n x_max = int(float(bnd_box.find('xmax').text)) - 1\n y_max = int(float(bnd_box.find('ymax').text)) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': \\\n img_id, 'bbox': [x_min, y_min, o_width, o_height], \\\n 'category_id': cls_map[cls_name], 'id': bnd_id, \\\n 'ignore': 0, \\\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict", "def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y", "def random_scale(im, inst_masks, mask, boxes, classes, scale):\n # scale = np.random.uniform(down, upper)\n h, w, c = im.shape\n if scale > 1:\n \"\"\"\"\"\"\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) 
* h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n im = im[offy: (offy + h), offx: (offx + w)]\n mask = mask[offy: (offy + h), offx: (offx + w)]\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n inst_masks = inst_masks[offy: (offy + h), offx: (offx + w)]\n try:\n if inst_masks.ndim > 2:\n inst_masks = np.transpose(inst_masks, (2, 0, 1)) # to (n, h, w)\n else:\n inst_masks = inst_masks.reshape((1, h, w))\n except ValueError:\n print (inst_masks.ndim, inst_masks.shape)\n raise\n else:\n inst_masks = np.zeros((0, h, w), inst_masks.dtype)\n else:\n \"\"\"\"\"\"\n canvas = np.zeros(im.shape, im.dtype) + np.array([103, 116, 123], im.dtype)\n canvas_mask = np.zeros(mask.shape, mask.dtype)\n max_offx = (scale - 1.) * w\n max_offy = (scale - 1.) * h\n offx = int(np.random.uniform() * max_offx)\n offy = int(np.random.uniform() * max_offy)\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n mask = cv2.resize(mask, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n h_, w_, _ = im.shape\n canvas[-offy: (-offy + h_), -offx: (-offx + w_)] = im\n canvas_mask[-offy: (-offy + h_), -offx: (-offx + w_)] = mask\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n canvas_instmask = np.zeros(inst_masks.shape, inst_masks.dtype)\n inst_masks = cv2.resize(inst_masks, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n if inst_masks.ndim == 2:\n inst_masks = inst_masks[:,:, np.newaxis]\n canvas_instmask[-offy: (-offy + h_), -offx: (-offx + w_)] = inst_masks\n canvas_instmask = np.transpose(canvas_instmask, (2, 0, 1)) # to (n, h, w)\n else:\n canvas_instmask = np.zeros((0, h, w), inst_masks.dtype)\n\n im, mask, inst_masks = canvas, canvas_mask, canvas_instmask\n\n boxes = _offset_boxes(boxes, im.shape, scale, [offx, offy], False)\n boxes, classes, inst_masks = _filter_invalid_boxes(boxes, classes, inst_masks, min_size=3)\n\n return im, inst_masks, mask, boxes, classes", "def get_mask_dictionary(train_names):\n masks={}\n for name in train_names:\n masks[name]=cv.imread(\"../dataset/masks/\"+name+\".png\",cv.IMREAD_GRAYSCALE)\n \n return masks", "def _reshape_instance_masks(self, keys_to_tensors):\n height = keys_to_tensors['image/height']\n width = keys_to_tensors['image/width']\n to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)\n masks = keys_to_tensors['image/object/mask']\n if isinstance(masks, tf.SparseTensor):\n masks = tf.sparse_tensor_to_dense(masks)\n masks = tf.reshape(tf.to_float(tf.greater(masks, 0.0)), to_shape)\n return tf.cast(masks, tf.float32)", "def load_inbreast_mask(mask_path, imshape=(4084, 3328)):\n\n def load_point(point_string):\n x, y = tuple([float(num) for num in point_string.strip('()').split(',')])\n return y, x\n\n mask_shape = np.transpose(imshape)\n mask = np.zeros(mask_shape)\n with open(mask_path, 'rb') as mask_file:\n plist_dict = plistlib.load(mask_file, fmt=plistlib.FMT_XML)['Images'][0]\n numRois = plist_dict['NumberOfROIs']\n rois = plist_dict['ROIs']\n assert len(rois) == numRois\n for roi in rois:\n numPoints = roi['NumberOfPoints']\n points = roi['Point_px']\n assert numPoints == len(points)\n points = [load_point(point) for point in points]\n if len(points) <= 2:\n for point in 
points:\n mask[int(point[0]), int(point[1])] = 1\n else:\n x, y = zip(*points)\n x, y = np.array(x), np.array(y)\n poly_x, poly_y = polygon(x, y, shape=mask_shape)\n mask[poly_x, poly_y] = 1\n return mask", "def _load_image(self, filename):\n\n path = filename.split(\"/\")\n image_id = path[len(self.directory.split(\"/\")) - 1]\n\n try:\n img = imread(filename)[:, :, :self.num_channels]\n except IndexError:\n tmp = imread(filename)\n img = np.stack([tmp] * 3).transpose(1, 2, 0)\n orig_shape = img.shape[:2]\n img = self._process(img)\n\n masks = np.zeros(self.imsize)\n\n # Load training labels if we're loading a training dataset\n if self.train:\n masks = self._load_mask(image_id)\n\n return (img, masks, image_id, orig_shape)", "def decode_labels(mask, num_images=1, num_classes=20):\n\tn, h, w = mask.shape\n\tassert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)\n\toutputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n\tfor i in range(num_images):\n\t img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))\n\t pixels = img.load()\n\t for j_, j in enumerate(mask[i, :, :]):\n\t\t for k_, k in enumerate(j):\n\t\t\t if k < num_classes:\n\t\t\t\t pixels[k_,j_] = label_colours[k]\n\t outputs[i] = np.array(img)\n\treturn outputs", "def decode_labels(mask, num_classes=41):\n h, w = mask.shape\n outputs = np.zeros((h, w, 3), dtype=np.uint8)\n\n img = Image.new('RGB',(len(mask[0]), len(mask)))\n pixels = img.load()\n for j_, j in enumerate(mask):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs = np.array(img)\n return outputs", "def get_object_mask(self, image_id):\n image_info = self.image_meta[image_id]\n active_class_info = image_info['active_class_info']\n object_cnt = len(active_class_info)\n mask = np.zeros([image_info['height'], image_info['width'], object_cnt], dtype=np.uint8)\n for i, (object_, _, dims) in enumerate(active_class_info):\n mask[:, :, i:i + 1] = self.draw_object_shape(mask[:, :, i:i + 1].copy(), object_, 1, dims)\n \n # Handle occlusions, when two objects intersect, we should ensure that the intersection mask is\n # given to only only object.\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n # print(occlusion)\n \n for i in range(object_cnt - 2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n return mask.astype(np.bool)", "def loader(self, input_path, mask_path):\n input_image = cv2.imread(input_path)\n # h, w = input_image.shape\n # print(\"input_image:\", h, w)\n # gt_mask = cv2.imread(mask_path)\n # bgr --> rgb\n # # input_image = input_image[:, :, ::-1]\n # gt_mask = gt_mask[:, :, ::-1]\n\n # the gt_mask should be gray image\n gt_mask = cv2.imread(mask_path, 0)\n # h, w = gt_mask.shape\n # print(\"gt_mask:\", h, w)\n\n # randomly horizontal flip\n input_image, gt_mask = horizontal_flip(input_image, gt_mask, axis=1)\n\n # randomly scale\n scale = np.random.uniform(low=0.5, high=2.0, size=1)\n input_image, gt_mask = rescale(input_image, gt_mask, scale)\n\n input_image = cv2.resize(input_image, (self.img_width, self.img_height), interpolation=cv2.INTER_LINEAR)\n gt_mask = cv2.resize(gt_mask, (self.img_width, self.img_height), interpolation=cv2.INTER_NEAREST)\n # print('input_image:', input_image.shape) # -> (512, 512, 3)\n # print('gt_mask:', gt_mask.shape) # -> (512, 512, 3)\n gt_mask = np.expand_dims(gt_mask, axis=-1)\n return 
input_image, gt_mask", "def im_detect_mask(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M_HEIGHT = cfg.MRCNN.RESOLUTION_H\n M_WIDTH = cfg.MRCNN.RESOLUTION_W\n if boxes.shape[0] == 0:\n pred_masks = np.zeros((0, M, M), np.float32)\n return pred_masks\n\n inputs = {'mask_rois': _get_rois_blob(boxes, im_scales)}\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'mask_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.mask_net.Proto().name)\n\n # Fetch masks\n pred_global_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_global_probs')\n ).squeeze()\n pred_char_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_char_probs')\n ).squeeze()\n # pred_char_boxes = workspace.FetchBlob(\n # core.ScopedName('mask_fcn_charbox_pred')\n # ).squeeze()\n pred_global_masks = pred_global_masks.reshape([-1, 1, M_HEIGHT, M_WIDTH])\n pred_char_masks = pred_char_masks.reshape([-1, M_HEIGHT, M_WIDTH, 37])\n pred_char_masks = pred_char_masks.transpose([0,3,1,2])\n # pred_char_boxes = pred_char_boxes.reshape([-1, 4, M_HEIGHT, M_WIDTH])\n\n return pred_global_masks, pred_char_masks, None", "def display_instances(image, boxes, masks, ids, names, scores):\n n_instances = boxes.shape[0]\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i in range(n_instances):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n print(label)\n color = class_dict[label]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = apply_mask(image, mask, color)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\n )\n\n return image", "def get_image_mask_from_xml(bbox_path, image_size, valid_class_names=[]):\n masked_img = np.ones(image_size, dtype='uint8')\n \n root = elem.parse(bbox_path).getroot()\n annotations = root.findall('object')\n if valid_class_names:\n annotations = filter(lambda x: x.find('name').text in valid_class_names, annotations)\n \n for obj in annotations:\n bbox = obj.find('bndbox')\n get_coord = lambda name: int(bbox.find(name).text)\n masked_img[\n get_coord('ymin'):get_coord('ymax'),\n get_coord('xmin'):get_coord('xmax')\n ] = 0\n return masked_img", "def decode_labels(mask, num_images=1, num_classes=21, task='seg'):\n n, h, w, c = mask.shape\n assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' 
% (n, num_images)\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n if task == 'normal':\n outputs[i] = mask[i]\n elif task == 'seg':\n img = Image.new('RGB', (w, h), (255, 255, 255)) # unlabeled part is white (255, 255, 255)\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :, 0]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n else:\n raise Exception('task name is not recognized!')\n\n return outputs", "def mask_rcnn_dct_inference(self,pred_mask_logits, pred_instances):\n num_masks = pred_mask_logits.shape[0]\n device = pred_mask_logits.device\n if num_masks == 0:\n pred_instances[0].pred_masks = torch.empty([0, 1, self.mask_size, self.mask_size]).to(device)\n return pred_instances\n else:\n pred_mask_rc = self.dct_encoding.decode(pred_mask_logits.detach())\n pred_mask_rc = pred_mask_rc[:, None, :, :]\n pred_instances[0].pred_masks = pred_mask_rc\n return pred_instances", "def crop_images_label_big(dataset_dir, is_mask=True):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*_labelIds.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.float32)\n if is_mask:\n mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)\n\n mask[np.nonzero(img == 24)] = 255\n img = mask\n\n img = scipy.misc.imresize(img, 0.25, interp='bilinear', mode=None)\n img[np.nonzero(img > 0)] = 255\n scipy.misc.imsave('/data/vllab1/dataset/CITYSCAPES/CITY_valid/fine_mask/' + filePath.split('/')[-1], img)", "def __init__(self, image_dir, instances_json, classes_file, image_size=(64, 64), mask_size=16,\n normalize_images=True, max_samples=None, min_object_size=0.01,\n min_objects_per_image=1, max_objects_per_image=8,\n include_other=False, instance_whitelist=None):\n super(Dataset, self).__init__()\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.set_image_size(image_size)\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n self.classes = []\n annotations = list(paths.list_files(os.path.join(instances_json), validExts=(\".xml\")))\n\n # with open(instances_json, 'r') as f:\n # instances_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n new_image_ids = []\n self.image_id_to_objects = defaultdict(list)\n\n for j, ann in enumerate(annotations):\n\n tree = ET.parse(ann)\n anno_xml = tree.getroot()\n # anno_json = open(ann, 'r')\n # image_id = anno_xml.find('path').text\n image_id = j\n filename = anno_xml.find('filename').text\n size = anno_xml.findall('size')[0]\n width = size.find('width').text\n height = size.find('height').text\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n cls = open(classes_file, 'r')\n\n object_idx_to_name = {}\n all_instance_categories = []\n for i, category_data in enumerate(cls):\n category_id = i\n category_name = category_data\n all_instance_categories.append(str(category_name[:-1]))\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n category_whitelist = set(instance_whitelist)\n\n for object_data in 
anno_xml.findall('object'):\n bndbox = object_data.findall('bndbox')[0]\n xmin = bndbox.find('xmin').text\n ymin = bndbox.find('ymin').text\n xmax = bndbox.find('xmax').text\n ymax = bndbox.find('ymax').text\n w = int(xmax) - int(xmin)\n h = int(ymax) - int(ymin)\n # _, _, w, h = object_data['bndbox']\n # Esto no se que es lo que hace exactamente\n W, H = self.image_id_to_size[image_id]\n W = int(W)\n H = int(H)\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_data.find('name').text\n\n if object_name not in self.classes:\n self.classes.append(object_name)\n object_data.find('name').set(\"id\", str(self.classes.index(object_name)))\n # object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n # assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n self.num_objects = len(self.vocab['object_idx_to_name'])\n\n # Prune images that have too few or too many objects\n total_objs = 0\n for image_id in self.image_ids:\n # Hay que comprobar o cambiar esto a un id numerico por que al ser string no puede usarse como clave o asi para esto y da error. 
Investigar que se puede hacer con esto\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects_per_image <= num_objs <= max_objects_per_image:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n self.vocab['pred_idx_to_name'] = [\n '__in_image__',\n 'left of',\n 'right of',\n 'above',\n 'below',\n 'inside',\n 'surrounding',\n ]\n self.vocab['pred_name_to_idx'] = {}\n for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n self.vocab['pred_name_to_idx'][name] = idx", "def read_mask(filename):\n # single label images\n # source_msk = (cv.imread(mask_filename) > 0).astype(np.float)\n # display_img = blend_in_channel(source_img, source_msk)\n\n # multi-label images\n try:\n source_msk = cv.imread(filename, cv.IMREAD_ANYCOLOR)\n except FileNotFoundError as e:\n logger.warning(\"'%s' not found, creating empty\" % filename)\n source_msk = np.zeros(source_img.shape[:2], dtype=np.uint8)\n logger.debug(\"source_msk.shape: '%s'\" % str(source_msk.shape))\n\n # if the image is multichannel, take only the first channel\n if len(source_msk.shape) > 2:\n logger.warning(\"'%s'.shape = %s, reducing to first channel\" % (basename(filename), str(source_msk.shape)))\n source_msk = source_msk.mean(axis=-1).astype(int)\n\n source_msk = source_msk[..., np.newaxis]\n\n # mask label values\n labels = np.unique(source_msk)\n logger.info(\"'%s':%s:%s %i labels\" % (basename(filename), str(source_msk.shape), str(source_msk.dtype), len(labels)))\n\n if any([label > max(colourmap.keys()) for label in labels]):\n logger.warning(\"label values > colourmap range [%i, %i] are mapped to %i\" % (\n min(colourmap.keys()), max(colourmap.keys()), 1))\n\n for label in labels:\n if label > max(colourmap.keys()):\n source_msk[source_msk==label] = 1\n\n labels = np.unique(source_msk)\n logger.info(\"'%s':%s:%s labels: %s\" % (basename(filename), str(source_msk.shape), str(source_msk.dtype), labels))\n\n return source_msk.astype(float)", "def makeMaskedImageFromArrays(image, mask=None, variance=None):\n cls = globals()[\"MaskedImage%s\" % suffixes[str(image.dtype.type)]]\n return cls(makeImageFromArray(image), makeMaskFromArray(mask), makeImageFromArray(variance))", "def load_large_dataset_with_class(patch_size=None, align=False):\n\n def wrapped():\n dataset_path = 'image_data'\n imgs_list = glob.glob(dataset_path + '/img/*.jpg')\n random.shuffle(imgs_list)\n\n # gather all corresponding masks for each image\n all_masks_files = glob.glob(dataset_path + '/bool_mask_sep_inst/*.npy')\n image_to_masks = defaultdict(list)\n for x in all_masks_files:\n x = os.path.basename(x)\n # MaskId := ImageId_MaskNum.npy\n image_id = x[:x.rindex('_')]\n image_to_masks[image_id].append(x)\n\n for fname in imgs_list:\n image_id = os.path.basename(fname).rstrip('.jpg')\n mask_base = random.choice(image_to_masks[image_id])\n ref_mask_path = random.choice(all_masks_files)\n\n image = skimage.img_as_float(imread(dataset_path + '/img/' + image_id + '.jpg'))\n mask = np.load(dataset_path + '/bool_mask_sep_inst/' + mask_base)\n ref_mask = np.load(ref_mask_path)\n\n if patch_size is not None:\n image = sk_resize(image, (patch_size, patch_size))\n mask = skimage.img_as_bool(sk_resize(mask * 255., (patch_size, patch_size)))\n ref_mask = skimage.img_as_bool(sk_resize(ref_mask * 255., (patch_size, patch_size)))\n\n if align:\n ref_mask = align_mask(mask, ref_mask)\n\n yield (image, mask, ref_mask)\n\n return wrapped", "def layer_masks(self, module):\n pass\n # return masks", "def 
load_data_from_dir(instance_dir, image_size=256, pad_size=0.1, skip_indices=()):\n image_dir = osp.join(instance_dir, \"images\")\n mask_dir = osp.join(instance_dir, \"masks\")\n data_dict = {\n \"images_og\": [],\n \"images\": [],\n \"masks\": [],\n \"masks_dt\": [],\n \"bbox\": [],\n \"image_centers\": [],\n \"crop_scales\": [],\n }\n for i, image_path in enumerate(sorted(glob(osp.join(image_dir, \"*.jpg\")))):\n if i in skip_indices:\n continue\n image_name = osp.basename(image_path)\n mask_path = osp.join(mask_dir, image_name.replace(\"jpg\", \"png\"))\n image_og = Image.open(image_path).convert(\"RGB\")\n mask = Image.open(mask_path).convert(\"L\")\n bbox = get_bbox(np.array(mask) / 255.0 > 0.5)\n center = (bbox[:2] + bbox[2:]) / 2.0\n s = max(bbox[2:] - bbox[:2]) / 2.0 * (1 + pad_size)\n square_bbox = np.concatenate([center - s, center + s]).astype(int)\n # Crop image and mask.\n image = image_util.crop_image(image_og, square_bbox)\n image = np.array(image.resize((image_size, image_size), Image.LANCZOS)) / 255.0\n mask = image_util.crop_image(mask, square_bbox)\n mask = np.array(mask.resize((image_size, image_size), Image.BILINEAR))\n mask = mask / 255.0 > 0.5\n image_center, crop_scale = compute_crop_parameters(image_og.size, square_bbox)\n data_dict[\"bbox\"].append(square_bbox)\n data_dict[\"crop_scales\"].append(crop_scale)\n data_dict[\"image_centers\"].append(image_center)\n data_dict[\"images\"].append(image)\n data_dict[\"images_og\"].append(image_og)\n data_dict[\"masks\"].append(mask)\n data_dict[\"masks_dt\"].append(compute_distance_transform(mask))\n for k, v in data_dict.items():\n if k != \"images_og\": # Original images can have any resolution.\n data_dict[k] = np.stack(v)\n\n if osp.exists(osp.join(instance_dir, \"metadata.json\")):\n metadata = json.load(open(osp.join(instance_dir, \"metadata.json\")))\n data_dict[\"extents\"] = metadata[\"extents\"]\n azimuths = metadata[\"azimuths\"]\n elevations = metadata[\"elevations\"]\n R, T = pytorch3d.renderer.look_at_view_transform(\n dist=2,\n elev=elevations,\n azim=azimuths,\n )\n data_dict[\"initial_poses\"] = R.tolist()\n return data_dict", "def displayInstances(image, boxes, masks, ids, names, scores):\n n_instances = boxes.shape[0]\n colours = randomColours(n_instances)\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i, colour in enumerate(colours):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = applyMask(image, mask, colour)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), colour, 1)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour, 1\n )\n\n return image", "def label_to_mask(labels):\n # get the image size\n h, w = labels.shape\n\n # build a color to label map\n idx_to_color = {}\n for label in class_info:\n idx_to_color[class_info[label].id] = class_info[label].color\n\n # generate label matrix\n mask = np.zeros((h, w, 3), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n id = labels[y, x]\n r, g, b = idx_to_color[id]\n mask[y, x] = np.array([b, g, r])\n\n return mask", "def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, 
column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)", "def createDataset_detection(outputPath, imagePathList, box_x_list, box_y_list,\n labelList, region_mask_list, pixel_mask_list):\n assert (len(imagePathList) == len(box_x_list) == len(box_y_list))\n nSamples = len(imagePathList)\n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n box_x = box_x_list[i]\n box_y = box_y_list[i]\n if len(box_x) == 0:\n continue\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n\n imageKey = 'image-%09d' % cnt\n cache[imageKey] = imageBin\n box_x_Key = 'boxes_x-%09d' % cnt\n box_y_Key = 'boxes_y-%09d' % cnt\n cache[box_x_Key] = box_x.encode()\n cache[box_y_Key] = box_y.encode()\n\n if labelList:\n labelKey = 'label-%09d' % cnt\n cache[labelKey] = labelList[i].encode()\n if region_mask_list:\n region_mask_Key = 'region_mask-%09d' % cnt\n cache[region_mask_Key] = open(region_mask_list[i], 'rb').read()\n if pixel_mask_list:\n pixel_mask_Key = 'pixel_mask-%09d' % cnt\n cache[pixel_mask_Key] = open(pixel_mask_list[i], 'rb').read()\n # embed()\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt - 1\n cache['num-samples'] = str(nSamples).encode()\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)", "def read_data_sets_label(data_dir, label):\n train_data, test_data = read_data_sets(data_dir, one_hot=False)\n train_mask = create_mask(train_data, label)\n test_mask = create_mask(test_data, label)\n return (train_data.images[train_mask], test_data.images[test_mask])", "def crop_images_label(dataset_dir, is_mask=True):\n data = []\n for folder in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, folder, \"*_labelIds.png\")\n data.extend(glob(path))\n\n for index, filePath in enumerate(data):\n print ('{}/{}'.format(index, len(data)))\n\n img = scipy.misc.imread(filePath).astype(np.uint8)\n img = scipy.misc.imresize(img, 0.25, interp='nearest', mode=None)\n if is_mask:\n mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.bool_)\n\n mask[np.nonzero(img == 24)] = True\n img = mask\n\n scipy.misc.imsave('/mnt/data/andy/dataset/CITYSCAPES/label/' + filePath.split('/')[-1], img)\n #break", "def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n 
\"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')", "def instance_masks_to_semseg_mask(instance_masks, category_labels):\n assert len(category_labels) == instance_masks.shape[0], \\\n \"Number of instances do not match: {}, {}\".format(len(category_labels), len(instance_masks))\n semseg_masks = instance_masks.long()\n\n for i, label in enumerate(category_labels):\n semseg_masks[i] = torch.where(instance_masks[i], label, semseg_masks[i])\n\n # for pixels with differing labels, assign to the category with higher ID number (arbitrary criterion)\n return semseg_masks.max(dim=0)[0] # [T, H, W]", "def generate_segmentation_from_masks(masks,\n detected_boxes,\n image_height,\n image_width,\n is_image_mask=False):\n\n def expand_boxes(boxes, scale):\n \"\"\"Expands an array of boxes by a given scale.\"\"\"\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227\n # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,\n # whereas `boxes` here is in [x1, y1, w, h] form\n w_half = boxes[:, 2] * .5\n h_half = boxes[:, 3] * .5\n x_c = boxes[:, 0] + w_half\n y_c = boxes[:, 1] + h_half\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp\n\n # Reference: 
https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812\n # To work around an issue with cv2.resize (it seems to automatically pad\n # with repeated border values), we manually zero-pad the masks by 1 pixel\n # prior to resizing back to the original image resolution. This prevents\n # \"top hat\" artifacts. We therefore need to expand the reference boxes by an\n # appropriate factor.\n\n _, mask_height, mask_width = masks.shape\n scale = max((mask_width + 2.0) / mask_width,\n (mask_height + 2.0) / mask_height)\n\n ref_boxes = expand_boxes(detected_boxes, scale)\n ref_boxes = ref_boxes.astype(np.int32)\n padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)\n segms = []\n for mask_ind, mask in enumerate(masks):\n im_mask = np.zeros((image_height, image_width), dtype=np.uint8)\n if is_image_mask:\n # Process whole-image masks.\n im_mask[:, :] = mask[:, :]\n else:\n # Process mask inside bounding boxes.\n padded_mask[1:-1, 1:-1] = mask[:, :]\n\n ref_box = ref_boxes[mask_ind, :]\n w = ref_box[2] - ref_box[0] + 1\n h = ref_box[3] - ref_box[1] + 1\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)\n\n mask = cv2.resize(padded_mask, (w, h))\n mask = np.array(mask > 0.5, dtype=np.uint8)\n\n x_0 = max(ref_box[0], 0)\n x_1 = min(ref_box[2] + 1, image_width)\n y_0 = max(ref_box[1], 0)\n y_1 = min(ref_box[3] + 1, image_height)\n\n im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (\n x_0 - ref_box[\n 0]):(x_1 - ref_box[\n 0])]\n segms.append(im_mask)\n\n segms = np.array(segms)\n assert masks.shape[0] == segms.shape[0]\n return segms", "def make_dataset_from_image_paths_with_masks(\n image_path_list: List[Path], target_img_height, target_img_width\n) -> tf.data.Dataset:\n # read all the images as tensors\n image_tensors_list = [\n read_image_as_tensor(cur_img_path) for cur_img_path in image_path_list\n ]\n\n # resize them\n image_tensors_list_resized = [\n tf.image.resize(\n cur_image_tensor,\n size=(target_img_height, target_img_width),\n antialias=True,\n )\n for cur_image_tensor in image_tensors_list\n ]\n\n # read all the bounding boxes\n bounding_boxes_list = [\n get_bounding_box_from_xml_path(\n get_bounding_box_xml_path_from_image_path(cur_img_path)\n )\n for cur_img_path in image_path_list\n ]\n\n # resize them and convert them to binary masks\n bounding_boxes_list = [\n bounding_box_to_binary_mask(\n scale_bounding_box(\n cur_bounding_box,\n img_height=image_tensors_list[i].shape[0],\n img_width=image_tensors_list[i].shape[1],\n target_img_height=target_img_height,\n target_img_width=target_img_width,\n ),\n img_height=target_img_height,\n img_width=target_img_width,\n )\n for i, cur_bounding_box in enumerate(bounding_boxes_list)\n ]\n\n # images = tf.image.rgb_to_grayscale(tf.stack(image_tensors_list_resized))\n images = tf.stack(image_tensors_list_resized)\n bounding_boxes = tf.stack(bounding_boxes_list)\n\n dataset = tf.data.Dataset.from_tensor_slices((images, bounding_boxes))\n\n return dataset", "def read_mask(mask_path, mask_type, mask_name,patch_size,show_image=None):\n path_test = mask_path\n\n mask= Image.open(path_test+\"/\"+\"{}\".format(mask_type)+\n \"/\"+\"{}.tif\".format(mask_name))\n mask_list = np.asarray(list (mask.getdata() ))\n\n mask_list = mask_list / np.amax(mask_list)\n #either use from future or use // to get float result\n mask_list = np.reshape(mask_list,(patch_size,patch_size))\n if (show_image == True):\n\n print(mask_list.shape)\n plt.figure()\n plt.imshow(mask_list,cmap='gray')\n plt.show()\n 
print(mask_list)\n return mask_list", "def getMask(label, scribble, class_id, class_ids):\n # Dense Mask\n fg_mask = torch.where(label == class_id,\n torch.ones_like(label), torch.zeros_like(label))\n bg_mask = torch.where(label != class_id,\n torch.ones_like(label), torch.zeros_like(label))\n for class_id in class_ids:\n bg_mask[label == class_id] = 0\n\n # Scribble Mask\n bg_scribble = scribble == 0\n fg_scribble = torch.where((fg_mask == 1)\n & (scribble != 0)\n & (scribble != 255),\n scribble, torch.zeros_like(fg_mask))\n scribble_cls_list = list(set(np.unique(fg_scribble)) - set([0,]))\n if scribble_cls_list: # Still need investigation\n fg_scribble = fg_scribble == random.choice(scribble_cls_list).item()\n else:\n fg_scribble[:] = 0\n\n return {'fg_mask': fg_mask,\n 'bg_mask': bg_mask,\n 'fg_scribble': fg_scribble.long(),\n 'bg_scribble': bg_scribble.long()}", "def __getitem__(self, index):\n img_name = self.files[self.split][index]\n msk_name = img_name.replace(\".bmp\", \".png\")\n\n image_path = os.path.join(self.root, self.split, img_name)\n label_path = os.path.join(self.root, self.split, msk_name)\n\n assert os.path.exists(os.path.join(label_path)), \\\n \"> Corresponding Mask: {} do not exist!!!\".format(msk_name)\n\n image = misc.imread(image_path)\n image = np.array(image, dtype=np.uint8)\n\n # image = Image.fromarray(image, mode='RGB')\n\n # bright_enhancer = ImageEnhance.Brightness(image)\n # image = bright_enhancer.enhance(1.25)\n #\n # con_enhancer = ImageEnhance.Contrast(image)\n # image = con_enhancer.enhance(1.75)\n\n # sharp_enhancer = ImageEnhance.Sharpness(image)\n # image = sharp_enhancer.enhance(2.25)\n\n # image = image.filter(ImageFilter.EMBOSS)\n\n # image = np.array(image, dtype=np.uint8)\n image = image[:, :, ::-1] # From RGB to BGR\n\n # Histogram Equalization\n # image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])\n # image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])\n # image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])\n\n label = misc.imread(label_path, mode=\"L\")\n label[label > 0] = 1\n label = np.array(label, dtype=np.uint8)\n\n # data augmentation used in training\n if self.aug_ext is not None:\n image = self.aug_ext(image)\n if self.augmentations is not None:\n image, label = self.augmentations(image, label)\n\n if self.is_transform:\n image = self.transform(image)\n\n image = image.transpose(2, 0, 1) # From HWC to CHW (For PyTorch we use N*C*H*W tensor)\n return torch.from_numpy(image).float(), torch.from_numpy(label).long()", "def test_make_mask_w_ref_image(self):\n output_mask = instance_mask(\n os.path.join(data_dir, 'geotiff_labels.geojson'),\n reference_im=os.path.join(data_dir, 'sample_geotiff.tif'),\n do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_inst_mask.tif'))\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))\n assert np.array_equal(output_mask, truth_mask)", "def display_instances(image, boxes, masks, ids, names, scores):\r\n n_instances = boxes.shape[0]\r\n colors = random_colors(n_instances)\r\n\r\n if not n_instances:\r\n print('NO INSTANCES TO DISPLAY')\r\n else:\r\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\r\n\r\n for i, color in enumerate(colors):\r\n if not np.any(boxes[i]):\r\n continue\r\n\r\n y1, x1, y2, x2 = boxes[i]\r\n label = names[ids[i]]\r\n score = scores[i] if 
scores is not None else None\r\n caption = '{} {:.2f}'.format(label, score) if score else label\r\n mask = masks[:, :, i]\r\n\r\n image = apply_mask(image, mask, color)\r\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\r\n image = cv2.putText(\r\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\r\n )\r\n\r\n return image", "def __getitem__(self,image_id):\n # read the image\n image_path = (os.path.join(self.dataset_dir,self.list_dir[image_id],\"images/{}.png\".format(self.list_dir[image_id])))\n image = io.imread(image_path)\n # read the mask\n mask_dir = os.path.join(self.dataset_dir,self.list_dir[image_id],'masks')\n masks_list = []\n\n for i, f in enumerate (next(os.walk(mask_dir))[2]):\n if f.endswith ('.png'):\n m = io.imread(os.path.join(mask_dir,f)).astype(np.bool)\n m = m[:,:,0]\n masks_list.append(m)\n #combine all the masks corresponding of an invidual sample image into single binary mask\n if len(masks_list) != 1:\n masks = np.logical_or(masks,masks_list[i])\n else:\n masks = masks_list[i]\n # do the transforms..\n trans_img,trans_masks = self.transform(image,masks,self.aug)\n sample = {\"image\":trans_img,\"masks\":trans_masks}\n\n return(sample)", "def fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w):\n h, w, c = im.shape\n scale = float(target_h) / h\n im = cv2.resize(im, (target_w, target_h))\n mask = cv2.resize(mask, (target_w, target_h), interpolation=cv2.INTER_NEAREST)\n if inst_masks.size > 0:\n inst_masks = np.transpose(inst_masks, (1, 2, 0)) # to (h, w, n)\n inst_masks = cv2.resize(inst_masks, (target_w, target_h), interpolation=cv2.INTER_NEAREST)\n try:\n if inst_masks.ndim > 2:\n inst_masks = np.transpose(inst_masks, (2, 0, 1)) # to (n, h, w)\n else:\n inst_masks = inst_masks.reshape((1, target_h, target_w))\n except ValueError:\n print (inst_masks.ndim, inst_masks.shape)\n raise\n else:\n inst_masks = np.zeros((0, h, w), inst_masks.dtype)\n boxes[:, 0:4:2] = boxes[:, 0:4:2] * float(target_w) / w\n boxes[:, 1:4:2] = boxes[:, 1:4:2] * float(target_h) / h\n\n return im, inst_masks, mask, boxes, classes", "def compute_masks_for_splits(\n graph):\n masks = {}\n num_nodes = graph.num_nodes()\n for split, split_nodes in zip(\n ['train', 'validation', 'test'],\n [graph.train_nodes, graph.validation_nodes, graph.test_nodes]):\n split_mask = np.zeros(num_nodes, dtype=bool)\n split_mask[split_nodes] = True\n masks[split] = split_mask\n return masks", "def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y" ]
[ "0.75807637", "0.75707793", "0.7464837", "0.74508286", "0.73990464", "0.7395329", "0.73903126", "0.733224", "0.72805625", "0.72684896", "0.72458494", "0.7228455", "0.7193414", "0.71730536", "0.7096771", "0.70429015", "0.69910985", "0.688985", "0.6886687", "0.6880056", "0.6802511", "0.6781349", "0.6777628", "0.674453", "0.6739016", "0.6724426", "0.67172986", "0.6702522", "0.66551965", "0.6653047", "0.6585384", "0.65502435", "0.6524222", "0.64307076", "0.63797235", "0.63030314", "0.6282095", "0.62535787", "0.61947304", "0.6194178", "0.61568236", "0.61053807", "0.60669726", "0.60654265", "0.60106647", "0.5984767", "0.5950368", "0.5948504", "0.5945046", "0.5942892", "0.594238", "0.5914715", "0.59036946", "0.5860878", "0.58273524", "0.58266467", "0.5821049", "0.5814811", "0.5806338", "0.5804508", "0.57813483", "0.57740045", "0.577053", "0.5738703", "0.5737903", "0.572022", "0.57012105", "0.5691026", "0.5672624", "0.56595755", "0.56052965", "0.5579762", "0.557125", "0.55659336", "0.55335456", "0.55263317", "0.5517954", "0.55168957", "0.5514865", "0.5508943", "0.5503274", "0.5490132", "0.5481203", "0.5478855", "0.5466403", "0.5465714", "0.54580706", "0.54522187", "0.5439983", "0.54225016", "0.54217106", "0.54043466", "0.5394349", "0.53929657", "0.5362041", "0.5361563", "0.53572136", "0.53463393", "0.53294975", "0.5327307" ]
0.6600161
30
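The mask-processing snippets above all hinge on the same step: a low-resolution instance mask is resized to its detection box and then pasted into a canvas the size of the full image. Below is a minimal sketch of that step in isolation, assuming a float mask in [0, 1] and a box given as (x0, y0, x1, y1) pixel coordinates; the names mask_small, box, image_height, image_width and the 0.5 threshold are illustrative placeholders, not values taken from the snippets above.

import numpy as np
import cv2

def paste_mask_into_image(mask_small, box, image_height, image_width, threshold=0.5):
    # mask_small: float array in [0, 1]; box: (x0, y0, x1, y1) in pixels.
    # Both names are illustrative, not taken from the snippets above.
    x0, y0, x1, y1 = [int(round(v)) for v in box]
    w = max(x1 - x0 + 1, 1)
    h = max(y1 - y0 + 1, 1)

    # Resize the small mask to the box size and binarize it.
    resized = cv2.resize(mask_small, (w, h))
    resized = (resized > threshold).astype(np.uint8)

    # Clip the box to the image bounds and paste only the overlapping region.
    im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
    px0, py0 = max(x0, 0), max(y0, 0)
    px1, py1 = min(x1 + 1, image_width), min(y1 + 1, image_height)
    im_mask[py0:py1, px0:px1] = resized[(py0 - y0):(py1 - y0), (px0 - x0):(px1 - x0)]
    return im_mask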
Return a link to the image in the COCO Website.
def image_reference(self, image_id):
    info = self.image_info[image_id]
    if info["source"] == "coco":
        return "http://cocodataset.org/#explore?id={}".format(info["id"])
    else:
        super(CocoDataset, self).image_reference(image_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_comic_url(session, response):\n soup = bs(response.text, 'lxml')\n for div in soup.find_all('div', class_=\"img-comic-container\"):\n for a in div.find_all('a', class_=\"img-comic-link\"):\n for img in a.find_all('img', src=True):\n return \"https:\" + img['src']", "def get_image_url():", "def image_url(self):\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field:\n field = context.getField(IMAGE_FIELD_NAME)\n\n if field and field.get_size(context) > 0:\n return u'%s/%s_%s' % (obj_url, field.getName(), 'thumb')\n\n return u\"%s/isaw_logo.png\" % self.portal.absolute_url()", "def image_url(self) -> str:\n return pulumi.get(self, \"image_url\")", "def get_image_url(article):\n get_image = article.find_all(\"img\")[0]\n return get_image.get(\"src\").replace(\"../../\", \"http://books.toscrape.com/\")", "def getNewsIconURL(newsBrain):", "def get_image_link(self):\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all(\"b\")[1].text\n return image_tag['src'], image_name\n\n # image = td.find_all('img')\n # print(image)\n # if image is not None:\n # return urljoin(self.base_url, image['src'])", "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "def logo_url(self):\n return self.get_url(\"logo\", \"images/logo.png\")", "def course_image_url(course):\r\n loc = StaticContent.compute_location(course.location.course_key, course.course_image)\r\n path = loc.to_deprecated_string()\r\n return path", "def get_url_image(self, obj):\n return settings.SERVER_HOST + obj.image.url", "def logo_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"logo_url\")", "def logo_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"logo_url\")", "def show_image_ref():\n return get_image_ref()", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)", "def image_url(self):\n return self.photo_url or GENERIC_IMAGE", "def get_comic_src(url):\n html = requests.get(url).text\n soup = BeautifulSoup(html)\n images = soup.select('.img-comic-container a img')\n return images[0].attrs['src']", "def get_image(result):\n article_id = result['id']\n id_ = article_id[14:]\n href = article_id[:14]\n\n #FIXME: not working\n image_url = \"http://www.jpress.nli.org.il/Olive/APA/NLI_heb/get/GetImage.ashx?kind=block&href=%s&id=%s&ext=.png\" %(href, id_)\n \n return image_url", "def or_meta_image_url(context):\n try:\n request = context['request']\n absolute_url = request.build_absolute_uri(OR_META_IMAGE_URL)\n except KeyError:\n absolute_url = BASE_URL + OR_META_IMAGE_URL\n return absolute_url", "def still_image_url(self) -> str:\n\t\treturn 'grab.jpg?oid={0}'.format(self._oid)", "def img_url_display(self):\n url = '%s=s%s' % (self.img_url, self.DISPLAY_SIZE_PX)\n if self.img_rot in Plaque.ALLOWED_ROTATIONS:\n url = \"%s-r%s\" % (url, self.img_rot)\n return url", "def image(self) -> str:\n return pulumi.get(self, \"image\")", "def upload_closet_image(item):\n if item:\n response = cloudinary.uploader.upload(item) \n image_url = response['secure_url']\n\n return 
image_url", "def cosmic_link(variant_obj):\n\n cosmic_ids = variant_obj.get('cosmic_ids')\n\n if not cosmic_ids:\n return None\n else:\n cosmic_id = cosmic_ids[0]\n url_template = (\"https://cancer.sanger.ac.uk/cosmic/mutation/overview?id={}\")\n\n\n return url_template.format(cosmic_id)", "def test_get_cover_url(self):\n blob = [\"image\"]\n result = self.connector.get_cover_url(blob)\n self.assertEqual(result, \"https://covers.openlibrary.org/b/id/image-L.jpg\")", "def course_image_url(course, image_name=None):\n image = image_name or course.course_image\n try:\n loc = StaticContent.compute_location(course.location.course_key, image)\n _ = contentstore().find(loc)\n except NotFoundError:\n loc = StaticContent.compute_location(course.location.course_key, course.course_image)\n\n return loc.to_deprecated_string()", "def image_url(self) -> str:\n return self._image_url", "def url(self):\n\t\treturn self.base_url+\"{}/{}/{}.jpg\".format(self.template,self._escape(self.top_text),self._escape(self.bottom_text))+(\"?\"+\"&\".join([\"{}={}\".format(k,quote(self.kwargs[k])) for k in self.kwargs]) if self.kwargs else \"\")", "def image_url(organization, user_key=API_KEY):\n return 'http://api.crunchbase.com/v/2/organization/%s/primary_image?user_key=%s' % (organization, user_key)", "def getReferenceImageUrl(self, name):\n bucket = self.productSearch.bucket\n blobName = self._getReferenceImageBlobName(name)\n return bucket.blob(blobName).public_url", "def og_logo(self):\n # first try fb logo\n uf = self.app.url_for\n img = self._get_image(self.barcamp.fb_image)\n if img is None:\n img = self._get_image(self.barcamp.logo)\n if img is None:\n return \"\" # no url\n\n v = img.variants.get('facebook', None) # fb size\n if v is None:\n return \"\"\n return self.app.url_for(\"asset\", asset_id = v._id, _full=True)", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"Campaign badge\"/>' % self.image.url\r\n return mark_safe(h)", "def logo(self):\n from app import textify\n try:\n asset = self.app.module_map.uploader.get(self.barcamp.logo)\n except AssetNotFound:\n asset = None\n if not asset:\n return u\"\"\n v = asset.variants['logo_full']\n url = self.app.url_for(\"asset\", asset_id = v._id, _full = True)\n alt = 'Logo '+self.barcamp.name# + \" - \" + textify(self.barcamp.seo_description)\n alt = alt.replace('\"', '&quot;')\n alt = alt.replace(\"'\", '&quot;')\n return \"\"\"<a title=\"%s\" href=\"%s\"><img alt=\"%s\" class=\"img-responsive\" src=\"%s\" width=\"%s\" height=\"%s\"></a>\"\"\" %(\n self.barcamp.name,\n self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n alt,\n url,\n v.metadata['width'],\n v.metadata['height'])", "def logo_small_url(self):\n return self.get_url(\"logo_small\", \"images/logo-small.png\")", "def image_url(self):\n return \"{}/mjpeg_read.php\".format(self.base_url)", "async def olá(self):\r\n\t\tawait self.client.say('© Maddie 2017')\r\n\t\te = Embed()\r\n\t\te.set_image(url='https://cdn.discovery.pgsitecore.com/en-us/-/media/Olay_PathFinder/Images/a/OLAY%20TE%207IN1%20DEEP%20PENETRATING%20MOISTURE%20BODY%20WASH_Front.png?w=460&v=1-201705260605')\r\n\t\tawait self.client.say(embed=e)", "def get_thumbnail_url():", "def image(self, where):\n cook = cookie()\n I = Image(cook, self)\n self.call('image', cook, where)\n print(\"IMAGE\", where)\n return I", "def create_HTML_a_img(link_url, image_url):\n img = '<img src=\"' + image_url + '\">'\n linked_image = create_HTML_a(link_url, img)\n return linked_image", "def find_circuit_image(self, 
url):\n try:\n soup = set_soup(url)\n img_url_container = soup.find(\n \"div\", {\"class\": \"f1-race-hub--schedule-circuit-map\"}\n )\n img_url = img_url_container.find(\"a\")[\"href\"]\n soup = set_soup(self.BASE_URL + img_url)\n img_container = soup.find(\"div\", {\"class\": \"f1-race-hub--map-container\"})\n img = img_container.find(\"img\", {\"class\": \"lazy\"})[\"data-src\"]\n return self._add_timestamp_to_image(img)\n except Exception:\n logger.exception(\"Error getting circuit image\")", "def _get_pic_link(self, tree, xpath_adr='/html/body/div[1]/div[2]/div[3]/div[1]/div[1]/div/div/img/@data-src'):\n try:\n return tree.xpath(xpath_adr)[0]\n except:\n print('WARNING: Could not scrape game card web address, check review xpath address')\n return np.nan", "def logo_uri(self) -> str:\n return pulumi.get(self, \"logo_uri\")", "def get_xkcd() -> str:\n if xkcd == 'true':\n r = requests.get(\"https://xkcd.com/info.0.json\").json()\n img = r['img']\n\n return f'<a href=\"https://xkcd.com/\">\\n <img src=\"{img}\" />\\n</a>'\n\n return \"\"", "def show_orion_url(self, obj):\n return obj.orion_url", "def test_get_image_url(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n url = utils.course_image_url(course)\r\n self.assertEquals(url, '/c4x/edX/999/asset/{0}'.format(course.course_image))", "def return_image(val, model_id, message_name, field_name, mime, sind):\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url", "def get_kegg_image(self):\n return 'http://rest.kegg.jp/get/%s/img' % self.kegg_id", "def get_image_url(self, image, hostname):\n\n if image.image_type == 'local':\n return \"http://\" + hostname + \"/\" + settings.IMAGES_URL + \"/\" + image.deployment.campaign.short_name + \"/\" + image.deployment.short_name + \"/\" + image.image_name\n elif image.image_type == 'envirocoms':\n return \"http://\" + hostname + \"/ecoms_proxy?image=\" + image.image_path", "async def cat(self):\r\n async with aiohttp.request('get', 'http://thecatapi.com/api/images/get?format=src') as resp:\r\n await self.bot.say(resp.url)", "def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)", "def fetch_logo_url(organization):\n return fetch_json(image_url, organization)", "def get_image_url(img):\n # element['data-src'] and element.get('data-src') doesn't work\n for k, v in img.items():\n if k == 'data-src':\n # https://t.nhentai.net/galleries/<gallerycode>/<page#>t.<extension>\n # https://i.nhentai.net/galleries/<gallerycode>/<page#>.<extension>\n return v[:8] + 'i' + v[9:32] + v[32:].replace('t.', '.', 1)", "def logo_image(self):\n return self.company_logo or \"upload/default_avatar.gif\"", "def image_link(self):\r\n\r\n if not self._image_link:\r\n warnings.warn(\"Seems like you are trying to pull out the image link while not having it.\", Warning, stacklevel=2)\r\n\r\n return self._image_link", "def get_image_info(client, my_massage):\n # Get Javascript updated HTML page\n response = client.commands.getPageText()\n assert response['status']\n assert response['result']\n\n # Create soup from HTML page and get desired information\n soup = BeautifulSoup(response['result'], markupMassage=my_massage)\n image_info = {'name': soup.find(id='caption_region').h3.string,\n 'link': 
urlparse.urljoin('http://www.nasa.gov',\n soup.find(attrs='Full_Size')['href'])}\n return image_info", "def l10n_img(ctx, url):\n return static(l10n_img_file_name(ctx, url))", "def logo_url(self):\n asset = self._get_image(self.barcamp.logo)\n if asset is None:\n return None\n uf = self.app.url_for\n return dict(\n [(vid, uf('asset', asset_id = asset._id)) for vid, asset in asset.variants.items()]\n )", "def media_image_url(self):\n url = self._state.get(\"albumart\", None)\n return self._volumio.canonic_url(url)", "def url_for(**options):\n\n url_parts = get_url_parts(**options)\n image_hash = hashlib.md5(b(options[\"image_url\"])).hexdigest()\n url_parts.append(image_hash)\n\n return \"/\".join(url_parts)", "def _get_image_url(self, image_filepath):\n return self.IMAGE_URL + image_filepath", "def process_image_url(value, field):\n if field.width:\n if not value:\n return u\"无\"\n return mark_safe(\"\"\"\n <a href=\"{0}\" target=\"_blank\"><img src=\"{0}\" width=\"{1}\" a>\n \"\"\".format(absolute_media_path(value), field.width))\n # only show url address.\n elif value:\n shorten_value = ''\n if len(value) > 20:\n shorten_value = value[0:12] + \"...\"\n return mark_safe(\"\"\"\n <a href=\"{0}\" target=\"_blank\" title=\"{0}\" >{1}</a>\n \"\"\".format(absolute_media_path(value), shorten_value if shorten_value else value))\n else:\n return \"\"", "def test_get_image_url(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n self.assertEquals(course_image_url(course), '/c4x/edX/999/asset/{0}'.format(course.course_image))", "def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'", "def get_image(self):\n shop = lfs_get_object_or_404(Shop, pk=1)\n return shop.image", "def MakeImageURL(fname, hyperlink='openfile', **kwargs):\n prefix = 'cdb://image/'\n if not _isSupportedUriPath(fname):\n fname = 'file:///%s' % os.path.basename(fname)\n else:\n fname = fname.replace('\\\\', '/')\n if hyperlink:\n hyperlink = ' cdb:hyperlink:%s' % hyperlink\n else:\n hyperlink = ''\n return '%s%s%s' % (prefix, fname, hyperlink)", "async def inspire(self, ctx):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://inspirobot.me/api?generate=true') as response:\n if(response.status == 200):\n imgurl = await response.text()\n embed = discord.Embed(colour=discord.Colour.dark_blue())\n embed.set_image(url=imgurl)\n embed.set_footer(text='http://inspirobot.me/')\n await ctx.bot.send_message(ctx.message.channel, embed=embed)", "def get_image_url(self, image_url):\n if image_url:\n return '{0}?source={1}'.format(self.config['links']['imageProxy'], image_url)\n else:\n return None", "def app_logo_url():\n return \"https://raw.githubusercontent.com/aiidalab/aiidalab-hello-world/master/img/logo.png\"", "def logo():\n try:\n numLogos = Logo.objects.filter(active=True).count()\n rand = random.randint(0, numLogos - 1)\n L = Logo.objects.filter(active=True)[rand]\n logo = L.file.url\n alt = \"#%s - By %s\" % (L.id, L.creator)\n except:\n logo = \"%slogos/default.png\" % settings.MEDIA_URL\n alt = \"Logo\"\n return '<img id=\"logo\" src=\"%s\" title=\"%s\" alt=\"%s\" />' % (logo, alt, alt)", "def _getImagePath(self, link):\n return settings.WIKI_UPLOAD_URL + urlquote(link)", "def retrieve_an_catto():\n\n response = requests.get('https://api.thecatapi.com/v1/images/search')\n response_json = json.loads(response.text)\n catto_url = response_json[0]['url']\n\n return catto_url", "def logo_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, 
\"logo_uri\")", "def course_image_url(course):\r\n if course.static_asset_path or modulestore().get_modulestore_type(course.id) == XML_MODULESTORE_TYPE:\r\n # If we are a static course with the course_image attribute\r\n # set different than the default, return that path so that\r\n # courses can use custom course image paths, otherwise just\r\n # return the default static path.\r\n url = '/static/' + (course.static_asset_path or getattr(course, 'data_dir', ''))\r\n if hasattr(course, 'course_image') and course.course_image != course.fields['course_image'].default:\r\n url += '/' + course.course_image\r\n else:\r\n url += '/images/course_image.jpg'\r\n else:\r\n loc = StaticContent.compute_location(course.id, course.course_image)\r\n url = loc.to_deprecated_string()\r\n return url", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def image(self) -> Optional[str]:\n return pulumi.get(self, \"image\")", "def logo_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_url\")", "def logo_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"logo_url\")", "async def inspire(self, ctx):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://inspirobot.me/api?generate=true') as response:\n if(response.status == 200):\n imgurl = await response.text()\n embed = discord.Embed(colour=discord.Colour.dark_blue())\n embed.set_image(url=imgurl)\n embed.set_footer(text='http://inspirobot.me/')\n await ctx.send(embed=embed)" ]
[ "0.7119464", "0.6980274", "0.67780954", "0.672729", "0.65255696", "0.6525551", "0.65081525", "0.64593345", "0.64593345", "0.64593345", "0.64545566", "0.64158964", "0.64125633", "0.62915957", "0.62915957", "0.62628806", "0.62458694", "0.62403053", "0.6228512", "0.6224288", "0.6169613", "0.6130535", "0.61108536", "0.6092", "0.60803497", "0.6080325", "0.60787016", "0.60735935", "0.60720396", "0.604139", "0.60367954", "0.6006848", "0.5993195", "0.59731865", "0.5944549", "0.5933435", "0.59208953", "0.5904797", "0.5887993", "0.583692", "0.5834718", "0.5828234", "0.5821467", "0.58001864", "0.5785814", "0.57824284", "0.576297", "0.5762885", "0.575668", "0.5754859", "0.57536286", "0.57439566", "0.5742672", "0.5740444", "0.5721741", "0.57166713", "0.5707242", "0.5706076", "0.56910896", "0.56862897", "0.5678413", "0.5672419", "0.56483966", "0.5638587", "0.5627583", "0.5625144", "0.5612588", "0.5603022", "0.5602591", "0.55978304", "0.5591926", "0.5577412", "0.557547", "0.5574375", "0.5568121", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5567589", "0.5564587", "0.5564587", "0.5564394" ]
0.69248754
2
Convert an annotation, which can be polygons or uncompressed RLE, to RLE.
def annToRLE(self, ann, height, width):
    segm = ann['segmentation']
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts
        # we merge all parts into one mask rle code
        rles = maskUtils.frPyObjects(segm, height, width)
        rle = maskUtils.merge(rles)
    elif isinstance(segm['counts'], list):
        # uncompressed RLE
        rle = maskUtils.frPyObjects(segm, height, width)
    else:
        # rle
        rle = ann['segmentation']
    return rle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annToRLE(self, ann):\n t = self.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def annToRLE(ann, height, width):\n segm = ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def annToRLE(self, ann, height, width):\n segm = ann['segmentaion']\n # convert segm from [[x1, y1], [x2, y2]...] to [[x1, y1, x2, y2, ...]] \n segm = [np.ravel(segm)]\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentaion']\n return rle", "def annToRLE(ann, h, w):\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def annToRLE(self, ann, height, width):\n segm = ann[\"segmentation\"]\n if isinstance(segm, list):\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm[\"counts\"], list):\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n rle = ann[\"segmentation\"]\n return rle", "def poly2rle(self, poly, im_height, im_width):\n\n assert type(poly) == list, \"Poly must be a list of polygon vertices\"\n\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = mask.frPyObjects(poly, im_height, im_width)\n rle = mask.merge(rles)\n\n return rle", "def visibleToRLE(ann,coco):\n t = coco.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['visible_mask']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['visible_mask']\n return rle", "def seg2poly_old(rle):\n # TODO: debug for this function\n # try:\n # binary_mask = mask_utils.decode(rle)\n # contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL)\n # contour_lens = np.array(list(map(len, contours)))\n # max_id = contour_lens.argmax()\n # max_contour = contours[max_id]\n # rect = cv2.minAreaRect(max_contour)\n # poly = cv2.boxPoints(rect)\n # return poly\n # except:\n # return -1\n try:\n binary_mask = mask_utils.decode(rle)\n contours, 
hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # len is not appropriate\n # contour_lens = np.array(list(map(len, contours)))\n # max_id = contour_lens.argmax()\n contour_areas = np.array(list(map(cv2.contourArea, contours)))\n max_id = contour_areas.argmax()\n max_contour = contours[max_id]\n rect = cv2.minAreaRect(max_contour)\n poly = cv2.boxPoints(rect)\n poly = TuplePoly2Poly(poly)\n return poly\n except:\n return []", "def invisibleToRLE(ann,coco):\n t = coco.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann.get(\"invisible_mask\", None) \n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['invisible_mask']\n return rle", "def get_obj_mask(seg_ann_data, height, width):\r\n if isinstance(seg_ann_data, list):\r\n # polygon -- a single object might consist of multiple parts\r\n # we merge all parts into one mask rle code\r\n rles = maskUtils.frPyObjects(seg_ann_data, height, width)\r\n rle = maskUtils.merge(rles)\r\n elif isinstance(seg_ann_data['counts'], list):\r\n # uncompressed RLE\r\n rle = maskUtils.frPyObjects(seg_ann_data, height, width)\r\n else:\r\n rle = seg_ann_data\r\n\r\n m = maskUtils.decode(rle)\r\n\r\n return m", "def seg2poly(rle):\n # TODO: debug for this function\n # try:\n # binary_mask = mask_utils.decode(rle)\n # contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL)\n # contour_lens = np.array(list(map(len, contours)))\n # max_id = contour_lens.argmax()\n # max_contour = contours[max_id]\n # rect = cv2.minAreaRect(max_contour)\n # poly = cv2.boxPoints(rect)\n # return poly\n # except:\n # return -1\n try:\n binary_mask = mask_utils.decode(rle)\n contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # len is not appropriate\n # contour_lens = np.array(list(map(len, contours)))\n # max_id = contour_lens.argmax()\n # contour_areas = np.array(list(map(cv2.contourArea, contours)))\n # max_id = contour_areas.argmax()\n # max_contour = contours[max_id]\n max_contour = max(contours, key=cv2.contourArea)\n rect = cv2.minAreaRect(max_contour)\n poly = cv2.boxPoints(rect)\n poly = TuplePoly2Poly(poly)\n return poly\n except:\n return []", "def get_polygons(annotation):\n print(f\"Loadding: {annotation}\")\n tree = ET.parse(annotation)\n root = tree.getroot()\n polygons = {}\n for obj in root.findall('object'):\n name = obj.find('name').text\n id_ = obj.find('id').text\n polygon = []\n for pt in obj.find('polygon').findall('pt'):\n polygon.append([pt.find('x').text, pt.find('y').text])\n if name in polygons:\n x_ref= int(polygons[name]['left'][0][0])\n x = int(polygon[0][0])\n if x > x_ref:\n polygons[name]['right'] = polygons[name]['left']\n id_ = 'left'\n else:\n id_ = 'right'\n else:\n polygons[name] = {}\n id_ = 'left'\n polygons[name][id_] = polygon\n for i in list(polygons.keys()):\n if not('right' in polygons[i]):\n print(i,' only has one polygon: ',polygons[i]['left'])\n y = input('Do you wish to label it as \\'right\\'? 
(leave empy if No): ')\n if (y):\n polygons[i]['right'] = polygons[i]['left']\n polygons[i].pop('left')\n return polygons", "def multi2poly(returned_vector_pred, layer_name=None):\n try: # Try to convert multipolygon to polygon\n df = gpd.read_file(returned_vector_pred, layer=layer_name)\n if 'MultiPolygon' in df['geometry'].geom_type.values:\n logging.info(\"\\nConverting multiPolygon to Polygon...\")\n gdf_exploded = df.explode(index_parts=True, ignore_index=True)\n gdf_exploded.to_file(returned_vector_pred, layer=layer_name) # overwrite the layer readed\n except Exception as e:\n logging.error(f\"\\nSomething went wrong during the conversion of Polygon. \\nError {type(e)}: {e}\")", "def convert_uncompressed_RLE_COCO_type(\n element: Dict, height: int, width: int\n ) -> Dict:\n p = maskUtils.frPyObjects(element, height, width)\n return p", "def raster_to_polyvert(dataset):\n rastercoords = read_gdal_coordinates(dataset, mode=\"edge\")\n\n polyvert = georef.grid_to_polyvert(rastercoords)\n\n return polyvert", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def rle_decode(rle, shape):\n rle = list(map(int, rle.split()))\n rle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n rle[:, 1] += rle[:, 0]\n rle -= 1\n mask = np.zeros([shape[0] * shape[1]], np.bool)\n for s, e in rle:\n assert 0 <= s < mask.shape[0]\n assert 1 <= e <= mask.shape[0], \"shape: {} s {} e {}\".format(shape, s, e)\n mask[s:e] = 1\n # Reshape and transpose\n mask = mask.reshape([shape[1], shape[0]]).T\n return mask", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n\n return gt_roidb", "def convert_rle_to_mask(self, rle, shape):\n\n # Initialize a zero canvas (one-dimensional here)\n mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n\n # Split each run-length string\n s = rle.split()\n for i in range(len(s) // 2):\n start = int(s[2 * i]) - 1\n length = int(s[2 * i + 1])\n mask[start:start + length] = 1 # Assign this run to ones\n # Reshape to 2D\n img2 = mask.reshape(shape).T\n return img2", "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos+9:]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == 'PGUM':\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON 
polygon is invalid!\\n%s\") % (poly.exterior.xy,))\n return\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n self.tp.warnings.append(\n (\"LAT...LON polygon exterior is CCW, reversing\\n%s\"\n ) % (poly.exterior.xy,))\n poly = Polygon(zip(poly.exterior.xy[0][::-1],\n poly.exterior.xy[1][::-1]))\n self.giswkt = 'SRID=4326;%s' % (dumps(MultiPolygon([poly]),\n rounding_precision=6),)\n return poly", "def rle_to_mask(rle_string, height, width):\n rows,cols = height,width\n rle_numbers = [int(num_string) for num_string in rle_string.split(' ')]\n rle_pairs = np.array(rle_numbers).reshape(-1,2)\n img = np.zeros(rows*cols,dtype=np.uint8)\n for index,length in rle_pairs:\n index -= 1\n img[index:index+length] = 255\n img = img.reshape(cols,rows)\n img = img.T\n return img", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n gt_roidb = [self._load_nimble_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n\n return gt_roidb", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n raw_annotations = self._load_annotations()\n gt_roidb = self._format_raw_annotations(raw_annotations)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n\n return gt_roidb", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):# and False:\n with open(cache_file, 'rb') as fid:\n try:\n roidb = pickle.load(fid)\n except:\n roidb = pickle.load(fid, encoding='bytes')\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n #with open(cache_file, 'wb') as fid:\n # pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n #print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb", "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line 
in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n annotations_data = open(self.annotations_file, 'r')\n #self.ann_json_data = json.load(annotations_data)\n gt_roidb = [self._load_hover_annotation(line)\n for line in annotations_data]\n annotations_data.close()\n# pdb.set_trace()\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n\n return gt_roidb", "def gt_roidb(self):\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n num_image = len(self.image_index)\n if cfg.MNC_MODE:\n gt_roidb = [self._load_sbd_annotations(index) for index in xrange(num_image)]\n else:\n gt_roidb = [self._load_pascal_annotations(index) for index in xrange(num_image)]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n return gt_roidb", "def gt_roidb(self):\n cache_file = os.path.join(self.cfg.cache_path, self.cfg.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.cfg.name, cache_file))\n return roidb\n\n # self.image_index = self._load_image_set_index()\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, protocol=-1)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb", "def parse_annotation_instance(annotation):\n\n text = annotation['utf8_string']\n language = annotation['language']\n legible = int(annotation['legibility'] == 'legible')\n\n mask = np.reshape(np.array(annotation['mask'], np.int32), (-1, 2))\n box = cv2.boxPoints(cv2.minAreaRect(mask))\n quadrilateral = [int(x) for x in box.reshape([-1])]\n\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': text,\n 'legible': legible,\n 'language': language,\n }\n }\n\n return word_annotation", "def transform(self, results: Dict) -> Dict:\n # gt_polygons -> gt_masks\n if 'gt_polygons' in results.keys():\n gt_polygons = results.pop('gt_polygons')\n gt_polygons = [[gt_polygon] for gt_polygon in gt_polygons]\n gt_masks = PolygonMasks(gt_polygons, *results['img_shape'])\n\n if self.poly2mask:\n gt_masks = gt_masks.to_bitmap()\n\n results['gt_masks'] = gt_masks\n # gt_ignore_flags -> gt_ignored\n if 'gt_ignored' in results.keys():\n gt_ignored = results.pop('gt_ignored')\n results['gt_ignore_flags'] = 
gt_ignored\n\n return results", "def annToMask(ann, height, width):\n rle = annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def decode(tiling_encode):\n # according to the define of TBETilingResult structure,\n # the one member of tiling_encode occupies 32 bits\n # and includes multiple members of TBETilingResult structure\n # tiling_encode[0] includes 32-bit digits, AL1_shape_0 occupies 32-bit\n al1_shape_0 = (tiling_encode[0] & MAX_UINT32)\n # tiling_encode[1] includes 32-bit digits, BL1_shape_0 occupies 32-bit\n bl1_shape_0 = (tiling_encode[1] & MAX_UINT32)\n # tiling_encode[2] includes 32-bit digits,\n # AL1_shape_1 occupies low 16-bit , AL1_shape_2 occupies high 16-bit\n al1_shape_1 = ((tiling_encode[2] & MAX_UINT16))\n al1_shape_2 = ((tiling_encode[2] >> 16) & MAX_UINT16)\n # tiling_encode[3] includes AL1_shape_3 and BL1_shape_1,\n # AL1_shape_3 occupies low 16-bit, BL1_shape_1 occupies high 16-bit\n al1_shape_3 = (tiling_encode[3] & MAX_UINT16)\n bl1_shape_1 = ((tiling_encode[3] >> 16) & MAX_UINT16)\n # tiling_encode[4] includes BL1_shape_2 and BL1_shape_3,\n # BL1_shape_2 occupies low 16-bit, BL1_shape_3 occupies high 16-bit\n bl1_shape_2 = (tiling_encode[4] & MAX_UINT16)\n bl1_shape_3 = ((tiling_encode[4] >> 16) & MAX_UINT16)\n # tiling_encode[5] includes AL0_matrix_0 and AL0_matrix_1,\n # AL0_matrix_0 occupies low 16-bit, AL0_matrix_1 occupies high 16-bit\n al0_matrix_0 = (tiling_encode[5] & MAX_UINT16)\n al0_matrix_1 = ((tiling_encode[5] >> 16) & MAX_UINT16)\n # tiling_encode[6] includes AL0_matrix_2, AL0_matrix_3 and AL0_matrix_4,\n # AL0_matrix_2 occupies low 8-bit, AL0_matrix_3 occupies middle 8-bit,\n # AL0_matrix_4 occupies high 16-bit\n al0_matrix_2 = (tiling_encode[6] & MAX_UINT8)\n al0_matrix_3 = ((tiling_encode[6] >> 8) & MAX_UINT8)\n al0_matrix_4 = ((tiling_encode[6] >> 16) & MAX_UINT16)\n # tiling_encode[7] includes AL0_matrix_5 and BL0_matrix_0,\n # AL0_matrix_5 occupies low 16-bit, BL0_matrix_0 occupies high 16-bit\n al0_matrix_5 = (tiling_encode[7] & MAX_UINT16)\n bl0_matrix_0 = ((tiling_encode[7] >> 16) & MAX_UINT16)\n # tiling_encode[8] includes BL0_matrix_1, BL0_matrix_2 and BL0_matrix_3,\n # BL0_matrix_1 occupies low 16-bit, # BL0_matrix_2 occupies middle 8-bit,\n # BL0_matrix_3 occupies high 8-bit\n bl0_matrix_1 = (tiling_encode[8] & MAX_UINT16)\n bl0_matrix_2 = ((tiling_encode[8] >> 16) & MAX_UINT8)\n bl0_matrix_3 = ((tiling_encode[8] >> 24) & MAX_UINT8)\n # tiling_encode[9] includes BL0_matrix_4 and BL0_matrix_5,\n # BL0_matrix_4 occupies low 16-bit, BL0_matrix_5 occupies high 16-bit\n bl0_matrix_4 = (tiling_encode[9] & MAX_UINT16)\n bl0_matrix_5 = ((tiling_encode[9] >> 16) & MAX_UINT16)\n # tiling_encode[10] includes CL0_matrix_0 and CL0_matrix_1,\n # CL0_matrix_0 occupies low 16-bit, CL0_matrix_1 occupies high 16-bit\n cl0_matrix_0 = (tiling_encode[10] & MAX_UINT16)\n cl0_matrix_1 = ((tiling_encode[10] >> 16) & MAX_UINT16)\n # tiling_encode[11] includes CL0_matrix_2, CL0_matrix_3 and CL0_matrix_4,\n # CL0_matrix_2 occupies low 8-bit, # CL0_matrix_3 occupies middle 8-bit,\n # CL0_matrix_4 occupies high 16-bit\n cl0_matrix_2 = (tiling_encode[11] & MAX_UINT8)\n cl0_matrix_3 = ((tiling_encode[11] >> 8) & MAX_UINT8)\n cl0_matrix_4 = ((tiling_encode[11] >> 16) & MAX_UINT16)\n # tiling_encode[12] includes CL0_matrix_5 and CUB_matrix_0,\n # CL0_matrix_5 occupies low 16-bit, CUB_matrix_0 occupies high 16-bit\n cl0_matrix_5 = (tiling_encode[12] & MAX_UINT16)\n cub_matrix_0 = ((tiling_encode[12] >> 16) & MAX_UINT16)\n # tiling_encode[13] 
includes CUB_matrix_1, CUB_matrix_2 and CUB_matrix_3,\n # CUB_matrix_1 occupies low 16-bit,\n # CUB_matrix_2 occupies middle 8-bit, CUB_matrix_3 occupies high 8-bit\n cub_matrix_1 = (tiling_encode[13] & MAX_UINT16)\n cub_matrix_2 = ((tiling_encode[13] >> 16) & MAX_UINT8)\n cub_matrix_3 = ((tiling_encode[13] >> 24) & MAX_UINT8)\n # tiling_encode[14] includes CUB_matrix_4 and CUB_matrix_5,\n # CUB_matrix_4 occupies low 16-bit, CUB_matrix_5 occupies high 16-bit\n cub_matrix_4 = (tiling_encode[14] & MAX_UINT16)\n cub_matrix_5 = ((tiling_encode[14] >> 16) & MAX_UINT16)\n # tiling_encode[15] includes AUB_shape_0, AUB_shape_0 occupies 32-bit\n aub_shape_0 = (tiling_encode[15] & MAX_UINT32)\n # tiling_encode[16] includes BUB_shape_0, BUB_shape_0 occupies 32-bit\n bub_shape_0 = (tiling_encode[16] & MAX_UINT32)\n # tiling_encode[17] includes AUB_shape_1 and AUB_shape_2,\n # AUB_shape_1 occupies low 16-bit, AUB_shape_2 occupies high 16-bit\n aub_shape_1 = (tiling_encode[17] & MAX_UINT16)\n aub_shape_2 = ((tiling_encode[17] >> 16) & MAX_UINT16)\n # tiling_encode[18] includes AUB_shape_3 and BUB_shape_1,\n # AUB_shape_3 occupies low 16-bit, BUB_shape_1 occupies high 16-bit\n aub_shape_3 = (tiling_encode[18] & MAX_UINT16)\n bub_shape_1 = ((tiling_encode[18] >> 16) & MAX_UINT16)\n # tiling_encode[19] includes BUB_shape_2 and BUB_shape_3,\n # BUB_shape_2 occupies low 16-bit, BUB_shape_3 occupies high 16-bit\n bub_shape_2 = (tiling_encode[19] & MAX_UINT16)\n bub_shape_3 = ((tiling_encode[19] >> 16) & MAX_UINT16)\n # tiling_encode[20] includes batch_dim and n_dim,\n # batch_dim occupies low 16-bit, n_dim occupies high 16-bit\n batch_dim = (tiling_encode[20] & MAX_UINT16)\n n_dim = ((tiling_encode[20] >> 16) & MAX_UINT16)\n # tiling_encode[21] includes m_dim and group_dim,\n # m_dim occupies low 16-bit, group_dim occupies high 16-bit\n m_dim = (tiling_encode[21] & MAX_UINT16)\n group_dim = ((tiling_encode[21] >> 16) & MAX_UINT16)\n # tiling_encode[22] includes AUB_pbuffer, BUB_pbuffer,\n # AL1_pbuffer, BL1_pbuffer, AL0_pbuffer, BL0_pbuffer,\n # CL0_pbuffer and CUB_pbuffer,\n # AUB_pbuffer occupies low 16-bit, BUB_pbuffer occupies middle 4-bit,\n # AL1_pbuffer occupies next 4-bit, BL1_pbuffer occupies next 4-bit,\n # AL0_pbuffer: 4 bits, BL0_pbuffer: 4 bits,\n # CL0_pbuffer: 4 bits, CUB_pbuffer: 4 bits\n aub_pbuffer = (tiling_encode[22] & MAX_UINT4)\n bub_pbuffer = ((tiling_encode[22] >> 4) & MAX_UINT4)\n al1_pbuffer = ((tiling_encode[22] >> 8) & MAX_UINT4)\n bl1_pbuffer = ((tiling_encode[22] >> 12) & MAX_UINT4)\n al0_pbuffer = ((tiling_encode[22] >> 16) & MAX_UINT4)\n bl0_pbuffer = ((tiling_encode[22] >> 20) & MAX_UINT4)\n cl0_pbuffer = ((tiling_encode[22] >> 24) & MAX_UINT4)\n cub_pbuffer = ((tiling_encode[22] >> 28) & MAX_UINT4)\n # tiling_encode[23] includes UBG_pbuffer, n_bef_batch_flag,\n # n_bef_group_flag, batch_bef_group_flag,\n # A_overhead_opt_flag and B_overhead_opt_flag,\n # UBG_pbuffer occupies low 4-bit, n_bef_batch_flag occupies next 1-bit,\n # n_bef_group_flag: 1 bits, batch_bef_group_flag: 1 bits,\n # A_overhead_opt_flag: 1 bits, B_overhead_opt_flag occupies 1 bit,\n ubg_pbuffer = (tiling_encode[23] & MAX_UINT4)\n n_bef_batch_flag = ((tiling_encode[23] >> 4) & MAX_BOOL)\n n_bef_group_flag = ((tiling_encode[23] >> 5) & MAX_BOOL)\n batch_bef_group_flag = ((tiling_encode[23] >> 6) & MAX_BOOL)\n a_overhead_opt_flag = ((tiling_encode[23] >> 7) & MAX_BOOL)\n b_overhead_opt_flag = ((tiling_encode[23] >> 8) & MAX_BOOL)\n\n # BUB_shape_2 support special value None\n if bub_shape_2 == 0:\n bub_shape_2 
= None\n\n # BL1_shape_2 support special value None\n if bl1_shape_2 == 0:\n bl1_shape_2 = None\n\n # BL0_matrix_4 support special value None\n if bl0_matrix_4 == 0:\n bl0_matrix_4 = None\n\n # default set channel_wise_flag\n\n aub_channel_wise_flag = None\n bub_channel_wise_flag = None\n cub_channel_wise_flag = True\n\n # Fill the dictionary of Tiling\n tiling = {\"AUB_shape\": [aub_shape_0, aub_shape_1, aub_shape_2, \\\n aub_shape_3], \\\n \"BUB_shape\": [bub_shape_0, bub_shape_1, bub_shape_2, \\\n bub_shape_3], \\\n \"AL1_shape\": [al1_shape_0, al1_shape_1, al1_shape_2, \\\n al1_shape_3], \\\n \"BL1_shape\": [bl1_shape_0, bl1_shape_1, bl1_shape_2, \\\n bl1_shape_3], \\\n \"AL0_matrix\": [al0_matrix_0, al0_matrix_1, al0_matrix_2, \\\n al0_matrix_3, al0_matrix_4, al0_matrix_5], \\\n \"BL0_matrix\": [bl0_matrix_0, bl0_matrix_1, bl0_matrix_2, \\\n bl0_matrix_3, bl0_matrix_4, bl0_matrix_5], \\\n \"CL0_matrix\": [cl0_matrix_0, cl0_matrix_1, cl0_matrix_2, \\\n cl0_matrix_3, cl0_matrix_4, cl0_matrix_5], \\\n \"CUB_matrix\": [cub_matrix_0, cub_matrix_1, cub_matrix_2, \\\n cub_matrix_3, cub_matrix_4, cub_matrix_5], \\\n \"block_dim\": [batch_dim, n_dim, m_dim, group_dim], \\\n \"n_bef_batch_flag\": n_bef_batch_flag, \\\n \"n_bef_group_flag\": n_bef_group_flag, \\\n \"batch_bef_group_flag\": batch_bef_group_flag, \\\n \"A_overhead_opt_flag\": a_overhead_opt_flag, \\\n \"B_overhead_opt_flag\": b_overhead_opt_flag, \\\n \"AUB_channel_wise_flag\": aub_channel_wise_flag, \\\n \"BUB_channel_wise_flag\": bub_channel_wise_flag, \\\n \"CUB_channel_wise_flag\": cub_channel_wise_flag, \\\n \"manual_pingpong_buffer\": {\"AUB_pbuffer\": aub_pbuffer, \\\n \"BUB_pbuffer\": bub_pbuffer, \\\n \"AL1_pbuffer\": al1_pbuffer, \\\n \"BL1_pbuffer\": bl1_pbuffer, \\\n \"AL0_pbuffer\": al0_pbuffer, \\\n \"BL0_pbuffer\": bl0_pbuffer, \\\n \"CL0_pbuffer\": cl0_pbuffer, \\\n \"CUB_pbuffer\": cub_pbuffer, \\\n \"UBG_pbuffer\": ubg_pbuffer}}\n\n # AUB_shape support special value None\n if aub_shape_0 == 0:\n aub_shape_0 = None\n tiling[\"AUB_shape\"] = aub_shape_0\n\n # BUB_shape support special value None\n if bub_shape_0 == 0:\n bub_shape_0 = None\n tiling[\"BUB_shape\"] = bub_shape_0\n\n # AL1_shape support special value [] and None\n if al1_shape_0 == MAX_UINT32:\n al1_shape_0 = []\n tiling[\"AL1_shape\"] = al1_shape_0\n elif al1_shape_0 == 0:\n al1_shape_0 = None\n tiling[\"AL1_shape\"] = al1_shape_0\n\n # BL1_shape support special value [] and None\n if bl1_shape_0 == 0:\n bl1_shape_0 = None\n tiling[\"BL1_shape\"] = bl1_shape_0\n elif bl1_shape_0 == MAX_UINT32:\n bl1_shape_0 = []\n tiling[\"BL1_shape\"] = bl1_shape_0\n\n # BL0_matrix support special value []\n if bl0_matrix_0 == MAX_UINT16:\n tiling['BL0_matrix'] = []\n\n return tiling", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n 
values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def gt_roidb(self):\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} gt roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n \"\"\"\n gt_roidb = [self._load_kitti_annotation(index)\n for index in self.image_index]\n \"\"\"\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote gt roidb to {}'.format(cache_file)\n \"\"\"\n\n return gt_roidb", "def shapefileToRaster(in_shapefile, model_raster, out_dir, name_override=None, zone_field:str = None, dtype = None, *args, **kwargs) -> str:\n\t# correct variable names\n\tshapefile_path = in_shapefile\n\t# get out_path\n\tif name_override:\n\t\tout_path = os.path.join(out_dir,name_override)\n\telse:\n\t\tin_base = os.path.splitext(os.path.basename(in_shapefile))[0]\n\t\tmodel_ext = os.path.splitext(model_raster)[1]\n\t\tout_path= os.path.join(out_dir,in_base+\"_RASTER\"+model_ext)\n\t# read file\n\tshp = gpd.read_file(shapefile_path)\n\twith rasterio.open(model_raster,'r') as rst:\n\t\tmeta = 
rst.meta.copy()\n\n\t# this is where we create a generator of geom, value pairs to use in rasterizing\n\tif zone_field is not None:\n\t\tzone_vals = []\n\t\tfor i in range(len(shp)):\n\t\t\tzone_vals.append(shp.at[i,zone_field])\n\t\tzone_codes = [i for i, val in enumerate(zone_vals)]\n\t\tshapes = ((geom,val) for geom, val in zip(shp.geometry,zone_codes))\n\telse:\n\t\tshapes = ((geom,1) for geom in shp.geometry)\n\n\t# set data type\n\tif dtype:\n\t\tmeta.update(dtype=dtype)\n\telif zone_field:\n\t\tmeta.update(dtype=rasterio.dtypes.get_minimum_dtype(zone_codes))\n\telse:\n\t\tmeta.update(dtype=\"int16\")\n\n\ttry:\n\t\tout = rasterio.open(out_path, 'w+', **meta)\n\t# merra-2 files have a very high nodata value, beyond the range of int32.\n\t# This block catches the resulting ValueError and swaps in the minimum\n\t# allowable data type. Nice of rasterio to have such a function.\n\texcept ValueError:\n\t\tmeta.update(dtype=rasterio.dtypes.get_minimum_dtype([meta['nodata']]))\n\t\tout = rasterio.open(out_path, 'w+', **meta)\n\t\tout_arr = out.read(1)\n\t\tburned = features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=out.transform)\n\t\tout.write_band(1, burned)\n\tout.close()\n\n\treturn out_path", "def ll2en(shape1, /, *, debug = False, prefix = \".\", tol = 1.0e-10):\n\n\n # Import special modules ...\n try:\n import shapely\n import shapely.geometry\n except:\n raise Exception(\"\\\"shapely\\\" is not installed; run \\\"pip install --user Shapely\\\"\") from None\n\n # Import sub-functions ...\n from .ll2enSrc import ll2en_LinearRing\n from .ll2enSrc import ll2en_LineString\n from .ll2enSrc import ll2en_MultiLineString\n from .ll2enSrc import ll2en_MultiPoint\n from .ll2enSrc import ll2en_MultiPolygon\n from .ll2enSrc import ll2en_Point\n from .ll2enSrc import ll2en_Polygon\n\n # Check if it is a Point and return it transformed ...\n if isinstance(shape1, shapely.geometry.point.Point):\n return ll2en_Point(\n shape1,\n debug = debug,\n prefix = prefix,\n )\n\n # Check if it is a MultiPoint and return it transformed ...\n if isinstance(shape1, shapely.geometry.multipoint.MultiPoint):\n return ll2en_MultiPoint(\n shape1,\n debug = debug,\n prefix = prefix,\n )\n\n # Check if it is a LinearRing and return it transformed ...\n if isinstance(shape1, shapely.geometry.polygon.LinearRing):\n return ll2en_LinearRing(\n shape1,\n debug = debug,\n prefix = prefix,\n )\n\n # Check if it is a LineString and return it transformed ...\n if isinstance(shape1, shapely.geometry.linestring.LineString):\n return ll2en_LineString(\n shape1,\n debug = debug,\n prefix = prefix,\n )\n\n # Check if it is a MultiLineString and return it transformed ...\n if isinstance(shape1, shapely.geometry.multilinestring.MultiLineString):\n return ll2en_MultiLineString(\n shape1,\n debug = debug,\n prefix = prefix,\n )\n\n # Check if it is a Polygon and return it transformed ...\n if isinstance(shape1, shapely.geometry.polygon.Polygon):\n return ll2en_Polygon(\n shape1,\n debug = debug,\n prefix = prefix,\n tol = tol,\n )\n\n # Check if it is a MultiPolygon and return it transformed ...\n if isinstance(shape1, shapely.geometry.multipolygon.MultiPolygon):\n return ll2en_MultiPolygon(\n shape1,\n debug = debug,\n prefix = prefix,\n tol = tol,\n )\n\n # Crash ...\n raise TypeError(f\"\\\"shape1\\\" is an unexpected type ({repr(type(shape1))})\") from None", "def convert_alleles(self, alleles):\n raise NotImplementedError", "def decode_geometry(geom: str) -> BasePolygon:\n return 
shape(geobuf.decode(bytes.fromhex(geom))).buffer(0)", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def transfer_rast_to_vect(poly_cstr, lyrname, out_field, rast_cstr, srs, method, where=None, geom_field=\"geometry\",\n id_field=\"ogc_fid\", buffer_rad=0, restrict_to_tile=True):\n ds = gdal.Open(rast_cstr)\n georef = ds.GetGeoTransform()\n raster_array = ds.ReadAsArray()\n img_shape = (ds.RasterYSize, ds.RasterXSize)\n LOG.info(\"Done reading raster, shape is: %s\", img_shape)\n ctx = {\n 'lyrname': lyrname,\n 'out_field': out_field,\n 'where': where,\n 'geom_field': geom_field,\n 'id_field': id_field,\n \"srs\": srs\n }\n if buffer_rad:\n ctx['geom_field'] = 'st_buffer({}, {})'.format(geom_field, buffer_rad)\n layer_sql = \"\"\"select {geom_field}, {out_field}, {id_field} as the_id from {lyrname}\"\"\".format(**ctx)\n if restrict_to_tile:\n # Weird geoms could be skipped by this, so add as an optione\n layer_sql += \" where st_intersects({geom_field}, st_geomfromtext(WKT_EXT, {srs}))\".format(**ctx)\n\n if where:\n if restrict_to_tile:\n layer_sql += \" and \" + where\n else:\n layer_sql += \" where \" + where\n LOG.info(\"Layersql: %s\", layer_sql)\n extent = get_extent(georef, img_shape)\n LOG.info(\"Extent: %s\", extent)\n vec_ds, lyr = open(poly_cstr, layersql=layer_sql, extent=extent, open_for_update=True)\n mask = just_burn_layer(lyr, georef, img_shape, attr='the_id', dtype=np.int32, all_touched=False)\n LOG.info(\"Done burning - setting attr in %d features\", lyr.GetFeatureCount())\n LOG.debug(\"%s\", np.unique(mask))\n n_ok = 0\n for n, feat in enumerate(lyr):\n if n % 100 == 0:\n LOG.info(\"Done: %d, ok: %d\", n, n_ok)\n daid = feat['the_id']\n ctx['the_id'] = daid\n area = feat.GetGeometryRef().GetArea()\n I, J = np.where(mask == daid)\n # At least 30% covered if already set - todo: provide this as argument\n if I.size > 0 and (feat[out_field] is None or I.size * (georef[1] ** 2) > area * 0.3):\n is_ok, val = method(raster_array, I, J)\n if is_ok:\n n_ok += 1\n ctx['_value_'] = val\n updatesql = \"update {lyrname} set {out_field}={_value_} where {id_field}={the_id}\".format(**ctx)\n LOG.debug(\"Executing: %s\", updatesql)\n vec_ds.ExecuteSQL(updatesql)\n else:\n LOG.debug(\"Nothing found for %s - mask size: %s, valid: %s, area: %s\",\n daid, I.size, feat.GetGeometryRef().IsValid(), area)", "def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if isinstance(polygons, dict):\n out_polygons = {}\n for kk in polygons.keys():\n out_polygons[kk] = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons[kk]:\n if self.magnification:\n out_polygons[kk].append(points * mag + spc)\n 
else:\n out_polygons[kk].append(points + spc)\n if self.x_reflection:\n out_polygons[kk][-1] = out_polygons[kk][-1] * xrefl\n if self.rotation is not None:\n out_polygons[kk][-1] = (\n out_polygons[kk][-1] * ct\n + out_polygons[kk][-1][:, ::-1] * st\n )\n if self.origin is not None:\n out_polygons[kk][-1] = out_polygons[kk][-1] + orgn\n else:\n out_polygons = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons:\n if self.magnification is not None:\n out_polygons.append(points * mag + spc)\n else:\n out_polygons.append(points + spc)\n if self.x_reflection:\n out_polygons[-1] = out_polygons[-1] * xrefl\n if self.rotation is not None:\n out_polygons[-1] = (\n out_polygons[-1] * ct + out_polygons[-1][:, ::-1] * st\n )\n if self.origin is not None:\n out_polygons[-1] = out_polygons[-1] + orgn\n return out_polygons", "def elan_annotation_to_binary(annotation_data):\n label_dict = {}\n for annotation in annotation_data:\n label = 1 if annotation[2] == 'Engaged' else 0\n label_dict[\"{0},{1}\".format(annotation[0], annotation[1])] = label\n return label_dict", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def _preprocess_polygon(polygon):\n\n # Could catch ValueErrors for unsuitable inputs\n polygon = numpy.array(polygon)\n\n if len(polygon.shape) == 1:\n if len(polygon) % 2:\n raise ValueError('Number of values for polygon not divisible by two.'\n 'Coordinates need an x and y coordinate: '.format(polygon))\n polygon = polygon.reshape((-1, 2))\n\n if not len(polygon.shape) == 2 or polygon.shape[1] != 2:\n raise ValueError('polygon of wrong dimensions. It should be of shape. '\n 'Should be: (num_points, 2). Input: {}'.format(polygon))\n\n polygon = Polygon(numpy.array(polygon))\n\n # Mainly for self-intersection\n if not polygon.is_valid:\n raise ValueError('polygon is invalid, likely self-intersecting: {}'.\n format(polygon))\n\n return polygon", "def decode(encoded):\n #six degrees of precision in valhalla\n inv = 1.0 / 1e6;\n \n decoded = []\n previous = [0,0]\n i = 0\n #for each byte\n while i < len(encoded):\n #for each coord (lat, lon)\n ll = [0,0]\n for j in [0, 1]:\n shift = 0\n byte = 0x20\n #keep decoding bytes until you have this coord\n while byte >= 0x20:\n byte = ord(encoded[i]) - 63\n i += 1\n ll[j] |= (byte & 0x1f) << shift\n shift += 5\n #get the final value adding the previous offset and remember it for the next\n ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))\n previous[j] = ll[j]\n #scale by the precision and chop off long coords also flip the positions so\n #its the far more standard lon,lat instead of lat,lon\n decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])\n #hand back the list of coordinates\n return decoded", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n\n return m", "def array_to_raster(inTiff,array,outFile,dataType=gdal.GDT_Float32):\n \n inDataset = gdal.Open(inTiff, GA_ReadOnly)\n\n # You need to get those values like you did.\n x_pixels = inDataset.RasterXSize # number of pixels in x\n y_pixels = inDataset.RasterYSize # number of pixels in y\n PIXEL_SIZE = inDataset.GetGeoTransform()[1] # size of the pixel... 
\n x_min = inDataset.GetGeoTransform()[0] \n y_max = inDataset.GetGeoTransform()[3] # x_min & y_max are like the \"top left\" corner.\n wkt_projection = inDataset.GetProjectionRef()\n\n driver = gdal.GetDriverByName('GTiff')\n\n outDataset = driver.Create(\n outFile,\n x_pixels,\n y_pixels,\n 1,\n dataType, )\n\n outDataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE))\n\n outDataset.SetProjection(wkt_projection)\n outDataset.GetRasterBand(1).WriteArray(array)\n outDataset.FlushCache() # Write to disk.\n return outDataset, outDataset.GetRasterBand(1) #If you need to return, remenber to return also the dataset because the band don`t live without dataset.", "def array_to_poly(array):\n array = np.asarray(array)\n size = np.shape(array)\n if size[1] != 2:\n raise ValueError('Array is not the proper size.')\n return\n geom_array = np.append(array, [array[0]], axis = 0).tolist()\n geom = {\"type\": \"Polygon\", \"coordinates\": [geom_array]}\n poly = ogr.CreateGeometryFromJson(json.dumps(geom))\n return poly", "def from_gca_polygon(gca_obj, name_header, folder_name, folder_description='',\n altitude_mode=\"ctg\", style_to_use=None, poly_hidden=False,\n poly_follow_terrain=True, poly_extrude_to_ground=False, folder_collapsed=True):\n\n name_col = gca_obj.headers.index(name_header)\n\n polygons = list()\n\n for feature in gca_obj.features:\n name = feature[0][name_col]\n coords = feature[1]\n attrs = feature[0]\n headers = gca_obj.headers\n\n poly = kml_base.polygon(coords, name, headers, attrs, altitude_mode, style_to_use, poly_hidden, poly_follow_terrain, poly_extrude_to_ground)\n polygons.append(poly)\n\n poly_folder = kml_base.folder(folder_name, polygons, folder_description, folder_collapsed)\n\n return poly_folder", "def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry", "def localize_pixel(img_pos,camera : Camera,lidar : Lidar, scan : LaserScan) -> tuple:\n\n # ---OBJ--\n # x r1 /\\ r2 x\n # / \\\n #cam_ray / \\ average_ray\n # / \\\n # / \\\n # CAM ----> LID\n # \n\n # has to be 2d\n assert (img_pos.size == 2)\n\n cam_ray = camera.get_ray_through_image(img_pos)\n\n cam_ray_robot = camera.get_ray_in_robot_frame(cam_ray)\n\n cam_ray_lidar = lidar.get_ray_in_lidar_frame(cam_ray_robot)\n\n # flatten camera ray\n cam_ray_lidar_flat = lidar.get_ray_projection(cam_ray_lidar)\n\n # figure out which lidar rays correspond to the camera ray\n (ray1,ray2) = lidar.get_corresponding_lidar_rays(cam_ray_lidar_flat,scan)\n\n # if no rays found corresponding to scan data\n if ray1 is None or ray2 is None:\n return (None,None)\n\n # get the normal to the lidar hit\n intersection_normal = lidar.get_normal_to_plane(ray1,ray2)\n\n # get the distance data in horizontal 
plane, from lidar to object\n lidar_to_target_length = lidar.get_camera_ray_length(cam_ray_lidar_flat,ray1,ray2)\n\n # get the vector from camera to lidar (flattened to lidar plane)\n # i.e. origin of lidar frame in camera frame\n lidar_to_cam_vec = cam_ray_lidar_flat.origin\n cam_to_lidar_flat = Ray(lidar_to_cam_vec,-lidar_to_cam_vec,np.linalg.norm(lidar_to_cam_vec))\n \n # now workout the lidar to object ray, i.e. interpolate between ray1's and ray2's tips\n lidar_to_object_flat = interpolated_ray(ray1,ray2,0.5,lidar_to_target_length)\n\n # now finally workout the vector from camera to object (flattened)\n # this lets us access the true z-distance in the camera\n cam_to_object_flat = lidar_to_object_flat.get_vec() + cam_to_lidar_flat.get_vec()\n \n cam_to_object_flat_length = np.linalg.norm(cam_to_object_flat)\n\n # angle from horizontal on camera ray\n cam_ray_theta = angle_between(cam_ray_lidar.get_vec(),cam_to_object_flat)\n\n # length of original camera ray (knowing the length of its projection)\n # will fail if ray is pointing straight up or down\n cam_ray_robot.length = cam_to_object_flat_length / math.cos(cam_ray_theta)\n\n\n object_robot = cam_ray_robot.get_vec()+cam_ray_robot.origin\n\n return (object_robot,intersection_normal)", "def _add_roidb_from_annotations(self, entry):\n ann_ids = self._COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self._COCO.loadAnns(ann_ids)\n width = entry['width']\n height = entry['height']\n # valid objs\n # change the annotation boxes from 'xywh' to 'xyxy'\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width, x1 + np.max((0, obj['bbox'][2]))))\n y2 = np.min((height, y1 + np.max((0, obj['bbox'][3]))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_box'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n bboxes = np.zeros((num_objs, 4), dtype=entry['bboxes'].dtype)\n gt_classes = np.zeros((num_objs), dtype=entry['gt_classes'].dtype)\n\n coco_cat_id_to_class_ind = dict(\n [(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]])\n for ix, obj in enumerate(objs):\n bboxes[ix, :] = obj['clean_box']\n gt_classes[ix] = coco_cat_id_to_class_ind[obj['category_id']]\n entry['bboxes'] = np.append(entry['bboxes'], bboxes, axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)", "def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n height, width = rle[\"size\"]\n mask = np.empty(height * width, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity = not parity\n mask = mask.reshape(width, height)\n return mask.transpose() # Reshape to original shape", "def rltn2poly(osm_container, relation):\n cltn = []\n for m in relation.members:\n if m.type == Way:\n way = osm_container.get_osm_way_by_id(m.member_id)\n ln = way2line(osm_container, way)\n cltn.append(ln)\n merged_line = linemerge(cltn)\n return shpgeo.Polygon(merged_line)", "def array_to_raster(array, x, y):\n\n # Files info\n dst_filename = 'atiff.tiff'\n \n # Load matlab file\n front_dict = loadmat(infile,squeeze_me=True, struct_as_record=False)\n #print front_dict\n \n # You need to get those values like you did.\n x_pixels = len(x) # number of pixels in x\n y_pixels = len(y) # number of pixels in y\n PIXEL_SIZE = 1000 # size of the pixel...(in m?) 
\n x_min = np.min(x)\n y_max = np.min(y) # x_min & y_max are like the \"top left\" corner.\n wkt_projection = 'a projection in wkt that you got from other file'\n\n driver = gdal.GetDriverByName('GTiff')\n\n dataset = driver.Create(\n dst_filename,\n x_pixels,\n y_pixels,\n 1,\n gdal.GDT_Float32, )\n\n dataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE)) \n\n dataset.SetProjection(wkt_projection)\n dataset.GetRasterBand(1).WriteArray(array)\n dataset.FlushCache() # Write to disk.\n return dataset, dataset.GetRasterBand(1) #If you need to ret", "def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if isinstance(polygons, dict):\n for kk in polygons.keys():\n for ii in range(len(polygons[kk])):\n if self.x_reflection:\n polygons[kk][ii] = polygons[kk][ii] * xrefl\n if self.magnification is not None:\n polygons[kk][ii] = polygons[kk][ii] * mag\n if self.rotation is not None:\n polygons[kk][ii] = (\n polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st\n )\n if self.origin is not None:\n polygons[kk][ii] = polygons[kk][ii] + orgn\n else:\n for ii in range(len(polygons)):\n if self.x_reflection:\n polygons[ii] = polygons[ii] * xrefl\n if self.magnification is not None:\n polygons[ii] = polygons[ii] * mag\n if self.rotation is not None:\n polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st\n if self.origin is not None:\n polygons[ii] = polygons[ii] + orgn\n return polygons", "def annToMask(self, ann):\n rle = self.annToRLE(ann)\n m = maskUtils.decode(rle)\n return m", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def createROAnnotationBody(self, rouri, anngr):\n # Create annotation body\n (status, reason, bodyproxyuri, bodyuri) = self.aggregateResourceInt(rouri,\n ctype=\"application/rdf+xml\",\n body=anngr.serialize(format=\"xml\"))\n if status != 201:\n raise self.error(\"Error creating annotation body resource\",\n \"%03d %s (%s)\"%(status, reason, str(rouri)))\n return (status, reason, bodyuri)", "def prob_to_rles(x, cutoff=0.5):\n lab_img = label(x > cutoff)\n for i in range(1, lab_img.max() + 1):\n yield rle_encoding(lab_img == i)", "def convert_lane_boundaries_to_polygon(right_lane_bounds: np.ndarray, left_lane_bounds: np.ndarray) -> np.ndarray:\n assert right_lane_bounds.shape[0] == left_lane_bounds.shape[0]\n polygon = np.vstack([right_lane_bounds, left_lane_bounds[::-1]])\n polygon = np.vstack([polygon, right_lane_bounds[0]])\n return polygon", "def _deflate(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(\n np.frombuffer(imagecodecs.zlib_decode(tile), self.dtype)\n )\n self._unpredict(decoded)\n return np.rollaxis(decoded, 2, 0)", "def prepare_roidb(self):\n # for pascal_voc dataset\n roidb = self.gt_roidb()\n # data argument\n if self.cfg.if_flipped is True:\n print('append flipped images to training')\n roidb = self.append_flipped_images(roidb)\n\n sizes = [PIL.Image.open(self.image_path_at(i)).size\n for i in range(self.num_images)]\n\n for i in range(len(self.image_index)):\n roidb[i]['image'] = self.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n # need gt_overlaps 
as a dense array for argmax\n gt_overlaps = roidb[i]['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb[i]['max_classes'] = max_classes\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n self.roi_data = ROIGenerator(roidb, self.num_classes, self.cfg)\n return self.roi_data", "def decode_line(encoded):\n\n encoded_len = len(encoded)\n index = 0\n array = []\n lat = 0\n lng = 0\n\n while index < encoded_len:\n b = 0\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlat = ~(result >> 1) if result & 1 else result >> 1\n lat += dlat\n\n shift = 0\n result = 0\n\n while True:\n b = ord(encoded[index]) - 63\n index = index + 1\n result |= (b & 0x1f) << shift\n shift += 5\n if b < 0x20:\n break\n\n dlng = ~(result >> 1) if result & 1 else result >> 1\n lng += dlng\n\n array.append((lat * 1e-5, lng * 1e-5))\n\n return array", "def single_to_rgb(R_file,G_file,B_file): \n R=gdal_array.LoadFile(R_file)\n G=gdal_array.LoadFile(G_file)\n B=gdal_array.LoadFile(B_file)\n \n \n basename=os.path.basename(R_file)\n basename=basename[:3]+basename[4:]\n basename=basename[:-4]+\"_rgb_.tif\" \n \n\n file_path=os.path.dirname(os.path.abspath(R_file))+\"/\"+basename\n\n \n driver=osgeo.gdal.GetDriverByName(\"GTiff\")\n options = ['PHOTOMETRIC=RGB', 'PROFILE=GeoTIFF']\n print(file_path)\n print(np.max(np.array([R.shape[1],B.shape[1],G.shape[1]])), np.max(np.array([R.shape[0],B.shape[0],G.shape[0]])))\n Xlen=np.max(np.array([R.shape[1],B.shape[1],G.shape[1]]))\n Ylen= np.max(np.array([R.shape[0],B.shape[0],G.shape[0]]))\n dataset=driver.Create(file_path, int(Xlen),int(Ylen), 3, osgeo.gdal.GDT_UInt16, options) \n \n dataset.GetRasterBand(1).WriteArray(R)\n dataset.GetRasterBand(2).WriteArray(G)\n dataset.GetRasterBand(3).WriteArray(B)\n \n return file_path", "def getROAnnotation(self, annuri):\n (status, reason, headers, uri, anngr) = self.getROResourceRDF(annuri)\n return (status, reason, uri, anngr)", "def _svg_to_polygons(cdata):\n polygons = []\n groups = parse(cdata['image'])\n\n #iterating this dict in a strange way, need to refactor maybe\n for g in groups:\n for path in groups[g]:\n #this list comprehension gets the region coordinates\n points = ([(p[0] * cdata['width_ratio'] + cdata['start_pos'], p[1]\n * cdata['height_ratio']) for p in path[1]])\n\n polygons.append({_convert_state_to_region(g):points})\n\n return polygons", "def parse_annotation(annotation):\n\n root = xml_parser.parse(annotation).getroot()\n\n boxes = list()\n classnames = list()\n\n for obj in root.findall('object'):\n # xmin, ymin, xmax, ymax\n boxes.append([int(coord.text) for coord in obj.find('bndbox')])\n classnames.append(obj.find('name').text)\n\n return BoundBoxArray.from_boundboxes(boxes, classnames)", "def rle_encode(mask: np.ndarray):\n pixels = mask.T.flatten()\n # We need to allow for cases where there is a '1' at either end of the sequence.\n # We do this by padding with a zero at each end when needed.\n use_padding = False\n if pixels[0] or 
pixels[-1]:\n use_padding = True\n pixel_padded = np.zeros([len(pixels) + 2], dtype=pixels.dtype)\n pixel_padded[1:-1] = pixels\n pixels = pixel_padded\n rle = np.where(pixels[1:] != pixels[:-1])[0] + 2\n if use_padding:\n rle = rle - 1\n rle[1::2] = rle[1::2] - rle[:-1:2]\n return rle", "def _ul_lr(self):\n ulx, xres, xskew, uly, yskew, yres = self.geotransform\n # Index from the end - GDal usually orders bands-first:\n lrx = ulx + (self.array.shape[-2] * xres)\n lry = uly + (self.array.shape[-1] * yres)\n return ulx, uly, lrx, lry", "def extract_roi(reg_with_roi, ir_with_roi, reg_unmarked, ir_unmarked):\n roi_pos = np.where( reg_with_roi[:,:,2] == 255 ) \n \n x = list(roi_pos[0])\n y = list(roi_pos[1])\n \n #make a 2-d mask\n \n mask = np.zeros_like(reg_with_roi[:,:,1])\n mask[x,y] = 255\n \n _, cntrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[:2]\n\n reg_roi_list = []\n ir_roi_list = []\n \n #masks = []\n for cnt in cntrs:\n \n if reg_unmarked.ndim == 3:\n reg_unmarked = cv2.cvtColor(reg_unmarked, cv2.COLOR_BGR2GRAY)\n \n if ir_unmarked.ndim == 3:\n ir_unmarked = cv2.cvtColor(ir_unmarked, cv2.COLOR_BGR2GRAY)\n \n temp_mask = np.zeros_like(reg_unmarked)\n cv2.fillPoly(temp_mask, [cnt], (255,255,255))\n #masks.append(temp_mask)\n \n reg_roi = cv2.bitwise_and(temp_mask, reg_unmarked)\n ir_roi = cv2.bitwise_and(temp_mask, ir_unmarked)\n \n x, y, w, h = cv2.boundingRect(cnt)\n reg_roi = reg_roi[y:y+h, x:x+w]\n ir_roi = ir_roi[y:y+h, x:x+w]\n \n reg_roi_list.append(reg_roi)\n ir_roi_list.append(ir_roi)\n \n return reg_roi_list, ir_roi_list, cntrs", "def decode(cls, flattened):\n if len(flattened) < 8:\n return None\n t = binary_cast(flattened[:8], 'BBBBBBBB', 'd')[0]\n img = cls.decompress(flattened[8:])\n return t, img", "def convert_allele_to_ag(allele):\n\tallele_dict = {}\n\tallele = allele.rstrip(\"p P g G\")\n\tif allele in allele_to_ag_dict:\t\n\t\tag = allele_to_ag_dict[allele][0]\n\t\trule = allele_to_ag_dict[allele][1]\n\t\tbw4_6 = allele_to_ag_dict[allele][2]\n\t\n\telse:\n\t\tag = \"NA\"\n\tallele_dict[allele] = [ag, bw4_6]\n\treturn allele_dict", "def load_aal_atlas(atlas_dir, aal_basename=\"ROI_MNI_V4\", verbose=0):\n \n if not osp.isdir(atlas_dir):\n raise ValueError(\"%s not a directory\" % atlas_dir)\n\n aal_img_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.nii\"))[0]\n aal_labels_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.txt\"))[0]\n aalimg = nib.load(aal_img_name)\n data = aalimg.get_data()\n\n labels = []\n with open(aal_labels_name) as f:\n for line in f.read().splitlines():\n labels.append(line.split(\"\\t\"))\n \n # labels is now a list of [\"short name\", \"long name\", \"ROI_value\"]\n # [['FAG', 'Precentral_L', '2001'], ['FAD', 'Precentral_R', '2002'], ...]\n n_roi = len(labels)\n split_data = np.ndarray(aalimg.shape + (n_roi,), dtype=bool)\n split_data.fill(False)\n \n only_name_labels = []\n roi_size = []\n for idx,lab in enumerate(labels):\n only_name_labels.append(lab[1])\n split_data[...,idx] = data==int(lab[2])\n roi_size.append(split_data[...,idx].sum())\n \n return (split_data, aalimg.get_affine(), only_name_labels, roi_size)", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = 
transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def reconstructFromLaplacianPyramid(pyramid):\n \n nLevels = len(pyramid)\n out = pyramid[-1]\n if len(pyramid) == 1:\n return out\n\n useStack = False\n if pyramid[0].shape[0:2] == pyramid[-1].shape[0:2]:\n useStack = True\n\n dtp = out.dtype\n for i in range(nLevels-2,-1,-1):\n newSz = pyramid[i].shape[0:2]\n if useStack:\n up = out\n else:\n up = cv2.pyrUp(out,dstsize=(newSz[1],newSz[0]))\n if len(up.shape) < 3:\n up.shape += (1,)\n out = up + pyramid[i]\n out = out.astype(dtp)\n\n return out", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def ijk2ras(self,A):\n #productive #math #coordinate-space-conversion\n profprint()\n m=vtk.vtkMatrix4x4()\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n volumeNode.GetIJKToRASMatrix(m)\n imageData = volumeNode.GetImageData()\n ras=[0,0,0]\n k = vtk.vtkMatrix4x4()\n o = vtk.vtkMatrix4x4()\n k.SetElement(0,3,A[0])\n k.SetElement(1,3,A[1])\n k.SetElement(2,3,A[2])\n k.Multiply4x4(m,k,o)\n ras[0] = o.GetElement(0,3)\n ras[1] = o.GetElement(1,3)\n ras[2] = o.GetElement(2,3)\n return ras", "def __init__(self, raster_path):\n self.raster_path = raster_path\n dataset = gdal.Open(raster_path)\n self.width = dataset.RasterXSize\n self.height = dataset.RasterYSize\n # Gets the gdal geo transformation tuples\n # gdal_version = gdal.__version__\n self._txf = dataset.GetGeoTransform()\n # self._inv_txf = gdal.InvGeoTransform(self._txf)[1]\n self._inv_txf = gdal.InvGeoTransform(self._txf)\n # Gets the transformation from lat/lon to coordinates\n wgs84_ref = osr.SpatialReference()\n wgs84_ref.ImportFromEPSG(4326) # WGS84\n sref = osr.SpatialReference()\n sref.ImportFromWkt(dataset.GetProjection())\n if int(osgeo.__version__[0]) >= 3:\n # Output order has changed in osgeo v3\n wgs84_ref.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n sref.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n self._transform = osr.CoordinateTransformation(wgs84_ref, sref)\n inv_transform = osr.CoordinateTransformation(sref, wgs84_ref)\n # Find a loose lat/lon bounding box for quick check without\n # having to do full coordinates transformation\n corners = []\n for x in [0, self.width]:\n for y in [0, self.height]:\n corners.append([self._txf[0] + self._txf[1] * x + self._txf[2] * y,\n self._txf[3] + self._txf[4] * x + self._txf[5] * y])\n self.max_lat = -100\n self.min_lat = 100\n self.max_lon = -500\n self.min_lon = 500\n 
for c in corners:\n p = inv_transform.TransformPoint(c[0], c[1])\n if p[0] > self.max_lon:\n self.max_lon = p[0]\n if p[0] < self.min_lon:\n self.min_lon = p[0]\n if p[1] > self.max_lat:\n self.max_lat = p[1]\n if p[1] < self.min_lat:\n self.min_lat = p[1]\n dataset = None", "def polygonize(inRas, outPoly, outField=None, mask=True, band=1, filetype=\"ESRI Shapefile\"):\n\n options = []\n src_ds = gdal.Open(inRas)\n if src_ds is None:\n print('Unable to open %s' % inRas)\n sys.exit(1)\n\n try:\n srcband = src_ds.GetRasterBand(band)\n except RuntimeError as e:\n # for example, try GetRasterBand(10)\n print('Band ( %i ) not found')\n print(e)\n sys.exit(1)\n if mask == True:\n maskband = src_ds.GetRasterBand(band)\n options.append('-mask')\n else:\n mask = False\n maskband = None\n\n srs = osr.SpatialReference()\n srs.ImportFromWkt( src_ds.GetProjectionRef() )\n\n #\n # create output datasource\n #\n dst_layername = outPoly\n drv = ogr.GetDriverByName(filetype)\n dst_ds = drv.CreateDataSource(dst_layername)\n dst_layer = dst_ds.CreateLayer(dst_layername, srs=srs)\n\n if outField is None:\n dst_fieldname = 'DN'\n fd = ogr.FieldDefn(dst_fieldname, ogr.OFTInteger)\n dst_layer.CreateField(fd)\n dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)\n\n else:\n dst_field = dst_layer.GetLayerDefn().GetFieldIndex(outField)\n\n gdal.Polygonize(srcband, maskband, dst_layer, dst_field,\n callback=gdal.TermProgress)\n dst_ds.FlushCache()\n\n srcband = None\n src_ds = None\n dst_ds = None", "def _rle_decode_frame(\n data: bytes,\n rows: int,\n columns: int,\n nr_samples: int,\n nr_bits: int,\n segment_order: str = \">\",\n) -> bytearray:\n if nr_bits % 8:\n raise NotImplementedError(\n \"Unable to decode RLE encoded pixel data with a (0028,0100) \"\n f\"'Bits Allocated' value of {nr_bits}\"\n )\n\n # Parse the RLE Header\n offsets = _parse_rle_header(data[:64])\n nr_segments = len(offsets)\n\n # Check that the actual number of segments is as expected\n bytes_per_sample = nr_bits // 8\n if nr_segments != nr_samples * bytes_per_sample:\n raise ValueError(\n \"The number of RLE segments in the pixel data doesn't match the \"\n f\"expected amount ({nr_segments} vs. \"\n f\"{nr_samples * bytes_per_sample} segments)\"\n )\n\n # Ensure the last segment gets decoded\n offsets.append(len(data))\n\n # Preallocate with null bytes\n decoded = bytearray(rows * columns * nr_samples * bytes_per_sample)\n\n # Example:\n # RLE encoded data is ordered like this (for 16-bit, 3 sample):\n # Segment: 0 | 1 | 2 | 3 | 4 | 5\n # R MSB | R LSB | G MSB | G LSB | B MSB | B LSB\n # A segment contains only the MSB or LSB parts of all the sample pixels\n\n # To minimise the amount of array manipulation later, and to make things\n # faster we interleave each segment in a manner consistent with a planar\n # configuration of 1 (and use little endian byte ordering):\n # All red samples | All green samples | All blue\n # Pxl 1 Pxl 2 ... Pxl N | Pxl 1 Pxl 2 ... Pxl N | ...\n # LSB MSB LSB MSB ... LSB MSB | LSB MSB LSB MSB ... 
LSB MSB | ...\n\n # `stride` is the total number of bytes of each sample plane\n stride = bytes_per_sample * rows * columns\n for sample_number in range(nr_samples):\n le_gen = range(bytes_per_sample)\n byte_offsets = le_gen if segment_order == \"<\" else reversed(le_gen)\n for byte_offset in byte_offsets:\n # Decode the segment\n ii = sample_number * bytes_per_sample + byte_offset\n # ii is 1, 0, 3, 2, 5, 4 for the example above\n # This is where the segment order correction occurs\n segment = _rle_decode_segment(data[offsets[ii] : offsets[ii + 1]])\n\n # Check that the number of decoded bytes is correct\n actual_length = len(segment)\n if actual_length < rows * columns:\n raise ValueError(\n \"The amount of decoded RLE segment data doesn't match the \"\n f\"expected amount ({actual_length} vs. \"\n f\"{rows * columns} bytes)\"\n )\n elif actual_length != rows * columns:\n warnings.warn(\n \"The decoded RLE segment contains non-conformant padding \"\n f\"- {actual_length} vs. {rows * columns} bytes expected\"\n )\n\n if segment_order == \">\":\n byte_offset = bytes_per_sample - byte_offset - 1\n\n # For 100 pixel/plane, 32-bit, 3 sample data, `start` will be\n # 0, 1, 2, 3, 400, 401, 402, 403, 800, 801, 802, 803\n start = byte_offset + (sample_number * stride)\n decoded[start : start + stride : bytes_per_sample] = segment[\n : rows * columns\n ]\n\n return decoded", "def to_ir(self):", "def vec_rotate_g2r(al, be, ga, lon, lat, ugeo, vgeo, flag):\n\n # first get another coordinate\n if flag == 1:\n (rlon, rlat) = scalar_g2r(al, be, ga, lon, lat)\n else:\n rlon = lon\n rlat = lat\n (lon, lat) = scalar_r2g(al, be, ga, rlon, rlat)\n\n # then proceed...\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n\n #rotate_matrix = np.linalg.pinv(rotate_matrix) \n \n rlat = rlat * rad\n rlon = rlon * rad\n lat = lat * rad\n lon = lon * rad\n \n # vector in Cartesian \n txg = -vgeo * np.sin(lat) * np.cos(lon) - ugeo * np.sin(lon)\n tyg = -vgeo * np.sin(lat) * np.sin(lon) + ugeo * np.cos(lon)\n tzg = vgeo * np.cos(lat)\n\n # vector in rotated Cartesian\n txr = (\n rotate_matrix[0, 0] * txg\n + rotate_matrix[0, 1] * tyg\n + rotate_matrix[0, 2] * tzg\n )\n tyr = (\n rotate_matrix[1, 0] * txg\n + rotate_matrix[1, 1] * tyg\n + rotate_matrix[1, 2] * tzg\n )\n tzr = (\n rotate_matrix[2, 0] * txg\n + rotate_matrix[2, 1] * tyg\n + rotate_matrix[2, 2] * tzg\n )\n\n # vector in rotated coordinate\n v = (\n -np.sin(rlat) * np.cos(rlon) * txr\n - np.sin(rlat) * np.sin(rlon) * tyr\n + np.cos(rlat) * tzr\n )\n u = -np.sin(rlon) * txr + np.cos(rlon) * tyr\n\n u = np.array(u)\n v = np.array(v)\n\n return (u, v)", "def convert_bboxes_to_albumentations(shape, bboxes, source_format):\n return [convert_bbox_to_albumentations(shape, bbox, source_format) for bbox in bboxes]", "def test_array2wkt(self):\n\n # Arrays first\n A = numpy.arange(10)\n A = A.reshape(5, 2)\n\n wkt = array2wkt(A, 
geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then list\n wkt = array2wkt(A.tolist(), geom_type='POLYGON')\n assert wkt.startswith('POLYGON((')\n fields = wkt[9:-2].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])\n\n # Then a linestring example (note one less bracket)\n wkt = array2wkt(A, geom_type='LINESTRING')\n assert wkt.startswith('LINESTRING(')\n fields = wkt[11:-1].split(',')\n for i, field in enumerate(fields):\n x, y = field.split()\n assert numpy.allclose(A[i, :], [float(x), float(y)])", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def to_vrs_allele_ranges(\n self, ac: str, coordinate: str, alt_type: AltType, errors: List,\n ival: models.SequenceInterval) -> Optional[Dict]:\n if coordinate == \"c\":\n # TODO: Once we add support for ranges on c. coord\n return None\n if alt_type in {AltType.UNCERTAIN_DELETION, AltType.UNCERTAIN_DUPLICATION,\n AltType.DELETION_RANGE, AltType.DUPLICATION_RANGE}:\n sstate = models.LiteralSequenceExpression(\n sequence=\"\", type=\"LiteralSequenceExpression\"\n )\n else:\n errors.append(\"No state\")\n return None\n\n return self.vrs_allele(ac, ival, sstate, alt_type, errors)", "def read_geojson_polygon(geojson_polygon: str) -> List:\n geojson_polygon_dict = json.loads(geojson_polygon)\n polygon_coordinates = geojson_polygon_dict['features'][0]['geometry']['coordinates'][0]\n polygon = []\n for item in polygon_coordinates:\n polygon += [[item[1], item[0]]]\n return polygon", "def _parse_annotation(annotation_object: ET.Element) -> dt.Annotation:\n class_name = _find_text_value(annotation_object, \"name\")\n\n bndbox = _find_element(annotation_object, \"bndbox\")\n xmin = int(float(_find_text_value(bndbox, \"xmin\")))\n xmax = int(float(_find_text_value(bndbox, \"xmax\")))\n ymin = int(float(_find_text_value(bndbox, \"ymin\")))\n ymax = int(float(_find_text_value(bndbox, \"ymax\")))\n\n return dt.make_bounding_box(class_name, xmin, ymin, xmax - xmin, ymax - ymin)", "def _untangle_roi(da, sep):\n # start by extrating sources / targets names\n sources, targets = [], []\n for k in da['roi'].data:\n sources += [k.split(sep)[0]]\n targets += [k.split(sep)[1]]\n\n # merge sources and targets to force square matrix\n roi_tot = sources + targets\n _, u_idx = np.unique(roi_tot, return_index=True)\n roi_tot = np.array(roi_tot)[np.sort(u_idx)]\n\n return sources, targets, roi_tot", "def lidar_prep(lasbin, lidardir, spatial_shp, naip_folder, ndvi_thresh=0.4, aoi_shp=''):\n print('Unzipping LAZ files...')\n lidar_footptint(lasbin, lidardir, spatial_shp)\n print('Done')\n\n print('Generating inputs for LiDAR processing...')\n foot = lidardir + '\\\\las_footprint.shp'\n define_ground_polygon(foot, lidardir, naip_folder, ndvi_thresh, aoi_shp)\n print('Done')", "def get_map(img, vertices, labels, annotations, embeddings, scale, length, embedding_size):\n\n score_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros((int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n embedding_map = np.zeros((int(img.height * scale), int(img.width * scale), embedding_size), np.float32)\n\n index = np.arange(0, length, 
int(1 / scale))\n index_x, index_y = np.meshgrid(index, index)\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n if labels[i] == 0:\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) <= 0):\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) >= int(scale * img.height)):\n continue\n\n poly = np.around(scale * shrink_poly(vertice, coef=0.2).reshape((4, 2))).astype(np.int32) # scaled & shrink\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n min_x = int(min(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n max_x = int(max(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n min_y = int(min(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n max_y = int(max(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n embedding_map[min_y:max_y, min_x:max_x] = embeddings[annotations[i]]\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n cv2.fillPoly(score_map, polys, 1)\n\n return torch.Tensor(score_map).permute(2, 0, 1), torch.Tensor(geo_map).permute(2, 0, 1), \\\n torch.Tensor(ignored_map).permute(2, 0, 1), torch.Tensor(embedding_map).permute(2, 0, 1)", "def _mri_voxels_to_mri_scanner_ras(mri_landmarks, img_mgh):\n # Get landmarks in voxel space, using the T1 data\n vox2ras = img_mgh.header.get_vox2ras()\n ras_landmarks = apply_trans(vox2ras, mri_landmarks) # in scanner RAS\n return ras_landmarks", "def polygon_to_lonlat(polygon):\n poly_coords = polygon.split('((')[1].split('))')[0].split(',')\n coords = [(float(lon), float(lat)) for lon, lat in\n [co.split(' ') for co in poly_coords]]\n lon, lat = zip(*coords)\n return (lon, lat)" ]
[ "0.73268646", "0.730984", "0.72336936", "0.6892055", "0.6486158", "0.60719514", "0.58431333", "0.56835", "0.56191874", "0.5562281", "0.5433442", "0.54270655", "0.53969246", "0.53223056", "0.52950984", "0.5280965", "0.5236743", "0.52261084", "0.52257967", "0.5216231", "0.51639766", "0.51334745", "0.5103151", "0.5074191", "0.5058256", "0.5053184", "0.5038166", "0.5037942", "0.50364465", "0.50185734", "0.5013919", "0.5006016", "0.49760458", "0.49760458", "0.4938428", "0.49357876", "0.49350166", "0.49029922", "0.48912737", "0.48806402", "0.48806402", "0.48806402", "0.48806402", "0.4859424", "0.4845383", "0.48287162", "0.48131558", "0.47958705", "0.47941086", "0.47833368", "0.47767356", "0.47758672", "0.4775508", "0.47695413", "0.47438577", "0.47412974", "0.47234783", "0.4722051", "0.47153524", "0.46893057", "0.46822575", "0.46751732", "0.46683833", "0.46604237", "0.46551478", "0.4643548", "0.46341145", "0.4608628", "0.46077898", "0.45999697", "0.4588736", "0.45883065", "0.4585266", "0.4563414", "0.4563195", "0.45560542", "0.4553327", "0.45525157", "0.45507297", "0.45390713", "0.45276764", "0.45258942", "0.45203173", "0.4517557", "0.45103508", "0.45034823", "0.4502326", "0.44988927", "0.4497434", "0.44954297", "0.44899803", "0.44886622", "0.44886047", "0.44858506", "0.44801325", "0.44716504", "0.44686013", "0.44675967" ]
0.7293852
3
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
def annToMask(self, ann, height, width):
    rle = self.annToRLE(ann, height, width)
    m = maskUtils.decode(rle)
    return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annToMask(self, ann):\n rle = self.annToRLE(ann)\n m = maskUtils.decode(rle)\n return m", "def annToMask(ann, height, width):\n rle = annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n return m", "def annToMask(self, ann, height, width):\n rle = self.annToRLE(ann, height, width)\n m = maskUtils.decode(rle)\n\n return m", "def get_obj_mask(seg_ann_data, height, width):\r\n if isinstance(seg_ann_data, list):\r\n # polygon -- a single object might consist of multiple parts\r\n # we merge all parts into one mask rle code\r\n rles = maskUtils.frPyObjects(seg_ann_data, height, width)\r\n rle = maskUtils.merge(rles)\r\n elif isinstance(seg_ann_data['counts'], list):\r\n # uncompressed RLE\r\n rle = maskUtils.frPyObjects(seg_ann_data, height, width)\r\n else:\r\n rle = seg_ann_data\r\n\r\n m = maskUtils.decode(rle)\r\n\r\n return m", "def annToRLE(self, ann):\n t = self.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def convertAnnotationtoBinary(row, field):\n\n if str(row[field]).__contains__('NON'):\n return 0\n else:\n return 1", "def convert_rle_to_mask(self, rle, shape):\n\n # Initialize a zero canvas (one-dimensional here)\n mask = np.zeros(shape[0] * shape[1], dtype=np.uint8)\n\n # Split each run-length string\n s = rle.split()\n for i in range(len(s) // 2):\n start = int(s[2 * i]) - 1\n length = int(s[2 * i + 1])\n mask[start:start + length] = 1 # Assign this run to ones\n # Reshape to 2D\n img2 = mask.reshape(shape).T\n return img2", "def annToRLE(ann, height, width):\n segm = ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def elan_annotation_to_binary(annotation_data):\n label_dict = {}\n for annotation in annotation_data:\n label = 1 if annotation[2] == 'Engaged' else 0\n label_dict[\"{0},{1}\".format(annotation[0], annotation[1])] = label\n return label_dict", "def annToRLE(self, ann, height, width):\n segm = ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def annToRLE(self, ann, height, width):\n segm = ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def annToRLE(self, ann, height, width):\n segm = 
ann['segmentation']\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\n segmentation = [np.clip(i,0.0,i).tolist() for i in segmentation]\n polygons.append(segmentation)\n\n return polygons", "def _parse_ann_info(self, ann_info, with_mask=True):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n # Two formats are provided.\n # 1. mask: a binary map of the same size of the image.\n # 2. 
polys: each mask consists of one or several polys, each poly is a\n # list of float.\n if with_mask:\n gt_masks = []\n gt_mask_polys = []\n gt_poly_lens = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n #if ann['area'] <= 0 or w < 1 or h < 1:\n # continue\n if w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann['iscrowd']:\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n if with_mask:\n #create fake segmentation\n if \"segmentation\" not in ann:\n bbox = ann['bbox']\n ann['segmentation'] = [[bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1],\n bbox[0], bbox[1] + bbox[3], bbox[0] + bbox[2], bbox[1] + bbox[3]]]\n gt_masks.append(self.coco.annToMask(ann))\n # cv2.imshow('', gt_masks[-1]*255)\n # cv2.waitKey(0)\n # print(gt_masks[-1].shape)\n mask_polys = [\n p for p in ann['segmentation'] if len(p) >= 6\n ] # valid polygons have >= 3 points (6 coordinates)\n poly_lens = [len(p) for p in mask_polys]\n gt_mask_polys.append(mask_polys)\n gt_poly_lens.extend(poly_lens)\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)\n\n if with_mask:\n ann['masks'] = gt_masks\n # poly format is not used in the current implementation\n ann['mask_polys'] = gt_mask_polys\n ann['poly_lens'] = gt_poly_lens\n return ann", "def annToRLE(self, ann, height, width):\n segm = ann['segmentaion']\n # convert segm from [[x1, y1], [x2, y2]...] 
to [[x1, y1, x2, y2, ...]] \n segm = [np.ravel(segm)]\n if isinstance(segm, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n # rle\n rle = ann['segmentaion']\n return rle", "def preprocess_mask(y):\n y[y <= 255./2] = 0 # Needs to be in this order, otherwise 1 gets overwritten\n y[y > 255./2] = 1\n binary_mask = y.astype(np.uint8)\n\n return binary_mask", "def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask", "def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(\n binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour.ravel().tolist()\n # after padding and subtracting 1 we may\n # get -0.5 points in our segmentation\n segmentation = [0 if i < 0 else i for i in segmentation]\n polygons.append(segmentation)\n return polygons", "def annsToMask(anns, h, w):\n masks = []\n anns = sorted(anns, key=lambda x: x['area']) # Smaller items first, so they are not covered by overlapping segs\n for ann in anns:\n rle = annToRLE(ann, h, w)\n m = maskUtils.decode(rle)\n masks.append(m)\n return masks, anns", "def transform(self, results: Dict) -> Dict:\n # gt_polygons -> gt_masks\n if 'gt_polygons' in results.keys():\n gt_polygons = results.pop('gt_polygons')\n gt_polygons = [[gt_polygon] for gt_polygon in gt_polygons]\n gt_masks = PolygonMasks(gt_polygons, *results['img_shape'])\n\n if self.poly2mask:\n gt_masks = gt_masks.to_bitmap()\n\n results['gt_masks'] = gt_masks\n # gt_ignore_flags -> gt_ignored\n if 'gt_ignored' in results.keys():\n gt_ignored = results.pop('gt_ignored')\n results['gt_ignore_flags'] = gt_ignored\n\n return results", "def base64_2_mask(s):\n z = zlib.decompress(base64.b64decode(s))\n n = np.frombuffer(z, np.uint8)\n mask = cv.imdecode(n, cv.IMREAD_UNCHANGED)[:, :, 3].astype(bool)\n\n return mask", "def annToRLE(ann, h, w):\n segm = 
ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle", "def test_make_binary_and_fp(self):\n output_mask = boundary_mask(df=os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_inner.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def binary_mask_to_polygon(binary_mask, tolerance=0):\r\n\r\n polygons = []\r\n if isinstance(binary_mask, torch.Tensor):\r\n binary_mask = binary_mask.cpu().numpy()\r\n # pad mask to close contours of shapes which start and end at an edge\r\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\r\n contours = measure.find_contours(padded_binary_mask, 0.5)\r\n contours = np.subtract(contours, 1)\r\n for contour in contours:\r\n contour = close_contour(contour)\r\n contour = measure.approximate_polygon(contour, tolerance)\r\n if len(contour) < 3:\r\n continue\r\n contour = np.flip(contour, axis=1) # x, y\r\n polygon = np.maximum(contour, 0)\r\n #segmentation = contour.ravel().tolist()\r\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\r\n #segmentation = [0 if i < 0 else i for i in segmentation]\r\n polygons.append(polygon)\r\n\r\n return polygons", "def apply_mask(binary, mask_dict):\n result = \"\"\n for i, val in enumerate(binary):\n if mask_dict[i] in ('X', '1'):\n result += mask_dict[i]\n else:\n result += binary[i]\n return result", "def invisibleToRLE(ann,coco):\n t = coco.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann.get(\"invisible_mask\", None) \n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['invisible_mask']\n return rle", "def encode_binary_mask(mask: np.ndarray) -> t.Text:\n\n # check input mask --\n if mask.dtype != np.bool:\n raise ValueError(\n \"encode_binary_mask expects a binary mask, received dtype == %s\" %\n mask.dtype)\n\n mask = np.squeeze(mask)\n if len(mask.shape) != 2:\n raise ValueError(\n \"encode_binary_mask expects a 2d mask, received shape == %s\" %\n mask.shape)\n\n # convert input mask to expected COCO API input --\n mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1)\n mask_to_encode = mask_to_encode.astype(np.uint8)\n mask_to_encode = np.asfortranarray(mask_to_encode)\n\n # RLE encode mask --\n encoded_mask = coco_mask.encode(mask_to_encode)[0][\"counts\"]\n\n # compress and base64 encoding --\n binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)\n base64_str = base64.b64encode(binary_str)\n\n return base64_str", "def cocoseg_to_binary(seg, height, width):\n if type(seg) == list:\n rle = cocomask.frPyObjects(seg, height, width)\n rle = cocomask.merge(rle)\n mask = cocomask.decode([rle])\n elif type(seg['counts']) == list:\n rle = cocomask.frPyObjects(seg, height, width)\n mask = cocomask.decode([rle])\n else:\n rle = cocomask.merge(seg)\n mask = cocomask.decode([rle])\n assert mask.shape[2] == 1\n return 
mask[:, :, 0]", "def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n height, width = rle[\"size\"]\n mask = np.empty(height * width, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity = not parity\n mask = mask.reshape(width, height)\n return mask.transpose() # Reshape to original shape", "def rle_to_mask(rle_string, height, width):\n rows,cols = height,width\n rle_numbers = [int(num_string) for num_string in rle_string.split(' ')]\n rle_pairs = np.array(rle_numbers).reshape(-1,2)\n img = np.zeros(rows*cols,dtype=np.uint8)\n for index,length in rle_pairs:\n index -= 1\n img[index:index+length] = 255\n img = img.reshape(cols,rows)\n img = img.T\n return img", "def mask_and_fit(mask, binary_warped, flag):\n img = cv2.bitwise_and(binary_warped, binary_warped, mask=mask)\n x, y = extract_pixels(img)\n fit, foundFlag, confidence_index = check_and_fit(x, y, flag)\n return fit, foundFlag, confidence_index", "def training_mask_generation(img_pan_filename, input_geojson_filename, labels):\r\n with rasterio.open(img_pan_filename) as f:\r\n metadata_pan = f.profile\r\n img_pan = f.read(1)\r\n \r\n mask = np.zeros((img_pan.shape[0], img_pan.shape[1]))\r\n \r\n xres = metadata_pan['transform'][0]\r\n ulx = metadata_pan['transform'][2]\r\n yres = metadata_pan['transform'][4]\r\n uly = metadata_pan['transform'][5]\r\n \r\n lrx = ulx + (metadata_pan['width'] * xres) \r\n lry = uly - (metadata_pan['height'] * abs(yres))\r\n\r\n polygons = json.load(open(input_geojson_filename))\r\n \r\n for polygon in range(len(polygons['features'])):\r\n layer_num = labels.index(str(polygons['features'][polygon]['properties']['Label']))\r\n coords = np.array(polygons['features'][polygon]['geometry']['coordinates'][0][0]) \r\n xf = ((metadata_pan['width']) ** 2 / (metadata_pan['width'] + 1)) / (lrx - ulx)\r\n yf = ((metadata_pan['height']) ** 2 / (metadata_pan['height'] + 1)) / (lry - uly)\r\n coords[:, 1] = yf * (coords[:, 1] - uly)\r\n coords[:, 0] = xf * (coords[:, 0] - ulx) \r\n position = np.round(coords).astype(np.int32)\r\n cv2.fillConvexPoly(mask, position, layer_num)\r\n \r\n return np.expand_dims(mask, axis = 2)", "def seg_to_mask(seg, width=1.0, height=1.0):\n if type(seg) == list:\n rles = mask_utils.frPyObjects(seg, height, width)\n rle = mask_utils.merge(rles)\n elif type(seg['counts']) == list:\n rle = mask_utils.frPyObjects(seg, height, width)\n else:\n rle = seg\n return mask_utils.decode(rle)", "def get_binary_mask(op_weights):\n return op_weights[\"mask\"]", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def image_binary(image_convert):\n image_bit=cv2.bitwise_not(image_convert)\n _, image_bina = cv2.threshold(image_bit, 125, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n image_bina=image_bina/255.0\n return image_bina", "def visibleToRLE(ann,coco):\n t = coco.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['visible_mask']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['visible_mask']\n return rle", "def 
cfmask_to_mask(raster):\r\n mask = raster.ReadAsArray()\r\n # A value of 0 is clear of clouds/water. Make all other values = 1.\r\n mask[mask != 0] = 1\r\n\r\n # That's it, just return the result...\r\n return mask", "def rle_decode(rle, shape):\n rle = list(map(int, rle.split()))\n rle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n rle[:, 1] += rle[:, 0]\n rle -= 1\n mask = np.zeros([shape[0] * shape[1]], np.bool)\n for s, e in rle:\n assert 0 <= s < mask.shape[0]\n assert 1 <= e <= mask.shape[0], \"shape: {} s {} e {}\".format(shape, s, e)\n mask[s:e] = 1\n # Reshape and transpose\n mask = mask.reshape([shape[1], shape[0]]).T\n return mask", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def base64_2_mask(s: str) -> np.array:\n z = zlib.decompress(base64.b64decode(s))\n n = np.fromstring(z, np.uint8)\n \n return cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(bool) * 1", "def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5", "def load_inbreast_mask(mask_path, imshape=(4084, 3328)):\n\n def load_point(point_string):\n x, y = tuple([float(num) for num in point_string.strip('()').split(',')])\n return y, x\n\n mask_shape = np.transpose(imshape)\n mask = np.zeros(mask_shape)\n with open(mask_path, 'rb') as mask_file:\n plist_dict = plistlib.load(mask_file, fmt=plistlib.FMT_XML)['Images'][0]\n numRois = plist_dict['NumberOfROIs']\n rois = plist_dict['ROIs']\n assert len(rois) == numRois\n for roi in rois:\n numPoints = roi['NumberOfPoints']\n points = roi['Point_px']\n assert numPoints == len(points)\n points = [load_point(point) for point in points]\n if len(points) <= 2:\n for point in points:\n mask[int(point[0]), int(point[1])] = 1\n else:\n x, y = zip(*points)\n x, y = np.array(x), np.array(y)\n poly_x, poly_y = polygon(x, y, shape=mask_shape)\n mask[poly_x, poly_y] = 1\n return mask", "def _parse_ann_info(self, ann_info, with_mask=True):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_instance = []\n # Two formats are provided.\n # 1. mask: a binary map of the same size of the image.\n # 2. 
polys: each mask consists of one or several polys, each poly is a\n # list of float.\n if with_mask:\n gt_masks = []\n gt_mask_polys = []\n gt_poly_lens = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann['iscrowd']:\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_instance.append(ann['ins_id'])\n if with_mask:\n gt_masks.append(self.coco.annToMask(ann))\n mask_polys = [\n p for p in ann['segmentation'] if len(p) >= 6\n ] # valid polygons have >= 3 points (6 coordinates)\n poly_lens = [len(p) for p in mask_polys]\n gt_mask_polys.append(mask_polys)\n gt_poly_lens.extend(poly_lens)\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n gt_instance = np.array(gt_instance)\n\n ann = dict(bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n instance_id=gt_instance)\n #ann = dict(bboxes=gt_bboxes,\n # labels=gt_labels,\n # bboxes_ignore=gt_bboxes_ignore)\n\n if with_mask:\n ann['masks'] = gt_masks\n # poly format is not used in the current implementation\n ann['mask_polys'] = gt_mask_polys\n ann['poly_lens'] = gt_poly_lens\n return ann", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def whole_mask2mask(whole_mask, bbox):\n if len(whole_mask) != len(bbox):\n raise ValueError(\n 'The length of whole_mask and bbox should be the same')\n mask = list()\n for whole_m, bb in zip(whole_mask, bbox):\n bb = np.round(bb).astype(np.int32)\n mask.append(whole_m[bb[0]:bb[2], bb[1]:bb[3]])\n return mask", "def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)", "def get_mask_from_alignment(al):\n alignment_str = str(al).split(\"\\n\")[1]\n return alignment_str.replace(\"|\", \"+\")", "def annToRLE(self, ann, height, width):\n segm = ann[\"segmentation\"]\n if isinstance(segm, list):\n rles = maskUtils.frPyObjects(segm, height, width)\n rle = maskUtils.merge(rles)\n elif isinstance(segm[\"counts\"], list):\n rle = maskUtils.frPyObjects(segm, height, width)\n else:\n rle = ann[\"segmentation\"]\n return rle", "def get_regions_mask(self, input):", "def annotation(self, ann_type: str = None):\n if ann_type is None: ann_type = self.ann\n if ann_type != self.ann:\n warnings.warn('Please note that the annotation type is mismatch with the dataset setting!')\n\n if ann_type == 'label':\n xml_path = self.xml_path.format(id=self.id)\n ann = int(ET.parse(xml_path).find('defective').text)\n elif ann_type == 'bbox':\n xml_path = self.xml_path.format(id=self.id)\n objs = ET.parse(xml_path).findall('bbox')\n ann = []\n for ix, bbox in enumerate(objs):\n y1 = int(float(bbox.find('ymin').text))\n y2 = int(float(bbox.find('ymax').text))\n x1 = int(float(bbox.find('xmin').text))\n x2 = int(float(bbox.find('xmax').text))\n ann.append((y1, y2, x1, x2))\n elif ann_type == 'mask':\n mask_path = self.mask_path.format(id=self.id)\n if os.path.exists(mask_path):\n ann = Image.open(mask_path).convert('L')\n else:\n ann = Image.fromarray(np.zeros((512, 512), dtype=np.uint8)).convert('L')\n elif ann_type == 'none':\n ann = []\n else:\n raise NotImplementedError\n return ann", "def mask_to_bbox(mask, label=None):\n mask = mask if label is None else mask == label\n coords = np.where(mask)\n return coords_to_bbox(coords)", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n 
polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def parse_annotation_instance(annotation):\n\n text = annotation['utf8_string']\n language = annotation['language']\n legible = int(annotation['legibility'] == 'legible')\n\n mask = np.reshape(np.array(annotation['mask'], np.int32), (-1, 2))\n box = cv2.boxPoints(cv2.minAreaRect(mask))\n quadrilateral = [int(x) for x in box.reshape([-1])]\n\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': text,\n 'legible': legible,\n 'language': language,\n }\n }\n\n return word_annotation", "def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, 
and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]", "def preprocess_mask(mask):\n # Project values interval on [0.0; 1.0]\n if mask.max() > 1:\n mask[mask <= 127.5] = 0.\n mask[mask > 127.5] = 1.\n else:\n mask[mask <= .5] = 0.\n mask[mask > .5] = 1.\n return mask", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n annotations = image_info['annotations']\n instance_masks = []\n class_ids = []\n \n for annotation in annotations:\n class_id = annotation['category_id']\n mask = Image.new('1', (image_info['width'], image_info['height']))\n mask_draw = ImageDraw.ImageDraw(mask, '1')\n for segmentation in annotation['segmentation']:\n mask_draw.polygon(segmentation, fill=1)\n bool_array = np.array(mask) > 0\n instance_masks.append(bool_array)\n class_ids.append(class_id)\n\n mask = np.dstack(instance_masks)\n class_ids = np.array(class_ids, dtype=np.int32)\n \n return mask, class_ids", "def filter_binaries(bin_arr, remove_bordering=True, min_size=None, max_size=None, min_minor=None, max_minor=None,\r\n min_major=None, max_major=None):\r\n\r\n out = np.empty_like(bin_arr)\r\n for i, img in enumerate(bin_arr):\r\n if len(np.unique(img)) > 2: # Image is already labeled\r\n labeled = img\r\n else:\r\n labeled, n = mh.labeled.label(img)\r\n labeled, n = mh.labeled.filter_labeled(labeled, remove_bordering=remove_bordering, min_size=min_size, max_size=max_size)\r\n out[i] = (labeled > 0).astype(int) * labeled # Restore labels\r\n\r\n for j, img in enumerate(out):\r\n for i in np.unique(img)[1:]:\r\n selected_binary = (img == i).astype('int')\r\n min1, max1, min2, max2 = mh.bbox(selected_binary)\r\n selection = selected_binary[min1:max1, min2:max2]\r\n major, minor = mh.features.ellipse_axes(selection)\r\n\r\n if min_minor and minor < min_minor:\r\n img[img == i] = 0\r\n if max_minor and minor > max_minor:\r\n img[img == i] = 0\r\n if min_major and major < min_major:\r\n img[img == i] = 0\r\n if max_major and major > max_major:\r\n img[img == i] = 0\r\n\r\n return out", "def polygon_to_mask_array(dims: tuple, vertices: CoordinatePair) -> np.ndarray:\n\n poly_vertices = [\n (vertices.x_ul, vertices.y_ul),\n (vertices.x_ul, vertices.y_br),\n (vertices.x_br, vertices.y_br),\n (vertices.x_br, vertices.y_ul),\n ]\n\n img = PIL.Image.new(\"L\", dims, 0)\n PIL.ImageDraw.Draw(img).polygon(poly_vertices, outline=1, fill=1)\n return np.array(img).astype(bool)", "def get_label_masks(self, vocabs, language):\n fn = 'data/{}/conll09/train.txt'.format(language)\n lemma_to_preds = get_lemma_to_preds(fn)\n masks = np.zeros((vocabs['plemmas'].size, vocabs['predicates'].size),\n dtype=np.float32)\n for i, lemma in vocabs['plemmas'].idx_to_word.iteritems():\n if lemma in lemma_to_preds:\n preds = lemma_to_preds[lemma]\n idxs = vocabs['predicates'].encode_sequence(preds)\n for j in idxs:\n masks[i][j] = 1.0\n else:\n masks[i, :] = 1.0 # Allow everything\n return masks", "def regions_from_binary_mask(binary_mask: np.ndarray) -> List[Region]:\n\n thumb_labeled_regions = label(binary_mask)\n regions = [\n Region(index=i, area=rp.area, bbox=rp.bbox, center=rp.centroid)\n for i, rp in enumerate(regionprops(thumb_labeled_regions))\n ]\n return regions", "def create_binary_mask(self, type='negative'):\n if not self.thresh_map_name:\n return None\n mode = self.thresh_mode\n limits = self.thresh_limits\n map = self.map_scalars\n if mode=='mask lower':\n m = (map < limits[0]) if type=='negative' else (map 
>= limits[0])\n elif mode=='mask higher':\n m = (map > limits[1]) if type=='negative' else (map <= limits[1])\n elif mode=='mask between':\n m = ( (map > limits[0]) & (map < limits[1]) ) \\\n if type=='negative' \\\n else ( (map <= limits[0]) | (map >= limits[1]) )\n else: # mask outside\n m = ( (map < limits[0]) | (map > limits[1]) ) \\\n if type=='negative' \\\n else ( (map >= limits[0]) & (map <= limits[1]) )\n return m", "def rasterToBinary(input_raster,output_dir,name_override=None):\n\tif name_override:\n\t\tout_path = os.path.join(output_dir,name_override)\n\telse:\n\t\tin_base,in_ext = os.path.splitext(os.path.basename(input_raster))\n\t\tout_path = os.path.join(output_dir,in_base+\"_BINARY\"+in_ext)\n\n\tds = gdal.Open(input_raster,0)\n\tband = ds.GetRasterBand(1)\n\tnoData = band.GetNoDataValue()\n\tsrs = ds.GetProjection()\n\tgt = ds.GetGeoTransform()\n\tarr = BandReadAsArray(band)\n\tds = band = None # close dataset and band\n\tarr[arr != noData] = 1\n\tarr[arr == noData] = noData\n\trasterYSize, rasterXSize = arr.shape\n\tdriver = gdal.GetDriverByName('GTiff')\n\tdataset = driver.Create(out_path,rasterXSize,rasterYSize,1,gdal.GDT_Byte,['COMPRESS=DEFLATE'])\n\tdataset.GetRasterBand(1).WriteArray(arr)\n\tdataset.GetRasterBand(1).SetNoDataValue(noData)\n\tdataset.SetGeoTransform(gt)\n\tdataset.SetProjection(srs)\n\tdataset.FlushCache() # Write to disk\n\tdel dataset\n\n\treturn out_path", "def polys_to_mask(polygons, height, width):\n rle = mask_util.frPyObjects(polygons, height, width)\n mask = np.array(mask_util.decode(rle), dtype=np.float32)\n # Flatten in case polygons was a list\n mask = np.sum(mask, axis=2)\n mask = np.array(mask > 0, dtype=np.float32)\n return mask", "def extract_roi(reg_with_roi, ir_with_roi, reg_unmarked, ir_unmarked):\n roi_pos = np.where( reg_with_roi[:,:,2] == 255 ) \n \n x = list(roi_pos[0])\n y = list(roi_pos[1])\n \n #make a 2-d mask\n \n mask = np.zeros_like(reg_with_roi[:,:,1])\n mask[x,y] = 255\n \n _, cntrs = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[:2]\n\n reg_roi_list = []\n ir_roi_list = []\n \n #masks = []\n for cnt in cntrs:\n \n if reg_unmarked.ndim == 3:\n reg_unmarked = cv2.cvtColor(reg_unmarked, cv2.COLOR_BGR2GRAY)\n \n if ir_unmarked.ndim == 3:\n ir_unmarked = cv2.cvtColor(ir_unmarked, cv2.COLOR_BGR2GRAY)\n \n temp_mask = np.zeros_like(reg_unmarked)\n cv2.fillPoly(temp_mask, [cnt], (255,255,255))\n #masks.append(temp_mask)\n \n reg_roi = cv2.bitwise_and(temp_mask, reg_unmarked)\n ir_roi = cv2.bitwise_and(temp_mask, ir_unmarked)\n \n x, y, w, h = cv2.boundingRect(cnt)\n reg_roi = reg_roi[y:y+h, x:x+w]\n ir_roi = ir_roi[y:y+h, x:x+w]\n \n reg_roi_list.append(reg_roi)\n ir_roi_list.append(ir_roi)\n \n return reg_roi_list, ir_roi_list, cntrs", "def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def _make_masks(ilens, olens):\n # (B, T_in)\n in_masks = make_non_pad_mask(ilens)\n # (B, T_out)\n out_masks = make_non_pad_mask(olens)\n # (B, T_out, T_in)\n\n return paddle.logical_and(\n out_masks.unsqueeze(-1), in_masks.unsqueeze(-2))", "def interpretMask(mask,shape=None):\n maskout = None\n ## simplest case, an existing file\n if isinstance(mask,str) and os.path.isfile(mask):\n maskout = read(mask).astype(np.bool)\n ## mask string\n elif isinstance(mask,str) and not os.path.isfile(mask):\n if isinstance(shape,np.ndarray) : shape = shape.shape\n err_msg = ValueError(\"The string '%s' could not be interpreted as simple\\\n mask; it should be something like x>10\"%mask)\n assert shape is not None, \"_interpretMask needs a shape to interpret a string\"\n # interpret string\n maskout = np.zeros(shape,dtype=bool)\n match = g_mask_str.match(mask)\n if match is None: raise err_msg\n (axis,sign,lim) = match.groups()\n if axis not in (\"x\",\"y\"): raise err_msg\n if sign not in (\">\",\"<\"): raise err_msg\n lim = int(lim)\n idx = slice(lim,None) if sign == \">\" else slice(None,lim)\n if axis == 'y':\n maskout[idx,:] = True\n else:\n maskout[:,idx] = True\n elif isinstance(mask,np.ndarray):\n maskout = mask.astype(np.bool)\n elif mask is None:\n assert shape is not None, \"_interpretMask needs a shape to interpret a string\"\n maskout = np.zeros(shape,dtype=bool)\n else:\n maskout = None\n raise ValueError(\"Could not interpret %s as mask input\"%mask)\n \n if shape is not None and maskout.shape != shape:\n raise ValueError(\"The mask shape %s does not match the shape given as\\\n argument 
%s\"%(maskout.shape,shape))\n return maskout", "def getGeocodedMask ( self, image, intersects, out_path, fill=0 ):\n\n # construct filename\n _, extension = os.path.splitext( image.pathname )\n filename = os.path.basename( image.pathname )\n filename = filename.replace( extension, '-mask.tif' )\n\n # delete label pathname if exists\n label_pathname = os.path.join( out_path, filename )\n if not os.path.exists( out_path ):\n os.makedirs( out_path )\n\n # create mask with lossless compression\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create( label_pathname, \n image.cols, \n image.rows, \n 1, \n gdal.GDT_Byte, \n options=[ 'TILED=YES', 'COMPRESS=DEFLATE' ] )\n\n if ds is not None:\n\n # copy image geocoding to mask\n ds.SetProjection( image.projection )\n ds.SetGeoTransform( image.transform ) \n ds.GetRasterBand(1).Fill( fill )\n\n # add polygon(s) to new label image\n self.addPolygonsToMask( ds, intersects, 255-fill )\n ds = None\n\n return", "def load_mask(self, image_id):\n # TODO: build dict **self.image_info** in this form\n # self.image_info.keys() = ['objects', 'imgWidth', 'imgHeight']\n # objects is a list which contains label and polygon (same as annotations form below)\n # imgHeight and imgWidth are numbers (usually 1024, 2048)\n annotations = self.image_info[image_id][\"objects\"]\n # annotations form: [{'label': label, 'polygon': [[x1,y1], [x2,y2] ...]}, ...]\n height = self.image_info[image_id]['imgHeight']\n width = self.image_info[image_id]['imgWidth']\n instance_masks = []\n class_ids = []\n for ann in annotations:\n m = self.annToMask(ann, height, width)\n \n label_tmp = ann['label']\n if ( not label_tmp in list(self.class_labels.keys()) ) and label_tmp.endswith('group'):\n label_tmp = label_tmp[:-len('group')]\n \n class_id = self.class_labels[label_tmp]\n instance_masks.append(m)\n class_ids.append(class_id)\n \n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids)\n \n return mask, class_ids", "def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result", "def _add_roidb_from_annotations(self, entry):\n ann_ids = self._COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self._COCO.loadAnns(ann_ids)\n width = entry['width']\n height = entry['height']\n # valid objs\n # change the annotation boxes from 'xywh' to 'xyxy'\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width, x1 + np.max((0, obj['bbox'][2]))))\n y2 = np.min((height, y1 + np.max((0, obj['bbox'][3]))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_box'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n bboxes = np.zeros((num_objs, 4), dtype=entry['bboxes'].dtype)\n gt_classes = np.zeros((num_objs), dtype=entry['gt_classes'].dtype)\n\n coco_cat_id_to_class_ind = dict(\n [(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]])\n for ix, obj in enumerate(objs):\n bboxes[ix, :] = obj['clean_box']\n gt_classes[ix] = coco_cat_id_to_class_ind[obj['category_id']]\n entry['bboxes'] = np.append(entry['bboxes'], bboxes, axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)", "def binary_mask_fn(input_shape, masking, mask_state):\n if masking == 'channel':\n assert(input_shape[-1] % 2 == 0)\n sub_shape = 
np.copy(input_shape)\n sub_shape[-1] = sub_shape[-1] // 2\n binary_mask = np.concatenate([np.ones(sub_shape),\n np.zeros(sub_shape)],\n axis=-1)\n if masking == 'checkerboard':\n assert(len(input_shape) == 3)\n column_odd = [k % 2 for k in range(input_shape[-2])]\n column_even = [(k + 1) % 2 for k in range(input_shape[-2])]\n binary_mask = np.zeros((input_shape[-3], input_shape[-2]))\n for j in range(input_shape[-2]):\n if j % 2:\n binary_mask[:, j] = column_even\n else:\n binary_mask[:, j] = column_odd\n binary_mask = binary_mask.reshape(\n list(binary_mask.shape) + [1])\n binary_mask = np.repeat(binary_mask, input_shape[-1], axis=-1)\n\n binary_mask = binary_mask.reshape([1] + list(binary_mask.shape))\n if mask_state:\n return tf.cast(binary_mask, tf.float32)\n else:\n return tf.cast((1 - binary_mask), tf.float32)", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def mask(self, polygon: Union[Polygon, MultiPolygon], srs=\"EPSG:4326\") -> 'ImageCollection':\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': srs\n }\n }\n\n process_id = 'mask'\n\n args = {\n 'imagery': self.graph,\n 'mask_shape': geojson\n }\n\n return self.graph_add_process(process_id, args)", "def mask(self, polygon: Union[Polygon, MultiPolygon], srs=\"EPSG:4326\") -> 'ImageCollection':\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': srs\n }\n }\n\n process_id = 'mask'\n\n args = {\n 'imagery': self.graph,\n 'mask_shape': geojson\n }\n\n return self.graph_add_process(process_id, args)", "def annot_to_gifti(atlas):\n\n labels, ctab, names = nib.freesurfer.read_annot(atlas)\n\n darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',\n datatype='NIFTI_TYPE_INT32')\n labeltable = nib.gifti.GiftiLabelTable()\n for key, label in enumerate(names):\n (r, g, b), a = (ctab[key, :3] / 255), (1.0 if key != 0 else 0.0)\n glabel = nib.gifti.GiftiLabel(key, r, g, b, a)\n glabel.label = label.decode()\n labeltable.labels.append(glabel)\n\n return nib.GiftiImage(darrays=[darr], labeltable=labeltable)", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def get_polygons(annotation):\n print(f\"Loadding: {annotation}\")\n tree = ET.parse(annotation)\n root = tree.getroot()\n polygons = {}\n for obj in root.findall('object'):\n name = obj.find('name').text\n id_ = obj.find('id').text\n polygon = []\n for pt in obj.find('polygon').findall('pt'):\n polygon.append([pt.find('x').text, pt.find('y').text])\n if name in polygons:\n x_ref= int(polygons[name]['left'][0][0])\n x = int(polygon[0][0])\n if x > x_ref:\n polygons[name]['right'] = polygons[name]['left']\n id_ = 'left'\n else:\n id_ = 'right'\n else:\n polygons[name] = {}\n id_ = 'left'\n polygons[name][id_] = polygon\n for i in list(polygons.keys()):\n if not('right' in polygons[i]):\n print(i,' only has one polygon: ',polygons[i]['left'])\n y = input('Do you wish to label it as \\'right\\'? 
(leave empy if No): ')\n if (y):\n polygons[i]['right'] = polygons[i]['left']\n polygons[i].pop('left')\n return polygons", "def label_to_mask(labels):\n # get the image size\n h, w = labels.shape\n\n # build a color to label map\n idx_to_color = {}\n for label in class_info:\n idx_to_color[class_info[label].id] = class_info[label].color\n\n # generate label matrix\n mask = np.zeros((h, w, 3), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n id = labels[y, x]\n r, g, b = idx_to_color[id]\n mask[y, x] = np.array([b, g, r])\n\n return mask", "def convert_tcia_labels(mask, keep_all_label=False):\n \n mask[np.isin(mask, [14])] = 0 # Remove duodenum\n label = [1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1] # no right kidney\n\n if keep_all_label:\n label += [0,0]\n\n return mask, label", "def interpretMasks(masks,shape=None):\n if isinstance(masks,np.ndarray): return masks.astype(bool)\n # make iterable\n if not isinstance( masks, (list,tuple,np.ndarray) ): masks = (masks,)\n masks = [interpretMask(mask,shape) for mask in masks]\n # put them all together\n mask = masks[0]\n for m in masks[1:]:\n mask = np.logical_or(mask,m)\n return mask", "def rle_encode(mask: np.ndarray):\n pixels = mask.T.flatten()\n # We need to allow for cases where there is a '1' at either end of the sequence.\n # We do this by padding with a zero at each end when needed.\n use_padding = False\n if pixels[0] or pixels[-1]:\n use_padding = True\n pixel_padded = np.zeros([len(pixels) + 2], dtype=pixels.dtype)\n pixel_padded[1:-1] = pixels\n pixels = pixel_padded\n rle = np.where(pixels[1:] != pixels[:-1])[0] + 2\n if use_padding:\n rle = rle - 1\n rle[1::2] = rle[1::2] - rle[:-1:2]\n return rle", "def gobject2mask(uri, im):\n valid_gobject = set(['polygon','circle','square','ellipse','rectangle','gobject'])\n\n mask = np.zeros([])\n #add view deep to retrieve vertices\n\n uri_full = BQServer().prepare_url(uri, view='full')\n\n response = fetch_resource(uri_full)\n #need to check if value xml\n try:\n xml = etree.fromstring(response)\n except etree.XMLSyntaxError:\n raise FeatureExtractionError(None, 415, 'Url: %s, was not xml for gobject' % uri)\n\n #need to check if its a valid gobject\n if xml.tag not in valid_gobject:\n raise FeatureExtractionError(None, 415, 'Url: %s, Gobject tag: %s is not a valid gobject to make a mask' % (uri,xml.tag))\n\n if xml.tag in set(['gobject']):\n tag = xml.attrib.get('type')\n if tag is None:\n raise FeatureExtractionError(None, 415, 'Url: %s, Not an expected gobject' % uri)\n else:\n tag = xml.tag\n\n col = im.shape[0]\n row = im.shape[1]\n img = Image.new('L', (row, col), 0)\n\n if tag in set(['polygon']):\n contour = []\n for vertex in xml.xpath('vertex'):\n x = vertex.attrib.get('x')\n y = vertex.attrib.get('y')\n if x is None or y is None:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have x or y coordinate' % uri)\n contour.append((int(float(x)),int(float(y))))\n if len(contour)<2:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have enough vertices' % uri)\n# import pdb\n# pdb.set_trace()\n ImageDraw.Draw(img).polygon(contour, outline=255, fill=255)\n mask = np.array(img)\n\n if tag in set(['square']):\n #takes only the first 2 points\n contour = []\n for vertex in xml.xpath('vertex'):\n x = vertex.attrib.get('x')\n y = vertex.attrib.get('y')\n if x is None or y is None:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have x or y coordinate' % uri)\n contour.append((int(float(x)),int(float(y))))\n if len(contour)<2:\n 
raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have enough vertices' % uri)\n\n (x1,y1)= contour[0]\n (x2,y2)= contour[1]\n py = np.min([y1, y2])\n px = np.min([x1, x2])\n side = np.abs(x1-x2)\n contour = [(px,py),(px,py+side),(px+side,py+side),(px+side, py)]\n ImageDraw.Draw(img).polygon(contour, outline=255, fill=255)\n mask = np.array(img)\n\n\n if tag in set(['rectangle']):\n #takes only the first 2 points\n contour = []\n for vertex in xml.xpath('vertex'):\n x = vertex.attrib.get('x')\n y = vertex.attrib.get('y')\n if x is None or y is None:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have x or y coordinate' % uri)\n contour.append((int(float(x)),int(float(y))))\n if len(contour)<2:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have enough vertices' % uri)\n\n (x1,y1)= contour[0]\n (x2,y2)= contour[1]\n y_min = np.min([y1, y2])\n x_min = np.min([x1, x2])\n y_max = np.max([y1, y2])\n x_max = np.max([x1, x2])\n contour = [(x_min, y_min), (x_min, y_max), (x_max, y_max), (x_max, y_min)]\n ImageDraw.Draw(img).polygon(contour, outline=255, fill=255)\n mask = np.array(img)\n\n\n if tag in set(['circle','ellipse']): #ellipse isnt supported really, its just a circle also\n #takes only the first 2 points\n contour = []\n for vertex in xml.xpath('vertex'):\n x = vertex.attrib.get('x')\n y = vertex.attrib.get('y')\n if x is None or y is None:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have x or y coordinate' % uri)\n contour.append((int(float(x)),int(float(y))))\n if len(contour)<2:\n raise FeatureExtractionError(None, 415, 'Url: %s, gobject does not have enough vertices' % uri)\n\n (x1,y1) = contour[0]\n (x2,y2) = contour[1]\n\n r = np.sqrt(np.square(int(float(x2))-int(float(x1)))+\n np.square(int(float(y2))-int(float(y1))))\n bbox = (int(float(x1))-r, int(float(y1))-r, int(float(x1))+r, int(float(y1))+r)\n ImageDraw.Draw(img).ellipse(bbox, outline=255, fill=255)\n mask = np.array(img)\n return mask", "def merge_binary_rois(roi1, roi2):\n if (roi1.pixelSizeX != roi2.pixelSizeX) or (roi1.pixelSizeY != roi2.pixelSizeY):\n raise ValueError('The pixel sizes of the two WeightedROI objects should match!')\n\n if roi1.pixelSizeUnit != roi2.pixelSizeUnit:\n raise ValueError('The pixel size units of the two WeightedROI objects should match!')\n\n mask1 = roi1.get_binary_mask(); mask2 = roi2.get_binary_mask(); mask3 = np.logical_or(mask1, mask2).astype(np.int8)\n\n return ROI(mask3, pixelSize=[roi1.pixelSizeY, roi1.pixelSizeX], pixelSizeUnit=roi1.pixelSizeUnit)", "def _parse_ann_info(self, img_info, ann_info):\r\n gt_bboxes = []\r\n gt_labels = []\r\n gt_bboxes_ignore = []\r\n gt_masks_ann = []\r\n\r\n for i, ann in enumerate(ann_info):\r\n if ann.get('ignore', False):\r\n continue\r\n x1, y1, w, h = ann['bbox']\r\n if ann['area'] <= 0 or w < 1 or h < 1:\r\n continue\r\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\r\n if ann.get('iscrowd', False):\r\n gt_bboxes_ignore.append(bbox)\r\n else:\r\n gt_bboxes.append(bbox)\r\n gt_labels.append(self.cat2label[ann['category_id']])\r\n gt_masks_ann.append(ann['segmentation'])\r\n\r\n if gt_bboxes:\r\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\r\n gt_labels = np.array(gt_labels, dtype=np.int64)\r\n else:\r\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\r\n gt_labels = np.array([], dtype=np.int64)\r\n\r\n if gt_bboxes_ignore:\r\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\r\n else:\r\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\r\n\r\n 
seg_map = img_info['filename'].replace('jpg', 'png')\r\n\r\n ann = dict(\r\n bboxes=gt_bboxes,\r\n labels=gt_labels,\r\n bboxes_ignore=gt_bboxes_ignore,\r\n masks=gt_masks_ann,\r\n seg_map=seg_map)\r\n\r\n return ann", "def manual_mask(\n cls, array, mask, roe_corner=(1, 0), scans=None, exposure_info=None\n ):\n\n array = abstract_array.convert_array(array=array)\n\n array = frame_util.rotate_array_from_roe_corner(\n array=array, roe_corner=roe_corner\n )\n mask = frame_util.rotate_array_from_roe_corner(\n array=mask, roe_corner=roe_corner\n )\n\n array[mask == True] = 0.0\n\n scans = abstract_frame.Scans.rotated_from_roe_corner(\n roe_corner=roe_corner, shape_2d=array.shape, scans=scans\n )\n\n return Frame(\n array=array,\n mask=mask,\n original_roe_corner=roe_corner,\n scans=scans,\n exposure_info=exposure_info,\n )", "def binToGray(sigOrVal) -> RtlSignalBase:\n return (sigOrVal >> 1) ^ sigOrVal\n #width = sigOrVal._dtype.bit_length()\n #return Concat(sigOrVal[width - 1],\n # sigOrVal[width - 1:0] ^ sigOrVal[width:1])", "def transform(self, results: Dict) -> Dict:\n # gt_masks -> gt_polygons\n if 'gt_masks' in results.keys():\n gt_polygons = []\n gt_masks = results.pop('gt_masks')\n if len(gt_masks) > 0:\n # PolygonMasks\n if isinstance(gt_masks[0], PolygonMasks):\n gt_polygons = [mask[0] for mask in gt_masks.masks]\n # BitmapMasks\n else:\n polygons = []\n for mask in gt_masks.masks:\n contours, _ = bitmap_to_polygon(mask)\n polygons += [\n contour.reshape(-1) for contour in contours\n ]\n # filter invalid polygons\n gt_polygons = []\n for polygon in polygons:\n if len(polygon) < 6:\n continue\n gt_polygons.append(polygon)\n\n results['gt_polygons'] = gt_polygons\n # gt_ignore_flags -> gt_ignored\n if 'gt_ignore_flags' in results.keys():\n gt_ignored = results.pop('gt_ignore_flags')\n results['gt_ignored'] = gt_ignored\n\n return results", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann", "def __call__(self):\n return {self.idx: rle_encoding(self.mask)}", "def _coerce_builtin(cls, value: Any, annotation: Type) -> Any:\n # Special case: truthy value that was previously coerced to str ('0', ...)\n # Special case: JSON/YAML for a dict or list field\n if annotation in (bool, dict, list, tuple, set, frozenset) and isinstance(\n value, (str, bytes)\n ):\n processed, value = safe_eval(value)\n if annotation in (bytearray, bytes) and not isinstance(\n value, (bytes, bytearray)\n ):\n value = str(value).encode(cls.DEFAULT_BYTE_ENCODING)\n elif annotation is str and isinstance(value, (bytes, bytearray)):\n value = value.decode(cls.DEFAULT_BYTE_ENCODING)\n\n return annotation(value)", "def 
annotationToUpDown(annotations):\n left = np.nan\n right = np.nan\n bin = np.zeros((2, annotations.size))*np.nan\n for t in range(np.trim_zeros(annotations,'b').size):\n if annotations[t] == 1:\n left = 1\n if annotations[t] == 2:\n left = 0\n if annotations[t] == 3:\n right = 1\n if annotations[t] == 4:\n right = 0\n bin[0, t] = left\n bin[1, t] = right\n return bin" ]
[ "0.64569485", "0.6399482", "0.6213335", "0.605465", "0.6021838", "0.5922467", "0.58719254", "0.57193065", "0.57102245", "0.5670913", "0.5670913", "0.5670913", "0.5657575", "0.5603988", "0.5571696", "0.5569111", "0.55674887", "0.5559433", "0.5557507", "0.55520123", "0.55243313", "0.5521444", "0.5481345", "0.54620165", "0.5451042", "0.54342896", "0.54100174", "0.53982025", "0.53793675", "0.53790355", "0.5369107", "0.5367494", "0.53450245", "0.5340229", "0.53385186", "0.5330147", "0.5324299", "0.5316299", "0.5311873", "0.5309594", "0.53015757", "0.52952445", "0.5280451", "0.52803904", "0.5254192", "0.5246399", "0.5245632", "0.52165955", "0.521552", "0.5206686", "0.5187952", "0.5187364", "0.51826626", "0.51752114", "0.5155996", "0.5155996", "0.5136177", "0.5131918", "0.5122087", "0.50892633", "0.508284", "0.5071673", "0.5067373", "0.50567144", "0.5041499", "0.5030724", "0.50182754", "0.50167394", "0.50165164", "0.50080913", "0.5007762", "0.4995311", "0.4990807", "0.49864203", "0.49819678", "0.49759114", "0.49752352", "0.4974523", "0.4963483", "0.4963483", "0.4947926", "0.49264124", "0.4890773", "0.48894843", "0.48762074", "0.4864646", "0.48537487", "0.48488075", "0.4846184", "0.48440742", "0.48360527", "0.48333022", "0.48321638", "0.48243797", "0.4804897", "0.48033845", "0.48001927" ]
0.6287126
4
Normalize the feature matrix for training, storing the normalization mean and normalization min.
def normalize(self, feature_matrix):
    if len(feature_matrix) > 0:
        nmin = [1000000 for _ in range(len(feature_matrix[0]))]
        nsum = [0 for _ in range(len(feature_matrix[0]))]
        for r in feature_matrix:
            for c in range(len(r)):
                nmin[c] = min(nmin[c], r[c])
                nsum[c] += r[c]
        self.norm_mean = map(lambda x: float(x)/float(len(feature_matrix)), nsum)
        self.norm_min = nmin
        return self.apply_normal(feature_matrix)
    else:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def feature_normalize(X):\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n X_std[0, 0] = 1\n X_normalize = (X - X_mean) / X_std\n X_normalize[:, 0] = 1.0\n return X_normalize, X_mean, X_std", "def featureNormalize(X):\n\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n\n X_normalized = (X - mu) / sigma\n\n return X_normalized, mu, sigma", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def feature_normalization(train, test):\n (N,p) = np.shape(train)\n mins = np.amin(train,axis=0)\n maxs = np.amax(train,axis=0) + mins\n train = (train + mins)/maxs\n test = (test + mins)/maxs\n return train, test", "def featureNormalize(X):\n X_norm, mu, sigma = X,0,0\n # ====================== YOUR CODE HERE ======================\n # Instructions: First, for each feature dimension, compute the mean\n # of the feature and subtract it from the dataset,\n # storing the mean value in mu. Next, compute the\n # standard deviation of each feature and divide\n # each feature by it's standard deviation, storing\n # the standard deviation in sigma.\n #\n # Note that X is a matrix where each column is a\n # feature and each row is an example. 
You need\n # to perform the normalization separately for\n # each feature.\n #\n # Hint: You might find the 'mean' and 'std' functions useful.\n #\n \n # get the number of features in X and norm 1 col at a time \n \n for i in range(X.shape[1]):\n mu_i = np.mean(X[:,i]) #calculate mean for each col\n sigma_i = np.std(X[:,i]) #calculate sigma for each col\n X_norm[:,i] = ((X_norm[:,i] - mu_i) / sigma_i) #norm data in col\n \n # want to make an array of all values of mu and sigma\n if i == 0: \n mu = mu_i\n sigma = sigma_i\n else:\n mu = np.append(mu,mu_i)\n sigma = np.append(sigma,sigma_i)\n # ============================================================\n \n return X_norm, mu, sigma", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def feature_normalization(train, test):\n # TODO\n col_max = np.apply_along_axis(max, 0, train)\n col_min = np.apply_along_axis(min, 0, train)\n\n train_normalized = (train-col_min)/(col_max-col_min)\n test_normalized = (test-col_min)/(col_max-col_min)\n \n return train_normalized, test_normalized", "def feature_normalization(train, test):\n mins_of_features = np.amin(train, axis=0)\n maxs_of_features = np.amax(train, axis=0)\n range_of_features = maxs_of_features-mins_of_features\n range_of_features[range_of_features==0] = 1\n \n train_normalized = (train - mins_of_features)/range_of_features\n test_normalized = (test - mins_of_features)/range_of_features\n \n return (train_normalized, test_normalized)", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def normalize_feature_data(feature, X_train, X_valid, X_test):\r\n if type(feature) == list:\r\n for i, f in enumerate(feature):\r\n \r\n if f in __normalizing_features__:\r\n stds = np.std(X_train[i], axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train[i], axis=0)\r\n X_train[i] = (X_train[i]-means)/stds\r\n X_valid[i] = (X_valid[i]-means)/stds\r\n X_test[i] = (X_test[i]-means)/stds\r\n else:\r\n if feature in __normalizing_features__:\r\n stds = np.std(X_train, axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train, axis=0)\r\n X_train = (X_train-means)/stds\r\n X_valid = (X_valid-means)/stds\r\n X_test = (X_test-means)/stds\r\n \r\n return X_train, X_valid, X_test", "def normalize_feature(feature):\n # Compute mean and standard deviation, and return (x-mu)/std\n mean = np.mean(feature)\n std = np.std(feature)\n return np.divide(np.subtract(feature, mean), std)", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = 
du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def test_scale_features_mean_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.343346, -0.326225], [-0.656654, 0.663113], [0.313308, -0.336887]])\n\n # perform mean norm scaling on features and check answer\n cdata.scale_features('mean norm')\n self.assertTrue(allclose(cdata.data, answer))", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_features(dataframe):\n print(\"Normalizing feature matrix...\")\n tmp = dataframe\n feats = tmp.drop(columns=['year', 'county'])\n fmax = feats.max()\n fmin = feats.min() \n # normalize the feature matrix\n feats = (feats - fmin) / (fmax - fmin)\n tmp[feats.columns] = feats\n\n return tmp", "def normalize_data(self, df):\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n result[feature_name] = (\n df[feature_name] - min_value) / (max_value - min_value)\n return result", "def normalize_features(df):\n mu = df.mean()\n sigma = df.std()\n\n if (sigma == 0).any():\n raise Exception(\"One or more features had the same value for all samples, and thus could \" +\n \"not be normalized. 
Please do not include features with only a single value \" +\n \"in your model.\")\n df_normalized = (df - df.mean()) / df.std()\n\n return df_normalized, mu, sigma", "def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n self.epsilon, self.scale_after_norm)", "def normalize(raw_feature_list):\n result={}\n for feature in raw_feature_list:\n mean=statistics.mean(raw_feature_list[feature])\n stdev=statistics.pstdev(raw_feature_list[feature])\n print(feature,':','mean:',mean,'stdev:',stdev)\n for i in range(len(raw_feature_list[feature])):\n raw_feature_list[feature][i]-= mean\n raw_feature_list[feature][i]/= stdev", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(self, X):\n return X - X.mean()", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize_features(block, norm=1):\n for k in block:\n for b in block[k]:\n nrm = np.sqrt((block[k][b].reshape((block[k][b].shape[0],-1))**2).sum(axis=1).mean(axis=0))\n if nrm > 0.0:\n block[k][b] *= norm/nrm", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def normalize(self):\n self._data /= self.norm()", "def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]", "def normalizeData(pre_signal):\n\n if sp.any(sp.isnan(pre_signal)):\n 
print('there are NaNs in the data matrix, making them zero')\n\n pre_signal[sp.isnan(pre_signal)] = 0\n mean_vector = sp.mean(pre_signal, axis=0, keepdims=True)\n normed_signal = pre_signal - mean_vector\n norm_vector = sp.linalg.norm(normed_signal, axis=0, keepdims=True)\n norm_vector[norm_vector == 0] = 1e-116\n normed_signal = normed_signal / norm_vector\n\n return normed_signal, mean_vector, norm_vector", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def normalize_distancematrix(self):\n INF = self.distmat.max().max()\n df = self.distmat.fillna(INF)\n self.distmat = (df - df.min()) / (df.max() - df.min())", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def normalize(X, mu, sigma):\n return (X - mu) / sigma", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / 
self._data_std", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def compute_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # loop over all the complexes in the database\n first = True\n for comp in tqdm(self.index_complexes):\n fname, molname = comp[0], comp[1]\n\n # get the feature/target\n if self.mapfly:\n feature, target = self.map_one_molecule(\n fname, mol=molname)\n else:\n feature, target = self.load_one_molecule(\n fname, mol=molname)\n\n # create the norm isntances at the first passage\n if first:\n self.param_norm = {'features': [], 'targets': None}\n for ifeat in range(feature.shape[0]):\n self.param_norm['features'].append(NormParam())\n self.param_norm['targets'] = MinMaxParam()\n first = False\n\n # update the norm instances\n for ifeat, mat in enumerate(feature):\n self.param_norm['features'][ifeat].add(\n np.mean(mat), np.var(mat))\n self.param_norm['targets'].update(target)\n\n # process the std of the features and make array for fast access\n nfeat, ncomplex = len(\n self.param_norm['features']), len(self.index_complexes)\n self.feature_mean, self.feature_std = [], []\n for ifeat in range(nfeat):\n\n # process the std and check\n self.param_norm['features'][ifeat].process(ncomplex)\n if self.param_norm['features'][ifeat].std == 0:\n logger.info(' Final STD Null. Changed it to 1')\n self.param_norm['features'][ifeat].std = 1\n\n # store as array for fast access\n self.feature_mean.append(\n self.param_norm['features'][ifeat].mean)\n self.feature_std.append(\n self.param_norm['features'][ifeat].std)\n\n self.target_min = self.param_norm['targets'].min[0]\n self.target_max = self.param_norm['targets'].max[0]\n\n logger.info(f'{self.target_min}, {self.target_max}')", "def z_score_normalization(data):\n # import data\n\n features = data[:, 0:-1]\n target = data[:, -1]\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(features))\n print('\\n')\n print('Targets:\\n\\n' + str(target))\n\n # Data standarization\n standardized_data = preprocessing.scale(features)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n print('\\n\\n')\n\n new_data = np.append(standardized_data, target.reshape(target.shape[0], -1), axis=1)\n print('\\nNew array\\n')\n print(new_data)\n\n return new_data", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def specific_normalization(df):\n # Need to scale some vars. 
This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def standardize_data(f, train_mask):\n # standardize data\n f = f.todense()\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = f[:, np.squeeze(np.array(sigma > 0))]\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = (f - mu) / sigma\n return f", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def test_normalization_scalar(features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n sum_squares = 0\n for i in feature:\n sum_squares += i * i\n sum_squares_root = np.sqrt(sum_squares)\n if sum_squares == 0:\n normalized_features.append(feature)\n else:\n normalized_features.append([x / sum_squares_root for x in feature])\n return normalized_features", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def normalize_datasets(train, test):\n columns = train.columns[:-1]\n train[columns] = (train[columns] - train[columns].mean()) / (train[columns].max() - train[columns].min())\n test[columns] = (test[columns] - test[columns].mean()) / (test[columns].max() - test[columns].min())\n\n return train, test", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def 
normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def normalize_train_data(train_data, hter=False):\n feats = train_data[:, :-1]\n labels = train_data[:, -1]\n if hter:\n labels_pw = labels\n else:\n labels_pw = labels / feats[:, 1]\n scaler = pp.StandardScaler()\n scaler.fit(feats)\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1), scaler", "def normalize(array):\n\n # calculate the mean of array\n array_mean = numpy.mean(array)\n if _DEBUG:\n print \"Mean of gr is:\"\n print array_mean\n\n # divide all elements by the mean\n norm_list = []\n for item in array:\n norm_list.append(item/array_mean - 1)\n\n # return the result\n return norm_list", "def normalize_train_data(self, data_vector, clf_type = \"generic\"):\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tself.mean_per_dim_generic = []\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tself.std_per_dim_generic = []\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tself.mean_per_dim_specific = []\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tself.std_per_dim_specific = []\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\t\tper_dim = zip(*data_vector)\n\n\t\tfor i in xrange(len(per_dim)):\n\t\t\n\t\t\tm = np.float64(sum (per_dim[i]) / float (len(per_dim[i])))\n\t\t\ts = np.std(per_dim[i])\n\t\t\tper_dim[i] -= m\n\t\t\tif s>0:\n\t\t\t\tper_dim[i] /= s\n\t\t\n\t\t\tmean_per_dim.append(m)\n\t\t\tstd_per_dim.append(s)\n\t\n\t\tdata_vector = zip(*per_dim)\n\t\tfor i in xrange(len(data_vector)):\n\t\t\tdata_vector[i] = list(data_vector[i])\n\n\t\treturn data_vector", "def centerMeanAndNormalize(df):\n return minMax(df - df.mean(axis=0))", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. 
Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def _normal_abnormal(self):\n mp_pool()\n print(\"Classification of WCE to normal vs abnormal --> DONE\")", "def normalize(feats_Xy, trace_normalize=True, data=None):\n feats, labels = zip(*feats_Xy)\n if data is None:\n train_f = feats[0]\n m = train_f.mean(axis=0)\n s = np.maximum(train_f.std(axis=0), 1e-8)\n else:\n m = data['train_mean']\n s = data['train_std']\n feats = [(f - m) / s for f in feats]\n if trace_normalize:\n if data is None:\n train_f = feats[0]\n tr = np.maximum(np.sqrt((train_f**2).sum(axis=1)).mean(), 1e-8)\n else:\n tr = data['trace']\n else:\n tr = None\n if trace_normalize:\n feats = [f / tr for f in feats]\n feats_Xy = tuple(zip(feats,labels))\n return feats_Xy + (m, s, tr)", "def _normalize(M):\r\n\r\n minVal = np.min(M)\r\n maxVal = np.max(M)\r\n\r\n Mn = M - minVal;\r\n\r\n if maxVal == minVal:\r\n return np.zeros(M.shape);\r\n else:\r\n return Mn / (maxVal-minVal)", "def standardize_data(Xtrain,Xtest):\n \n ### Import modulates\n import numpy as np\n\n Xmean = np.nanmean(Xtrain,axis=0)\n Xstd = np.nanstd(Xtrain,axis=0)\n Xtest = (Xtest - Xmean)/Xstd\n Xtrain = (Xtrain - Xmean)/Xstd\n \n stdVals = (Xmean,Xstd)\n stdVals = stdVals[:]\n \n return Xtrain,Xtest,stdVals", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(self, attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def normalize_matrix(mat):\n return (mat + abs(mat.min())) / (mat.max() - mat.min())", "def normalize(self, x, train=True):\n if train:\n mean, variance = tf.nn.moments(x, [0])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(tf.mul(variance, self.keep_prob_prior))\n with tf.control_dependencies([assign_mean, assign_variance]):\n act_bn = tf.mul((x - mean), tf.rsqrt(variance + self.epsilon), name=\"act_bn\")\n return tf.add(tf.mul(act_bn, self.gamma), self.beta)\n \n else:\n mean = self.ewma_trainer.average(self.mean) or self.epsilon\n variance = self.ewma_trainer.average(self.variance) or self.epsilon\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n act_bn = tf.mul((x-mean), tf.rsqrt(variance + self.epsilon), name=\"act1_bn\")\n return tf.add(tf.mul(act_bn, local_gamma), local_beta)", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def normalize(feats, train_nid, dtype=np.float32):\n train_feats = feats[train_nid]\n scaler = preprocessing.StandardScaler()\n scaler.fit(train_feats)\n feats = scaler.transform(feats)\n return feats.astype(dtype)", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ..." ]
[ "0.78171694", "0.77575696", "0.77201444", "0.77085596", "0.7680281", "0.7661087", "0.76458883", "0.762358", "0.74592733", "0.740253", "0.7402252", "0.7352509", "0.7277138", "0.7251356", "0.7241375", "0.7223175", "0.7204997", "0.7183116", "0.7146349", "0.7146349", "0.7142496", "0.7126844", "0.71085644", "0.71001536", "0.70972645", "0.7065237", "0.70624", "0.7059337", "0.7008126", "0.6997142", "0.6997142", "0.697558", "0.6960512", "0.694874", "0.6904074", "0.6876741", "0.6870595", "0.6834514", "0.68092155", "0.676642", "0.67636687", "0.67470455", "0.6718951", "0.6706209", "0.6674813", "0.66631716", "0.66407204", "0.6636947", "0.6620699", "0.661997", "0.6618102", "0.66180676", "0.6613409", "0.6613409", "0.65989745", "0.6575462", "0.6575462", "0.6561585", "0.655545", "0.65459865", "0.65381134", "0.6512271", "0.650054", "0.64978033", "0.6495863", "0.649552", "0.6489514", "0.64561886", "0.6451534", "0.6444781", "0.6444433", "0.6443603", "0.64426", "0.6433565", "0.6429618", "0.6428123", "0.6421351", "0.6417533", "0.64161783", "0.6415993", "0.6412119", "0.64111245", "0.64025074", "0.63986576", "0.63957596", "0.6393542", "0.63911676", "0.6390796", "0.63745165", "0.6373422", "0.6358712", "0.63553417", "0.63492006", "0.6340722", "0.6330186", "0.63263094", "0.63254887", "0.63223374", "0.6319683", "0.6307398" ]
0.7496143
8
May not fit the message content limit
def fancy_traceback(exc: Exception) -> str:
    text = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    return f"```py\n{text[-4086:]}\n```"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_long_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=5)\n\n self.assertLessEqual(len(message_displayed), 5)\n self.assertEqual(message_displayed, \"fe...\")", "def limit_size(msg, max_size, trunc_symbol=\"...\"):\n if len(msg) > max_size:\n msg = msg[:max_size - len(trunc_symbol)] + trunc_symbol\n return msg", "def __ensure_error_message_restriction_compliance(full_message):\n message_size_limit = Constants.STATUS_ERROR_MSG_SIZE_LIMIT_IN_CHARACTERS\n formatted_message = re.sub(r\"\\s+\", \" \", str(full_message))\n return formatted_message[:message_size_limit-3] + '...' if len(formatted_message) > message_size_limit else formatted_message", "def test_small_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=50)\n\n self.assertLessEqual(len(message_displayed), 50)\n self.assertEqual(message_displayed, \"few characters\")", "def test_sufficientWidth(self):\n msg = \"barbazbo\"\n maxLen = len(\"PRIVMSG foo :{}\".format(msg)) + 2\n self.client.msg(\"foo\", msg, maxLen)\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :{}\".format(msg)])\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen - 1)\n self.assertEqual(2, len(self.client.lines))\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen + 1)\n self.assertEqual(1, len(self.client.lines))", "def __message_content__(self) -> MessageContent:", "def step_impl_the_msg_to_is_set_too_long(context):\n context.bdd_helper.message_data[\"msg_to\"][0] = \"x\" * (constants.MAX_TO_LEN + 1)", "def valid_message_length(self):\n if self.message_len() > 0:\n if self.message_len() <= self.max_msg_length:\n return True\n return False", "def massage_addcontent(self) -> str:\n self.message_str += self.content if len(self.content) <= self.notification_message_max_len else self.content[\n :self.notification_message_max_len] + \"...\"", "def massage_addcontent(self) -> str:\n self.message_str += self.content if len(self.content) <= self.notification_message_max_len else self.content[\n :self.notification_message_max_len] + \"...\"", "def length(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n max_length = config.max_message_length(message_data['topic'])\n return (len(message_data['message']) == 0 or max_length < len(message_data['message'])) and \\\n len(message_data['message']) <= 1600", "def large_text(self):\n pass", "def getMessageCount(self):\n return 9", "def test_body_len_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[(self.post_request, \"x\" * 10)],\n )\n self.check_response(\n client, status_code=\"200\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def test_long_snippet(self):\n message = Message(clean_text=''.join('x' for _ in range(0, 200)))\n self.assertEqual(\n message.long_snippet,\n 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n 'xxxxxxxx'\n )", "def split_message(message, max_length):\n ms = []\n while len(message) > max_length:\n ms.append(message[:max_length])\n message = message[max_length:]\n ms.append(message)\n return ms", "def test_body_len_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[\n f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nContent-Length: 10\\r\\n\\r\\n{'x' * 10}\"\n ],\n )\n self.check_response(\n client, 
status_code=\"200\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def test_too_short_limit(self):\n message = \"few characters\"\n\n with self.assertRaises(AssertionError) as error:\n truncate_message(message, limit=2)\n\n self.assertEqual(str(error.exception), \"Limit too short\")", "def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def test_splitLongMessagesWithOverride(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 3, length=irc.MAX_COMMAND_LENGTH // 2)", "def email_rfc2822_compliance(message, max_line_length=900):\n returnmsg = \"\"\n while len(message) > 0:\n returnmsg = returnmsg + message[:max_line_length] + \"\\r\\n\"\n message = message[max_line_length:]\n\n return returnmsg", "def test_body_len_without_reaching_the_limit_zero_len(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[self.post_request],\n disable_hshc=True,\n )\n self.check_response(\n client, status_code=\"200\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def message_box_size_limit(self) -> ConfigNodePropertyInteger:\n return self._message_box_size_limit", "def test_splitLongMessagesWithDefault(self):\n message = \"o\" * (irc.MAX_COMMAND_LENGTH - 2)\n self.assertLongMessageSplitting(message, 2)", "def _get_octet_counted_message(self):\n if not self._length:\n pos = self._buffer.find(b' ')\n if pos != -1:\n try:\n self._length = int(self._buffer[0:pos])\n del self._buffer[0:pos+1]\n except ValueError:\n # Handle as random chunk of log noncompliant message that happens to start with a digit\n return self._get_non_transparent_framed_message()\n else:\n return\n\n if self._length > constant.MAX_MESSAGE_LENGTH:\n self._discard = self._length - constant.MAX_MESSAGE_LENGTH\n self._length = constant.MAX_MESSAGE_LENGTH\n\n if len(self._buffer) >= self._length:\n message = self._buffer[:self._length]\n del self._buffer[:self._length]\n self._length = 0\n return message\n else:\n return", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.MessageDataset())) == 138737", "def _update_length(self, field, tag_id, value):\n # pylint: disable=unused-argument\n if tag_id not in {8, 9, 10}:\n self._message_length += len(field) + 1\n if self._message_length >= self._max_length:\n raise FIXLengthTooLongError(\n f'message too long: {self._message_length}')", "def SendBufferSize(self) -> int:", "def SendBufferSize(self) -> int:", "def test_field_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 200}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def handle_request(self):\n try:\n data = self.sock.recv(1024)\n except socket.error as e: # ...,e:\n if e == 10040:\n print('Message too long, ignoring.')\n return\n raise\n 
self.append_to_seq(parse_packet(data))", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def message_count(self):\n pass", "def test_invalidMaxLength(self):\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 0)\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 3)", "def get_sendable_message(text, max_length=400):\n unicode_max_length = max_length\n excess = ''\n\n while len(text.encode('utf-8')) > max_length:\n last_space = text.rfind(' ', 0, unicode_max_length)\n if last_space == -1:\n # No last space, just split where it is possible\n excess = text[unicode_max_length:] + excess\n text = text[:unicode_max_length]\n # Decrease max length for the unicode string\n unicode_max_length = unicode_max_length - 1\n else:\n # Split at the last best space found\n excess = text[last_space:] + excess\n text = text[:last_space]\n\n return text, excess.lstrip()", "def test_body_len_without_reaching_the_limit_zero_len(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nContent-Length: 0\\r\\n\\r\\n\"],\n )\n self.check_response(\n client, status_code=\"200\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def test_lineBreakOnWordBoundaries(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \" \" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )", "def restrict_text(text):\n\n if len(text) > 400:\n\n return text[:400] + ' ...'\n \n return text", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "def horde_message(self, message):", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def test_maxMsgSize():\n nt.assert_equal(CisInterface.maxMsgSize(), CIS_MSG_MAX)", "def getLength(msg):\n return len(msg)", "def test_character_count(self):\r\n from chatterbot_corpus.corpus import DIALOG_MAXIMUM_CHARACTER_LENGTH\r\n\r\n corpora = corpus.load_corpus('chatterbot.corpus')\r\n\r\n for conversations in corpora:\r\n for conversation in conversations:\r\n for statement in conversation:\r\n if len(statement) > DIALOG_MAXIMUM_CHARACTER_LENGTH:\r\n self.fail(\r\n u'\"{}\" cannot be longer than {} characters'.format(\r\n statement,\r\n DIALOG_MAXIMUM_CHARACTER_LENGTH\r\n )\r\n )", "def checkSize(self, package):\n\n if len(package) > self.MAX_LENGTH:\n package = pickle.dumps(Failure(protocols.MessageSizeError()),2)\n return package", "def _read_amt(self, byte_count):\n full_msg = bytearray()\n while len(full_msg) < byte_count:\n block = self.request.recv(byte_count - len(full_msg))\n full_msg.extend(block)\n return full_msg", "def test_body_len(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[(self.post_request, \"x\" * 20)],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def test_field_without_reaching_the_limit(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 200)]],\n disable_hshc=True,\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress 
)?field length exceeded for\",\n )", "def message_box_size_limit(self, message_box_size_limit: ConfigNodePropertyInteger):\n\n self._message_box_size_limit = message_box_size_limit", "def recv (self, max_size=None):\n if max_size and max_size < len(self.buffer):\n msg = self.buffer[0:max_size]\n self.buffer = self.buffer[max_size:]\n else:\n msg = self.buffer\n self.buffer = \"\"\n return msg", "def set_max_message_size(self, size: int = 1_073_741_824) -> None:\n self.set_db_conf(\"proto-max-bulk-len\", str(size))", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def check_item(self, item: PoItem):\n for line in item.msgstr:\n if len(line) > MAX_LINE_LENGTH - 2: # 2 is for \"\"\n item.add_error(\n self.name,\n f\"Line too long ({len(line) + 2} > \"\n f\"{MAX_LINE_LENGTH}): {line}\",\n )", "def chunkify(msg):\n return [\"%s %s\" % (i, msg[i*158 : (i+1)*158]) for i in range(len(msg)/158 + 1)]", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def test_field_without_reaching_the_limit_2(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 292}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def __len__(self):\n return len(self.sent)", "def message_body_html(self):\n ...", "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "def message_body_messenger(self) -> str:\n ...", "def test_attachments_max_size(self):\n # receive an email\n data = mailgun_payload\n open(self.test_upload_txt, 'rb').read()\n open(self.test_upload_png, 'rb').read()\n data['attachment-1'] = open(self.test_upload_txt, 'rb')\n data['attachment-2'] = open(self.test_upload_png, 'rb')\n request = self.factory.post(self.url, data=data)\n\n # should except\n with self.assertRaises(AttachmentTooLargeError):\n self.parser.parse(request),", "def test_buf_too_large(self):\n connection = Connection(Context(SSLv23_METHOD), None)\n with pytest.raises(ValueError) as exc_info:\n connection.send(VeryLarge())\n exc_info.match(r\"Cannot send more than .+ bytes at once\")", "def test_warn_deprecated_limited_cap(self):\n occurrences = 100\n cap = 10\n\n printouts = set()\n messages = set()\n for i in range(occurrences):\n message = _hash_limit_string(\n \"this is a unique message: %d\", cap, (i,)\n )\n printouts.add(str(message))\n messages.add(message)\n\n eq_(len(printouts), occurrences)\n eq_(len(messages), cap)", "def split_message(text, max_length=640):\n res = []\n sub_message = ''\n sentences = split_into_sentences(text)\n for sentence in sentences:\n new_sub_message = sub_message + ' ' + sentence if sub_message else sentence\n if len(sentence) > max_length:\n res.extend(split_by_spaces(sentence, max_length))\n elif len(new_sub_message) > max_length:\n if len(sub_message) > 0:\n res.append(sub_message)\n sub_message = sentence\n else:\n sub_message = new_sub_message\n if len(sub_message) > 0:\n res.append(sub_message)\n return res", "def _is_frame_legal_size(data: bytes) -> bool:\n return len(data) < UDP_MAX_SIZE", "def 
test_no_body_max_line_length_option_ignored(self, custom_config):\n del custom_config['body']['max_line_length']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2},\n 'message': 'xxxxx\\n\\n{}'.format('A' * 1000),\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def test_max_cookie_length(self):\n storage = self.get_storage()\n response = self.get_response()\n\n for i in range(5):\n storage.add(str(i) * 900)\n unstored_messages = storage.update(response)\n\n cookie_storing = self.stored_messages_count(storage, response)\n self.assertEqual(cookie_storing, 4)\n\n self.assertEqual(len(unstored_messages), 1)\n self.assert_(unstored_messages[0].message == '0' * 900)", "def _cutBody(self, body_contents):\n body_contents = str(body_contents)\n\n # adding dots at the end of contents if contents length too large\n dots = ''\n if len(body_contents) > self._chatwork_message_max_len:\n dots = '\\n...'\n\n # Cut to chatwork_message_max_len.\n body_contents = body_contents[:self._chatwork_message_max_len]\n # Use /n, whitespace,、 and 。as cut border.\n trimmed_body_contents = \"\".join(re.split(\"([\\n 。 、]+)\", body_contents)[:-1])\n if trimmed_body_contents and dots:\n body_contents = trimmed_body_contents\n # Cut excessive newlines at the end\n body_contents = body_contents.strip('\\n')\n\n # If [/code] tag was trimmed, then add it\n if body_contents.find(\"[code]\") != -1 and body_contents.find(\"[/code]\") == -1:\n body_contents += \"[/code]\"\n\n return body_contents + dots", "def test_body_len(self):\n client = self.base_scenario(\n frang_config=\"http_body_len 10;\",\n requests=[\n f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nContent-Length: 20\\r\\n\\r\\n{'x' * 20}\"\n ],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"frang: HTTP body length exceeded for\"\n )", "def _check_description_count(self):\n\n for rec in self:\n if rec.description and len(rec.description)>50:\n raise except_orm(_('Warning!'),\n _(\"Description Lenght must be less than or equal to 50. 
\"))", "def limit_bytes(self):\n return self._limit_bytes", "def ReceiveBufferSize(self) -> int:", "def ReceiveBufferSize(self) -> int:", "def __len__(self):\n return len(self.contents)", "def __len__(self):\n return len(self.contents)", "def maxsize(self):\r\n return None", "def maximum():\n if len(a_variable.get()) > MAX_CHARACTERS:\n messagebox.showwarning(title=\"Max Characters Exceeded!\",\n message=\"Please enter no more than 25\\n\"\n \"characters, thanks.\")\n clear_box() # Clears the entry field", "def generate_message(self, mtu):\r\n raise GeneratorExit(\"No more message to send\")", "def message_length(self):\n return self._message_length", "def message_no_limit(request):\n \n # this is only available in development - should use our\n # parse.decorators.dev_only decorator instead of this\n if PRODUCTION_SERVER:\n raise Http404 \n\n # insert the token in the session and return a plaintext response\n # confirming the success of the operation\n if request.method == \"GET\":\n request.session[\"message_limit_off\"] = True\n return HttpResponse(\"Limit for sending messages has been turned off.\" +\\\n \"To turn it back on, please log out and log back in.\")\n \n # only accept GET methods \n return HttpResponse(\"Bad Request\")", "def test_client_email_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_email').max_length\n self.assertEquals(max_length, 100)", "def manage_messages(_) -> int:\n return 1 << 13", "def manage_messages(_) -> int:\n return 1 << 13", "def set_message(self, message):\n if len(message) > globals.MAX_MESSEGE_LENGTH:\n mess = message[0:globals.MAX_MESSEGE_LENGTH-3]+\"...\"\n else:\n mess = message\n self._message.set_message(mess)", "def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret", "def clean_bio(self):\n bio = BeautifulSoup(self.cleaned_data['bio'], \"html.parser\")\n char_num = len(bio.get_text().replace(' ', ''))\n print(char_num)\n if 0 < char_num < 10:\n raise forms.ValidationError('If you want to share bio, make it '\n '10 characters or longer')\n return self.cleaned_data['bio']", "def massage_addcontent(self) -> str:\n pass", "def test_field_without_reaching_the_limit_2(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 294)]],\n disable_hshc=True,\n )\n self.check_response(\n client,\n status_code=\"200\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def _indefinite_content_bound(self) :\n return 2 * self.index() // 3", "def test_exceeded_limit(self):\n msg=self.sample_data(\"error_exceeded_limit.xml\")\n error = ErrorParser().process_all(msg)\n assert isinstance(error, PatronLoanLimitReached)\n eq_(u'Patron cannot loan more than 12 documents', error.message)", "def __len__(self) -> int:\n return len(self.contents)", "def test_send_message_not_fitting_parragraphs(bot_arg, monkeypatch):\n monkeypatch.setattr(telegram.constants, 'MAX_MESSAGE_LENGTH', 20)\n msg = 'hello\\n\\nworld, this is a message'\n send_message(msg, bot_arg, 1)\n assert bot_arg.msg_log[0] == 'hello'\n assert bot_arg.msg_log[1].strip() == 'world, this is a message'", "def __len__(self):\n return 30", "def 
handle_message(self, message):", "def _metadata_too_large(self):\n # currently the entire POST JSON request body is limited by default to 100kb\n return sys.getsizeof(self.metadata) > 10000", "def max_size(self):\n raise NotImplementedError()", "def test_stringLiteralTooLong(self):\n self.server._literalStringLimit = 4\n self.server.lineReceived(b\"001 LOGIN {5}\\r\\n\")\n\n self.assertEqual(self.transport.value(),\n b\"001 BAD Illegal syntax: Literal too long!\"\n b\" I accept at most 4 octets\\r\\n\")", "def consolidate_messages(self, msg):" ]
[ "0.6797087", "0.6786985", "0.66775584", "0.6666431", "0.6509975", "0.64643615", "0.64408034", "0.6314759", "0.62594056", "0.62594056", "0.62315077", "0.6188969", "0.6176606", "0.6108867", "0.6095591", "0.6054365", "0.60128444", "0.5996038", "0.5942682", "0.5922216", "0.5922216", "0.5922216", "0.5899321", "0.5884887", "0.5870159", "0.5868113", "0.58616406", "0.5842672", "0.58333516", "0.58075726", "0.58021533", "0.58021533", "0.5779573", "0.5747064", "0.57450575", "0.57332414", "0.5712532", "0.5697769", "0.56943274", "0.5692089", "0.5684182", "0.568034", "0.56708264", "0.5670455", "0.56606245", "0.56508183", "0.5639245", "0.5632587", "0.5630314", "0.5613817", "0.5605684", "0.55789375", "0.5574306", "0.5571198", "0.55704373", "0.55547875", "0.5545979", "0.55314887", "0.55303305", "0.5528338", "0.552526", "0.5516353", "0.55087686", "0.5505481", "0.5501444", "0.5499624", "0.5497128", "0.5494776", "0.54870516", "0.5476717", "0.54730314", "0.54659086", "0.54614747", "0.546046", "0.54388505", "0.54388505", "0.5433342", "0.5433342", "0.5424017", "0.5423158", "0.542073", "0.54167557", "0.54087687", "0.54077816", "0.5406087", "0.5406087", "0.540018", "0.5386171", "0.5377724", "0.53746295", "0.5374041", "0.5370697", "0.53647375", "0.5364229", "0.53569406", "0.5353787", "0.533639", "0.5335975", "0.53331184", "0.533288", "0.5322565" ]
0.0
-1
Solution for part one.
def solve_part_one(self):
    password = ""
    index = 0
    while len(password) < 8:
        (s, found_index) = self.find_next_hash(index)
        password += s[5]
        index = found_index + 1
    return password
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task4_1(self):\n\n pass", "def exo2():", "def task4(self):\n\n pass", "def substantiate():", "def apply(self) -> None:", "def apply(self) -> None:", "def support(self):", "def mezclar_bolsa(self):", "def solve(self):", "def solvate(self):\n\n pass", "def falcon():", "def exercise_b2_106():\r\n pass", "def CL(self):", "def task3(self):\n\n pass", "def result(self):", "def result(self):", "def apply(self):", "def use(self):", "def exercise_b2_113():\r\n pass", "def process(self):", "def process(self):", "def process(self):", "def task5(self):\n\n pass", "def solve(self):\n ...", "def part1(_input):\n\n return None", "def main(self):", "def exercise_b2_107():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def part_2():\n pass", "def degibber(self):", "def preprocess(self):", "def problem_298():\n pass", "def common(self):", "def exercise_b2_82():\r\n pass", "def decide():", "def part_5a():\n\n raise NotImplementedError", "def exercise_b2_70():\r\n pass", "def task1(self):\n \n pass", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def firstFunction(self):", "def one(self):", "def regular(self):", "def exercise_b2_27():\r\n pass", "def get_sol(self):", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def solve(self):\n pass", "def solve(self):\n pass", "def _build(self):", "def _build(self):", "def part2(_input):\n\n return None", "def part_6():\n\n raise NotImplementedError", "def _build_impl(self):", "def transform(self):", "def _regr_basic():", "def _prepare(self):", "def _prepare(self):", "def solution(self) -> State:", "def implement(self):\n\t#@DEBUG remove comments", "def _optimise(self):\n pass", "def exercise_b2_98():\r\n pass", "def __call__(self) -> None:", "def exercise_b2_43():\r\n pass", "def MINET(self):", "def pick_up(self):", "def task2(self):\n\n pass", "def test_get_solution(self):\n pass", "def input(self):", "def project(self, X):", "def project(self, X):" ]
[ "0.67894316", "0.6702227", "0.64681834", "0.6225672", "0.62181926", "0.62181926", "0.6214918", "0.62091845", "0.61323327", "0.6128199", "0.6067991", "0.60675985", "0.6043714", "0.602853", "0.60285074", "0.60285074", "0.60218054", "0.6005739", "0.5983086", "0.5963465", "0.5963465", "0.5963465", "0.5958028", "0.5914259", "0.5907054", "0.58979076", "0.58809215", "0.5875311", "0.58694124", "0.58687425", "0.58665264", "0.58538747", "0.5838984", "0.58384234", "0.58328587", "0.58266276", "0.578585", "0.57736856", "0.577261", "0.57643825", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.57618344", "0.5734593", "0.57321817", "0.57245797", "0.5713552", "0.57086414", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.5693452", "0.56871223", "0.56871223", "0.5652655", "0.5652655", "0.56507707", "0.56500196", "0.56415653", "0.56257284", "0.5614916", "0.5610619", "0.5610619", "0.5598211", "0.5597677", "0.55940604", "0.5591433", "0.55902135", "0.5589901", "0.5582843", "0.55777407", "0.5571332", "0.55670774", "0.55647516", "0.55645126", "0.55645126" ]
0.0
-1
Solution for part two.
def solve_part_two(self):
    password = list("XXXXXXXX")
    index = 0
    counter = 0
    while counter < 8:
        (s, found_index) = self.find_next_hash(index)
        index = found_index + 1
        offset = ord(s[5]) - ord("0")
        # Offset invalid or password character already set previously?
        if offset >= 8 or password[offset] != "X":
            continue
        password[offset] = s[6]
        counter += 1
    return "".join(password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_113():\r\n pass", "def exo2():", "def exercise_b2_82():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_69():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_107():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_70():\r\n pass", "def exercise_b2_95():\r\n pass", "def exercise_b2_26():\r\n pass", "def exercise_b2_43():\r\n pass", "def part2(_input):\n\n return None", "def exercise_b2_93():\r\n pass", "def two(self):", "def exercise_b2_39():\r\n pass", "def exercise_b2_56():\r\n pass", "def solve(self):", "def exercise_2b():\n\n return", "def task4_1(self):\n\n pass", "def exercise_b2_86():\r\n pass", "def substantiate():", "def part_2():\n pass", "def solution(s):", "def solve_part_two(self):\n return self.outputs[0] * self.outputs[1] * self.outputs[2]", "def part1(_input):\n\n return None", "def solvate(self):\n\n pass", "def problem_298():\n pass", "def get_sol(self):", "def solution(self) -> State:", "def solve(self):\n ...", "def task4(self):\n\n pass", "def decide():", "def task2(self):\n\n pass", "def apply(self) -> None:", "def apply(self) -> None:", "def mezclar_bolsa(self):", "def prove_I2() -> Proof:\n # Optional Task 6.7a", "def solve(self):\n pass", "def solve(self):\n pass", "def task3(self):\n\n pass", "def apply(self):", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def GetPoint2(self):\n ...", "def GetPoint2(self):\n ...", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def main():\n data = read_data()\n print('Part one solution: {}'.format(part_one(data)))\n print('Part two solution: {}'.format(part_two(data)))", "def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output", "def part_2(puzzle_input: Tuple[Number] = p1) -> Number:\n for (noun, verb) in permutations(range(len(p1)), 2):\n # Create a fresh copy for each run\n program = list(p1)\n restore_program(memory_updates={1: noun, 2: verb}, memory=program)\n c = Computer(program)\n c.run_program()\n if c.read(0) == 19_690_720:\n return 100 * noun + verb\n raise ExecutionError(\"Could not satisfy requirement\")", "def CL(self):", "def result(self):", "def result(self):", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n 
counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def g(self):\n return 2", "def task5(self):\n\n pass", "def degibber(self):", "def task1(self):\n \n pass", "def _Schoof_mod2(self):\n if not self.b:\n result = 0\n _log.debug(\"(%d, 2) #\" % result)\n else:\n linearfactors = UniVarPolynomial({card(self.basefield):self.basefield.one, 1:-self.basefield.one}, self.basefield)\n if GCD(self.cubic, linearfactors).degree() == 0:\n result = 1\n _log.debug(\"(%d, 2) ##\" % result)\n else:\n result = 0\n _log.debug(\"(%d, 2) ###\" % result)\n return (result, 2)", "def test_part2_example1(example1):\n assert aoc.part2(example1) == 2 + 2 + 966 + 50346", "def 3Sat(B):", "def falcon():", "def calculate_output(self):", "def test_get_solution(self):\n pass", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def solveOneStep(self):\n ### Student code goes here\n return True", "def exercise_4(inputs): # DO NOT CHANGE THIS LINE\n output = inputs\n\n return output # DO NOT CHANGE THIS LINE", "def problem_1b():\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return 4\n # END_YOUR_ANSWER", "def SecondPart():\n return countAllBagsIn(targetBag, organizedBags)", "def testBeliefs2sk(self):", "def test_T2():\n infile = \"cisd/T2.in\"\n assert(os.path.exists(infile))\n with open(infile) as f:\n lines = f.readlines()\n assert(len(lines) == 10)\n\n hl1 = HirataLine(lines[0])\n assert(set(hl1.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl1.summation_indices == '')\n assert(hl1.prefactors == [\"+ 1.0 \"])\n assert(hl1.postfactors == ['v ( p3 p4 h1 h2 )'])\n assert(str(hl1) == lines[0].replace('\\n', ''))\n cl1 = Cc4sLine(hl1)\n assert(set(cl1.free_indices.split()) == set([\"c\", \"d\", \"i\", \"j\"]))\n assert(cl1.summation_indices == \"\")\n assert(cl1.prefactors == [\"+ 1.0 \"])\n assert(cl1.postfactors == ['Vabij[\"cdij\"]'])\n assert(cl1.to_cpp() == ['( + 1.0 ) * Vabij[\"cdij\"];'])\n\n hl8 = HirataLine(lines[7])\n assert(set(hl8.free_indices.split()) == set(\"p3 p4 h1 h2\".split()))\n assert(hl8.summation_indices == ' h6 p5 ')\n assert(\n hl8.prefactors == [\n '+ 1.0 ',\n '- 1.0 * P( p3 p4 h2 h1 => p4 p3 h2 h1 ) ',\n '- 1.0 * P( p3 p4 h2 h1 => p3 p4 h1 h2 ) ',\n '+ 1.0 * P( p3 p4 h2 h1 => p4 p3 h1 h2 ) '\n ]\n )\n assert(\n hl8.postfactors ==\n ['Sum ( h6 p5 )', 't ( p5 p3 h6 h2 )', 'v ( h6 p4 h1 p5 )']\n )\n assert(str(hl8) == lines[7].replace('\\n', ''))\n cl8 = Cc4sLine(hl8)\n assert(set(cl8.free_indices.split()) == set([\"c\", \"d\", \"i\", 
\"j\"]))\n assert(cl8.summation_indices == \" n e \")\n assert(\n cl8.prefactors ==\n ['+ 1.0 ',\n '- 1.0 * P( c d j i => d c j i ) ',\n '- 1.0 * P( c d j i => c d i j ) ',\n '+ 1.0 * P( c d j i => d c i j ) ']\n )\n assert(cl8.postfactors == ['Tabij[\"ecnj\"]', 'Viajb[\"ndie\"]'])\n assert(\n cl8.to_cpp() == [\n '( + 1.0 ) * Tabij[\"ecnj\"] * Viajb[\"ndie\"];',\n '( - 1.0 ) * Tabij[\"ednj\"] * Viajb[\"ncie\"];',\n '( - 1.0 ) * Tabij[\"ecni\"] * Viajb[\"ndje\"];',\n '( + 1.0 ) * Tabij[\"edni\"] * Viajb[\"ncje\"];'\n ]\n )", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def part_two(puzzle: Puzzle) -> typing.Optional[typing.Union[str, int]]:\n for number_one, number_two in itertools.combinations(puzzle[\"set\"], 2):\n if (2020 - number_one - number_two) in puzzle[\"set\"]:\n return (2020 - number_one - number_two) * number_one * number_two", "def idealOpAmp():", "def common(self):", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def bloqueio_2(tab,jog):\r\n jog*=-1\r\n return vitoria_1(tab,jog)", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def pulp_smash():", "def _optimise(self):\n pass", "def prob2(N1, N2, P1, P2):\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def firstFunction(self):", "def REC_2s():\n return 2", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def part1a_2():\n mediumCRF = submission.LinearChainCRF( [\"-FEAT-\", \"-SIZE-\"],\n submission.binaryFeatureFunction,\n Counter({\n (\"-FEAT-\", \"-SIZE-\") : 0.8,\n (\"-SIZE-\", \"-FEAT-\") : 0.5,\n (\"-SIZE-\", \"-SIZE-\") : 1.,\n (\"-FEAT-\", \"-FEAT-\") : 1.,\n (\"-FEAT-\", \"Beautiful\") : 1.,\n (\"-SIZE-\", \"Beautiful\") : 0.5,\n (\"-FEAT-\", 
\"house\") : 1.,\n (\"-SIZE-\", \"house\") : 0.5,\n (\"-FEAT-\", \"2\") : 0.5,\n (\"-SIZE-\", \"2\") : 1.0,\n (\"-FEAT-\", \"bedroom\") : 0.5,\n (\"-SIZE-\", \"bedroom\") : 1.0,}) )\n moreExampleInputs = [\n \"This is a Beautiful 2 bedroom\".split(),\n \"2 bedroom Beautiful house\".split(),\n ]\n moreExampleTags = [\n ['-FEAT-', '-FEAT-', '-FEAT-', '-FEAT-', '-SIZE-', '-SIZE-'],\n ['-SIZE-', '-SIZE-', '-FEAT-', '-FEAT-']\n ]\n for xs, ys in zip(moreExampleInputs, moreExampleTags):\n ys_ = submission.computeViterbi(mediumCRF, xs)\n grader.requireIsEqual( ys, ys_ )", "def _regr_basic():", "def step2(self):\n\t\tif self.b[self.k - 1] == 'a':\n\t\t\tif self.ends(\"ational\"): self.r(\"ate\")\n\t\t\telif self.ends(\"tional\"): self.r(\"tion\")\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"enci\"):\t self.r(\"ence\")\n\t\t\telif self.ends(\"anci\"): self.r(\"ance\")\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"izer\"):\t self.r(\"ize\")\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"bli\"):\t self.r(\"ble\") # --DEPARTURE--\n\t\t\t# To match the published algorithm, replace this phrase with\n\t\t\t#\tif self.ends(\"abli\"):\t self.r(\"able\")\n\t\t\telif self.ends(\"alli\"): self.r(\"al\")\n\t\t\telif self.ends(\"entli\"): self.r(\"ent\")\n\t\t\telif self.ends(\"eli\"):\t self.r(\"e\")\n\t\t\telif self.ends(\"ousli\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ization\"): self.r(\"ize\")\n\t\t\telif self.ends(\"ation\"): self.r(\"ate\")\n\t\t\telif self.ends(\"ator\"): self.r(\"ate\")\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"alism\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iveness\"): self.r(\"ive\")\n\t\t\telif self.ends(\"fulness\"): self.r(\"ful\")\n\t\t\telif self.ends(\"ousness\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"aliti\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iviti\"): self.r(\"ive\")\n\t\t\telif self.ends(\"biliti\"): self.r(\"ble\")\n\t\telif self.b[self.k - 1] == 'g': # --DEPARTURE--\n\t\t\tif self.ends(\"logi\"):\t self.r(\"log\")\n\t\t# To match the published algorithm, delete this phrase", "def merge_two_calls(self) -> None:", "def test_problem2():\n print('Testing problem2. 
The next line should be 18, 23536, 61, 5')\n print(problem2(4, 2), end=', ')\n print(problem2(105, 2), end=', ')\n print(problem2(2, 5), end=', ')\n print(problem2(2, 2))", "def regular(self):", "def part_5a():\n\n raise NotImplementedError", "def qst2(self):\n self.success = False", "def calculate(self):", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def f1_score(self):", "def part1a_0():\n xs = exampleInput\n ys = exampleTags\n ys_ = submission.computeViterbi(simpleCRF, xs)\n grader.requireIsEqual( ys, ys_ )", "def part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def project(self, X):", "def project(self, X):" ]
[ "0.6879843", "0.6871387", "0.6860534", "0.6821481", "0.6766025", "0.67570114", "0.6722933", "0.66447437", "0.6609891", "0.65726656", "0.6570282", "0.65633136", "0.6554844", "0.6482532", "0.6472665", "0.64377874", "0.6428559", "0.6427989", "0.63757336", "0.6357774", "0.63287115", "0.6279073", "0.627079", "0.62071586", "0.61873275", "0.60596913", "0.6043972", "0.5997983", "0.5917516", "0.5905285", "0.5898481", "0.5859372", "0.58418256", "0.58359516", "0.5818848", "0.58050734", "0.5722754", "0.5722754", "0.5656766", "0.56442", "0.56397057", "0.56397057", "0.5633216", "0.5618579", "0.55983824", "0.5581739", "0.5581739", "0.55573875", "0.55573714", "0.55428886", "0.5534347", "0.5507896", "0.54758894", "0.54758894", "0.5471618", "0.54697347", "0.54694766", "0.54670966", "0.5462047", "0.54576105", "0.54424226", "0.5432521", "0.5393374", "0.5391384", "0.53845376", "0.53747326", "0.53693295", "0.5367425", "0.53660166", "0.5345747", "0.5344487", "0.53332794", "0.53249484", "0.53249305", "0.53240865", "0.5321835", "0.53210294", "0.53168565", "0.5313229", "0.53077155", "0.5306414", "0.5305692", "0.52984434", "0.5296285", "0.52954805", "0.5290965", "0.5290016", "0.52835226", "0.5282542", "0.5281662", "0.52752644", "0.52554893", "0.52527046", "0.5248542", "0.5239233", "0.52322066", "0.5230776", "0.52274984", "0.522059", "0.5219202", "0.5219202" ]
0.0
-1
Updates this store's current state with incoming data from the network. data should be a mapping containing 'metacontacts', 'order', and 'info' structures (see comment at top of file)
def update_data(self, data): rebuild = False # This method needs to substitute some defaultdicts for the normal # dictionaries that come back from the server. # Metacontact information #if data['metacontacts'] mc_dict = data.get('metacontacts', {}) if not isinstance(mc_dict, dict): log.critical('invalid metacontacts dictionary') mc_dict = {} # Contact information like SMS numbers and email addresses. self.info = defaultdict(dict) si = self.info if 'info' in data: for (k, v) in data['info'].iteritems(): if isinstance(k, str): cmpk = k.decode('utf8') else: cmpk = k if not isinstance(cmpk, unicode): continue if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot) for prot in protocols.iterkeys())): if any(v.values()): si[k] = v for c, v in si.iteritems(): for attr in ('email', 'sms'): if attr in v: self.contact_info_changed(c, attr, v[attr]) self.metacontacts = MetaContactManager(self, mc_dict) if hasattr(self, 'new_sorter'): on_thread('sorter').call(self.new_sorter.removeAllContacts) rebuild = True # Manual ordering of groups try: self.order = deepcopy(data['order']) self.order['groups'] = list(oset(self.order['groups'])) contacts = self._filtered_contacts() self.order['contacts'] = defaultdict(list) self.order['contacts'].update(contacts) except Exception: log.critical('error receiving order') self._init_order() # note: loading tofrom data from the network is deprecated. this data # now goes out to disk. see save/load_local_data if 'tofrom' in data and isinstance(data['tofrom'], dict) and \ 'im' in data['tofrom'] and 'email' in data['tofrom']: self.dispatch.set_tofrom(deepcopy(data['tofrom'])) if rebuild: self.rebuild() self.update_order()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_state_notification(self, data):\n\n self.channel_data.update(data)\n\n # synchronize DataManager data with processed update & entity data\n self.sync_data_update_ha()", "def update(self, data):\n logging.info('update state', data)\n self._client.update_state(data)\n\n # Also locally update our state so things aren't out of sync\n self._state.update(data)", "def update(self, data):\n self.data.update(data)", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]\n self._attributes = self.data_service.attributes[self._json_key]", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data\n self._attributes = self.data_service.attributes", "def update_data():\n pass", "def update(self, data):\n return self._data.update(data)", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]", "def update(self):\n self.data_service.update()\n attr = self.data_service.attributes.get(self._json_key)\n self._state = attr[\"soc\"]", "def update(self,data):\r\n data = data.split(':',1)\r\n\r\n self.log('Signal','Received an update: %s...' % repr(data)[0:10],'update')\r\n \r\n #print \"*** local: \" + repr(data)\r\n \r\n if data[0] == 'Results':\r\n\r\n self.log('Signal','The local node returned these passwords: %s' % repr(data[1]),'update')\r\n\r\n self.addResult(data[1])\r\n elif data[0] == 'Bench':\r\n self.log('Signal','The local node returned these benches: %s' % repr(data[1]),'update')\r\n \r\n self.addBench(data[1])\r\n\r\n elif data[0] == 'Work':\r\n if data[1] == 'Done':\r\n self.finished += 1\r\n if self.finished >= len(self.nodes):\r\n self.runningWork = False\r\n self.log('Signal','Finished working','update')\r\n\r\n notification = 'Work:Done'\r\n self.notifyObservers(notification)", "def _update_data(self, data, update_original=False):\n self._data.update(dict((key, self._deserialize(key, value))\n for key, value in data.items()))\n\n if update_original:\n self._original_data = copy.deepcopy(self._data)", "def update(self, data):\n return data", "def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)", "def update(self, data):\n\n if not isinstance(data, (dict, list, set)):\n raise TypeError(\"Unsupported type\")\n\n if self.payload_undefined:\n\n if isinstance(data, dict):\n self._attr = {}\n elif isinstance(data, set):\n self._attr = set()\n elif isinstance(data, list):\n self._attr = []\n\n if not self.is_payload(type(data)):\n p_type = str(type(self._attr))\n d_type = str(type(data))\n msg = (\n f\"The type of the update data '{d_type}' doesn't match current payload's \"\n f\"type: '{p_type}'\"\n )\n raise TypeError(msg)\n\n if self.is_payload(dict):\n for k, v in data.items():\n if isinstance(v, dict):\n self._attr[k] = Pinnate(v)\n else:\n self._attr[k] = v\n\n elif self.is_payload(list):\n\n for v in data:\n if isinstance(v, dict):\n self._attr.append(Pinnate(v))\n else:\n self._attr.append(v)\n\n elif self.is_payload(set):\n\n for v in data:\n if isinstance(v, dict):\n self._attr.add(Pinnate(v))\n else:\n self._attr.add(v)", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = 
self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data.get(self._json_key)\n self._attributes = self.data_service.attributes.get(self._json_key)\n self._unit_of_measurement = self.data_service.unit", "def update(self, data):\n if not isinstance(data, list): data = [data] # otherwise no conversion is necessary\n master = Handler.ALL_VERS_DATA\n for record in data:\n #print(record)\n for k,v in iteritems(record): # ensure record contents aretyped appropriately\n try: record[k] = int(v)\n except ValueError: record[k] = v\n try: label = record[\"label\"] # verify this record has the required 'label' key\n except KeyError:\n raise ValueError(\"Must provide a valid label argument. Given:%s%s\"%(\\\n os.linesep, (\"%s \"%(os.linesep)).join(\n [\"%15s:%s\"%(k,v) for k,v in iteritems(kwargs)]\n )))\n try: masterLabel = master[label] # identify the already existing record that matches this to-be-updated record, if any\n except KeyError: # master hasn't been defined yet\n master[label] = record\n self._updated = True # a new record should also be saved\n continue\n for k,v in iteritems(record): # determine whether master needs to be updated\n try:\n if masterLabel[k] == v: continue # whether an entry in the record needs to be updated (doesn't match)\n except KeyError: pass # this condition means that k is a new key, so the record must be updated\n self._updated = True\n try: master[label].update(record) # index each record by its label\n except KeyError: break", "def update_from_dict(self, data: dict) -> \"Device\":\n if \"info\" in data and data[\"info\"]:\n self.info = Info.from_dict(data[\"info\"])\n\n if \"locations\" in data and data[\"locations\"]:\n locations = [Location.from_dict(location) for location in data[\"locations\"]]\n self.locations = locations\n\n return self", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n raise NotImplementedError", "def update_from_dict(self, data: dict):\n self.algo = data[\"algo\"]\n self.version = data[\"sbx\"]\n self.next_session = data[\"next\"]\n self.last_session = data[\"last\"]\n self.past_quality = unpack_int_list(data[\"pastq\"])\n\n # Revert to length of past_quality if reps are not set\n possible_rep = len(self.past_quality)\n self.actual_repetitions = data.get(\"reps\", possible_rep)\n\n # Other keys are used by algorithm\n self.algo_state = data.copy()\n for required_key in REQUIRED_FIELDS:\n del self.algo_state[required_key]", "def update_current_data(self, data):\n if self.current_data is not None:\n current_results = self.get_results()\n self._history.append((self.current_data, current_results))\n\n self.current_data = data", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def _async_process_data(self):\n _LOGGER.debug(\"Update switch called\")\n\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def set_state(self, sync_data):\n # Send an echo update message immediately\n if JUPYTER_WIDGETS_ECHO:\n echo_state = {}\n for attr, value in sync_data.items():\n if attr in self.keys and self.trait_metadata(attr, 'echo_update', default=True):\n echo_state[attr] = value\n if echo_state:\n echo_state, echo_buffer_paths, echo_buffers = _remove_buffers(echo_state)\n msg = {\n 'method': 'echo_update',\n 'state': echo_state,\n 'buffer_paths': echo_buffer_paths,\n }\n self._send(msg, buffers=echo_buffers)\n\n # The order of these context managers is important. Properties must\n # be locked when the hold_trait_notification context manager is\n # released and notifications are fired.\n with self._lock_property(**sync_data), self.hold_trait_notifications():\n for name in sync_data:\n if name in self.keys:\n from_json = self.trait_metadata(name, 'from_json',\n self._trait_from_json)\n self.set_trait(name, from_json(sync_data[name], self))", "def update(self, data: bytes):\n self.send(data)", "def update(self, data):\n self.content = data", "def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)", "def Update(self, data):\n self.Write(data)", "def _update_attributes(self, data):\n self._set_avatar(data)\n self.boosts_since = parse_boosts_since(data)\n self.flags = parse_flags(data)\n self.nick = parse_nick(data)\n self.pending = parse_pending(data)\n self.role_ids = parse_role_ids(data)\n self.timed_out_until = parse_timed_out_until(data)", "def update_data(self):\n self._model.update()\n self.__refresh()", "def store_data(self, data):\n self.data.append(data)", "def data_in(self, data, **kwargs):\n action_type = data.get(\"t\", \"UNKNOWN\")\n\n if action_type == \"MESSAGE_CREATE\":\n # someone posted a message on Discord that the bot can see\n data = data[\"d\"]\n if data[\"author\"][\"id\"] == self.discord_id:\n # it's by the bot itself! 
disregard\n return\n message = data[\"content\"]\n channel_id = data[\"channel_id\"]\n keywords = {\"channel_id\": channel_id}\n if \"guild_id\" in data:\n # message received to a Discord channel\n keywords[\"type\"] = \"channel\"\n author = data[\"member\"][\"nick\"] or data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n keywords[\"guild_id\"] = data[\"guild_id\"]\n\n else:\n # message sent directly to the bot account via DM\n keywords[\"type\"] = \"direct\"\n author = data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n\n # pass the processed data to the server\n self.sessionhandler.data_in(self, bot_data_in=(message, keywords))\n\n elif action_type in (\"GUILD_CREATE\", \"GUILD_UPDATE\"):\n # we received the current status of a guild the bot is on; process relevant info\n data = data[\"d\"]\n keywords = {\"type\": \"guild\", \"guild_id\": data[\"id\"], \"guild_name\": data[\"name\"]}\n keywords[\"channels\"] = {\n chan[\"id\"]: {\"name\": chan[\"name\"], \"guild\": data[\"name\"]}\n for chan in data[\"channels\"]\n if chan[\"type\"] == 0\n }\n # send the possibly-updated guild and channel data to the server\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n\n elif \"DELETE\" in action_type:\n # deletes should possibly be handled separately to check for channel removal\n # for now, just ignore\n pass\n\n else:\n # send the data for any other action types on to the bot as-is for optional server-side handling\n keywords = {\"type\": action_type}\n keywords.update(data[\"d\"])\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))", "def set_data(self, data):\n self.closeContext()\n self.clear()\n self.clear_messages()\n\n self.data = data\n if data is not None:\n n_instances = len(data)\n n_attrs = len(data.domain.attributes)\n self.infoLabel.setText(\"%i instances on input\\n%i attributes\" % (\n n_instances, n_attrs))\n\n self.graph_variables = [var for var in data.domain.attributes\n if var.is_continuous]\n if len(self.graph_variables) < 1:\n self.Information.not_enough_attrs()\n else:\n groupvars = [var for var in data.domain.variables +\n data.domain.metas if var.is_discrete]\n\n if len(groupvars) > 0:\n self.cb_attr.addItems([str(var) for var in groupvars])\n self.group_var = str(groupvars[0])\n self.group_variables = groupvars\n self.update_group_var()\n else:\n self._setup_plot()\n\n self.selection = []\n self.openContext(data)\n self.select_data_instances()\n self.commit()", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def update(self, data):\n self.data = data\n # extract data\n subject = data.identifier\n self.id = _uri_to_oai(subject)\n self.modified = datetime.utcnow()\n self.deleted = False\n\n itemtype, subtype = _get_itemtype(data, subject)\n\n self.metadata = {}\n\n # fixed fields:\n self.metadata['rif_key'] = _uri_to_key(subject)\n self.metadata['rif_group'] = self.provider.groupDescription\n self.metadata['rif_originatingSource'] = self.provider.originatingSource\n self.metadata['rif_object'] = {'value': itemtype,\n 'type': subtype,\n #'dateModified': '',\n }\n\n if itemtype == 'collection':\n self.updateCollection(data, subject)\n elif itemtype == 'party':\n if subtype == 'person':\n self.updatePartyPerson(data, subject)\n else:\n self.updateParty(data, subject)\n elif itemtype == 'activity':\n self.updateActivity(data, subject)\n elif itemtype == 'service':\n self.updateService(data, 
subject)", "def on_data(self, data):\r\n if 'in_reply_to_status_id' in data:\r\n self.keep_or_update_tgid()\r\n self.insert_data(data)", "def reinit_data(self):\n self.if_name_map, \\\n self.if_alias_map, \\\n self.if_id_map, \\\n self.oid_name_map = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_interface_tables, self.db_conn)\n\n self.update_data()", "def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()", "def update_data(self, newData):\r\n self.AllData = newData", "def updateFromDict(self, data):\n for key, value in data.items():\n setattr(self, key, value)", "def update(self, new_gameStateData):\r\n pass", "def update_service_data(self, data, etag):\r\n self.service.name = data[\"service\"][\"name\"]\r\n self.service.etag = etag\r\n self.service.set_mirrors(data[\"service\"][\"filelocations\"])\r\n self.service.torrent = data[\"service\"].get(\"torrents\", \"\")\r\n self.service.save()", "def update(self):\n\n if len(self._data) > 0:\n if not self._switch._is_on:\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp, 1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]\n\n return\n\n self._api.load_file()\n\n self._data = self._api._data\n\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]", "def update_state(self, data):\n # Note: Odmetry message also provides covariance\n self.current_pose = data.pose.pose\n self.current_twist = data.twist.twist\n quat = (\n self.current_pose.orientation.x,\n self.current_pose.orientation.y,\n self.current_pose.orientation.z,\n self.current_pose.orientation.w)\n (roll, pitch, yaw) = euler_from_quaternion (quat)\n # rospy.loginfo(\"State from Odom: (%.5f, %.5f, %.5f) \" % (self.current_pose.position.x, self.current_pose.position.y, yaw))", "def _update_object(self, data_dict):\r\n pass", "def ingestState(self, data):\n # ignorant agents might not do anything with the state\n if not data:\n return\n\n args = struct.unpack('!ffffffB',data)\n \n self.gameState = GameState(args[0],args[1],args[2],args[3],args[4],args[5])\n self.gameState.parseFlags(args[6])", "def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self", "def data(self, data):\n self.__data = data", "def update_gear_data(self, geardata):\n\n tempdata = self.data.copy()\n tempdata.update(geardata)\n self.__init__(geardata, self.modifications)", "def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()", "def _update_(self, data):\n # imports\n from intervals import Interval, LinkedIntervalSet\n \n # create a LinkedIntervalSet to store the time-based states for this\n # hero\n states = [Interval(dict((HER_KWD_MAP.get(k, k), d[1][k]) for k in d[1]), d[0]) for d in data]\n self.states = LinkedIntervalSet.from_starts(states)", "def _async_process_data(self):\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def 
update(self, new_gameStateData):\r\n self.data = new_gameStateData\r\n self._refresh()", "def handle_data(self, data):\r\n self.fed.append(data)", "def data(self, data):\n self._data = data", "def data(self, data):\n self._data = data", "def act_on_data(self, dct, base='entry/measurement/'):\n for d, k, v in walk_dict(dct):\n if isinstance(v, Link):\n d[k] = {'type':'Link',\n 'filename':v.filename,\n 'path':v.path,\n 'universal':v.universal} \n dct['status']='running' \n self.socket.send_pyobj(dct, protocol=2)", "def set_data(self, data):\n\n pass", "def set_data(self, new_data):\n self.data = new_data", "def set_data(self, data):\n self.data = data", "def update_data(self, **kwargs):\n self.source_data = self.get_dict()\n for c in self.callbacks[\"update_data\"]:\n c()", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()", "def update(self, data: dict):\n for key in data:\n model_att = getattr(self.__class__, key, None)\n value = data.get(key)\n\n setattr(self, key, type(model_att.type.python_type())(value))\n\n self.commit()\n return self", "def pass_data(self, data):\n self.data = data\n self.load_input_fields()", "def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license", "def store_data(self, store_data):\n self._store_data = store_data", "def unserialize(self, data):\n (self.handle, self.gramps_id, the_type, self.name, note_list,\n address_list, urls, self.change, tag_list, self.private) = data\n\n self.type = RepositoryType()\n self.type.unserialize(the_type)\n NoteBase.unserialize(self, note_list)\n AddressBase.unserialize(self, address_list)\n UrlBase.unserialize(self, urls)\n TagBase.unserialize(self, tag_list)\n return self", "def update_original_data(self):\n pass", "def store_data(self, data):\n if not self.light.hasAttr(self.custom_data_storage_attr_name):\n pm.addAttr(\n self.light,\n ln=self.custom_data_storage_attr_name,\n dt='string'\n )\n\n self.light.setAttr(self.custom_data_storage_attr_name, data)", "def _update_from_data(self, data):\n try:\n self.channelId = data[\"channelId\"]\n except (KeyError, TypeError):\n raise ValueError(\"Foretold data missing or invalid\")\n\n # If floatCdf is not available, we can just keep it as None\n try:\n self.floatCdf = data[\"previousAggregate\"][\"value\"][\"floatCdf\"]\n except (KeyError, TypeError):\n self.floatCdf = None", "def setData(self, data):\n self._data = pickle.dumps(data)", "def set_in_data(self, data: NodeData, port: Port):\n self.inputs[port.index] = copy(data)\n if self._check_inputs():\n try:\n self.compute()\n except Exception as e:\n traceback.print_exc()\n else:\n self._statusLabel.setText('×')\n for i in 
self.outputs:\n self.outputs[i] = None\n for i in range(self.num_ports[PortType.output]):\n self.data_updated.emit(i)", "def store_data(self, data):\n self.data = data\n # HERE\n the_main_dict = {**self.user_data(), **self.entities_data(), **self.extract_relevant(), **self.locate(),\n **self.calculate_days(), **self.clean_user_desc()}\n # The below is the reason that the table creation must be written in alphabetical order. This is simpler than\n # writing the complex joins that would otherwise be needed.\n my_keys_list = sorted(the_main_dict.keys())\n my_items = list(map(lambda x: str(the_main_dict[x]).replace(\"'\", ''), my_keys_list))\n try:\n # Unpacks the items into an insert statement for the SQLite table\n self.conn.execute(\"INSERT INTO {0} VALUES('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}',\"\n \"'{10}','{11}','{12}','{13}','{14}','{15}','{16}','{17}','{18}','{19}','{20}',\"\n \"'{21}','{22}','{23}','{24}','{25}','{26}','{27}','{28}')\".format(self.table, *my_items))\n self.limiting += 1\n return 0\n except sqlite3.IntegrityError:\n return 1", "def _handle_coordinator_update(self) -> None:\n self._update_data()\n self.async_write_ha_state()", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def update(self, new_data):\n if type(new_data) is not dict:\n raise TypeError(\"Input parameter must be a dict\")\n # Update parameters\n self._type = new_data.get(\"_type\", self._type)\n self.time_units = new_data.get(\"time_units\", self.time_units)\n self.len_units = new_data.get(\"len_units\", self.len_units)\n self.parameters = new_data.get(\"parameters\", self.parameters)\n # Update drawdown\n self.drawdown.update(new_data.get(\"drawdown\", self.drawdown.to_dict()))\n # Update data\n if \"data\" in new_data:\n n = len(new_data[\"data\"])\n if n > 0:\n self.reset_data()\n for i in range(n):\n self.add_data(0, 0)\n self.data[i].update(new_data[\"data\"][i])\n # End Function", "def data_received(self, data):\n print('S> data received ['+str(len(data))+']: '+str(data))\n self.deserializer.append(data)\n if self.deserializer.ready():\n msg = self.deserializer.deserialize()\n status = TSDBStatus.OK # until proven otherwise.\n response = TSDBOp_Return(status, None) # until proven otherwise.\n try:\n op = TSDBOp.from_json(msg)\n except TypeError as e:\n print(e)\n response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)\n if status is TSDBStatus.OK:\n if isinstance(op, TSDBOp_InsertTS):\n response = self._insert_ts(op)\n elif isinstance(op, TSDBOp_UpsertMeta):\n response = self._upsert_meta(op)\n elif isinstance(op, TSDBOp_Select):\n response = self._select(op)\n elif isinstance(op, TSDBOp_AugmentedSelect):\n response = self._augmented_select(op)\n elif isinstance(op, TSDBOp_AddTrigger):\n response = self._add_trigger(op)\n elif isinstance(op, TSDBOp_RemoveTrigger):\n response = self._remove_trigger(op)\n elif isinstance(op, 
TSDBOp_DeleteTS):\n print('running delete')\n response = self._delete_ts(op)\n else:\n response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR,\n op['op'])\n\n self.conn.write(serialize(response.to_json()))\n # print(\"close\")\n self.conn.close()", "def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def update_data(self, extra_data):\n self._data.update(extra_data)\n return self", "def set_data(self, data):\n\n self._data = data", "def set_data(self, data):\n\n self._data = data", "def set_data(self, data):\n\n self._data = data", "def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))", "def add_data(self, data):\n self.data = data", "def __init__(self, data={}):\n self._update_(data)", "def socket_cache_set(self, context,data):\r\n global data_cache\r\n s_id = self.get_socket_hash(self)\r\n s_ng = self.id_data.name\r\n if s_ng not in data_cache:\r\n data_cache[s_ng] = {}\r\n data_cache[s_ng][s_id] = data\r\n output_nodes = set()\r\n if self.is_linked and self.is_output:\r\n for node_output_link in self.links:\r\n output_nodes.add(node_output_link.to_node)\r\n for node in output_nodes:\r\n node.execute_node(context)", "def update_has_data(self):\n self.main()" ]
[ "0.6476063", "0.62110347", "0.61491877", "0.6079043", "0.6079043", "0.6079043", "0.6079043", "0.60150313", "0.59856397", "0.592851", "0.5848428", "0.58295083", "0.58125436", "0.579534", "0.5732395", "0.5716034", "0.56988144", "0.5688092", "0.5683766", "0.5636451", "0.5636451", "0.56280696", "0.5590911", "0.55697817", "0.5553052", "0.554236", "0.5535166", "0.55267394", "0.5511158", "0.55028915", "0.5444237", "0.5414257", "0.5391385", "0.5388974", "0.5359936", "0.53189975", "0.5318993", "0.5304059", "0.5302737", "0.52967423", "0.527881", "0.52472305", "0.5244203", "0.5241769", "0.52333874", "0.52222645", "0.5218426", "0.5211186", "0.5179061", "0.51678723", "0.51603585", "0.5158761", "0.5134056", "0.5133415", "0.51273626", "0.5124881", "0.51236576", "0.5114793", "0.5109157", "0.5105744", "0.5105744", "0.51053", "0.50967014", "0.509323", "0.50922596", "0.50914997", "0.5088871", "0.5088871", "0.5088871", "0.5088871", "0.5088871", "0.5088871", "0.50856566", "0.5082002", "0.50776327", "0.507391", "0.5054867", "0.503335", "0.50314844", "0.5029052", "0.5019902", "0.5019052", "0.50114226", "0.5006455", "0.50048053", "0.49971813", "0.49922177", "0.49913096", "0.49904928", "0.4990346", "0.4984107", "0.49814802", "0.49703315", "0.49703315", "0.49703315", "0.49663815", "0.49654123", "0.49525827", "0.49484774", "0.49463418" ]
0.70325893
0
translates an rgb tuple of int to a tkinter friendly color code
def _from_rgb(self, rgb): return "#%02x%02x%02x" % rgb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def int2color_tuple(x):\n red_val = int(1000 * x % 255)\n green_val = int(10000 * x % 255)\n blue_val = int(100000 * x % 255)\n return red_val, green_val, blue_val", "def matplotlib_rgb_color(rgb_color):\r\n return tuple([i / 255. for i in rgb_color])", "def translate_rgb(rgb_tuple):\n mapped_rgb_value = []\n for component in rgb_tuple:\n mapped_rgb_value.append(translate(component, 0, 1, 0, 255))\n\n return tuple(mapped_rgb_value)", "def rgb_to_color(*rgb):\n if(len(rgb)==1):\n r,g,b = rgb[0]\n else:\n r,g,b = rgb\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_rgb(self):\n return tuple(int(self.color[i : i + 2], 16) for i in (0, 2, 4))", "def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def _rgb(color):\n warnings.warn('Use color.rgba instead of color._rgb', FutureWarning, stacklevel=2)\n return (int(color[-6:-4], 16), int(color[-4:-2], 16), int(color[-2:], 16))", "def RGB2HTMLColor(rgb_tuple):\n hexcolor = '#%02x%02x%02x' % rgb_tuple\n # that's it! '%02x' means zero-padded, 2-digit hex values\n return hexcolor", "def label_rgb(colors):\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))", "def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]", "def _from_rgb(rgb):\n return \"#%02x%02x%02x\" % rgb", "def hex_to_rgb(self,value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def color(c):\n\n if isinstance(c, tuple) and len(c) == 4:\n return c\n\n if c is None:\n return c\n\n if isinstance(c, basestring):\n if c[0] == '#':\n c = c[1:]\n\n if len(c) == 6:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = int(c[6]+c[7], 16)\n elif len(c) == 3:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = 255\n elif len(c) == 4:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = int(c[3], 16) * 0x11\n else:\n raise Exception(\"Color string must be 3, 4, 6, or 8 hex digits long.\")\n\n return (r, g, b, a)\n\n raise Exception(\"Not a color: %r\" % (c,))", "def RGBToHTMLColor(rgb_tuple):\n\thexcolor = '#%02x%02x%02x' % rgb_tuple\n\t# that's it! 
'%02x' means zero-padded, 2-digit hex values", "def parseColor(c):\n if c in baseColors:\n return baseColors[c]\n if len(c) == 6:\n return tuple(map(lambda x: int(x, 16), (c[:2], c[2:4], c[4:])))\n if len(c) == 3:\n return tuple(map(lambda x: 16*int(x, 16), c))\n raise ValueError(\"Can't find color '{}'\".format(c))", "def RGBToHTMLColor(rgb_tuple):\n\tr,g,b=rgb_tuple\n\tr=int(r/255.0*16)\n\tg=int(g/255.0*16)\n\tb=int(b/255.0*16)\n\tif r == 16:\n\t\tr = 15\n\tif g == 16:\n\t\tg = 15\n\tif b == 16:\n\t\tb = 15\n\thexcolor = '#%x%x%x' % (r,g,b)\n\t# that's it! '%02x' means zero-padded, 2-digit hex values\n\treturn hexcolor", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def hextorgb(value):\n colrgb = tuple(int(value[i:i+2], 16) for i in (0, 2, 4))\n print('RGB =', colrgb)\n return colrgb", "def RGBToHTMLColor(rgb_tuple):\n hexcolor = '#%02x%02x%02x' % rgb_tuple\n # that's it! '%02x' means zero-padded, 2-digit hex values\n return hexcolor", "def translate_to_tkcolor(color):\n return rgb_to_hex(translate_rgb(color))", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def websafe(color_tuple):\n return tuple(tbl[c] for c in color_tuple)", "def IntToColor(number):\n color = COLORS_INDEX.get(number)\n return color if color else 'default'", "def rgbcolor(h, f):\n # q = 1 - f\n # t = f\n if h == 0:\n return v, f, p\n elif h == 1:\n return 1 - f, v, p\n elif h == 2:\n return p, v, f\n elif h == 3:\n return p, 1 - f, v\n elif h == 4:\n return f, p, v\n elif h == 5:\n return v, p, 1 - f", "def _indexTupleToColor(self, index):\n coltuple = self.graphColors[index]\n color = wx.Colour()\n color.Set(coltuple[0] * 255, coltuple[1] * 255, coltuple[2] * 255)\n return color", "def get_rgb(self, r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def from_rgb(self, rgb):\n return \"#%02x%02x%02x\" % rgb", "def led(color: Tuple[int, int, int], /) -> None:", "def color_tuple_bgr_to_plt(color_tuple):\n return (color_tuple[2]/255, color_tuple[1]/255, color_tuple[0]/255)", "def get_hexcode(rgb):\n return \"#\" + \"\".join(f\"{hex(int(x))[2:]:0>2}\" for x in rgb)", "def rgb_color( color ):\n color = color.strip()\n if color[0] == '#':\n color=color[1:]\n if len(color) != 6:\n raise ValueError, \"#%s incorrect format use #rrggbb\" % color\n r, g, b = color[:2], color[2:4], color[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def color_code_to_rbg_tuple(color_code):\n code_r = color_code[1:3]\n code_g = color_code[3:5]\n code_b = color_code[5:7]\n r = int(code_r, 16)\n g = int(code_g, 16)\n b = int(code_b, 16)\n return r, g, b", "def colour_to_rgb_tuple(cls, col_str):\n hex_6 = cls.RE_COLOUR_HEX_6.search(col_str)\n if hex_6:\n #Simply converts hex directly to dec \n return tuple(int(c,16) for c in hex_6.groups())\n hex_3 = cls.RE_COLOUR_HEX_3.search(col_str)\n if hex_3:\n #First must convert single value range 0-15 to range 0-255 \n return tuple(int(int(c,16)/15.0*255.0) for c in hex_3.groups())\n rgb = cls.RE_COLOUR_RGB.search(col_str)\n if rgb:\n return tuple(int(c) for c in rgb.groups()) #Direct output of tuple from regex!\n return None #Otherwise canny do i' captain", "def convert_to_RGB_255(colors):\n return 
(colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)", "def intToRGB(RGBInt):\n b = RGBInt & 255\n r = (RGBInt >> 8) & 255\n g = (RGBInt >> 16) & 255\n return (r,g,b)", "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def rgbTuple(rgb):\n return struct.unpack('BBB',rgb)", "def get_color_in_rgb_decimal():\n\n # Grabbing custom colormap from matplotlib\n a = cm.get_cmap('cool', 32)\n b = cm.get_cmap('spring', 32)\n c = cm.get_cmap('autumn_r', 64)\n d = cm.get_cmap('bwr_r', 192)\n e = cm.get_cmap('Greens', 192)\n\n # Adding the colormaps into one stack to have a more comprehensive color spectrum \n newcolors = np.vstack((a(np.linspace(0, 1, 32)), \n b(np.linspace(0, 1, 32)), \n c(np.linspace(0, 1, 64)),\n d(np.linspace(0, 0.5, 192)),\n e(np.linspace(0, 1, 192)),\n ))\n return newcolors", "def convColor(colorString):\n if len(colorString) != 6:\n return None\n r, g, b = colorString[:2], colorString[2:4], colorString[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "def _to_code(rgb):\n code = 0\n if 'r' in rgb:\n code += 1\n\n if 'g' in rgb:\n code += 2\n\n if 'b' in rgb:\n code += 4\n return code", "def fromInts(r, g, b):\n return IColor(r/255.,g/255.,b/255.)", "def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c", "def rgb(r, g, b):\n return \"\".join([\"%02X\" % max(0, min(x, 255)) for x in [r, g, b]])", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def hex_to_rgb(value):\r\n lv = len(value)\r\n out = tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\r\n out = tuple([x/256.0 for x in out])\r\n return out", "def color(value):\r\n return 'RGB({}, {}, {})'.format(value.red(), value.blue(), value.green())", "def wx_to_enable_color(color):\n \n enable_color = array((1.0,1.0,1.0,1.0))\n enable_color[:3] = asarray(color.Get())/255.\n\n return tuple(enable_color)", "def toRGB(self):\n\t\treturn '#'+ (hex(self.politics)[2:].upper().rjust(6,'0'))", "def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)", "def RGBIntToRGB(RGB):\n\tr,g,b,a = RGBIntToRGBA(RGB)\n\treturn (r,g,b)", "def 
rrggbb_to_triplet(color):\n rgbtuple = _namedict.get(color)\n if rgbtuple is None:\n if color[0] != '#':\n raise BadColor(color)\n red = color[1:3]\n green = color[3:5]\n blue = color[5:7]\n rgbtuple = int(red, 16), int(green, 16), int(blue, 16)\n _namedict[color] = rgbtuple\n return rgbtuple", "def rgb(r, g, b):\n return (r/255, g/255, b/255)", "def color_val_matplotlib(color):\n color = mmcv.color_val(color)\n color = [color / 255 for color in color[::-1]]\n return tuple(color)", "def decode_color_code(color):\n\n try:\n if len(color) != 6:\n raise ValueError\n color = int(color, 16)\n except ValueError:\n sys.exit(\"Invalid command line color argument.\")\n return (color >> 16, (color >> 8) & 0xff, color & 0xff)", "def RGB_hex_to_color(text: str, a: int=255):\n num = int(text, 16)\n r = num // 65536\n g = (num - r * 65536) // 256\n b = num - r * 65536 - g * 256\n return np.clip(np.array([r, g, b, a], dtype='u1'), 0, 255)", "def _update_color(self, rgb_tuple):\n for color in rgb_tuple._fields:\n pin = getattr(PINS, color)\n value = getattr(rgb_tuple, color)\n # Ensure color between 0 and 255\n value = max(min(value, 255), 0)\n # print(pin, value)\n self.pi.set_PWM_dutycycle(pin, value)", "def get_rgb(self):\n\n return \"#%02X%02X%02X\" % (self.r, self.g, self.b)", "def hex2rgb(self,color):\n if type(color)!=str or len(color)!=7:\n raise ValueError\n \n value = color.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))", "def _color(self, args):", "def clr_tuple(colorstring):\n\n if colorstring[0] == '#':\n if len(colorstring) == 7:\n return (ONE_OVER_256 * float(_hexbyte(colorstring[1:3])),\n ONE_OVER_256 * float(_hexbyte(colorstring[3:5])),\n ONE_OVER_256 * float(_hexbyte(colorstring[5:7])))\n if len(colorstring) == 4:\n return (ONE_OVER_16 * float(_hexchar(colorstring[1])),\n ONE_OVER_16 * float(_hexchar(colorstring[2])),\n ONE_OVER_16 * float(_hexchar(colorstring[3])))\n if colorstring in colors.CSS4_COLORS:\n return clr_tuple(colors.CSS4_COLORS[colorstring])\n if colorstring in colors.BASE_COLORS:\n return clr_tuple(colors.BASE_COLORS[colorstring])\n\n rgb_re = re.compile(\"rgb:(.*),(.*),(.*)\")\n\n rgb_match = rgb_re.search(colorstring)\n if rgb_match:\n return (float(rgb_match.group(1)),\n float(rgb_match.group(2)),\n float(rgb_match.group(3)))\n return None", "def create_unique_color_uchar(tag, hue_step=0.41):\n r, g, b = create_unique_color_float(tag, hue_step)\n return int(255*r), int(255*g), int(255*b)", "def _binary_to_rgb(*args) -> tuple:\n if len(args) == 1:\n red = args[0][0]\n green = args[0][1]\n blue = args[0][2]\n elif len(args) == 3:\n red = args[0]\n green = args[1]\n blue = args[2]\n else:\n raise ValueError(\n \"Arguments must be RGB tuple or Red, Green, Blue as 3 arguments.\"\n )\n\n r_int = int(red, 2)\n g_int = int(green, 2)\n b_int = int(blue, 2)\n\n return (r_int, g_int, b_int)", "def unlabel_rgb(colors):\n str_vals = ''\n for index in range(len(colors)):\n try:\n float(colors[index])\n str_vals = str_vals + colors[index]\n except ValueError:\n if colors[index] == ',' or colors[index] == '.':\n str_vals = str_vals + colors[index]\n\n str_vals = str_vals + ','\n numbers = []\n str_num = ''\n for char in str_vals:\n if char != ',':\n str_num = str_num + char\n else:\n numbers.append(float(str_num))\n str_num = ''\n return (numbers[0], numbers[1], numbers[2])", "def int_to_rgb(integer):\n hexadecimal = hex(int(integer))\n hexadecimal = hexadecimal.split(\"x\")[1].zfill(6)\n r = int(\"0x\" + hexadecimal[-7:-4], 
16)\n g = int(\"0x\" + hexadecimal[-4:-2], 16)\n b = int(\"0x\" + hexadecimal[-2::], 16)\n return r, g, b", "def rgbString(red,green,blue):\n return chr(red)+chr(green)+chr(blue)", "def toColor(n):\n color = ('%X'%(n+ID_OFFSET)).rjust(6,'0')\n if not len(color) == 6:\n raise ColorError(n)\n else:\n r = int(color[0:2], 16)\n g = int(color[2:4], 16)\n b = int(color[4:6], 16)\n return '%.3d %.3d %.3d'%(r,g,b)", "def color565(red, green=0, blue=0):\n try:\n red, green, blue = red # see if the first var is a tuple/list\n except TypeError:\n pass\n return (red & 0xf8) << 8 | (green & 0xfc) << 3 | blue >> 3", "def mage_hsv_tuple_to_rgb(hsv):\r\n hsv_0_to_1 = hsv[0] / 360.0, hsv[1] / 100.0, hsv[2] / 100.0\r\n rgb = hsv_to_rgb(*tuple(hsv_0_to_1))\r\n return int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)", "def gbc2rgb(c):\n #GBC format: 0bbbbbgggggrrrrr (b-blue, g-green, r-red)\n r = (c % (1 << 5)) << 3\n g = ((c / (1 << 5)) % (1 << 5)) << 3\n b = ((c / (1 << 10)) % (1 << 5)) << 3\n return (r,g,b)", "def color_to_rgb(color):\n \n if isinstance(color, tuple):\n # if a RGB tuple already\n return color\n else:\n # to_rgb() returns colors from (0-1)\n color = tuple(int(x * 255) for x in to_rgb(color))\n return color", "def hex2rgb(hexcode):\n\treturn tuple(map(ord, hexcode[1:].decode('hex')))", "def _color(self,c):\n return self.colorlist[c%len(self.colorlist)]", "def getColor(n, total = 255, decimal = False):\n\n value = round(255*n/(total * 1.0))\n\n #red value\n if value < 96:\n red = 0\n elif value < 160:\n red = 255/((160 - 96)*1.0) * (value - 96)\n elif value < 224:\n red = 255\n else:\n red = 255 - ((255 - 128)/((255 - 224) * 1.0) * (value - 224))\n\n\n #Green value\n if value < 32:\n green = 0\n elif value < 96:\n green = 255/((96 - 32)*1.0) * (value - 32)\n elif value < 160:\n green = 255\n elif value < 224:\n green = 255 - (255/((224 - 160) * 1.0) * (value - 160))\n else:\n green = 0\n\n\n #Blue value\n if value < 32:\n blue = 128 + (255 - 128)/((32 - 0) * 1.0) * (value - 0)\n elif value < 96:\n blue = 255\n elif value < 160:\n blue = 255 - ((255 - 0)/((160 - 96) * 1.0) * (value - 96))\n else:\n blue = 0\n\n if decimal:\n return (red / 255.0, green / 255.0, blue / 255.0)\n return (int(red), int(green), int(blue))", "def getColor(self,number):\n if number >= 0:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,0,abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,1-abs(number/self.maxn),1)\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n return [ret[0]*255.0,ret[1]*255.0,ret[2]*255.0]", "def rgb(self):\n return (self.red, self.green, self.blue)", "def random_colour(rng: random.Random) -> TupleInt3:\n r = rng.randint(0, 255)\n g = rng.randint(0, 255)\n b = rng.randint(0, 255)\n return r, g, b", "def test_mage_hsv_tuple_to_rgb(self):\r\n htr = mage_hsv_tuple_to_rgb # for convenience\r\n self.assertEqual(htr((0, 0, 0)), (0, 0, 0))\r\n self.assertEqual(htr((0, 100, 100)), (255, 0, 0))\r\n self.assertEqual(htr((120, 100, 100)), (0, 255, 0))\r\n self.assertEqual(htr((240, 100, 100)), (0, 0, 255))\r\n assert_almost_equal(htr((0, 0, 49.803921568627452)), (127, 127, 127))", "def rgb2unit(rgb: Union[Tuple3, Tuple4]) -> tuple:\n if len(rgb) == 3:\n return tuple(c / 255 for c in rgb)\n return tuple(c / 255 if i < 3 else c for i, c in enumerate(rgb))", "def get_label_html_color_code(idx):\n color_array = get_label_color_mapping(idx)\n return f\"#{color_array[0]:02X}{color_array[1]:02X}{color_array[2]:02X}\"", "def 
randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))", "def change( p ):\n red = p[0]\n green = p[1]\n blue = p[2]\n return [ 255-red, 255-green, 255-blue ]", "def from_rgb(r, g, b) -> str:\n return \"#{0:02x}{1:02x}{2:02x}\".format(r, g, b)", "def getColor(self,number):\n if number >= 0:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n hexcolor = '#%02x%02x%02x' % (ret[0]*255,ret[1]*255,ret[2]*255)\n return hexcolor", "def color_hex(self):\n n = 2\n return tuple(\n hex(int(self.color[i : i + n], 16)) for i in range(0, len(self.color), n)\n )", "def test_color__int_arg(self):\n for value in (0x0, 0xFFFFFFFF, 0xAABBCCDD):\n color = pygame.Color(value)\n\n self.assertEqual(color.r, (value >> 24) & 0xFF)\n self.assertEqual(color.g, (value >> 16) & 0xFF)\n self.assertEqual(color.b, (value >> 8) & 0xFF)\n self.assertEqual(color.a, value & 0xFF)", "def _bin_to_int(self, rgb):\n r, g, b = rgb\n return int(r, 2), int(g, 2), int(b, 2)", "def Color(red, green, blue, white = 0):\n\treturn (white << 24) | (red << 16)| (green << 8) | blue", "def hex_to_rgb(value):\n value = value.lstrip('#')\n hex_total_length = len(value)\n rgb_section_length = hex_total_length // 3\n return tuple(int(value[i:i + rgb_section_length], 16)\n for i in range(0, hex_total_length, rgb_section_length))" ]
[ "0.7461026", "0.70466757", "0.70459116", "0.70223033", "0.69848394", "0.69848394", "0.69848394", "0.69848394", "0.6970065", "0.6949212", "0.6946806", "0.6917932", "0.68870723", "0.68848807", "0.6877446", "0.6874237", "0.68409014", "0.68385947", "0.68327475", "0.6821566", "0.67950803", "0.6786944", "0.6769461", "0.6736301", "0.67293906", "0.67293906", "0.67293906", "0.67198473", "0.6679793", "0.6662124", "0.6629766", "0.661673", "0.6591876", "0.6588213", "0.6574808", "0.65584356", "0.65563667", "0.6555701", "0.6554145", "0.6516087", "0.6514289", "0.6512754", "0.6482537", "0.6478707", "0.64720124", "0.6468717", "0.64686847", "0.6462821", "0.64595765", "0.64531857", "0.6448155", "0.64402413", "0.6439206", "0.6439206", "0.6439206", "0.6439206", "0.6437127", "0.6435107", "0.6434274", "0.64338636", "0.6413647", "0.64065325", "0.6405685", "0.6392018", "0.63882583", "0.63816375", "0.6380949", "0.6375484", "0.6373338", "0.6372909", "0.63655716", "0.6364719", "0.63304216", "0.6323258", "0.63172245", "0.6315864", "0.63121647", "0.629992", "0.629645", "0.62812537", "0.62772524", "0.626345", "0.62569", "0.62515545", "0.6244848", "0.62432003", "0.624208", "0.62377346", "0.6229464", "0.6228403", "0.622479", "0.62175137", "0.6217445", "0.62130463", "0.6210552", "0.62052447", "0.6204108", "0.6203335", "0.61914724", "0.6185161" ]
0.7030737
3
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
def __init__(self,*args): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def 
__init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *more): # real signature unknown; restored from __doc__\n pass", "def x_init(self):\n pass", "def _init_signature(func_name, restype, argtypes):\n global cfi\n f = getattr(cfi, func_name)\n f.restype = restype\n f.argtypes = argtypes", "def init(self, *args, **kwds):\n pass", "def __init__(self, sig, *args, **kwargs):\n if not isinstance(sig, Signature):\n raise TypeError(\"a Signature is required as first argument\")\n BasicCall.__init__(self, sig, *args)\n for arg, val in kwargs.iteritems():\n setattr(self, arg, val)", "def initialize(self, *args, **kwargs):", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self, xstart, **more_args):\r\n self.xstart = xstart\r\n self.more_args = more_args\r\n self.initialize()", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(*args):", "def __init__(self,*args):\r\n pass", "def __init__(self,*args):\r\n pass" ]
[ "0.7455761", "0.7428699", "0.7428699", "0.7428699", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.72378224", "0.6951459", "0.6779704", "0.6773422", "0.6681886", "0.6597313", "0.65501034", "0.65501034", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65199494", "0.65124166", "0.64941543", "0.6491486", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64520335", "0.64500874", "0.64500874" ]
0.0
-1
Read a file; returns the file contents as a string
def rSeqFile(FilePath):
    f=open(FilePath, 'r')
    #check if file open
    if f.mode == 'r':
        return(f.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", "def read_file(self, file_name: str)-> str:\n if not os.path.exists(file_name):\n raise IOError(\"The File {} doesn't exists!\".format(file_name))\n\n with open(file_name) as file:\n return file.read().strip()", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n f.close()\n return string", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def readfile(path: Union[str, Path]) -> str:\n with open(path) as infile:\n return infile.read()", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def ReadFile(self, filename):\n file = open(filename, 'rb')\n result = \"\"\n try:\n result = file.read()\n finally:\n file.close()\n return result", "def ReadFile(self, filename):\r\n file = open(filename, 'rb')\r\n result = \"\"\r\n try:\r\n result = file.read()\r\n finally:\r\n file.close()\r\n return result", "def read_file(filename):\n return open(filename).read()", "def read_file(filepath: str) -> str:\n with open(filepath, \"r\") as filep:\n return filep.read()", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def read_file(filename: str, mode: str = \"r\") -> str:\n with open(filename, mode) as file:\n file_content = file.read()\n return file_content", "def read(file_name):\n with io.open(os.path.join(os.path.dirname(__file__), file_name),\n encoding='utf-8') as f:\n return f.read()", "def _readfile(dirpath, filename):\n try:\n with codecs.open(os.path.join(dirpath, filename), \"r\", \"utf-8\") as f:\n return f.read()\n except IOError:\n return u\"\"", "def ReadFileIntoString(filepath):\n with open(filepath, 'r') as file_handle:\n contents = file_handle.read()\n return contents", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except:\n return ''", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except IOError:\n return ''", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def read_file(*file_name: str) -> str:\n with open(os.path.join(HERE, *file_name)) as f:\n return f.read()", "def read(filename):\n\n path = os.path.join(os.path.dirname(__file__), filename)\n\n with open(path) as f:\n return f.read()", "def read(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def _ReadFile(filepath):\n with open(filepath) as f:\n return f.read()", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')", "def read_from_file(file_name):\n 
with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def read(path):\n with open(path) as f:\n return f.read()", "def ReadFile(f_path):\n data = ''\n\n if f_path:\n try:\n fh = open(f_path, 'r')\n try:\n data = fh.read()\n finally:\n fh.close()\n except IOError:\n return ''\n\n return data", "def file_read(path: str) -> str:\n if os.path.isfile(path):\n while True:\n try:\n with open(path, \"r\") as fptr:\n return fptr.read()\n except PermissionError:\n pass\n return \"\"", "def read(self, filename):\n\t\treturn codecs.open(filename, 'r', 'utf8').read()", "def read_file(file_path, mode='r', encoding=\"utf-8\"):\n with codecs.open(file_path, mode, encoding=encoding) as fp:\n return fp.read().strip()", "def local_read(filename):\n full_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n filename)\n return codecs.open(full_filename, 'r', 'utf-8').read()", "def read(fn):\n with open(os.path.join(os.path.dirname(__file__), fn), encoding='utf-8') as f:\n return f.read()", "def read(name):\n\n return open(name).read()", "def ReadFile(path, mode='r'):\n with open(path, mode) as f:\n return f.read()", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n f.close()\n return fileContent.strip()", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def read(path):", "def SimpleRead(fn):\n content = \"\"\n try:\n content = open(fn).read()\n except :\n print(\"Failed to read file: %s\\n\"%(fn))\n print sys.exc_info()[1]\n\n return content", "def readFile(self, name):\n\t\ttry:\n\t\t\tf = open(name, 'r')\n\t\t\tlines = f.readlines()\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\treturn None\n\n\t\treturn join(lines, \"\")", "def read(self, path: str) -> str:\n raise NotImplementedError", "def _file_read(self, file: str) -> str:\n with open(f\"tests/resources/{file}\", \"r\") as fs:\n result = \"\\n\".join(fs.read().splitlines())\n return result", "def read_file(path):\n try:\n with open(path, 'r') as text_file:\n return \"\".join(text_file.readlines()).strip()\n except IOError:\n exit(\"Error: file '%s' is not readable!\" % path)", "def read_file(filename):\n open_kwargs = {}\n if sys.version_info.major == 3:\n open_kwargs = {'encoding': 'utf-8'}\n\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n with open(filepath, **open_kwargs) as filecontents:\n return filecontents.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read(self, name: str) -> str:\n path = self.get_path(name)\n if not os.path.exists(path):\n return \"\"\n\n with 
open(path, \"r\") as fh:\n return fh.read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def get_file_content(self, file_name: str):\n file_name = Path(__file__).absolute().parents[1].joinpath(file_name)\n try:\n with file_name.open('r') as file:\n intermediate = file.readlines()\n return ''.join(intermediate)\n except FileNotFoundError as message:\n self.logger.error(message)\n return ''", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def readFromTextFile(self, file_name):\n with open(file_name, 'r') as file_obj:\n return file_obj.read()", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def readfile(fname, mode='rb'):\n f = open(fname, mode)\n raw = f.read()\n f.close()\n return raw", "def open_and_read_file(file_path):\n\n # your code goes here\n file_name = (open(file_path)).read()\n return file_name", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def read(f_relative_path: str) -> str:\n here = path.dirname(path.abspath(__file__))\n with io.open(path.join(here, f_relative_path), mode=\"rt\", encoding=\"utf8\") as f:\n return f.read()", "def open_and_read_file(file_path):\n text_data = open(file_path).read()\n # print text_data\n return text_data", "def read_file(file) -> str:\n file = open(file, \"r\")\n my_string = file.read()\n return get_clean_text(my_string)", "def read_file(file_path):\n try:\n input_file = open(file_path)\n text_content = input_file.read()\n input_file.close()\n return text_content\n except IOError:\n print (\"Can not read from file\")", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def read(path, encoding=\"utf-8\"):\n try:\n with io.open(path, encoding=encoding) as f:\n return f.read()\n except Exception as e:\n logger.error(\"read: %s failed. 
Error: %s\", path, e)\n return \"\"", "def read_file(filename):\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content", "def read_file(path):\n assert_is_string(path)\n f = open(path, \"r\")\n data = f.read()\n f.close()\n return data", "def read_file(fname):\n with open(fname, 'r') as fopen:\n fdata = fopen.read()\n return fdata", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def read_file(self, *args):\n with open(os.path.join(self.temp_path, *args)) as fp:\n return fp.read()", "def file2str(file):\n with open(file, \"r\") as textFile:\n return textFile.read()", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def readfile(filename, mode='r'):\n if mode != 'r' and mode != 'rb':\n print(f\"ERROR: incorrect mode : expected 'r' or 'rb' given {mode}\\n\")\n else:\n with open(Path(os.path.expanduser(filename)), mode)as f:\n content = f.read()\n f.close()\n return content", "def local_file_as_string(self, file_path):\n with open(file_path, 'rb') as file:\n string = file.read().decode('utf-8')\n return string", "def open_and_read_file(file_path):\n\n # your code goes here\n return open(file_path).read()", "def file_to_string(file_name):\n with open(file_name, 'r') as f:\n text = f.read()\n # delete original file\n os.remove(file_name)\n return text", "def file_to_string(path_to_file):\n\t\twith open(path_to_file, 'r') as f:\n\t\t\tcontent = f.read()\n\t\treturn content", "def read(path):\n with open(path) as f:\n contents = f.read()\n return contents", "def open_and_read_file(file_path):\n\n # your code goes here\n text_file = open(file_path)\n text_string= text_file.read()\n text_file.close()\n return text_string", "def read_file(file_path):\n\n file_string = ''\n\n with open(file_path, 'r', newline='') as file:\n for line in file:\n file_string = file_string + line.rstrip('\\n')\n\n return file_string", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def read_file(file_path: str) -> str:\n try:\n with open(file=file_path, mode='r', encoding=\"utf8\") as f:\n return f.read()\n\n except FileNotFoundError:\n raise FileNotFoundError(f'No text file was found at location {file_path}')", "def read_file(file_name, enc=\"latin-1\"):\n f = open(file_name, \"r\", encoding=enc)\n content = \"\".join(f.readlines())\n f.close()\n return content", "def read(cls, path):\n with cls.open(path, 'rt') as fd:\n return fd.read()", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def readFile(fileName):\n with open(fileName, 'r', encoding='utf-8') as f:\n text = f.read()\n return text", "def readfile(filename):\n\n infile = open(filename, \"r\") # open file for reading\n\n # Use Python's file read function to read the file contents\n filetext = infile.read().splitlines()\n\n infile.close() # close the file\n\n return filetext # the text of the file, as a single string", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def open_and_read_file(file_path):\n\n # Read the file, return text as a string titled \"contents\"\n contents = open(file_path).read()\n\n # Return contents of your file as one long string\n return contents", "def 
readFile( *args ):\n filePath = path.join( *args )\n if not path.exists( filePath ):\n return None\n with open( filePath, 'r' ) as fd:\n return fd.read()", "def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content" ]
[ "0.8561569", "0.81707746", "0.8130362", "0.8053142", "0.8042461", "0.8016344", "0.80138814", "0.80110985", "0.8004594", "0.7972023", "0.79629755", "0.796143", "0.79515415", "0.7903697", "0.7901838", "0.7858624", "0.7825868", "0.7805045", "0.7800547", "0.77981794", "0.7788316", "0.77735704", "0.77720565", "0.7770504", "0.7754681", "0.77474946", "0.77405924", "0.77220595", "0.77147055", "0.7714365", "0.77130485", "0.7691175", "0.7678022", "0.7671556", "0.7662947", "0.7662577", "0.7643867", "0.763916", "0.7637408", "0.7633891", "0.76322997", "0.76290447", "0.76208866", "0.7601358", "0.75820917", "0.75757825", "0.75645167", "0.754385", "0.753941", "0.7520863", "0.75117373", "0.7496154", "0.7487309", "0.7460979", "0.74313885", "0.7425687", "0.7425687", "0.7422454", "0.7419456", "0.7419456", "0.74182236", "0.7416227", "0.7398861", "0.7392188", "0.7391106", "0.7383671", "0.73796725", "0.73696905", "0.73631877", "0.73540443", "0.73259187", "0.73220545", "0.73126024", "0.7311697", "0.7308763", "0.7308716", "0.73039335", "0.72974527", "0.7297126", "0.7296062", "0.72944003", "0.7279638", "0.72678554", "0.72676325", "0.72609466", "0.7255822", "0.724867", "0.72436064", "0.72273296", "0.72272474", "0.7221349", "0.7206733", "0.719111", "0.7188067", "0.7180637", "0.71743315", "0.71669036", "0.71595377", "0.7155636", "0.71548873", "0.715345" ]
0.0
-1
Tidying lines in preparation for processing: split the text into lines and delete empty lines; returns a list of lines
def TidyLines(SeqFile):
    TSeqs = SeqFile.splitlines()
    TSeqs = list(filter(None,TSeqs))
    return(TSeqs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def clean_lines(lines):\n _lines = []\n for l in lines:\n l = l.strip().rstrip()\n if len(l) > 0:\n _lines.append(l)\n return _lines", "def __format_lines(cls, lines):\n\n result = []\n\n for line in [x for x in lines if x]:\n if not line.startswith(\"#\"):\n if \"#\" in line:\n line = line[: line.find(\"#\")]\n\n if \"\\t\" in line or \" \" in line:\n splited_line = line.split()\n\n for element in splited_line[:1]:\n if element:\n line = element\n break\n result.append(line)\n\n return result", "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def _strip_lines(lines):\n for line in lines:\n stripped = line.strip()\n if stripped:\n yield stripped", "def split_by_lines(text, remove_empty=False):\n\tlines = text.splitlines()\n\t\n\treturn remove_empty and [line for line in lines if line.strip()] or lines", "def clean_data(self, lines):\n\n data = []\n curr = None\n for line in lines:\n line = self.clean_line(line)\n\n temp = []\n quotes = 0\n for item in line.split():\n if quotes % 2 == 0:\n temp.append(item)\n else:\n temp[-1] += item\n quotes += item.count(\"\\\"\")\n line = temp\n\n if not line:\n continue\n if curr:\n if self.compare_keys(curr, line):\n curr = self.merge_lines(curr, line)\n else:\n data.append(self.add_line(curr))\n curr = line\n else:\n curr = line\n if curr:\n data.append(self.add_line(curr))\n return data", "def splitlines(self) -> List[String]:\n pass", "def clean(text):\n lines = text.split('\\n')\n\n indx = range(len(lines))\n indx.reverse()\n for i in indx:\n temp = lines[i].strip()\n if temp == '' or temp.startswith('#'):\n del lines[i]\n else:\n lines[i] = temp\n\n return lines", "def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n result.append(current_group)\n return result", "def line_split(self, line):\n parts = []\n part = None\n quote = None\n for c in line:\n if part is None and not self.is_space(c):\n quote = c if self.is_quote(c) else None\n part = c if quote is None else \"\"\n elif part is not None and quote is None and not self.is_space(c):\n part += c\n elif part is not None and quote is not None:\n if c != quote:\n part += c\n else:\n parts.append(part)\n part = None\n quote = None\n elif part is not None and quote is None and self.is_space(c):\n parts.append(part)\n part = None\n quote = None\n if part is not None:\n parts.append(part)\n return parts", "def _strip(lines: Sequence[str]) -> Sequence[str]:\n lines = [i.rstrip() for i in lines]\n return lines", "def split_line_robust(line):\n\n line_split0 = [x.rstrip('\\n') for x in line.split(' ') if x]\n line_split1 = [x.split('\\t') for x in line_split0 if x]\n line_split = []\n for l_one in line_split1:\n for l_two in l_one:\n if l_two: line_split.append(l_two)\n return(line_split)", "def splitBodyLines(cls, text):\n\n def remove_comments(line):\n \"\"\"\n Returns the given line stripped of any comments.\n \"\"\"\n hashPos = line.find('#')\n return line[:hashPos] if hashPos >= 0 else line\n\n # Remove comments, strip whitespace, and return only non-blank 
lines\n lines = map(str.strip, map(remove_comments, text.splitlines()))\n return [l for l in lines if l]", "def dedent_lines(lines):\r\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")", "def dedent_lines(lines):\r\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")", "def nonempty_lines(text):\n return [line for line in text.split('\\n') if line]", "def clean_data(self):\n for line in self.file:\n if line.startswith('//') or line.isspace():\n continue\n if '//' in line:\n line = line.split('//')[0]\n line = line.replace('\\n', '')\n line = line.replace(' ','')\n self.commands.append(line)", "def lines(text):\n return [l.strip() for l in text.strip().splitlines() if l.strip()]", "def AutoSplitlines(self):\n\t\ttry:\n\t\t\tends_with_cr = self.content.endswith('\\n')\n\t\t\tself.lines = self.content.splitlines()\n\t\t\tyield\n\t\tfinally:\n\t\t\tself.content = '\\n'.join(self.lines)\n\t\t\tif ends_with_cr:\n\t\t\t\tself.content += '\\n'", "def prep_difflines(content):\n return [ x+\"\\n\" for x in content.split(\"\\n\") ]", "def parse_lines(s):\n return [l.strip() for l in s.splitlines() if l.strip()]", "def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]", "def _str2lines(cls, text):\n lines = text.replace('\\r', '').split('\\n')\n lines[0] = lines[0].strip() # firts line is always detented\n if len(lines) > 1:\n # Get minimal indentation\n min_indent = 99999\n for line in lines[1:]:\n if line.strip(): # don't count empty lines\n min_indent = min(min_indent, len(line) - len(line.lstrip()))\n # Remove indentation\n for i in range(1, len(lines)):\n lines[i] = lines[i][min_indent:].rstrip()\n # Remove empty line only at beginning\n if not lines[0]:\n lines.pop(0)\n return lines", "def listFromLines(lines):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n temp = [x for x in temp if x]\n return temp", "def format_lines(unprocessed_text: str) -> List[List[str]]:\n stored_lines: List[List[str]] = []\n new_line: List = []\n new_word: str = \"\"\n for char in unprocessed_text:\n if char != \"\\n\":\n if char != \" \" and char.isalpha():\n new_word += char\n else:\n new_line.append(new_word)\n new_word = \"\"\n else:\n stored_lines.append(new_line)\n new_line = []\n return stored_lines", "def filter_lines(view_lines):\n\n # this still doesn't work because we need to filter xml above the line level\n # do newlines from the server ever contain meaningful data or are they pointless?\n # is all the newline data given by a terminating xml-type tag?\n\n # filter lines that start with an exclude string - non-regex\n excludes = ['<prompt time=\"']\n for exclude in excludes:\n view_lines = [line for line in view_lines if line[0 : len(exclude)] != exclude]\n\n # first lets just rip out the xml... later we will want to process it back into the stream\n # mostly we can use the xml just to update the state, if that's the case then if we miss\n # one then it's no proble, we just catch the next one... provided they are regular enough.\n # if they are not, or set state once, then we definitely want to catch every one\n xml_free_lines = list()\n for line in view_lines:\n\n # assuming lines only have xml if they start with xml? 
interesting idea, not sure if real\n i = 0\n xml_free_line_segments = list()\n xml_line_segments = list()\n xml_free_line_part = b\"\"\n xml_line_part = b\"\"\n ordered_parsed_line = list() # give a tuple of string, type\n\n # ISSUE: i'm pretty sure this is dropping a letter off the first non-xml line segment (or more)\n # make a bunch of line segments\n # note that line is a bytes() type, indexing line[i] returns int\n # if we slice into it line[i:i+1] we get a bytes() type of length 1\n while i < len(line):\n\n if line[i : i + 1] != b\"<\":\n xml_free_line_part += line[i : i + 1]\n\n else:\n\n # increment until you get out of the xml tag or out of the line\n while i < len(line) and line[i : i + 1] != b\">\":\n xml_line_part += line[i : i + 1]\n i += 1\n\n # toss the last b'>' on the end!\n xml_line_part += line[i : i + 1]\n\n # store the xml part off\n xml_line_segments.append(xml_line_part)\n ordered_parsed_line.append((\"xml\", xml_line_part))\n xml_line_part = b\"\" # reset the xml part\n\n # store xml free part off\n if len(xml_free_line_part) > 1:\n xml_free_line_segments.append(xml_free_line_part)\n ordered_parsed_line.append((\"text\", xml_free_line_part))\n xml_free_line_part = b\"\" # reset the xml_free_line_part\n\n i += 1 # covers incrementing past the '>' and incrementing if not yet in a '<'\n\n \"\"\"\n # https://lxml.de/tutorial.html\n # if the xml cannot be parsed, we just want to catch it and decide what to do\n try:\n xml = [lxml.etree.XML(xml_line) for xml_line in xml_line_segments]\n xml_tags = [x.tag for x in xml]\n # just testing lxml tag parsing\n if b'streamWindow' in xml_tags:\n xml_free_lines.append(b'streamWindow skipped...')\n\n except lxml.etree.XMLSyntaxError:\n xml = list() # no tags\n # toss any failing XML onto the text stream for manual parsing?\n # we can follow this approach even if we replace or wrap lxml with a manual parser\n xml_free_lines.extend(xml_line_segments)\n \"\"\"\n # do stuff with the xml components of the line\n op_line = ordered_parsed_line\n\n # strip the line back down to text\n clean_line = [x[1].replace(b\"&gt;\", b\">\") for x in op_line if x[0] == \"text\"]\n xml_free_lines.append(b\"\".join(clean_line))\n\n # send a hunk of xml so we can see what happened\n xml_line = [x[1].replace(b\"&gt;\", b\">\") for x in op_line if x[0] == \"xml\"]\n xml_free_lines.append(b\"\".join(xml_line))\n\n # just point it here for now so we don't have to change the return\n view_lines = xml_free_lines\n\n \"\"\"\n EXCLUDES = [\n r'<prompt.*>',\n r'</prompt.*>',\n ]\n\n SUBS = [\n (r'<.*>', ''),\n ]\n\n # drop empty lines before the regex to save processing\n # what about lines with whitespace only...\n view_lines = [line for line in view_lines if line != b'' or line != b'&gt']\n\n for exclude in EXCLUDES:\n view_lines = [str(line) for line in view_lines if not re.search(exclude, str(line))]\n\n for expr, sub in SUBS:\n view_lines = [re.sub(expr, sub, str(line)) for line in view_lines]\n\n # drop empty lines after the regex so they aren't shown\n view_lines = [line for line in view_lines if line != b'' or line != b'&gt']\n \"\"\"\n\n return view_lines", "def prepare_file(lines):\n return \" \".join(line.strip() for line in lines)", "def stripText(self, rawText):\n strippedText = []\n for line in rawText:\n if line.rstrip():\n if line[0] != '#':\n strippedText.append(line.rstrip()) #also remove newline character\n return strippedText", "def chunks(text):\n lines = []\n for line in text.splitlines():\n lines.append(re.sub(' {2,}', ' ', line.strip()))\n 
return '\\n'.join(lines).split('\\n\\n')", "def split_list_into_chunks(lines):\n qas = []\n qa = []\n for line in lines:\n if line == '\\n':\n qas.append(qa)\n qa = []\n continue\n qa.append(line[:-1]) # remove '\\n' at the end of each line\n return qas", "def list_wrapped_lines():\n for line in text.split('\\n'):\n if len(line) <= ncolumns:\n yield line\n else:\n while True:\n # try to wrap at a word-break\n last_word_break = re.search(r\"\\s+(?=\\S*$)\", line[:ncolumns])\n if last_word_break:\n yield line[:last_word_break.start()]\n line = line[last_word_break.end():].lstrip()\n else:\n yield line[:ncolumns]\n line = line[ncolumns:].lstrip()\n if len(line) == 0:\n break\n elif len(line) <= ncolumns:\n yield line\n break", "def groupLines(self, parts):\n\t\tline = []\n\t\thasDifference = False\n\t\treplaceline = lambda line: list((p[0], self.color_linedifferent if hasDifference and p[1] == self.color_normal else p[1]) for p in line)\n\t\tfor part in parts:\n\t\t\tif part[1] != self.color_normal:\n\t\t\t\thasDifference = True\n\t\t\tif not len(part[0]): continue\n\t\t\tline += [part]\n\t\t\tif part[0][-1] == \"\\n\":\n\t\t\t\tyield replaceline(line)\n\t\t\t\tline = []\n\t\t\t\thasDifference = False\n\t\tyield replaceline(line)", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def data_file_lines(year=None, day=None, part_num=1, preserve_blank_lines=False, testing=False):\n lines = read_data_file(year, day, part_num, testing).split(\"\\n\")\n if not preserve_blank_lines:\n return [line for line in lines if line]\n\n groups = []\n for k, g in itertools.groupby(lines, key=lambda x: x == ''):\n groups.append(list(g))\n return [g for g in groups if g[0]]", "def strip_text(text):\n\n return [line.strip() for line in text.splitlines()]", "def _checkForBlankLines(self, datalines):\n empties = None\n count = 0\n rtlines = []\n for line in datalines:\n if line.strip() == \"\":\n empties = 1\n else:\n if empties == 1: # If data line found after empty line then raise\n raise Exception(\"Empty line found in data section at line: \" + str(count))\n else:\n rtlines.append(line)\n count = count + 1\n return rtlines", "def strip_blanklines(blob):\n lines = blob.split('\\n')\n return '\\n'.join([line for line in lines if line.strip() != ''])", "def parse(input):\n return [l.strip() for l in input.splitlines() if l.strip()]", "def removeLines(self) -> List['StateNode']:\n lines = self.state[0]\n states: List[StateNode] = []\n for i in range(len(lines)):\n for j in range(i + 1, len(lines) + 1):\n new_lines = lines[:i] + lines[j:]\n if len(new_lines) == 0:\n continue\n states.append(StateNode(self.table, \n (new_lines, self.state[1]),\n (lines[i:j], []),\n self.cost + len(self.state[1]),\n self))\n return states", "def remove_empty_lines(self, string_list):\r\n string_list2 = []\r\n for strn in string_list:\r\n if strn:\r\n line = strn.strip()\r\n if line == \"\":\r\n continue\r\n else:\r\n string_list2.append(line)\r\n return string_list2", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def read_content(path):\n input_file = open(path, \"r\")\n file_handler = input_file.readlines()\n input_file.close()\n list = []\n\n for line in file_handler:\n line = line.split('\\n')\n without_empty_strings = []\n for string in line:\n if (string != \"\"):\n 
without_empty_strings.append(string)\n line = without_empty_strings\n line = \"\".join(line)\n list.append(line)\n return list", "def ped_line_reader(line):\n s = []\n for c in line:\n if c in '\\t\\n':\n yield ''.join(s)\n s = []\n continue\n\n s.append(c)", "def splitlines(self):\n bucket_shift = 6\n lines = [[] for _ in xrange((len(self) >> bucket_shift) + 1)]\n pos = 0\n new_lines = []\n line_count = 0\n find = self.find\n l = len(self)\n while pos < l:\n line_end = find(\"\\n\", pos)\n if line_end == -1:\n line_end = len(self) # - 1\n new_lines.append(AttrText(self[pos:line_end]))\n for line_no in xrange(pos >> bucket_shift, (line_end >> bucket_shift) + 1):\n lines[line_no].append((pos, line_end, line_count))\n line_count += 1\n pos = line_end + 1\n\n for start, end, attrs in self.attr_spans:\n for line_list in lines[start >> bucket_shift : (end >> bucket_shift) + 1]:\n for line_start, line_end, line_offset in line_list:\n line = new_lines[line_offset]\n line.attr_spans.append(\n (\n max(0, start - line_start),\n min(len(line), end - line_start),\n attrs,\n )\n )\n\n return new_lines", "def test_line_strip():\n for _x in range(100):\n l_str = \" \".join([random_str(5, 10) for x in range(30)])\n l_str = (\" \" * randint(0, 10)) + l_str + (\" \" * randint(0, 10))\n line = Line(l_str, random_str(10, 20), randint(1, 10000))\n # Strip the string\n l_stripped = line.strip()\n assert l_stripped == l_str.strip()\n assert isinstance(l_stripped, Line)\n assert l_stripped.file == line.file\n assert l_stripped.number == line.number", "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def remove_blank_lines(text):\n out_text = \"\"\n blank = True\n for line in text.splitlines(True):\n if line.isspace():\n if not blank:\n blank = True\n out_text = out_text + line\n else:\n blank = False\n out_text = out_text + line\n return out_text", "def _split_line( self, data_list, line_num, text ):\n\t\t# if blank line or context separator, just add it to the output list\n\t\tif not line_num:\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# if line text doesn't need wrapping, just add it to the output list\n\t\tsize = len( text )\n\t\tmax_len = self._wrapcolumn\n\t\tif ( size <= max_len ) or ( ( size - ( text.count( '\\0' ) * 3 ) ) <= max_len ):\n\t\t\tdata_list.append( ( line_num, text ) )\n\t\t\treturn\n\n\t\t# scan text looking for the wrap point, keeping track if the wrap\n\t\t# point is inside markers\n\t\ti = 0\n\t\tn = 0\n\t\tmark = ''\n\t\twhile n < max_len and i < size:\n\t\t\tif text[i] == '\\0':\n\t\t\t\ti += 1\n\t\t\t\tmark = text[i]\n\t\t\t\ti += 1\n\t\t\telif text[i] == '\\1':\n\t\t\t\ti += 1\n\t\t\t\tmark = ''\n\t\t\telse:\n\t\t\t\ti += 1\n\t\t\t\tn += 1\n\n\t\t# wrap point is inside text, break it up into separate lines\n\t\tline1 = text[:i]\n\t\tline2 = text[i:]\n\n\t\t# if wrap point is inside markers, place end marker at end of first\n\t\t# line and start marker at beginning of second line because each\n\t\t# line will have its own table tag markup around it.\n\t\tif mark:\n\t\t\tline1 += '\\1'\n\t\t\tline2 = '\\0' + mark + line2\n\n\t\t# tack on first line onto the output list\n\t\tdata_list.append( ( line_num, line1 ) )\n\n\t\t# use this routine again to wrap the remaining text\n\t\tself._split_line( data_list, '>', line2 )", "def 
_split_lines(msg):\n # Find the EOL sequence encoded as bytes.\n eol = EOL.encode(HEADER_ENCODING)\n\n # The list into which we will collect header lines.\n lines = []\n # Keep going until we find a blank line.\n while True:\n # Look for the EOL sequence.\n index = msg.index(eol)\n # Split off the line, not including the EOL.\n line = msg[: index]\n # In the message, skip over the line, past the EOL.\n msg = msg[index + len(eol) :]\n # Is the line blank?\n if len(line) == 0:\n # Yes. We're done; return the lines and whatever data is left in\n # the message.\n return lines, msg\n else:\n # No. Decode the line.\n line = line.decode(HEADER_ENCODING)\n # Store it in the list of lines.\n lines.append(line)\n # Now continue at the top of the loop.", "def line_to_list(self, _line):\n\n\t\tresult = list()\t\t\n\t\t_line_splited = _line.split('\\t')\n\t\t\n\t\tfor value in _line_splited:\n\t\t\tvalue_stripped = value.strip().rstrip()\t\t\t\n\t\t\tresult.append(value_stripped)\t\t\t\t\n\t\t\n\t\treturn result", "def remove_empty_lines(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n self.result_code = open(\"result.c\",\"w\") #Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line is empty.\n if line != \"\\n\":\n self.result_code.write(line) # Writing the non-empty line onto the intermediate file.\n self.result_code.close() # Closing the intermediate file.", "def base_parsing(lines):\n lines = [l.strip() for l in lines]\n return [ tuple(line.split(sep='-')) for line in lines ]", "def clean_lines(df_column):\n \n clean_lines = []\n # pattern for html tags\n tag_match = re.compile('<.*?>')\n # patterm for website\n website_match = re.compile('https?:\\/\\/.*[\\r\\n]*')\n # pattern for tex\n tex_match = re.compile('\\$\\$?.+?\\$\\$?')\n \n for line in df_column:\n s = re.sub(tag_match, '', line)\n s = re.sub(website_match, '[website]', s)\n s = re.sub(tex_match, '[tex]', s)\n # replace extra whitespace with spaces\n for x in string.whitespace:\n s = s.replace(x, ' ')\n clean_lines.append(s)\n \n return clean_lines", "def splitlines(self, keepends=None):\n return splitlines(self, keepends)", "def process_all(self):\n global multi_comment_line_mode\n multi_comment_line_mode = False\n for line in self.fileToProcess:\n line = line.strip() # creating a strip line, with no whitespace in the beginning and in the end\n # multi_comment_line_mode = False\n # first, we want to filter all the lines which are comments or part of comments\n while line != '':\n ignoring_status,newline = self.shouldTheLineBeIgnored(line)\n if ignoring_status:\n break # we are ignoring the line\n elif (not ignoring_status) and (newline != '') and newline != '$endOfMultiLine':\n line = newline\n continue\n elif not ignoring_status and newline == '$endOfMultiLine':\n break\n else:\n line = self.isThereApartToIgnore(line) #getting the good line\n line = line.strip()\n if line.endswith('$endOfMultiLine'):\n # line = line[:-1]\n line = line[:-15]\n # in this case we don't want to ignore the current line\n # if multi_comment_line_mode:\n # # this comes from the\n list_of_line_strings = re.split('(\\W)', line) # the default of this method is to remove all the white spaces\n list_of_line_strings = 
list(filter(None, list_of_line_strings))\n global i\n i = 0\n global first_index\n first_index = 0\n global second_index\n second_index = 0\n len_of_list = len(list_of_line_strings)\n while i < len_of_list:\n # first adding the string literals\n if (list_of_line_strings[i] == '\"' and i == 0) or (i>0 and list_of_line_strings[i] == '\"' and\n list_of_line_strings[i-1]!='*'):\n first_index = i\n i = i + 1\n if i == len(list_of_line_strings):\n break\n while list_of_line_strings[i] != '\"':\n i = i + 1\n if i>=len(list_of_line_strings):\n # in case it's the end\n i = first_index\n break\n else:\n continue\n second_index = i\n list_of_line_strings[first_index:second_index + 1] = [\n ''.join(list_of_line_strings[first_index:second_index + 1])]\n i = i + 2\n len_of_list = len(list_of_line_strings)\n else:\n i = i + 1\n j=0\n global skip_mode\n skip_mode = False\n global counter\n counter = 0\n for string in list_of_line_strings:\n if j != len(list_of_line_strings)-1:\n j+=1\n if counter == 1:\n counter = 0\n continue\n if skip_mode and not (string == '*' and list_of_line_strings[j] == '/'):\n continue\n if skip_mode and string == '*' and list_of_line_strings[j] == '/':\n skip_mode = False\n counter = 1\n continue\n if string == \"/\" and (list_of_line_strings[j] == \"/\" ):\n # this is a comment that appeared in the line\n break # in this case, there are no more chars to read because it's a note\n if string == \"/\" and list_of_line_strings[j] == \"*\":\n skip_mode = True\n counter = 1\n continue # entering a skip mode\n if string.strip() == '':\n continue\n self.currStringToProcess = string\n type = self.tokenType()\n self.createToken(type,self.currStringToProcess)\n break", "def _remove_beginning_newlines(lines):\n first_non_blank_line = 0\n\n for line in lines:\n if line.strip():\n break\n\n first_non_blank_line += 1\n\n return lines[first_non_blank_line:]", "def merge_lines(self, lines):\n pairs = []\n for line in lines:\n if len(pairs) and self.is_left(pairs[-1][-1]) == self.is_left(line):\n pairs[-1].append(line)\n else:\n pairs.append([line])\n\n lines = []\n for pair in pairs:\n if len(pair) == 1:\n lines.append(pair[0])\n else:\n x1 = sum([line.x1 for line in pair]) // len(pair)\n x2 = sum([line.x2 for line in pair]) // len(pair)\n y1 = sum([line.y1 for line in pair]) // len(pair)\n y2 = sum([line.y2 for line in pair]) // len(pair)\n lines.append(GripPipeline.Line(x1, y1, x2, y2))\n return lines", "def clean(self, contents: list) -> list:\n _new_content = []\n\n for line in contents:\n if line:\n try:\n if self.__long__line_case(_new_content, line):\n continue\n\n if self.__comma_case(_new_content, line):\n continue\n\n if 'Professor Grace Wong' in line:\n self.__double_add(_new_content, line, 33)\n continue\n\n if self.__specific_string_case(\n _new_content,\n line,\n ['substitutions', 'the study of Liver Diseases']\n ):\n continue\n\n if line.strip() == 'Professor Henry Chan':\n line += ' (Hong Kong)'\n\n _new_content.append(line.strip())\n except IndexError:\n pass\n return _new_content", "def process_data(file_object: TextIO) -> list:\n text_list = [line.replace('\\n', '').split(' ') for line in file_object]\n return text_list", "def clean(source_name):\n with open(source_name, 'r') as f:\n text = f.read()\n text_list = re.split('; |, |\\n| |\\!|\\?', text)\n if '' in text_list:\n text_list = list(filter(lambda x: x != \" \" and x != \"\", text_list))\n return text_list", "def Split_to_Lines(self):\r\n\r\n line = []\r\n word = \"\"\r\n comment = False\r\n String = False\r\n for i in 
range(0, len(self.Code)):\r\n if self.Code[i] == '\\n':\r\n if word != '':\r\n if (String is True) and (word[0] != word[len(word) - 1]):\r\n return False\r\n line.append(word)\r\n if len(line) != 0:\r\n self.Code_Lines.append(line)\r\n if len(line) >= 2:\r\n if line[0] == \"end\":\r\n break\r\n word = \"\"\r\n line = []\r\n comment = False\r\n String = False\r\n elif not comment:\r\n if self.Code[i] == ' ':\r\n if not String:\r\n if word != \"\" and word != '':\r\n line.append(str(word))\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == '\"':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\"'\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n elif self.Code[i] == '\\'':\r\n if not String:\r\n if word != \"\":\r\n if word != '':\r\n line.append(word)\r\n word = '\\''\r\n String = True\r\n elif word[0] == self.Code[i]:\r\n String = False\r\n word += self.Code[i]\r\n if word != '':\r\n line.append(word)\r\n word = \"\"\r\n else:\r\n word += self.Code[i]\r\n else:\r\n if String:\r\n word += self.Code[i]\r\n else:\r\n if self.Code[i] == ';':\r\n comment = True\r\n\r\n elif self.Code[i] in self.Special_Symbols:\r\n if word != '':\r\n line.append(word)\r\n line.append(self.Code[i])\r\n word = \"\"\r\n else:\r\n line.append(self.Code[i])\r\n\r\n else:\r\n word += self.Code[i].lower()\r\n\r\n return self.Code_Lines", "def text_to_list(text):\n text = text.split(\"\\n\")\n mylist = []\n a, b = [], []\n x = \" \"\n for el in text:\n temp = []\n el = el.split()\n for pos in el:\n if len(pos) == 0:\n continue\n if x not in pos:\n temp.append(pos)\n try:\n a.append(temp[0])\n b.append(temp[1])\n except IndexError:\n pass\n a = del_empty(a)\n b = del_empty(b)\n mylist.append(a)\n mylist.append(b)\n return mylist", "def cut_data(data):\n out = [[], []]\n data = data.split(\"\\n\")\n for line in data:\n line = line.split(\" \")\n line = remove_empty(line)\n try:\n out[0].append(float(line[0]))\n out[1].append(float(line[1]))\n except IndexError:\n pass\n file = open(\"test.txt\", \"w\")\n for i in out[1]: # DELETE\n file.write(str(i))\n file.write(\"\\n\")\n file.close()\n return out", "def _fileLinesToList(filename) :\n o = []\n with open(filename, \"r\") as fi :\n for l in fi :\n if l.strip() != \"\" :\n o.append(l.strip())\n return o", "def emptyline(self):", "def _extract_content(lines: list[Strip]) -> list[str]:\n content = [\"\".join(segment.text for segment in line) for line in lines]\n return content", "def lines_to_blocks(text):\n n_sep = text.count('\\n\\n')\n n_lines = text.count('\\n')\n #approximate ratio of double newlines vs single newline: 40\n if int(n_sep/n_lines*100) > 40:\n text = re.sub('\\n\\n', '\\n',text)\n #try to split it up with topic indicators such as numbers or bullet points\n text = re.sub(r'[0-9]+[.]', '\\n',text)\n text = re.sub('•', '\\n',text)\n return text", "def _line_wrapper( self, diffs ):\n\n\t\t# pull from/to data and flags from mdiff iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\t# check for context separators and pass them through\n\t\t\tif flag is None:\n\t\t\t\tyield fromdata, todata, flag\n\t\t\t\tcontinue\n\t\t\t( fromline, fromtext ), ( toline, totext ) = fromdata, todata\n\t\t\t# for each from/to line split it at the wrap column to form\n\t\t\t# list of text lines.\n\t\t\tfromlist, tolist = [], []\n\t\t\tself._split_line( 
fromlist, fromline, fromtext )\n\t\t\tself._split_line( tolist, toline, totext )\n\t\t\t# yield from/to line in pairs inserting blank lines as\n\t\t\t# necessary when one side has more wrapped lines\n\t\t\twhile fromlist or tolist:\n\t\t\t\tif fromlist:\n\t\t\t\t\tfromdata = fromlist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\tfromdata = ( '', ' ' )\n\t\t\t\tif tolist:\n\t\t\t\t\ttodata = tolist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\ttodata = ( '', ' ' )\n\t\t\t\tyield fromdata, todata, flag", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def chunk_split(cls, text):\n parts = []\n current = []\n for line in text.splitlines():\n size = sum(len(part) + 1 for part in current)\n extra = len(line)\n if size + extra >= 2000:\n if current:\n # The message is full, split here.\n parts.append(\"\\n\".join(current))\n current.clear()\n if extra >= 2000:\n # The line itself is too long, split on whitespace instead.\n *lines, line = wrap(line, 2000, expand_tabs=False, replace_whitespace=False)\n parts.extend(lines)\n current.append(line)\n if current:\n parts.append(\"\\n\".join(current))\n return parts", "def splitline (self, line):\n\t\treturn line.split('\\t')", "def stripped_lines(lines, ignore_comments, ignore_docstrings, ignore_imports):\n if ignore_imports:\n tree = astroid.parse(\"\".join(lines))\n node_is_import_by_lineno = (\n (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom)))\n for node in tree.body\n )\n line_begins_import = {\n lineno: all(is_import for _, is_import in node_is_import_group)\n for lineno, node_is_import_group in groupby(\n node_is_import_by_lineno, key=lambda x: x[0]\n )\n }\n current_line_is_import = False\n\n strippedlines = []\n docstring = None\n for lineno, line in enumerate(lines, start=1):\n line = line.strip()\n if ignore_docstrings:\n if not docstring and any(\n line.startswith(i) for i in ['\"\"\"', \"'''\", 'r\"\"\"', \"r'''\"]\n ):\n docstring = line[:3]\n line = line[3:]\n if docstring:\n if line.endswith(docstring):\n docstring = None\n line = \"\"\n if ignore_imports:\n current_line_is_import = line_begins_import.get(\n lineno, current_line_is_import\n )\n if current_line_is_import:\n line = \"\"\n if ignore_comments:\n line = line.split(\"#\", 1)[0].strip()\n strippedlines.append(line)\n return strippedlines", "def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"", "def remove_leading_whitespace_and_empty_lines(text: str) -> str:\n # We call lstrip() twice on the same line. 
This is inefficient but ok for small unit tests.\n # Please change it if you want to.\n return '\\n'.join([line.lstrip() for line in text.split('\\n') if line.lstrip() != ''])", "def split(self, bytes):\n # '''Split by lines heroku payload and apply filters.'''\n\n # lines = []\n\n lines = []\n while len(bytes) > 0:\n # find first space character\n i = 0\n while bytes[i] != 32: # 32 is white space in unicode\n i += 1\n msg_len = int(bytes[0:i].decode('utf-8'))\n msg = bytes[i + 1:i + msg_len + 1]\n\n # remove \\n at the end of the line if found\n eol = msg[len(msg)-1]\n if eol == 10 or eol == 13: # \\n or \\r in unicode\n msg = msg[:-1]\n\n decoded_msg = msg.decode('utf-8', 'replace')\n if self.truncate_to > -1:\n # replace token by __TOKEN_REPLACED__\n decoded_msg = self.patternToken.sub(lambda x:\n '{}__TOKEN_REPLACED__{}'\n .format(x.group(1),\n x.group(3)),\n decoded_msg)\n\n max_ = self.truncate_to\n # TRUNCATE Big logs except stack traces\n if not self.patternStackTrace.search(\n decoded_msg) and len(decoded_msg) > max_:\n decoded_msg = '%s __TRUNCATED__ %s' % (\n decoded_msg[:max_//2], decoded_msg[-max_//2:])\n\n lines.append(decoded_msg)\n\n bytes = bytes[i + 1 + msg_len:]\n return lines", "def __preprocess_line(line):\n return [int(element) for element in line.lstrip().rstrip().split()] # preprocess the input line", "def ConvertToSingleLine(lines):\n state = []\n total_length = 0\n for l in lines:\n total_length += len(l)\n # TODO: Use a tuple instead.\n state.append({'pos': total_length, # the line split point\n 'blocks': [], # blocks which belong to this line\n })\n result = \"\".join(lines)\n assert len(state) == len(lines)\n return (result, state)", "def parse_lines(lines):\n image_ids = []\n cleaned_captions = []\n\n # QUESTION 1.1\n\n for line in lines:\n # first we split the image id from caption text based on \\t\n id = line.split('\\t')[0]\n # then we extract remove .jpg#x part from image id (where x = 1 to 5)\n id = id.split('.')[0]\n # finally we extract raw text caption\n raw_caption = line.split('\\t')[1]\n # and forward to other function for cleaning the text\n caption = clean_caption(raw_caption)\n\n image_ids.append(id)\n cleaned_captions.append(caption)\n\n return image_ids, cleaned_captions", "def procesar_linea(separador,linea):\n return (linea.rstrip('\\n')).split(separador)", "def separate(self, lines):\n\n seps = []\n curr = \"\"\n left = 0\n right = 0\n\n for line in lines.split(\"\\n\"):\n if not line:\n continue\n l = line.count(\"{\")\n r = line.count(\"}\")\n left += l\n right += r\n curr += line + \"\\n\"\n\n if left == right:\n left = 0\n right = 0\n if curr:\n seps.append(curr)\n curr = \"\"\n return seps", "def process_lines(self):\n\n for line in self.all_lines:\n container_re = re.compile(r'(.*?) bags')\n bags_re = re.compile(r'(?:(\\d+)|no other) (.*?) 
bags*')\n container_name = re.match(container_re, line).group(1)\n bags = re.findall(bags_re, line)\n self.all_bags[container_name] = bags", "def pre_process_data(linelist):\r\n for index in range(len(linelist)):\r\n if not linelist[index]:\r\n linelist[index] = '0'\r\n return linelist", "def _read_split_file(filepath):\n with open(filepath) as f:\n trajs = f.readlines()\n trajs = [x.strip() for x in trajs]\n\n return trajs", "def get_lines(self):\n return self.split('\\n')", "def _collect_lines( self, diffs ):\n\n\t\tfromlist, tolist, flaglist = [], [], []\n\t\t# pull from/to data and flags from mdiff style iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\ttry:\n\t\t\t\t# store HTML markup of the lines into the lists\n\t\t\t\tfromlist.append( self._format_line( 0, flag, *fromdata ) )\n\t\t\t\ttolist.append( self._format_line( 1, flag, *todata ) )\n\t\t\texcept TypeError:\n\t\t\t\t# exceptions occur for lines where context separators go\n\t\t\t\tfromlist.append( None )\n\t\t\t\ttolist.append( None )\n\t\t\tflaglist.append( flag )\n\t\treturn fromlist, tolist, flaglist", "def clean_line(self, line):\n\n if \"#\" in line:\n temp = line.split(\"#\")\n if len(temp) < 2:\n return \"\"\n else:\n temp = temp[0] + \"\\n\"\n\n # make sure the \"#\" isn't in quotes\n if temp.count(\"\\\"\") % 2 == 0:\n line = temp\n\n line = line.replace(\"}\", \" } \").replace(\"{\", \" { \")\n while \"=\" in line:\n line = self.replace_equals(line)\n line = line.lstrip()\n return line", "def lify_split_buffers(lines):\n code_len = len(lines)\n for pos in range(code_len):\n line = lines[pos]\n if line.find('variable=buf_data_split') != -1:\n # Search for the variable declaration section\n decl_pos = -1\n prev_pos = pos - 1\n while prev_pos >= 0:\n prev_line = lines[prev_pos]\n if prev_line.find('Variable Declaration') != -1:\n decl_pos = prev_pos\n break\n prev_pos -= 1\n # Move the two code lines at [pos - 1] and [pos] to [decl_pos] and [decl_pos + 1]\n indent = lines[decl_pos].find('/*')\n line1 = ' ' * indent + lines[pos - 1].lstrip()\n line2 = ' ' * indent + lines[pos].lstrip()\n del lines[pos - 1]\n del lines[pos - 1]\n lines.insert(decl_pos, line1)\n lines.insert(decl_pos + 1, line2)\n\n return lines", "def _trunc_lines(self):\n\t\tif self._appendMessages:\n\t\t\tself._trunc_lines_append()\n\t\telse:\n\t\t\tself._trunc_lines_prepend()", "def split_lines(layout):\n children = []\n current_line = None\n\n for child in layout.children:\n if isinstance(child, _LineBreak):\n current_line = None\n else:\n if current_line is None:\n current_line = LineBox(child.style)\n children.append(current_line)\n current_line.children.append(child)\n\n layout.children = children", "def test_chunks(year, day, part_number):\n chunks = []\n chunk_index = -1\n data_file_lines(part_number).each do |line|\n if line[0] == '#'\n chunk_index += 1\n chunks[chunk_index] = [line[1..-1].strip, []]\n elsif chunk_index >= 0\n chunks[chunk_index][1] << line\n end\n end\n chunks", "def list_strip(line: list):\n new_line = [field.strip() for field in line]\n if new_line != line:\n tpl = \"Removed trailing whitespaces in fields of line: {}\"\n msg = tpl.format(line)\n warnings.warn(msg, ParseIsatabWarning)\n return new_line", "def test_no_truncation(self):\n fixed_splitter = utils.WidthSplitter((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n line = \" 1 C 1 s -0.00000 -0.00000 0.00000\"\n ref_not_truncated = ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000', '', '', '']\n tokens_not_truncated = fixed_splitter.split(line, truncate=False)\n 
self.assertEqual(ref_not_truncated, tokens_not_truncated)", "def rows(file, prep=None,\n whitespace='[\\n\\r\\t]',\n comments='#.*',\n sep=\",\"\n ):\n doomed = re.compile('(' + whitespace + '|' + comments + ')')\n with open(file) as fs:\n for line in fs:\n line = re.sub(doomed, \"\", line)\n if line:\n row = map(lambda z: z.strip(), line.split(sep))\n if len(row) > 0:\n yield prep(row) if prep else row", "def split(line, skipspace=0):\n a = re.sub('\\s+',' ', line.strip()) ##normalize white's -> 1 space\n a = re.split('(\\s)', a) ##split/keep space\n for aa in a: ## for each PPH, convert it to SPH\n if aa==' ':\n if skipspace == 0: yield aa\n else:\n for aaa in toSPH.pph2sph(aa): yield aaa", "def cleanup (text) :\n l_idx = 1\n lines = text.split ('\\n')\n\n # count leading non-empty lines\n for line in lines :\n if not line.strip () :\n l_idx += 1\n else :\n break\n\n # check if there is anything more to evaluate\n if len (lines) <= l_idx :\n return text\n\n # determine indentation of that line\n indent = 0\n for c in lines[l_idx] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if nothing found, check the following line\n if not indent :\n\n if len (lines) <= l_idx + 1:\n return text\n for c in lines[l_idx + 1] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if still nothing found, give up\n if not indent :\n return text\n\n\n # oitherwise trim all lines by that indentation\n out = \"\"\n replace = ' ' * indent\n for line in lines :\n out += re.sub (\"%s\" % ' ' * indent, \"\", line)\n out += \"\\n\"\n\n return out", "def run(self, lines: List[str]) -> List[str]:\n inserts = 0\n in_code_fence: bool = False\n open_fences: List[Fence] = []\n copy = lines[:]\n for i in range(len(lines) - 1):\n # Ignore anything that is inside a fenced code block but not quoted.\n # We ignore all lines where some parent is a non-quote code block.\n m = FENCE_RE.match(lines[i])\n if m:\n fence_str = m.group(\"fence\")\n lang: Optional[str] = m.group(\"lang\")\n is_code = lang not in (\"quote\", \"quoted\")\n matches_last_fence = (\n fence_str == open_fences[-1].fence_str if open_fences else False\n )\n closes_last_fence = not lang and matches_last_fence\n\n if closes_last_fence:\n open_fences.pop()\n else:\n open_fences.append(Fence(fence_str, is_code))\n\n in_code_fence = any(fence.is_code for fence in open_fences)\n\n # If we're not in a fenced block and we detect an upcoming list\n # hanging off any block (including a list of another type), add\n # a newline.\n li1 = self.LI_RE.match(lines[i])\n li2 = self.LI_RE.match(lines[i + 1])\n if (\n not in_code_fence\n and lines[i]\n and (\n (li2 and not li1)\n or (li1 and li2 and (len(li1.group(1)) == 1) != (len(li2.group(1)) == 1))\n )\n ):\n copy.insert(i + inserts + 1, \"\")\n inserts += 1\n return copy", "def _trunc_lines_prepend(self):\n\t\tp = self._edit.get_buffer()\n\t\tnLines = p.get_line_count()\n\t\twhile nLines > 0:\n\t\t\tif nLines <= self._maxLines +1:\n\t\t\t\tbreak\n\t\t\tend = p.get_end_iter()\n\t\t\tstart = p.get_end_iter()\n\t\t\tstart.backward_line()\n\t\t\tp.delete(start, end)\n\t\t\tnLines = p.get_line_count()", "def _lines(filename):\n \n handle = gzip.open(filename, 'rt') if _gz(filename) else open(filename)\n for line in handle:\n if not line.startswith('#'):\n yield line.strip().split('\\t')" ]
[ "0.7803936", "0.7344712", "0.72013485", "0.7061363", "0.70215124", "0.6967967", "0.6914489", "0.6854614", "0.66259325", "0.6553807", "0.6536147", "0.65114903", "0.65092206", "0.6506624", "0.64733094", "0.64733094", "0.6465733", "0.6464278", "0.6462324", "0.6399896", "0.6393337", "0.6389096", "0.63557136", "0.6332589", "0.6325802", "0.6305556", "0.627506", "0.6251746", "0.6205863", "0.619445", "0.61874664", "0.6161195", "0.61541474", "0.61484736", "0.61484736", "0.6128532", "0.60934746", "0.6062839", "0.60481155", "0.6003841", "0.59958494", "0.5974975", "0.59613097", "0.59577054", "0.5947892", "0.59291154", "0.5929099", "0.5924631", "0.5917961", "0.5917779", "0.58818245", "0.5864519", "0.58534634", "0.585301", "0.585192", "0.58480865", "0.5843999", "0.5836819", "0.58233464", "0.58065504", "0.5806054", "0.57929444", "0.57902133", "0.57866234", "0.5780837", "0.57745546", "0.5738224", "0.572208", "0.57098246", "0.5706884", "0.5702307", "0.5699305", "0.5692972", "0.5687354", "0.5685542", "0.5679195", "0.5673916", "0.5665316", "0.5662985", "0.565976", "0.56560194", "0.5648521", "0.56482935", "0.5644161", "0.5637985", "0.56366676", "0.5626737", "0.56161225", "0.56129164", "0.5606352", "0.55861264", "0.5583396", "0.5571661", "0.5560708", "0.5554144", "0.55490804", "0.5541012", "0.55387187", "0.55293965", "0.55265886" ]
0.6184828
31
Error in label: only whitespace allowed, no tabs; if the checked label differs, raise an error
def CheckLabel(Line):
    for i in Line:
        if i == '\t':   #can't detect leading tabs, stops at the first \
            raise InputError(Line,"malformed input")
        elif i != ' ':
            break
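A minimal usage sketch for the validator above, added for clarity. The InputError class shown here is an assumption (the record does not define it); it only mirrors the two-argument (expression, message) call made by CheckLabel.

class InputError(Exception):
    # Hypothetical exception class, inferred from the (expression, message) call above.
    def __init__(self, expression, message):
        super().__init__(message)
        self.expression = expression
        self.message = message

CheckLabel("   LOOP")       # leading spaces only: accepted, returns None
try:
    CheckLabel("\tLOOP")    # leading tab: rejected
except InputError as err:
    print(err.message)      # -> malformed input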
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def checkLabel(label):\n\n label = str(label)\n if not label:\n raise ValueError('label cannot be empty string')\n\n label = str(label)\n\n if not label:\n raise ValueError('label cannot be empty string')\n\n if not label[0].isalpha():\n raise ValueError('label must start with a letter')\n\n if not (''.join(label.split('_'))).isalnum():\n raise ValueError('label may contain alphanumeric characters and '\n 'underscore, {0} is not valid'.format(label))\n\n if isReserved(label):\n raise ValueError('{0} is a reserved word and cannot be used '\n 'as a label'.format(repr(label)))\n\n if label in READONLY:\n raise AttributeError('{0} is read-only'.format(label))\n\n return label", "def test_is_valid_label_value_invalid_input():\n # test length violations\n assert not is_valid_label_value(value=f\"{'v' * 64}\") # value too long\n # test first character violations (not alphanum)\n assert not is_valid_label_value(value=\"-\")\n assert not is_valid_label_value(value=\"-a\")\n assert not is_valid_label_value(value=\".b\")\n assert not is_valid_label_value(value=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_value(value=\"a-\")\n assert not is_valid_label_value(value=\"b.\")\n assert not is_valid_label_value(value=\"c \")\n assert not is_valid_label_value(value=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_value(value=\"a$$a\")\n assert not is_valid_label_value(value=\"b b\")", "def checkcontent(label, c):\n if len(c) > 0:\n raise ValueError(\"{} with content={}\".format(label, c))", "def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False", "def test_is_valid_label_value_valid_input():\n # test valid label values\n assert is_valid_label_value(value=None)\n assert is_valid_label_value(value=\"\")\n assert is_valid_label_value(value=\"l0L\")\n assert is_valid_label_value(value=\"L-l\")\n assert is_valid_label_value(value=\"L.L\")\n assert is_valid_label_value(value=\"l_4\")\n assert is_valid_label_value(value=\"4-you\")\n assert is_valid_label_value(value=\"You.2\")", "def want_label(self, op):\n return self.want_line(r'\\s*\\S*(%s)\\S*\\:.*' % (op))", "def test_whitespace(self):\n self.assertRaises(ParseException, self.flag.parseString, ' ')", "def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_label(self):\n try:\n t = self.OntTerm(label='diffuse')\n raise AssertionError(f'should fail {t!r}')\n except TypeError:\n pass", "def test_arg_env_invalid(self, dfparser, instruction, label):\n dfparser.lines = [\"FROM fedora\\n\",\n \"{0} v=v\\n\".format(instruction),\n \"LABEL TEST={0}\\n\".format(label)]\n try:\n dfparser.labels['TEST']\n except KeyError:\n pass", "def test_label_cannot_be_converted_to_string(self):\n\n class NoStr:\n def __str__(self) -> str:\n raise NotImplementedError\n\n with pytest.raises(TypeError, match=\"The given label\"):\n State(\"water\", label=NoStr())", "def validateLabel(cls, label: str, labeling_version: int) -> bool:\r\n\r\n return 
len(label.split('.')) in [2, 3]", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, None)\n _G_label_3\n \"\"\"))", "def test_label_not_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Label\\' field should contain the positive'\n ' class label.'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n )", "def test_no_start_open_parens(self):\n self.assertRaises(ValueError, NewickTokenizer, newick='hi')", "def test_whitespace_not_used_if_layout():\n grammar = \"\"\"\n S: 'a' 'b';\n LAYOUT: 'k' | EMPTY;\n \"\"\"\n g = Grammar.from_string(grammar)\n parser = Parser(g)\n with pytest.raises(ParseError):\n parser.parse('a b')", "def validate_label(self, label):\n if label != self.label:\n raise KeypointsSchemaError(\n \"Label '%s' does not match keypoints schema\" % label\n )", "def test_issue_remove_label(self):\n pass", "def _is_label(self) -> bool:\n return self.lines[self.counter].startswith(\"(\") and self.lines[\n self.counter\n ].endswith(\")\")", "def t_error(t):\n print(\"Illegal character '%s'\" % repr(t.value[0]))\n t.lexer.skip(1)", "def catch_tabs(self):\n lnum = 1\n for line in self.text:\n cnum = line.find(\"\\t\")\n if 0 <= cnum:\n self.errmsg(\"TAB detected in input. Please use spaces.\",\n pos=(lnum,cnum))\n lnum += 1", "def check_sanity(self):\n # ensure numeric labels\n try:\n list(map(int, flatten(self.labels[:1])))\n except ValueError as ve:\n error(\"Non-numeric label encountered: {}\".format(ve))\n except TypeError as ve:\n warning(\"Non-collection labelitem encountered: {}\".format(ve))", "def label(cls) -> str:\n return \"!lobotomy.error\"", "def labels_validation(ele,actultext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(ele))\r\n print \"Current label returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(actultext)+\" label does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if actultext.strip() == text_heading.strip():\r\n print (str(actultext)+\" label has been found!!!\")\r\n else:\r\n print(\"Sorry!!!lable has been mismatched,it should be \"+str(actultext))\r\n print (\"label shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def test_invalid_input_tag(self):\r\n with self.assertRaisesRegexp(Exception, \"Error in xml\"):\r\n self.check_group('checkboxtextgroup', 'invalid', 'checkbox')", "def verify_labeled(self, d_stmt, table):\n d_label = d_stmt.find_first(\"p_name\")\n if d_label:\n self.label = d_label.value\n table.check_table(d_stmt.linespan, Symbol(self.label, DanaType(\"label\")))", "def test_info_whitespace():\n pytest.raises(SaltInvocationError, mac_group.info, \"white space\")", "def is_label_definition(line):\n\n return line.startswith(\"LABEL \")", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert 
not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")", "def _check_whitespace_formatting(self, docstring: PetscDocStringImpl) -> None:\n format_diag = self.diags.formatting\n base_mess = f'{self.transform(self.name)} values must be (1) space away from colon not ({{}})'\n for (line, line_after_colon), sub_items in self.items:\n colon_idx = line.find(':')\n if colon_idx < 0:\n continue\n\n correct_offset = colon_idx + 2\n rest_idx = line.find(line_after_colon)\n if rest_idx == correct_offset:\n continue\n\n nspaces = rest_idx - correct_offset\n if rest_idx > correct_offset:\n sub = ' ' * nspaces\n offset = correct_offset\n fix = ''\n else:\n sub = ':'\n offset = colon_idx\n fix = ': '\n floc = docstring.make_source_range(sub, line, sub_items[0][0].start.line, offset=offset)\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, format_diag, base_mess.format(nspaces + 1), floc, patch=Patch(floc, fix)\n )\n return", "def test_missing_label_value():\n svl_string = \"\"\"\n DATASETS\n bigfoot \"bigfoot.csv\"\n SCATTER bigfoot\n X date BY YEAR LABEL\n Y report_number COUNT\n SPLIT BY classification\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def test_label_seconde(self):\n self.assertIsInstance(self.address.label_second, str)\n self.assertEqual(self.address.label_second, \"\")", "def test_error_initialisation_from_xdmf_missing_label():\n with pytest.raises(ValueError, match=r\"label\"):\n festim.InitialCondition(value=\"my_file.xdmf\", label=None, time_step=1)", "def _check_bids_label(label):\n if not isinstance(label, str):\n raise TypeError(\n f\"All bids labels must be string. \"\n f\"Got '{type(label)}' for {label} instead.\"\n )\n if not all(char.isalnum() for char in label):\n raise ValueError(\n f\"All bids labels must be alphanumeric. 
Got '{label}' instead.\"\n )", "def test_median1D_bad_label(self):\n\n with pytest.raises(KeyError) as verr:\n avg.median1D(self.testInst, self.test_bins, \"bad_label\",\n self.test_data)\n\n estr = \"bad_label\"\n assert str(verr).find(estr) >= 0\n\n return", "def test_is_valid_annotation_value_invalid_input():\n # test valid label values\n assert not is_valid_annotation_value(value=1)", "def check_horizontal(line):\n\tline = line.rstrip('\\n')\n\tif line.rstrip() != line:\n\t\traise StyleError(\"Line has trailing white-space\")\n\tif '\\t' in line.lstrip('\\t'):\n\t\traise StyleError(\"Tabs may only be used for indentation\")", "def CheckSpacing(fn, filename, clean_lines, linenum, nesting_state, error):\n fn(filename, clean_lines, linenum, nesting_state,\n makeErrorFn(error, ['readability/braces', 'whitespace/braces'], []))", "def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407", "def check_no_whitespace(args):\n for arg in args:\n for char in arg:\n if char in string.whitespace:\n raise RuntimeError(\"No whitespace characters are currently allowed in input arguments. Replace spaces in file and folder names with underscores ('_').\")\n return", "def test_missing_dataset_label():\n svl_string = \"HISTOGRAM X temperature\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_no_arg(self):\n self.assertRaises(ValueError, NewickTokenizer)", "def test_missing_axis_field():\n svl_string = \"\"\"\n BAR bigfoot X LABEL \"Classification\" Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_lint_fail_malformed(self, style):\n with ExpectedException(LinterFailure):\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def invalid(self):\r\n inGG = 0\r\n if self.root and self.root.parent: inGG = self.root.parent.editGGLabel\r\n for asg in self.mergedASG:\r\n if asg.root and asg.root.parent: inGG = inGG or asg.root.parent.editGGLabel\r\n if inGG: # This means we are a LHS or RHS of a GG rule\r\n GGlabels = [] # A list to obtain all the GG labels... \r\n for type in self.listNodes.keys():\r\n for node in self.listNodes[type]:\r\n label = node.GGLabel.getValue() # Get the node's GG label\r\n if label in GGlabels: # it means it is repeated\r\n return \"Graph Grammar labels are not unique.\"\r\n else: GGlabels.append(label)\r\n return ASGNode.invalid(self) # perform the checking in my parent's invalid code\r", "def _check_wrong_tabs_instead_of_spaces(self):\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append(\n (\"%s:%d\" % (ext_file_rel, countline)))\n if self.msg_args:\n return False\n return True", "def label(self) -> str: # Enforcing every node defines a label\n pass", "def white_spaces(value):\n if re.search(r'[\\s]', value):\n raise ValidationError(_('El login no puede contener espacios en blanco'))", "def test_general_subset_invalid_space():\n pass", "def validate_class_label(self, label_uri):\n label = extract_name_from_uri_or_curie(label_uri)\n if not label[0].isupper():\n raise ValueError('Class label {} is incorrect. 
The first letter of each word should be capitalized!'.format(label))", "def test_unclosed(self):\n nt = NewickTokenizer(newick='(a,(b,c)')\n self.assertRaises(ValueError, nt.tokens)", "def test_toplevel_else(self):\n with pytest.raises(ParseError) as e:\n Template(\"{%else%}\").render()\n assert e.value.pc.tag == 'else'", "def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))", "def is_valid_label(self, label):\n try:\n self.validate_label(label)\n return True\n except etal.LabelsSchemaError:\n return False", "def test_invalid_tag(self):\r\n with self.assertRaises(Exception):\r\n self.check_group('invalid', 'choice', 'checkbox')", "def test_false_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'n')", "def test_indented_with_spaces(question_text, question_path):\n if \"\\t\" in question_text:\n raise ValueError(\n \"Found tab indentation in question {}. Please run \\\"sed -i '' 's/\\\\\\\\t/ /g' {}\\\" to switch to spaces.\".format(\n question_path, path.join(REPO, question_path)\n )\n )", "def test_spaces(self):\n self.assertTrue(validate_measure_input('1 ', self.measures))\n self.assertFalse(validate_measure_input('1 1', self.measures))", "def test_issue_replace_labels(self):\n pass", "def _MaybeAddLabel(label_name):\n if label_name.lower() in labels_already_seen:\n return\n labels_already_seen.add(label_name.lower())\n if '-' in label_name:\n col, _value = label_name.split('-', 1)\n _MaybeAddCol(col)", "def test_extra_closed(self):\n nt = NewickTokenizer(newick='(a,(b,c)));')\n self.assertRaises(ValueError, nt.tokens)", "def test_issue_delete_label(self):\n pass", "def error(self, t):\n print(\"Illegal character '%s' in line %i\" % (t.value[0], self.lineno))\n raise Exception(\"Illegal character '%s' in line %i\" %\n (t.value[0], self.lineno))", "def test_before_space():\n \n \n assert(1 == before_space(\"1 2 3\"))\n assert(\"NO SPACE\" == before_space(\"1\"))\n assert(\"Error\" == before_space(None))", "def test_issue_create_label(self):\n pass", "def _sanitize(label):\n return re.sub(r'(\\W+| )', '', label)", "def test_invalid_dataset2():\n train = ((\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 4, 5.5))\n val = ((\"ipsum quia dolor sit\", 3.5),)\n with pytest.raises(ValueError):\n TabularDataset(train, val)", "def test_white_space(self):\n with self.assertRaises(ValidationError):\n field_name_validator('user id')", "def normalize_label(label: str) -> str:\n label = re.sub(r\"['\\\"`]+\", \"\", label) # remove apostrophes\n label = re.sub(r\"[-/\\\\ \\t_]+\", \" \", label) # normalize separators\n lower_count = sum(map(str.islower, label))\n upper_count = sum(map(str.isupper, label))\n if \" \" not in label and lower_count > 0 and upper_count > 0:\n # camel case to \"normal case\"\n label = re.sub(r\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", label)\n label = re.sub(r\"(^[Tt]he |^[Aa] )\", \"\", label) # drop determiner\n return label.lower()", "def not_string_error(name, yml):\n\n yml = symlink_target(yml)\n output_1 = path(yml) + '\\n'\n output_2 = colored(' - Error: ', 'red')\n output_3 = colored(name, attrs=['bold'])\n output_4 = colored(' type should be ', 'red')\n output_5 = colored('str', 'yellow')\n return output_1 + output_2 + output_3 + output_4 + output_5", "def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")", "def 
label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5", "def valid_att_in_label(arch, **kwargs):\n return not arch.xpath('//label[not(@for) and not(descendant::input)]')", "def parser_error(msg):\n global MESSAGES\n if CURRENT_ROW != None:\n msg = \"row \"+str(CURRENT_ROW)+\": \"+msg\n msg += \"<br/>\\n&nbsp;&nbsp;&nbsp;starting with: \"\n for col in range(5):\n val = cellval(CURRENT_ROW, col)\n if val == None:\n val = \"\"\n msg += val+\" | \"\n MESSAGES.append(\"ERROR: \"+msg)", "def test_invalid_tokens(self):\n self.assertTrue(1 + 1)", "def _unused_label(self, label):\n original = label\n existing = self.column_labels\n i = 2\n while label in existing:\n label = '{}_{}'.format(original, i)\n i += 1\n return label", "def test_no_delimiter_error(self):\n val = DwcaValidator(yaml.load(self.yaml_delimited5, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male | female'}\n with self.assertRaises(ValueError):\n val.validate(document)", "def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)", "def _CheckForMissingSpaceBeforeToken(self, token):\n # TODO(user): Check if too many spaces?\n if (len(token.string) == len(token.string.lstrip()) and\n token.previous and token.line_number == token.previous.line_number and\n len(token.previous.string) - len(token.previous.string.rstrip()) == 0):\n self._HandleError(\n errors.MISSING_SPACE,\n 'Missing space before \"%s\"' % token.string,\n token,\n Position.AtBeginning())", "def fail(*args, sep=\" \"):\n pass", "def _add_unexpected(self, newtk: str) -> None:\n pass", "def test_is_valid_annotation_value_valid_input():\n # test valid label values\n assert is_valid_annotation_value(value=None)\n assert is_valid_annotation_value(value=\"\")\n assert is_valid_annotation_value(value=\"l0L\")\n assert is_valid_annotation_value(value=\"L-l\")\n assert is_valid_annotation_value(value=\"L.L\")\n assert is_valid_annotation_value(value=\"l_4\")\n assert is_valid_annotation_value(value=\"4-you\")\n assert is_valid_annotation_value(value=\"You.2\")", "def test_merge_fails_different_label(self):\n job1 = ModelJob(label=\"a-label-1\")\n job2 = ModelJob(label=\"a-label-2\")\n\n self.assertRaises(AssertionError, lambda: job1.merge(job2))", "def 
validate_property_label(self, label_uri):\n label = extract_name_from_uri_or_curie(label_uri)\n if not label[0].islower():\n raise ValueError('Property label {} is incorrect. The first letter of the first word should be lower case!'.format(label))", "def test_issue_add_label(self):\n pass", "def test_named_unit_before_fmt_error():\n tcls = Write_Stmt\n # Cannot have an un-named (positional) argument after a named argument\n with pytest.raises(NoMatchError):\n tcls('''WRITE (UNIT=6, '(\"write some=\"\"'//'text'//'\"\"\")')''')", "def test_single_specifier_needed(self):\n template = '{0} one too many {1}'\n value_count = 1\n msg = ('The formatter should only contain one '\n '\"{}\" specifier for the source field.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)", "def check_empty(value, label):\n if value == u'':\n flash(label + \" Is Empty\")", "def test_missing_axis_specifier():\n svl_string = \"\"\"\n DATASETS\n bigfoot \"bigfoot_sightings.csv\"\n PIE bigfoot AXIS LABEL \"With Location\"\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def exit_with_message(error_text: str) -> NoReturn:\n raise StartRowParseError(start_row, error_text)", "def validate_zone_label(value):\n if not re.match(r'^[a-z0-9][\\.\\-0-9a-z]*[\\.0-9a-z]$', value):\n msg = _(\"Labels must start and end with a letter or digit, \"\n \"and have as interior characters only letters, digits, and hyphen.\")\n raise ValidationError(msg)\n if not value.endswith('.'):\n msg = _(\"Use a fully expanded domain name ending with a dot.\")\n raise ValidationError(msg)\n if len(value) > 63:\n raise ValidationError(_(\"Labels must be 63 characters or less.\"))", "def test_should_raise_error_for_duplicate_names(self):\r\n self.edge_spec['label'] = 'updated_at'\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement(self.property_spec)\r\n self.spec_parser.parse_statement(self.edge_spec)", "def test_lint_fail_short(self, style):\n with ExpectedException(LinterFailure):\n run_linter_throw(\"path/to/file\",\n \"{s}{e}\\n\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def test_col_data_label_no_attrs(self):\n help_tag = 'span'\n help_text_br = False\n names = ('first', 'billing_address_1')\n label_attrs = {}\n expected = ['<label for=\"id_first\">First:</label>']\n expected.append('<label for=\"id_billing_address_1\">street address (line 1):</label>')\n actual = []\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual.append(response.get('label'))\n\n for expect, got in zip(expected, actual):\n self.assertEqual(expect, got)" ]
[ "0.67585087", "0.63081664", "0.6136866", "0.6121706", "0.6079675", "0.6045867", "0.5986271", "0.5970314", "0.5936593", "0.5910567", "0.58990806", "0.58863115", "0.5881466", "0.5871345", "0.58665997", "0.5764735", "0.57456005", "0.5732533", "0.5719328", "0.57080424", "0.56995803", "0.56872946", "0.5654367", "0.5654298", "0.5642969", "0.5623617", "0.5618477", "0.56180114", "0.5611361", "0.55977714", "0.5577953", "0.5560822", "0.5556297", "0.5555302", "0.55466336", "0.55451274", "0.5539302", "0.55385196", "0.55322623", "0.550544", "0.5500905", "0.5482045", "0.54728353", "0.5458261", "0.54380375", "0.54370403", "0.5428706", "0.54119456", "0.54107016", "0.53815305", "0.5379566", "0.53795", "0.53732175", "0.5365747", "0.5362456", "0.53510153", "0.5337281", "0.53346413", "0.53281033", "0.53017175", "0.5283074", "0.5265748", "0.5265682", "0.5250319", "0.52465564", "0.52409184", "0.52290726", "0.52228713", "0.5218934", "0.5208406", "0.52082455", "0.52032423", "0.5202831", "0.52003485", "0.51968217", "0.51896125", "0.5187526", "0.51790935", "0.51742566", "0.5173749", "0.5170407", "0.5164565", "0.5152775", "0.5151199", "0.5150674", "0.5148876", "0.5148295", "0.5147438", "0.5142519", "0.51420546", "0.5140476", "0.51383626", "0.51351696", "0.5134471", "0.51303995", "0.512979", "0.51296496", "0.512501", "0.5119978", "0.5115903" ]
0.7484639
0
Errors in sequence: checking the sequence for allowed characters (only A, C, G, T); if the checked sequence differs, raise an error
def CheckSeq(Seq):
    OkNucleo = ("A", "C", "G", "T")
    for i in Seq:
        if i not in OkNucleo:
            raise InputError(Seq,"malformed input")
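As with the label check, a brief usage sketch; it reuses the hypothetical InputError class assumed in the earlier example and is not part of the original record.

CheckSeq("GATTACA")         # only A/C/G/T characters: accepted, returns None
try:
    CheckSeq("GATTACAX")    # 'X' is not an allowed nucleotide: rejected
except InputError as err:
    print(err.message)      # -> malformed input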
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)", "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(r\"[.-@|\\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+\")\n\n if seq_val.search(sequence) is None:\n return True\n\n return False", "def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0", "def is_valid_sequence(dna):\n \n nucleotides = 'ATCG'\n error = 0\n \n for char in dna:\n if not char in nucleotides:\n error = error + 1\n return error == 0", "def check_and_clean_sequence(sequence, alphabet):\n if set(sequence).issubset(alphabet):\n return sequence\n else:\n return cleaning_ambiguous_bases(sequence)", "def validate_fasta_seq(sequence):\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('>\\S*\\n[ACTGNRYSWKMBDHVEFILPQSXZ]*', re.MULTILINE)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def validate_sequence_numbers(self):\n return self.hive_sequence1() == self.hive_sequence2()", "def test_should_raise_in_case_of_wrong_characters(self):\n validator = CharCombinationValidator()\n\n regex = re.compile(r'[\\(\\[\\{]\\)\\]\\}')\n forbidden_chars = regex.sub('', punctuation)\n for char in forbidden_chars:\n with self.assertRaises(FormulaValidationError):\n validator('Fe(O)2%s' % char)", "def _validate_input_sequence(self, seq:str) -> str:\n if not \"$\" in seq:\n # add sentinal letter which is unique and lexicographically smaller than any other character\n if self.debug: print(f\"Sentinal letter is added to input sequence: {seq + '$'}\")\n return seq + \"$\"\n else:\n if seq[-1:] == \"$\" and seq.count(\"$\") == 1:\n if self.debug: print(f\"Input sequnce ({seq}) already contains sentinal letter at last position.\")\n return seq\n else:\n if self.debug: print(f\"Sentinal letter at wrong position: {seq}\")\n raise ValueError(\"Input sequence sequence may only contain the sentinal letter '$' in the last position.\")", "def test_dna_validator(self):\n \n dna = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test invalid characters\n invalid_dna1 = 'EETGGAGACGGAAACASTCCGAGGACATCCGGAGGAACCCGGGGAGTZVTHHCTGAGTGGTAAT'\n # test invalid length\n invalid_dna2 = 'GGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAAT'\n # test for invalid internal stop\n invalid_dna3 = 'TGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTTGAGTGGTAATC'\n expected_validationT = True\n expected_validationF = False\n result_validation1 = dna_validator(dna)\n self.assertEqual(result_validation1, expected_validationT)\n result_validation2 = dna_validator(invalid_dna1)\n self.assertEqual(result_validation2, expected_validationF)\n result_validation3 = dna_validator(invalid_dna2)\n self.assertEqual(result_validation3, expected_validationF)\n result_validation4 = dna_validator(invalid_dna3)\n self.assertEqual(result_validation4, expected_validationF)", "def validate_single_fasta_seq(sequence):\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('>\\S*\\n[ACTGNRYSWKMBDHVEFILPQSXZ]', re.MULTILINE)\n if regex.search(sequence) is 
not None:\n return True\n else:\n return False", "def test_check_fasta_seqs_with_invalid(self):\r\n\r\n # Test against all data that should give some percent failures\r\n\r\n sample_barcodes = set(['ACCATACC', 'AGATTATAT'])\r\n sample_primers = set(['AGATTTACCA', 'TTATTACCGAT'])\r\n total_seq_count = 4\r\n\r\n perc_invalid_chars, perc_barcodes_detected, perc_primers_detected,\\\r\n perc_bcs_seq_start =\\\r\n check_fasta_seqs(self.sample_fasta_invalid_fp, sample_barcodes,\r\n sample_primers, total_seq_count)\r\n\r\n expected_perc_invalid_chars = \"%1.3f\" % 0.50\r\n expected_perc_barcodes_detected = \"%1.3f\" % 0.25\r\n expected_perc_primers_detected = \"%1.3f\" % 0.25\r\n\r\n self.assertEqual(perc_invalid_chars, expected_perc_invalid_chars)\r\n self.assertEqual(perc_barcodes_detected,\r\n expected_perc_barcodes_detected)\r\n self.assertEqual(perc_primers_detected,\r\n expected_perc_primers_detected)", "def letter_check(read):\n string=\"ACTG\"\n for line_number,line in enumerate(read):\n sequence=line.rstrip()\n if any(x not in string for x in sequence):\n return 0\n return 1", "def test_check_dna_chars_primers(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: AAA1A\\t2,2']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should be able to suppress LinkerPrimerSequence check, won't\r\n # suppress ReversePrimer check\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_primers(header, mapping_data, errors,\r\n disable_primer_check=True)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: ACGTF\\t2,3']\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_fasta_seqs_all_valid(self):\r\n\r\n # Test against all valid data\r\n\r\n sample_barcodes = set(['ACCATACC', 'CCAGATTACG'])\r\n sample_primers = set(['ACATTATTTT', 'TTATTACCGAT'])\r\n total_seq_count = 3\r\n\r\n perc_invalid_chars, perc_barcodes_detected, perc_primers_detected,\\\r\n perc_bcs_seq_start =\\\r\n check_fasta_seqs(self.sample_fasta_fp, sample_barcodes,\r\n sample_primers, total_seq_count)\r\n\r\n expected_perc_invalid_chars = \"%1.3f\" % 0\r\n expected_perc_barcodes_detected = \"%1.3f\" % 0\r\n expected_perc_primers_detected = \"%1.3f\" % 0\r\n\r\n self.assertEqual(perc_invalid_chars, expected_perc_invalid_chars)\r\n self.assertEqual(perc_barcodes_detected,\r\n expected_perc_barcodes_detected)\r\n self.assertEqual(perc_primers_detected,\r\n expected_perc_primers_detected)", "def test_invalid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertFalse(valid)\n self.assertEqual(cost, 0)", "def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", 
\"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False", "def check_dna_chars_primers(header,\r\n mapping_data,\r\n errors,\r\n disable_primer_check=False\r\n ):\r\n\r\n valid_dna_chars = DNASequence.iupac_characters()\r\n valid_dna_chars.add(',')\r\n\r\n # Detect fields directly, in case user does not have fields in proper\r\n # order in the mapping file (this will generate error separately)\r\n header_fields_to_check = [\"ReversePrimer\"]\r\n if not disable_primer_check:\r\n header_fields_to_check.append(\"LinkerPrimerSequence\")\r\n\r\n check_indices = []\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in header_fields_to_check:\r\n check_indices.append(curr_field)\r\n\r\n # Correction factor for header being the first line\r\n correction_ix = 1\r\n # Check for missing data\r\n for curr_data in range(len(mapping_data)):\r\n for curr_ix in check_indices:\r\n if len(mapping_data[curr_data][curr_ix]) == 0:\r\n errors.append(\"Missing expected DNA sequence\\t%d,%d\" %\r\n (curr_data + correction_ix, curr_ix))\r\n\r\n # Check for non-DNA characters\r\n for curr_data in range(len(mapping_data)):\r\n for curr_ix in check_indices:\r\n for curr_nt in mapping_data[curr_data][curr_ix]:\r\n if curr_nt not in valid_dna_chars:\r\n errors.append(\"Invalid DNA sequence detected: %s\\t%d,%d\" %\r\n (mapping_data[curr_data][curr_ix],\r\n curr_data + correction_ix, curr_ix))\r\n continue\r\n\r\n return errors", "def count_sequence_mismatches(seq):\n trans_table = str.maketrans('ACGT', 'TGCA')\n half_len = len(seq) // 2\n second_half = seq[-half_len:].translate(trans_table)\n mismatches = 0\n for i in range(half_len):\n if seq[i] != second_half[-i - 1]:\n mismatches += 1\n return mismatches", "def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False", "def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)", "def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"", "def checkAlphabet(self, count=10):\n if six.PY3:\n readLetters = super().checkAlphabet(count)\n else:\n readLetters = Read.checkAlphabet(self, count)\n if len(self) > 10 and readLetters.issubset(set('ACGT')):\n raise ValueError('It looks like a DNA sequence has been passed to '\n 'AARead().')\n return readLetters", "def test_should_raise_in_case_of_wrong_opening_closing_types(self):\n validator = CharCombinationValidator()\n\n with self.assertRaises(FormulaValidationError):\n validator(self.wrong_opening_closing_types)", "def test_check_dna_chars_bcs(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAA1A', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_bcs(header, mapping_data, errors)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find no errors\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'C1GTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = 
check_dna_chars_bcs(header, mapping_data, errors,\r\n has_barcodes=False)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find errors with has_barcodes=True\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'ReversePrimer', 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AARNCWSVDAA', 'ACGT', 's1&data'],\r\n ['s2', 'CNGTA', 'AAA1A', 'ACGTF', 's2_data']]\r\n errors = []\r\n\r\n errors = check_dna_chars_bcs(header, mapping_data, errors,\r\n has_barcodes=True)\r\n\r\n expected_errors = ['Invalid DNA sequence detected: CNGTA\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def isValidPeptide(self, sequence):\n for position, disallowedAAs in self._rules.iteritems():\n nextAA = sequence[position - 1].upper()\n if nextAA in disallowedAAs:\n return False\n return True", "def check_random_bc(seq):\n if seq.startswith('TGATC'):\n return seq[5:]\n else:\n return seq[:16]", "def test_seq_exceeds_homopolymers(self):\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 3), False)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 2), True)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGA', 1), True)\r\n self.assertEqual(seq_exceeds_homopolymers('AAACGATTTT', 3), True)", "def process_sequence(seq, whitelist):\n sym = ''.join(seq)\n out = validate_symbol(sym, whitelist)\n return out", "def inputCheckpoint(self, obereZeile, untereZeile):\n rv = True\n # 1) only equal length for obereZeile, untereZeile\n if (len(obereZeile) != len(untereZeile)):\n print(\"============================================================\")\n print(\"input sequences do not have the same length\")\n print(\"============================================================\")\n raise ValueError(\"Input sequences of different lengths\")\n \n # 2) only the input alphabets + INPUT_GAP_ZEICHEN (\"_\")\n validityInObereZeile = self.getValidityOfResiduesInSequence(obereZeile)\n validityInUntereZeile = self.getValidityOfResiduesInSequence(untereZeile)\n if (\n validityInObereZeile[\"recognizedAlphabet\"] == self.VALID_DNA_OR_PROTEIN and\n validityInUntereZeile[\"recognizedAlphabet\"] == self.VALID_DNA_OR_PROTEIN\n ):\n print(\"============================================================\")\n print(\"input is recognized as: \" + self.VALID_DNA_OR_PROTEIN)\n _input_type = \"dna\"\n if not self.aligntIsDna:\n _input_type = \"protein\"\n print(\"input is now further processed as: \" + _input_type)\n print(\"============================================================\")\n else:\n print(\"============================================================\")\n if (\n validityInObereZeile[\"recognizedAlphabet\"] in\n [self.INVALID_DNA, self.INVALID_PROTEIN]\n ):\n print(\n \"upper sequence is recognized as: \" +\n validityInObereZeile[\"recognizedAlphabet\"]\n )\n print(\n \"character number {} with value '{}' could not be parsed\".\n format(\n validityInObereZeile[\"residueIndex\"] + 1,\n validityInObereZeile[\"residue\"]\n )\n )\n if (\n validityInUntereZeile[\"recognizedAlphabet\"] in\n [self.INVALID_DNA, self.INVALID_PROTEIN]\n ):\n print(\n \"lower sequence is recognized as: \" +\n validityInUntereZeile[\"recognizedAlphabet\"]\n )\n print(\n \"character number {} with value '{}' could not be parsed\".\n format(\n validityInUntereZeile[\"residueIndex\"] + 1,\n validityInUntereZeile[\"residue\"]\n )\n )\n print(\"============================================================\")\n raise ValueError(\"Input outside of chosen alphabet.\")\n 
return(rv)", "def list_check(listz):\n isValid = False\n x = 0\n position = ''\n\n #checking if characters contains 023 and extracting them \n\n while (x < len(listz)):\n if(listz.__contains__(0)):\n position = position + str(listz[listz.index(0)])\n if(listz.__contains__(2)):\n position = position + str(listz[listz.index(2)])\n if(listz.__contains__(3)):\n position = position + str(listz[listz.index(3)])\n x = len(listz) + 1\n\n#making sure its the requered sequence\n\n if(position == '023'):\n isValid = True\n x = x + 1\n return isValid", "def test_should_raise_in_case_of_wrong_parenthesising(self):\n validator = CharCombinationValidator()\n\n with self.assertRaises(FormulaValidationError):\n validator(self.wrong_opening_closing_parenthesis)", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars(\"aa\"))\n self.assertFalse(all_unique_chars(\"alabama\"))\n self.assertFalse(all_unique_chars(\"Ricardio\"))\n self.assertFalse(all_unique_chars(\"aardvark\"))\n self.assertFalse(all_unique_chars(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars(\"....What?....\"))", "def getValidityOfResiduesInSequence(self, seq):\n seqList = list(seq)\n aSpotted_Index = -1\n aSpotted_residue = \"\"\n if self.aligntIsDna:\n _alphabet = self.DNA_ALPHABET\n else:\n _alphabet = self.PROTEIN_ALPHABET\n # iterate over the sequence given the prior knowldege of the user\n for i in range(len(seqList)):\n residue = seqList[i]\n if str.upper(residue) not in list(_alphabet):\n aSpotted_Index = i\n aSpotted_residue = residue\n break\n rv = {\n \"residueIndex\": aSpotted_Index,\n \"residue\": aSpotted_residue,\n \"recognizedAlphabet\": self.VALID_DNA_OR_PROTEIN\n }\n if (aSpotted_residue != \"\"):\n if self.aligntIsDna:\n rv[\"recognizedAlphabet\"] = self.INVALID_DNA\n else:\n rv[\"recognizedAlphabet\"] = self.INVALID_PROTEIN\n return(rv)", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))", "def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total", "def check_fasta_seqs(input_fasta_fp,\r\n barcodes,\r\n linkerprimerseqs,\r\n total_seq_count,\r\n valid_chars=frozenset(['A', 'T', 'C', 'G', 'N', 'a',\r\n 't', 'c', 'g', 'n'])):\r\n\r\n input_fasta_f = open(input_fasta_fp, \"U\")\r\n\r\n invalid_chars_count = 0\r\n barcodes_count = 0\r\n linkerprimers_count = 0\r\n barcodes_at_start = 0\r\n\r\n # Get max barcode length to checking the beginning of seq for barcode\r\n if barcodes:\r\n max_bc_len = max([len(bc_len) for bc_len in barcodes])\r\n else:\r\n max_bc_len = 0\r\n\r\n for label, seq in parse_fasta(input_fasta_f):\r\n\r\n # Only count one offending problem\r\n for curr_nt in seq:\r\n if curr_nt not in valid_chars:\r\n invalid_chars_count += 1\r\n break\r\n\r\n sliced_seq = seq[0:max_bc_len]\r\n\r\n for curr_bc in barcodes:\r\n if curr_bc in sliced_seq:\r\n barcodes_at_start += 1\r\n break\r\n\r\n for curr_bc in barcodes:\r\n if curr_bc in seq:\r\n barcodes_count += 1\r\n break\r\n\r\n for curr_primer in linkerprimerseqs:\r\n if curr_primer in seq:\r\n linkerprimers_count += 1\r\n break\r\n\r\n invalid_chars_count = 
float(invalid_chars_count)\r\n barcodes_count = float(barcodes_count)\r\n linkerprimers_count = float(linkerprimers_count)\r\n total_seq_count = float(total_seq_count)\r\n barcodes_at_start_count = float(barcodes_at_start)\r\n\r\n perc_invalid_chars = \"%1.3f\" %\\\r\n (invalid_chars_count / total_seq_count)\r\n perc_barcodes_detected = \"%1.3f\" %\\\r\n (barcodes_count / total_seq_count)\r\n perc_primers_detected = \"%1.3f\" %\\\r\n (linkerprimers_count / total_seq_count)\r\n perc_barcodes_at_start_detected = \"%1.3f\" %\\\r\n (barcodes_at_start_count / total_seq_count)\r\n\r\n return perc_invalid_chars, perc_barcodes_detected, perc_primers_detected,\\\r\n perc_barcodes_at_start_detected", "def check_dna_chars_bcs(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True):\r\n\r\n valid_dna_chars = DNASequence.iupac_standard_characters()\r\n # Detect fields directly, in case user does not have fields in proper\r\n # order in the mapping file (this will generate error separately)\r\n header_fields_to_check = []\r\n if has_barcodes:\r\n header_fields_to_check.append(\"BarcodeSequence\")\r\n\r\n check_indices = []\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in header_fields_to_check:\r\n check_indices.append(curr_field)\r\n\r\n # Correction factor for header being the first line\r\n correction_ix = 1\r\n # Check for missing data\r\n for curr_data in range(len(mapping_data)):\r\n for curr_ix in check_indices:\r\n if len(mapping_data[curr_data][curr_ix]) == 0:\r\n errors.append(\"Missing expected DNA sequence\\t%d,%d\" %\r\n (curr_data + correction_ix, curr_ix))\r\n continue\r\n for curr_nt in mapping_data[curr_data][curr_ix]:\r\n if curr_nt not in valid_dna_chars:\r\n errors.append(\"Invalid DNA sequence detected: %s\\t%d,%d\" %\r\n (mapping_data[curr_data][curr_ix],\r\n curr_data + correction_ix, curr_ix))\r\n continue\r\n\r\n return errors", "def test_can_mismatch(self):\n assert not self.RNA(\"\").can_mismatch(\"\")\n assert self.RNA(\"N\").can_mismatch(\"N\")\n assert self.RNA(\"R\").can_mismatch(\"R\")\n assert self.RNA(\"N\").can_mismatch(\"r\")\n assert self.RNA(\"CGUACGCAN\").can_mismatch(\"CGUACGCAN\")\n assert self.RNA(\"U\").can_mismatch(\"C\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUC\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUY\")\n assert not self.RNA(\"UUU\").can_mismatch(\"UUU\")\n assert not self.RNA(\"UCAG\").can_mismatch(\"UCAG\")\n assert not self.RNA(\"U--\").can_mismatch(\"U--\")", "def test_correct_barcode(self):\r\n original = 'ATTTTTTTTTCG'\r\n recieved = 'ATTTTTTTTTTT'\r\n possibilities = ['TGTATTCGTGTA', 'ATTTTTTTTTCG', 'TGTAGGCGTGTA',\r\n 'TGTAGAAGTGTA', 'TGTAGGCGTATA', 'TGTAAAAAAAAA']\r\n decoded, num_errors = barcode.correct_barcode(recieved, possibilities)\r\n self.assertEqual(decoded, original)\r\n self.assertEqual(num_errors, 2)", "def test_sequence_outside_range(self):\n seqs = [-1, 100]\n for seq in seqs:\n with self.subTest(seq=seq):\n with self.assertRaisesRegex(ValueError, str(seq)):\n star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=46,\n header_line=''\n )", "def validate_dna(s):\n import re\n return re.match(\"^[ctga]*$\", s.lower()) is not None", "def mp_mb_checker(self, seq):\n# print('input ' + seq)\n seq = re.sub(r'([ёуеыаоэяию])м(п|б)',r'\\1н\\2',seq)\n# print('output ' + seq)\n return seq", "def test_DnaSequence(self):\n x = DnaSequence(\"tcag\")\n # note: no longer preserves case\n self.assertEqual(x, \"TCAG\")\n\n x = DnaSequence(\"aaa\") + 
DnaSequence(\"ccc\")\n # note: doesn't preserve case\n self.assertEqual(x, \"AAACCC\")\n assert x.moltype is DNA\n self.assertRaises(AlphabetError, x.__add__, \"z\")\n self.assertEqual(DnaSequence(\"TTTAc\").rc(), \"GTAAA\")", "def validate_fasta(data):\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta':\n f'Input sequence must be longer than minimum'\n + f' amplicon length parameter ({data[\"amplicon_min\"]} nt)'\n })", "def _check_grammar(seq, accepted_tokens):\r\n if len(seq) == 0:\r\n if accepted_tokens == [0, 3]:\r\n return True\r\n else:\r\n return False\r\n if seq[0] in accepted_tokens:\r\n curr_token = seq[0]\r\n if curr_token in [0, 2]:\r\n next_possible_tokens = [1, 2]\r\n elif curr_token in [1, 3]:\r\n next_possible_tokens = [0, 3]\r\n else:\r\n raise ValueError\r\n return Model._check_grammar(seq[1:], next_possible_tokens)\r\n return False", "def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False", "def gk_g_checker(self, seq):\n seq = re.sub(r'гк', r'хк', seq)\n return seq", "def test_wrong_sequence(self):\n date = datetime(2016, 11, 12)\n seq = 31\n with self.assertRaises(ValueError):\n star_barcode.barcode_filename(date, seq)", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def ch_t_checker(self, seq):\n seq = re.sub(r'чт', r'шт', seq)\n return seq", "def test_quality_filter_sequence_fail_w_B(self):\r\n\r\n # early 'B' in sequence causes truncation and too short of a read\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\")\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n np.testing.assert_equal(\r\n actual,\r\n (1,\r\n \"GCACTCACCGCCCGTCAC\",\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbb\")))\r\n\r\n # increasing max_bad_run_length rescues read\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n \"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=1,\r\n phred_quality_threshold=2,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # changing threshold rescues read\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n 
\"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=1,\r\n min_per_read_length=75,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n self.assertEqual(actual, (0,\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\",\r\n \"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\"))\r\n\r\n # changing min_per_read_length_fraction rescues read\r\n header = \"990:2:4:11271:5323#1/1\"\r\n sequence = \\\r\n \"GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC\"\r\n quality = \\\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbbBbbbbbbY``\\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`\")\r\n actual = quality_filter_sequence(header,\r\n sequence,\r\n quality,\r\n max_bad_run_length=0,\r\n phred_quality_threshold=2,\r\n min_per_read_length=5,\r\n seq_max_N=0,\r\n filter_bad_illumina_qual_digit=True)\r\n np.testing.assert_equal(\r\n actual,\r\n (0,\r\n \"GCACTCACCGCCCGTCAC\",\r\n _ascii_to_phred64(\"bbbbbbbbbbbbbbbbbb\")))", "def test_check_barcode(self):\r\n self.assertEqual(check_barcode('AA', None, ['AA']), (False, 'AA',\r\n False))\r\n self.assertEqual(check_barcode('GCATCGTCCACA', 'golay_12',\r\n ['GCATCGTCAACA']), (2, 'GCATCGTCAACA', True))\r\n # num errors for golay code is currently in bits\r\n self.assertEqual(check_barcode('GGTT', 4, ['TTTT']), (2, 'TTTT', True))", "def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def validate_sequence(outcome):\n from collections.abc import Sequence\n if not isinstance(outcome, Sequence):\n raise ditException('Outcome class is not a sequence.')\n else:\n return True", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def validate(self):\n self.filter_passing_hits()\n\n checks = {\"number of hits\":self.check_hits(),\n \"base pair count\":self.check_bp(),\n \"contig count\":self.check_contigs(),\n \"characters\": self.check_chars(),\n \"checksum\":not check_checksum(self.seqdata.checksum)}\n\n failed_checks = {(k, v) for k, v in checks.iteritems() if v is False}\n\n if failed_checks:\n \"\"\"\n replace this with logger, break would be replaced by a raised\n Exception where the Exception would be caught by the\n Sequence_Upload code\n \"\"\"\n for k, v in failed_checks:\n with open(generate_path(\"outputs/seq_errors.txt\"), \"a\") as file_:\n file_.write(\n '%s failed validation:'\n 'the %s was not valid\\n' %(self.seqdata.accession, k)\n )\n self.seqdata.valid = False\n else:\n self.seqdata.valid = True", "def __validateIndex(self, index, lenght):\r\n if index >= lenght or index < 0:\r\n raise Exception(\"Can't change these letters\")", "def 
test_solver(allowed_symbols, len_sequence=3):\n secret_sequence = \"\"\n for _ in range(len_sequence):\n secret_sequence += allowed_symbols[random.randint(0, len_sequence - 1)]\n print('secret:', secret_sequence)\n\n solution = brute_force_solver(allowed_symbols, secret_sequence)\n return solution == tuple(secret_sequence)", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def test_check_digits_with_wrong_alphabet(self, _, alpha):\n with self.assertRaises(exceptions.WrongArgumentValueError):\n positional.encode(42, 10, alphabet=alpha)", "def test_single_char(self):\n self.assertTrue(all_unique_chars_no_set(\"a\"))\n self.assertTrue(all_unique_chars_no_set(\"b\"))", "def __validate(self, seqdata):\n\n _Sequence = namedtuple('Seq', ['name', 'data'])\n\n # file-like object\n # isinstance(obj, file) does not hold in Py3\n if hasattr(seqdata, 'read') and hasattr(seqdata, 'name'):\n self.logger.debug('Reading data from file-like object {}'.format(seqdata.name))\n fname = seqdata.name\n\n elif isinstance(seqdata, basestring):\n self.logger.debug('Reading data from file path {}'.format(seqdata))\n fname = seqdata\n\n # can be file name string or sequence\n if not os.path.isfile(fname):\n raise OSError('Sequence file not found: {}'.format(seqdata))\n else:\n raise TypeError('Sequence input format not recognized: {}'.format(seqdata))\n\n # parse and validate sequences\n # defining these two a prior just in case later we decide to support more stuff\n _seq_alphabet = IUPACProtein()\n _seq_format = 'fasta'\n\n seq_iterator = SeqIO.parse(seqdata, _seq_format, alphabet=_seq_alphabet)\n for seq_i, seq_record in enumerate(seq_iterator, start=1):\n\n seq_name = seq_record.name\n seq_raw = str(seq_record.seq)\n if not _verify_alphabet(seq_record.seq):\n msg = 'Entry #{} ({}) in {} is not a valid protein sequence'\n raise ParseError(msg.format(seq_i, seq_name, fname))\n\n self.sequences.append(_Sequence(seq_name, seq_raw))\n\n return self.sequences", "def validate(data, badchars):\n assert(all(b not in data for b in badchars))", "def is_valid(t_input):\r\n eax = 1 # flag validita': inizialmente non valido (caso stringa di lunghezza 0)\r\n ecx = 0 # indice\r\n \r\n while t_input[ecx] != \"\\0\":\r\n eax = 1 # mi preparo il flag \"invalido\" per il carattere\r\n\r\n if is_valid_char(t_input[ecx]) == 0:\r\n # carattere valido\r\n eax = 0\r\n\r\n # se il carattere e' invalido\r\n if eax == 1:\r\n # salta fuori dal ciclo\r\n break\r\n\r\n ecx += 1\r\n # salta a inizio ciclo\r\n\r\n # eax e' 1 per stringhe vuote o \r\n # almeno un carattere invalido\r\n return eax", "def __init__ ( self , seq , pattern ):\n\t\tif pattern . search ( seq ):\n\t\t\tprint \" Warning : sequence contains illegal characters \"\n\t\tself . data = seq . upper ()", "def test_words_with_numbers(self):\n\n test_string = \"1. 
FC Köln\"\n test_anagram = \"anagram\"\n with pytest.raises(ValueError) as exc_info:\n is_anagram(test_string, test_anagram)\n expected_error_msg = \"should only contain letters!\"\n assert exc_info.match(expected_error_msg)", "def __validate_pdu_sequence(pdu_sequence: PDUs) -> None:\n if not isinstance(pdu_sequence, (tuple, list)):\n raise TypeError(\"'pdu_sequence' is not list or tuple type\")\n if not all([isinstance(pdu, AbstractPDU) for pdu in pdu_sequence]):\n raise ValueError(\"'pdu_sequence' does not contain AbstractPDU instances only\")", "def verify_sequencer(seq: 'Sequencer') -> Optional['Sequencer']:\n valid = [ch.isalpha() or ch.isdigit() or ch == '_' for ch in seq.Name]\n if all(valid):\n return seq\n return None", "def validate_input(self, input_sequences: Sequence[str],\n symmetry_group: str, min_length: int, max_length: int,\n max_multimer_length: int) -> Tuple[Sequence[str], bool]:\n sequences = []\n\n for input_sequence in input_sequences:\n if input_sequence.strip():\n input_sequence = self.clean_and_validate_sequence(\n input_sequence=input_sequence,\n min_length=min_length,\n max_length=max_length)\n sequences.append(input_sequence)\n\n if symmetry_group is not None and symmetry_group != 'C1':\n if symmetry_group.startswith(\n 'C') and symmetry_group[1:].isnumeric():\n print(\n f'Using UF-Symmetry with group {symmetry_group}. If you do not '\n f'want to use UF-Symmetry, please use `C1` and copy the AU '\n f'sequences to the count in the assembly.')\n is_multimer = (len(sequences) > 1)\n return sequences, is_multimer, symmetry_group\n else:\n raise ValueError(\n f'UF-Symmetry does not support symmetry group '\n f'{symmetry_group} currently. Cyclic groups (Cx) are '\n f'supported only.')\n\n elif len(sequences) == 1:\n print('Using the single-chain model.')\n return sequences, False, None\n\n elif len(sequences) > 1:\n total_multimer_length = sum([len(seq) for seq in sequences])\n if total_multimer_length > max_multimer_length:\n raise ValueError(\n f'The total length of multimer sequences is too long: '\n f'{total_multimer_length}, while the maximum is '\n f'{max_multimer_length}. 
Please use the full AlphaFold '\n f'system for long multimers.')\n print(f'Using the multimer model with {len(sequences)} sequences.')\n return sequences, True, None\n\n else:\n raise ValueError(\n 'No input amino acid sequence provided, please provide at '\n 'least one sequence.')", "def test_non_valid_input_in_list_middle():\n from unique_chars import find_uniq\n with pytest.raises(ValueError):\n find_uniq(['qwwer', 14, 'cake'])", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def is_legit_peptide_sequence(record_seq: str) -> bool:\n aas = {\n \"A\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"Y\",\n \"*\",\n }\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(aas)", "def test_wrong_length(multi_mol_system_irregular, sequence):\n processor = dssp.AnnotateResidues(\"test\", sequence)\n with pytest.raises(ValueError):\n processor.run_system(multi_mol_system_irregular)", "def test_validate_fasta_with_invalid(self):\r\n\r\n validate_fasta(self.sample_fasta_invalid_fp, self.sample_mapping_fp,\r\n self.output_dir)\r\n\r\n expected_log_fp = join(self.output_dir,\r\n split(self.sample_fasta_invalid_fp)[1] + \"_report.log\")\r\n\r\n log_f = open(expected_log_fp, \"U\")\r\n actual_log_lines = [line.strip() for line in log_f][1:]\r\n\r\n expected_log_lines = \"\"\"Percent duplicate labels: 0.250\r\nPercent QIIME-incompatible fasta labels: 0.500\r\nPercent of labels that fail to map to SampleIDs: 0.750\r\nPercent of sequences with invalid characters: 0.500\r\nPercent of sequences with barcodes detected: 0.250\r\nPercent of sequences with barcodes detected at the beginning of the sequence: 0.000\r\nPercent of sequences with primers detected: 0.250\r\nDuplicate labels found:\r\nseq1\"\"\".split('\\n')\r\n\r\n self.assertEqual(actual_log_lines, expected_log_lines)", "def test_non_valid_input_in_list_end():\n from unique_chars import find_uniq\n with pytest.raises(ValueError):\n find_uniq(['qwwer', 'cake', 14])", "def test_preprocess_bad_chars_in_mapping(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n # But should not halt due to bad characters in a data field\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_datafield_f\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n 
discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, expected_histograms)\r\n\r\n '''# With invalid character in a SampleID, should raise ValueError\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_sampleid_f\r\n barcode_type=\"golay_12\"\r\n min_seq_len=200\r\n max_seq_len=1000\r\n min_qual_score=25\r\n starting_ix=1\r\n keep_primer=False\r\n max_ambig=0\r\n max_primer_mm=1\r\n trim_seq_len=True\r\n dir_prefix=self.output_dir\r\n max_bc_errors=2\r\n max_homopolymer=4\r\n retain_unassigned_reads=False\r\n keep_barcode=False\r\n attempt_bc_correction=True\r\n qual_score_window=0\r\n disable_primer_check=False\r\n reverse_primers='disable'\r\n record_qual_scores=False\r\n discard_bad_windows=False\r\n median_length_filtering=None\r\n added_demultiplex_field=None\r\n\r\n\r\n self.assertRaises(ValueError, preprocess, fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)'''", "def 
vet_nucleotide_sequence(sequence):\n ##########################################################################\n # `rna_pattern_str` and `dna_pattern_str` are be regular expressions\n # that will match any string of RNA and DNA bases, respectively (and only\n # strings of RNA and DNA bases).\n # Read the docstring above for additional clues.\n rna_pattern_str = r'^[AUCGaucg]*$'\n dna_pattern_str = r'^[ATCGatcg]*$'\n ##########################################################################\n\n rna_pattern = re.compile(rna_pattern_str)\n dna_pattern = re.compile(dna_pattern_str)\n\n if rna_pattern.match(sequence):\n return\n if dna_pattern.match(sequence):\n return\n else:\n raise Exception(\"Invalid sequence: {0!r}\".format(sequence))", "def checkAlphabet(self, count=10):\n if count is None:\n readLetters = set(self.sequence.upper())\n else:\n readLetters = set(self.sequence.upper()[:count])\n # Check if readLetters is a subset of self.ALPHABET.\n if self.ALPHABET is None or readLetters.issubset(self.ALPHABET):\n return readLetters\n raise ValueError(\"Read alphabet (%r) is not a subset of expected \"\n \"alphabet (%r) for read class %s.\" % (\n ''.join(sorted(readLetters)),\n ''.join(sorted(self.ALPHABET)),\n str(self.__class__.__name__)))", "def isValid(self, s: str) -> bool:\n st = []\n\n for char in s:\n if (len(st) != 0):\n e = st[-1]\n if (self.isValidPair(e,char)):\n st.pop()\n continue\n st.append(char)\n return (len(st)==0)", "def test_check_cds_8(self):\n self.cds1.translation = Seq(\"MM\", IUPAC.protein)\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\")\n self.assertEqual(count, 1)", "def test_check_chars_data_fields(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', 's2_data']]\r\n warnings = []\r\n\r\n warnings = check_chars_data_fields(header, mapping_data, warnings)\r\n\r\n expected_warnings = ['Invalid characters found in s-1\\t1,0',\r\n 'Invalid characters found in s1&data\\t1,3']\r\n\r\n self.assertEqual(warnings, expected_warnings)", "def test_single_char(self):\n self.assertTrue(all_unique_chars(\"a\"))\n self.assertTrue(all_unique_chars(\"b\"))", "def isAlphabet(self, seqstr):\n mystr = seqstr\n if type(seqstr) is Sequence:\n mystr = seqstr.getString()\n return self.getAlphabet().isValidString(mystr)", "def check_node_seq(self, node_seq: str):\r\n syntax_error_info = []\r\n no_syntax_error = True\r\n # syntax error\r\n for tp in self.error_type:\r\n result = self.error_type[tp].findall(node_seq)\r\n if len(result) > 0:\r\n no_syntax_error = False\r\n for r in result:\r\n syntax_error_info.append(' {}: {}'.format(tp, r))\r\n return syntax_error_info, no_syntax_error", "def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))", "def seq_exceeds_homopolymers(curr_seq, max_len=6):\r\n for base in 'ATGC':\r\n curr = base * (max_len + 1)\r\n if curr in curr_seq:\r\n return True\r\n return False", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars_no_set(\"ab\"))\n self.assertTrue(all_unique_chars_no_set(\"ba\"))\n self.assertTrue(all_unique_chars_no_set(\"make\"))\n self.assertTrue(all_unique_chars_no_set(\"thorn\"))\n self.assertTrue(all_unique_chars_no_set(\"malibu\"))\n self.assertTrue(all_unique_chars_no_set(string.ascii_letters))", "def check_string( pname, use ):\n for l in pname:\n if l in 
string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def check_chars_data_fields(header,\r\n mapping_data,\r\n warnings):\r\n\r\n allowed_data_field_chars = \"+-%./ :,;_\" + digits + letters\r\n allowed_sampleid_chars = \".\" + digits + letters\r\n correction = 1\r\n\r\n sample_id_field = \"SampleID\"\r\n fields_to_skip = [\"BarcodeSequence\", \"LinkerPrimerSequence\",\r\n \"ReversePrimer\"]\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in fields_to_skip:\r\n continue\r\n if header[curr_field] == sample_id_field:\r\n valid_chars = allowed_sampleid_chars\r\n else:\r\n valid_chars = allowed_data_field_chars\r\n for curr_data in range(len(mapping_data)):\r\n # Need to skip newline characters\r\n curr_cell = mapping_data[curr_data][curr_field].replace('\\n', '')\r\n for curr_char in curr_cell:\r\n if curr_char not in valid_chars:\r\n warnings.append(\"Invalid characters found in %s\\t%d,%d\" %\r\n (mapping_data[\r\n curr_data][curr_field].replace(\r\n '\\n', ''),\r\n curr_data + correction, curr_field))\r\n break\r\n\r\n return warnings", "def gene_check(self,DNA,Pol_ac,Pol_c,gene_begin,gene_end):\n PolymeraseIII_ac = Pol_ac\n PolymeraseIII_c = Pol_c\n if (gene_end < PolymeraseIII_c.position) or (gene_begin > (2*self.DNA.length-PolymeraseIII_ac.position)):\n return 2\n else:\n return 1", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars(\"ab\"))\n self.assertTrue(all_unique_chars(\"ba\"))\n self.assertTrue(all_unique_chars(\"make\"))\n self.assertTrue(all_unique_chars(\"thorn\"))\n self.assertTrue(all_unique_chars(\"malibu\"))\n self.assertTrue(all_unique_chars(string.ascii_letters))", "def yotated_checker(self, seq):\n seq = re.sub(r'([йцкнгшщзхфвпрлджчсмтб])(й(а|у|э))', r'\\1ь\\2', seq)\n seq = re.sub(r'(\\A| |[ьъ])йа', r'\\1я', seq)\n seq = re.sub(r'(\\A| |[ьъ])йу', r'\\1ю', seq)\n seq = re.sub(r'(\\A| |[ьъ])йэ', r'\\1е', seq)\n return seq", "def test_given_alphabet_has_code_for_each_character():\n codes = set()\n for char in MORSE_CODE_ALPHABET:\n assert char in MORSE_CHAR_DICT\n codes.add(MORSE_CHAR_DICT[char])\n assert len(codes) == len(MORSE_CODE_ALPHABET)", "def test_correct_barcode_bitwise(self):\r\n nt_to_bits = {\"A\": \"11\", \"C\": \"00\", \"T\": \"10\", \"G\": \"01\"}\r\n\r\n original = 'ATTTTTTTTTCG'\r\n recieved = 'ATTTTTTTTTTT'\r\n possibilities = ['TGTATTCGTGTA', 'ATTTTTTTTTCG', 'TGTAGGCGTGTA',\r\n 'TGTAGAAGTGTA', 'TGTAGGCGTATA', 'TGTAAAAAAAAA']\r\n decoded, num_errors = barcode.correct_barcode_bitwise(\r\n recieved, possibilities, nt_to_bits)\r\n self.assertEqual(decoded, original)\r\n self.assertEqual(num_errors, 3)", "def check_valid_input(letter_guessed, old_letters_guessed):\n if (len(letter_guessed) == 1) and letter_guessed.isalpha() and (letter_guessed not in old_letters_guessed):\n return True\n else:\n return False", "def play_with_regex():\n\n DNA_string = 
\"ATTTGTATGTTCGGCTAACTTCTACCCATCCCCCGAAGTTTAGCAGGTCGTGAGGTGTCATGGAGGCTCTCGTTCATCCCGTGGGACATCAAGCTTCGCCTTGATAAAGCACCCCGCTCGGGTGTAGCAGAGAAGACGCCTACTGAATTGTGCGATCCCTCCACCTCAGCTAAGGTAGCTACCAATATTTAGTTTTTTAGCCTTGCGACAGACCTCCTACTTAGATTGCCACGCATTGAGCTAGCGAGTCAGCGATAAGCATGACGCGCTTTCAAGCGTCGCGAGTATGTGAACCAAGGCTCCGGACAGGACTATATACTTGGGTTTGATCTCGCCCCGACAACTGCAAACCTCAACATTTATAGATTATAAGGTTAGCCGAAATTGCACGTGGTGGCGCCCGCCGACTGCTCCCCGAGTGTGGCTCTTTGATCTGACAACGCGCGACCTCCATCGCGGCCGATTGTTTCTGCGGACCATGTCGTCCTCATAGTTTGGGCATGTTTCCGTTGTAGGAGTGAAGCCACTTAGCTTTGCGCCGTAGTCCCAATGAAAAACCTATGGACTTTGTTTTGGGTAGCATCAGGAATCTGAACCCTGTGAATGTGGGGGTCGCGCGCATAGACCTTTATCTCCGGTTCAAGTTAGGCATGAGGCTGCATGCTACGTTGTCACACCTACACTGCTCGAAGTAAATATGGGAAGCGCGCGGCCTGGCCCGAGGCGTTCCGCGCCGCCACGTGTTCGTTAACTGTTGATTGGTGGCACATAAGCAATACCGTAGTCCCTCAAATTCAGCTCTGTTATCTCGAGCGTTATGTGTCAAATGGCGTAGAACGGGATTGACTGTTTGACACTAGCTGGTGTTCGGTTCGGTAACGGAGAATCTGTGGGGCTATGTCACTAATACTTTCGAAACGCCCCGTACCGATGCTGAACAAGTCGATGCAGGCTCCCGTCTTTGAATAGGGGTAAACATACAAGTCGATAGAAGATGGGT\"\n \n # 1. check if DNA_string starts with \"ATTTGTATG\" (say with re.search() or re.findall())\n regex = re.compile('ATTTGTATG')\n m = regex.search(DNA_string)\n \n # 2. use re.findall() if there are instances of 5 or more consecutive c's in DNA_string\n m = re.finditer('C{5,}',DNA_string)\n for entry in m:\n print entry.span()\n \n # 3. find instances of the motif GGXY in the DNA sequence \n # where X={A,C,G,T} and Y={C,T}\n m = re.finditer('GG[ACGT][CT]',DNA_string)\n print \"NUMBER 3\"\n for entry in m:\n print entry.span()" ]
[ "0.72993267", "0.7268155", "0.7225839", "0.70450157", "0.7042472", "0.666945", "0.6569683", "0.65460646", "0.6537845", "0.6512465", "0.64319974", "0.63730264", "0.6364683", "0.6364558", "0.63401854", "0.6320243", "0.6189944", "0.61825746", "0.6175934", "0.61738986", "0.6158142", "0.6082364", "0.60626537", "0.60547763", "0.60441273", "0.6035358", "0.6016323", "0.60151225", "0.6011422", "0.5971265", "0.59669244", "0.59423137", "0.593543", "0.59227943", "0.5886503", "0.588446", "0.5877923", "0.5874793", "0.58693874", "0.58547527", "0.58389765", "0.5825216", "0.5790237", "0.5767499", "0.57496935", "0.5737172", "0.57200295", "0.5718993", "0.5711126", "0.5710241", "0.5700189", "0.56946975", "0.5694563", "0.56942874", "0.5691196", "0.5671833", "0.5667847", "0.5666151", "0.5664151", "0.56564605", "0.5647852", "0.5645433", "0.5638966", "0.5638422", "0.56338966", "0.5615262", "0.560634", "0.5605964", "0.5598079", "0.559768", "0.5595083", "0.5594552", "0.55944794", "0.55926174", "0.5586949", "0.5581482", "0.55742675", "0.5570081", "0.55619156", "0.55607724", "0.55591357", "0.5548362", "0.5538562", "0.553408", "0.5529244", "0.5526912", "0.55216146", "0.5509097", "0.5508542", "0.550013", "0.54971665", "0.5494395", "0.54923725", "0.54890203", "0.54884636", "0.5488188", "0.5485585", "0.54733473", "0.5455333", "0.54478467" ]
0.7165377
3
parsing a given text file containing labels and sequences: load the file, tidy it, process each line in the file, and return the labels and sequences as list[tuple(string, string)]
def ParseSeqFile(FilePath):
    SeqFile = rSeqFile(FilePath)
    TidyFile = TidyLines(SeqFile)

    result = []
    for line in TidyFile:
        t = (ProcessLine(line))
        result.append(t)
    return(result)
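The record above calls three helpers (rSeqFile, TidyLines, ProcessLine) whose bodies are not included. A minimal sketch of what they might look like, assuming a plain-text input where each tidy line holds a whitespace-separated label followed by its sequence, is shown below; the line format and the helper implementations are assumptions, not part of the original record.

```python
# Hypothetical helpers assumed by ParseSeqFile; the
# "label<whitespace>sequence" line format is an assumption.

def rSeqFile(file_path):
    """Read the raw lines of the sequence file."""
    with open(file_path) as handle:
        return handle.readlines()


def TidyLines(lines):
    """Strip whitespace and drop blank or comment lines."""
    tidy = []
    for line in lines:
        line = line.strip()
        if line and not line.startswith("#"):
            tidy.append(line)
    return tidy


def ProcessLine(line):
    """Split one tidy line into a (label, sequence) tuple."""
    label, sequence = line.split(None, 1)
    return (label, sequence.strip())
```

With these helpers in place, ParseSeqFile(path) returns the list of (label, sequence) tuples described by the query.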
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels", "def readSequences(lines):\n seqs = []\n label = None\n seq_lines = []\n for line in lines:\n line = line.strip() # strip off white space\n if not line: # skip empty lines\n continue\n if line.startswith(';'): # ignore comment lines\n continue\n # check for start of next sequence:\n if line.startswith('>'): # label line\n # first, store the previous sequence if we had one:\n if seq_lines:\n seqs.append(Sequence(label, ''.join(seq_lines)))\n seq_lines = []\n # get the label (name) for the next sequence\n label = line[1:].strip()\n else:\n # collect all lines with sequence information for this sequence:\n seq_lines.append(line)\n # take care of the last sequence in the file\n seqs.append(Sequence(label, ''.join(seq_lines)))\n return seqs", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def fasta(path):\n label = None\n sequence = None\n with open(path, 'r') as data:\n for line in data:\n line = line.strip()\n if line.startswith('>'):\n if label and sequence:\n yield (label, sequence)\n label = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if label and sequence:\n yield (label, sequence)", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def read_processed_data_from_file(file, encoding='latin1'):\n\n with open(file, encoding=encoding) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n labeled_texts = []\n n = len(lines) - 1\n for i, line in enumerate(lines):\n print(f'\\rLoading review {i} of {n}', end='')\n if line == '':\n continue\n tagged_words = re.findall(r'(.+?\\\\.+?) 
', line)\n label = re.findall(r'#(\\d+.\\d)#', line)[0]\n labeled_texts.append((tagged_words, label))\n print()\n return labeled_texts", "def read_file(filename):\n reads = []\n labels = []\n\n with open(filename) as f:\n content = f.readlines()\n\n for line in content:\n _, read, label = re.sub('[null\\t\\n\\[\\]\\\"]', '', line).replace(' ', '').split(',')\n reads.append(read)\n labels.append(label)\n \n return reads, labels", "def _process(self, file: bytes) -> Sequence[List[Tuple[str]]]:\n train_data = file[: -2 * self.num_eval_symbols]\n val_data = file[-2 * self.num_eval_symbols: -self.num_eval_symbols]\n test_data = file[-self.num_eval_symbols:]\n\n symbol = '' if self.remove_end_of_line else str(ord('\\n'))\n train = ' '.join([str(c) if c != ord('\\n') else symbol for c in train_data])\n val = ' '.join([str(c) if c != ord('\\n') else symbol for c in val_data])\n test = ' '.join([str(c) if c != ord('\\n') else symbol for c in test_data])\n\n return [(train,)], [(val,)], [(test,)]", "def seqs_from_file(ids, file_lines):\r\n\r\n for label, seq in parse_fasta(file_lines):\r\n\r\n if id_from_fasta_label_line(label) in ids:\r\n yield label, seq", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def recover_original_data(data_path, sequence_pairs):\n # initialize variables\n num_labels = 0\n num_sequences = 0\n num_correct_labels = 0\n num_correct_sequences = 0\n with open(data_path, \"r\") as input_file:\n # sequence of workds in each sentence\n word_sequence = []\n # gold/original labels for each word in each sentence\n gold_label_sequence = []\n # prediction labels for each word in each sentence\n pred_label_sequence = []\n for line in input_file:\n # split line into tokens\n tokens = line.split()\n # check if line is not empty\n if tokens:\n # a label exists\n num_labels += 1\n # the word is the first token\n word = tokens[0]\n # the original label is the second token\n gold_label = tokens[1]\n # the prediction label is the third token\n pred_label = tokens[2]\n # check if prediction equals to real label\n if pred_label == gold_label:\n num_correct_labels += 1\n # build the sequence of words, labels, and predictions for each sentence\n word_sequence.append(word)\n gold_label_sequence.append(gold_label)\n pred_label_sequence.append(pred_label)\n # line is empty\n else:\n # count number of sequences (=sentences)\n num_sequences += 1\n # check if word_sequence is empty\n if word_sequence:\n sequence_pairs.append([word_sequence, gold_label_sequence])\n # check if we predicted correctly the whole sequence\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # flush lists for next sequence\n word_sequence = []\n gold_label_sequence = []\n pred_label_sequence = []\n # here is the case where the file does not end with an empty line\n # repeat the process for the last sequence of the file\n if word_sequence:\n num_sequences += 1\n sequence_pairs.append([word_sequence, gold_label_sequence])\n if pred_label_sequence == gold_label_sequence:\n num_correct_sequences += 1\n # calculate per instance (=word) accuracy and per sequence (=sentence) accuracy\n per_instance_accuracy = float(num_correct_labels) / num_labels * 100\n per_sequence_accuracy = float(num_correct_sequences) / num_sequences * 100\n return per_instance_accuracy, per_sequence_accuracy", "def 
convert_bmes_to_sequence_tagging(source_file: str, output_file: str):\n # 1. read all lines and split it to sentences\n sentences: List[str] = []\n labels: List[str] = []\n with open(source_file, 'r+', encoding='utf-8') as f:\n\n # 1. 一个文件中的token和labels\n sentence_tokens, sentence_labels = [], []\n for line in f:\n line = line.strip()\n if not line:\n sentences.append(sentence_tokens)\n labels.append(sentence_labels)\n sentence_tokens, sentence_labels = [], []\n else:\n line_tokens, line_labels = read_line(line)\n\n sentence_tokens.extend(line_tokens)\n sentence_labels.extend(line_labels)\n\n assert len(sentences) == len(labels)\n \n # 2. write tokens and labels to the file\n with open(output_file, 'w+', encoding='utf-8') as f:\n\n for index in range(len(sentences)):\n tokens, sentence_labels = sentences[index], labels[index]\n\n items = [\n '###'.join([tokens[i], sentence_labels[i]]) for i in range(len(tokens))]\n\n f.write('\\t'.join(items) + '\\n')", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n 
all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def get_sequence_list(files):\n sequence_list = []\n for i in range(0,len(files)):\n with open(files[i], \"r\") as fasta_file:\n fasta_seq_all = fasta_file.read()\n \n\n fasta_seq_all = fasta_seq_all.split(\">\")\n\n for j in range(0, len(fasta_seq_all)):\n fasta_seq = fasta_seq_all[j]\n if len(fasta_seq) > 2:\n \n fasta_seq = fasta_seq.splitlines()\n label = _format_label(files[i], fasta_seq.pop(0))\n format_fasta_seq = []\n for k in range(0,len(fasta_seq)):\n try:\n if fasta_seq[k][0] == \"\\n\":\n break\n \n format_fasta_seq.append(fasta_seq[k])\n except:\n break\n format_fasta_seq = \"\".join(format_fasta_seq)\n format_fasta_seq.strip()\n if len(format_fasta_seq) > 2:\n sequence_list.append(Sequence(format_fasta_seq, label))\n \n return sequence_list", "def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y", "def parse_labels(file: str) -> ndarray:\n rows = []\n with open(file, 'r', encoding='utf-8') as f:\n for row in f:\n rows.append(row.strip())\n return array(rows)", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content 
in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents", "def load_dataset(file_handle) -> list:\n output = []\n lines = file_handle.readlines()\n name = None\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n if line.startswith(\">\"):\n if name:\n output.append(sequence)\n name = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if name:\n output.append(sequence)\n \n return output", "def p_and_l_from(files):\n if isinstance(files, str):\n files = [files]\n paths = []\n labels = []\n for file in files:\n print(f'read {file}')\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(' ')\n paths.append(line[0])\n labels.append(int(line[1]))\n return [paths, labels]", "def read_file(self,filename):\n\n f = open(filename,'r')\n lines = f.readlines()\n f.close()\n\n sequences = [l.strip() for l in lines if l.strip() != \"\"]\n\n self.load_sequences(sequences)", "def read_label_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n ret = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n ret[int(pair[0])] = pair[1].strip()\n else:\n ret[row_number] = pair[0].strip()\n return ret", "def read_ptbtagged(ptbtagged_path: str) -> Iterator[Tuple[TokenSeq, PosSeq]]:\n #do this immediately (first)\n #start generating feature matrices\n \n #read file into an array \n with open(ptbtagged_path) as f:\n file_array = f.readlines()\n file_array.append(\"\\n\")\n array_of_tuples = create_tuples(file_array)\n\n return generator(array_of_tuples)", "def generate_input(file_path):\r\n\tlabel_matches = dict()\r\n\tfile_lines = []\r\n\twith open(file_path) as f:\r\n\t\tfor line in f:\r\n\t\t\tfile_lines = file_lines + [line.lower().split()]\r\n\t\tword_tuples = zip(file_lines[0::3], file_lines[1::3], file_lines[2::3])\r\n\t\tfor (words, part_of_speech, word_type) in word_tuples:\r\n\t\t\ttype_tuples = zip(words, word_type)\r\n\t\t\tfor word_and_tag in type_tuples:\r\n\t\t\t\tlabel_matches.update({word_and_tag : (label_matches.get(word_and_tag, 0) + 1)})\r\n\treturn label_matches", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = 
FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def parseLabels(filename):\n r = parse(filename)\n res = {}\n for qid in r:\n lst = []\n for y in r[qid].split(\";\"):\n doc, score = y.split(\",\")\n lst.append((int(doc), float(score)))\n res[qid] = lst\n return res", "def get_labeled_data(filename):\n e = []\n y = []\n with open(filename) as f:\n for line in f:\n e.append(line[1:-1])\n y.append(category_mapping[abbreviation_mapping[line[0]]])\n return e, y", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def labels(labels_file, labels = []):\n\n print(f\"Parsing labels '{labels_file}'\")\n with open(labels_file, 'r') as f:\n for i, line in enumerate(f):\n labels.append(line.split(':')[-1].strip())\n return pd.Series(labels)", "def read(path, label2int):\n\n labels = [] # int labels\n samples = [] # examples as strings\n\n for label_dir in os.listdir(path):\n label_dir_path = os.path.join(path, label_dir)\n\n for file in os.listdir(label_dir_path):\n file_path = os.path.join(label_dir_path, file)\n file_text = open(file_path).read().rstrip()\n int_label = label2int[label_dir.lower()]\n samples.append(file_text)\n labels.append(int_label)\n\n return samples, labels", "def parse_txt(txt_path, debug_till_row=None, join_desc=False, return_max_len=False, fraction=1,\n label_prefix=\"__label__\", seed=None):\n\n with open(txt_path, \"r\") as infile:\n if debug_till_row not in [None, -1]:\n data = infile.read().split(\"\\n\")[:debug_till_row]\n else:\n data = infile.read().split(\"\\n\")\n\n max_len = -1\n assert 0 < fraction <= 1\n if fraction < 1:\n if seed is not None:\n np.random.seed(seed)\n size = int(round(fraction * len(data)))\n inds = np.arange(len(data))\n np.random.shuffle(inds)\n data = [data[i] for i in inds[:size]]\n\n descs, labels = [], []\n for row in data:\n row_splitted = row.split()\n num_words = len(row_splitted)\n if num_words == 1:\n continue\n max_len = max(max_len, len(row_splitted))\n\n tmp = []\n for ind, w in enumerate(row_splitted):\n if not w.startswith(label_prefix):\n break\n tmp.append(w[len(label_prefix):])\n\n labels.append(\" \".join(tmp))\n if join_desc:\n descs.append(\" \".join(row_splitted[ind:]))\n else:\n descs.append(row_splitted[ind:])\n\n if 
return_max_len:\n return descs, labels, max_len\n return descs, labels", "def parse(self):\n result = list()\n for i, line in enumerate([x.strip() for x in self._input_file], 1):\n if not line:\n continue\n # There should be only 2 entries. Example:\n # kernel`0xffffffff8074d27e;kernel`_sx_xlock 1\n try:\n frames, value = line.split()\n frames = [trim_offset(n) for n in frames.split(';')]\n except ValueError:\n raise StackCollapserException('Unable to parse line {}'.format(i))\n result.append((frames, int(value)))\n return result", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def readAnnotations(f):\n lbf = \"../labels/\" + f[: f.rfind('.')] + \".txt\"\n b = []\n with open(lbf, \"r\") as fh:\n for l in fh:\n p = l.strip().split()\n b.append( (p[0], int(p[1]), int(p[2]), int(p[3]), int(p[4])) )\n\n return b", "def fasta_seqs(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n # sequence will be stored in x[1], and i am removing nextline '\\n' characters that comes with it.\n list.append(x[1].replace('\\n', ''))\n except:\n pass\n return list", "def __prepareDataSet(fileName):\n\n labels = []\n utterances = []\n\n with open(fileName) as f:\n lines = f.readlines()\n\n for line in lines:\n try:\n act = line[:line.index(\" \")]\n utterance = line[line.index(\" \"):line.index(\"\\n\")]\n\n try:\n labels.append(act.strip())\n utterances.append(utterance.strip())\n\n except KeyError:\n pass\n\n except ValueError:\n pass\n\n return labels, utterances", "def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def parse(self, f):\n lines = []\n for line in f:\n _line = line.split(\"//\")[0].strip()\n if _line.startswith(\"(\"): # is a label\n label_name = _line[1:-1]\n self.labels[label_name] = len(lines) # line number / address of label\n elif _line:\n lines.append(_line)\n # else: it's just a whitespace/comment line (ignore)\n return lines", "def _read_one_file(file_name, label_list):\n lines = tf.io.gfile.GFile(file_name, \"r\").readlines()\n examples = []\n label_id_map = {label: i for i, label in enumerate(label_list)}\n sentence_id = 0\n example = InputExample(sentence_id=0)\n for line in lines:\n line = line.strip(\"\\n\")\n if line:\n # The format is: <token>\\t<label> for train/dev set and <token> for test.\n items = line.split(\"\\t\")\n assert len(items) == 2 or len(items) == 1\n token = items[0].strip()\n\n # Assign a dummy label_id for test set\n label_id = label_id_map[items[1].strip()] if len(items) == 2 else 0\n example.add_word_and_label_id(token, label_id)\n else:\n # Empty line indicates a new sentence.\n if example.words:\n examples.append(example)\n sentence_id += 1\n example = InputExample(sentence_id=sentence_id)\n\n if 
example.words:\n examples.append(example)\n return examples", "def read_file(name):\n\twith open(name) as f:\n\t\tmodel=f.readlines()\n\n\ts=[]\n\tsequences=[]\n\ti=0;\n\n\tfor word in model:\n\t\ti+=1\n\t\tword=word[:-1]\n\t\ts.append(word)\n\t\tif i %10 == 0 or i == (len(model)-1):\n\t\t\tsequences.append(s)\n\t\t\ts=[]\n\n\treturn sequences", "def parse_sequences_from_gff_metadata( file ):\n import pandas\n result = []\n for line in file:\n if line.startswith( '##sequence-region' ):\n parts = line.strip().split( \" \" )\n nameStartEnd = parts[-3:] # last 3 elements\n result.append({\n \"seqid\": nameStartEnd[0],\n \"start\": int( nameStartEnd[1] ),\n \"end\": int( nameStartEnd[2] )\n })\n elif not line[0] == '#':\n # quit when we meet the first non-metadata line\n break\n return pandas.DataFrame( result )", "def get_fasta_labels(input_fasta_fp):\r\n\r\n fasta_labels = []\r\n\r\n fasta_f = open(input_fasta_fp, \"U\")\r\n\r\n for label, seq in parse_fasta(fasta_f):\r\n fasta_labels.append(label.split()[0])\r\n\r\n return fasta_labels", "def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences", "def preprocess_file(self,\n file_path,\n with_labels=True,\n return_ids=False,\n fit=True,\n min_seq_length=None,\n max_seq_length=None):\n self._min_seq_length = min_seq_length\n self._max_seq_length = max_seq_length\n\n self._file_handle = codecs.open(file_path, \"r\", encoding=\"utf-8\")\n\n data_frame = pd.read_csv(self._file_handle, delimiter=\",\", encoding=\"utf-8\", header=\"infer\")\n\n X, y, ids = [], [], []\n\n for _, row in data_frame.iterrows():\n ids.append(row.id)\n\n strings = []\n string_ = self._tokenize(row.text)\n\n if (self._max_seq_length is not None) and len(string_) > self._max_seq_length:\n # split into several examples and append them to `strings`\n strings.extend(self._split_string(string_))\n else:\n strings.append(string_)\n\n for string_ in strings:\n if (self._min_seq_length is not None) and len(string_) < self._min_seq_length:\n # do not use this example for training\n continue\n\n string_ = self._ngrams(string_)\n X.append(string_)\n if with_labels:\n y.append(row.author)\n\n # encode X strings to sequence ids\n # keras tokenizer does not actually tokenize, rather encodes sequences\n if fit:\n self.encoder = Tokenizer(filters=\"\", lower=self._lowercase)\n self.encoder.fit_on_texts(X)\n X_sequences = self.encoder.texts_to_sequences(X)\n\n # pad without max length at the moment\n X_sequences = pad_sequences(X_sequences)\n\n # encode y labels\n if fit:\n self.label_encoder = LabelEncoder()\n self.label_encoder.fit(y)\n logger.info(\"Classes found in the training data: %s\" % str(self.label_encoder.classes_))\n\n if with_labels:\n y_sequences = self.label_encoder.transform(y)\n else:\n y_sequences = []\n\n # debug samples\n for name, subset in zip([\"ids\", \"X\", \"y\", \"X_sequences\", \"y_sequences\"],\n [samples[:5] for samples in [ids, X, y, X_sequences, y_sequences]]):\n logger.debug(\"Samples from %s: %s\" % (name, str(subset)))\n\n logger.info(\"Preprocessed dataset has size: %d\" % len(X))\n\n return ids, X, y, X_sequences, y_sequences", "def read_labels(labels_file):\n if not labels_file:\n print 'WARNING: No labels file provided. 
Results will be difficult to interpret.'\n return None\n\n labels = []\n with open(labels_file) as infile:\n for line in infile:\n label = line.strip()\n if label:\n labels.append(label)\n assert len(labels), 'No labels found'\n return labels", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def read_tuples_list(corpus_path):\n idx = 0\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, label_h_, pos_, ner_ = [], [], [], []\n\n for line in lines:\n idx += 1\n if line.find(\"DOC-ID\") < 0 and line != '\\n':\n try:\n [char, label_h, pos_tag, ner_tag] = line.strip().split()\n sent_.append(char)\n label_h_.append(label_h)\n pos_.append(pos_tag)\n ner_.append(ner_tag)\n except BaseException as e:\n print(e)\n print(line)\n else:\n # print(line)\n if idx > 1:\n data.append((sent_, label_h_, pos_, ner_))\n sent_, label_h_, pos_, ner_ = [], [], [], []\n\n return data", "def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))", "def load_input(filename: str) -> list:\n\n text_stream = io.open(filename, 'r', encoding='utf-8', errors='ignore', newline='\\n')\n \"\"\" Calls Python's io function to read the file with the specified name.\"\"\"\n\n initial_state = []\n for i in range(0, 4):\n initial_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The rstrip method removes all trailing whitespace of the string. The split \n method uses the given character as the delimiter to break down the string and \n return a list of the substrings. The map function takes that list, converts \n the substrings into integers and returns a map object, which is eventually \n converted into a list by the exterior call to the list function. \"\"\"\n\n \"\"\" A state is represented as a multi-layer list. The first layer contains \n the four rows, each of which is a second layer that consists of four tiles. \"\"\"\n\n blank_line = text_stream.readline()\n \"\"\" In the input file, there is a blank line in between the two states.\"\"\"\n\n goal_state = []\n for i in range(0, 4):\n goal_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The construct of this part is identical to the one above. \"\"\"\n\n text_stream.close()\n\n ret = [initial_state, goal_state]\n \"\"\" Returns the two lists that represent the initial and goal states, \n respectively. \"\"\"\n return ret", "def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' 
in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) 
# padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n\n test_number=int((number_examples-training_number)/2)\n\n\n test = (X[training_number+ 1:training_number+test_number], Y[training_number + 1:training_number+test_number])\n valid = (X[training_number + test_number + 1:],\n Y[training_number + test_number + 1:])\n\n return train,test,valid", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def LoadLabels(filepath):\n with open(filepath, 'r') as datafile:\n lines = [line.strip() for line in datafile.readlines()]\n # filter for empty lines\n return [line for line in lines if line]", "def load_human_sequences():\n # Define sequences list variable to store the sequences\n sequences = []\n # Open the human 9mer sequences file\n f = open(\"Human_9mer_Sequences.txt\", \"r\")\n # Store each sequence to the list\n for line in f:\n sequences.append(line.strip())\n\n return sequences", "def readPfile(filename):\n\n with smart_open(filename, \"rb\") as f:\n # Read header\n # Assuming all data are consistent\n for line in f:\n tokens = line.decode().split()\n if tokens[0] == \"-pfile_header\":\n headerSize = int(tokens[4])\n elif tokens[0] == \"-num_sentences\":\n nSentences = int(tokens[1])\n elif tokens[0] == \"-num_frames\":\n nFrames = int(tokens[1])\n elif tokens[0] == \"-first_feature_column\":\n cFeature = int(tokens[1])\n elif tokens[0] == \"-num_features\":\n nFeatures = int(tokens[1])\n elif tokens[0] == \"-first_label_column\":\n cLabel = int(tokens[1])\n elif tokens[0] == \"-num_labels\":\n nLabels = int(tokens[1])\n elif tokens[0] == \"-format\":\n format = tokens[1].replace(\"d\", \"i\")\n elif tokens[0] == \"-end\":\n break\n nCols = len(format)\n dataSize = nFrames * nCols\n\n # Read sentence index\n f.seek(headerSize + dataSize * 4)\n index = struct.unpack(\">%di\" % (nSentences + 1), f.read(4 * (nSentences + 1)))\n\n # Read data\n f.seek(headerSize)\n features = []\n labels = []\n sen = 0\n for i in range(nFrames):\n if i == index[sen]:\n features.append([])\n labels.append([])\n sen += 1\n data = struct.unpack(\">\" + format, f.read(4 * nCols))\n features[-1].append(data[cFeature : cFeature + nFeatures])\n labels[-1].append(data[cLabel : cLabel + nLabels])\n features = [numpy.array(x) for x in features]\n labels = [numpy.array(x) for x in labels] if nLabels > 0 else None\n\n return (features, labels)", "def parse(self):\n\n if self._parse is None:\n\n seqs = [] # list of Weighted Sequences generated by parsing file\n\n with open(self._seqfile, \"r\") as f:\n for i, l in enumerate(f.readlines()):\n try:\n float(l) # try if line is numbers only\n except ValueError:\n seqs.append(WeightSeq(l))\n\n self._parse = seqs\n\n return seqs\n else:\n return \"Sequence file was already parsed\"", "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "def read_alignment(file):\n alignments = list()\n with open(file, 'r') as f:\n for line in f:\n line_lst = line.strip().split()\n align_lst = list()\n for pair in line_lst:\n src_idx, tgt_idx = pair.split('-')\n align_lst.append((int(src_idx),int(tgt_idx)))\n # print(align_lst)\n alignments.append(align_lst)\n return alignments", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with 
open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def get_data(data_path, label,alphabet):\n\tdata = []\n\tlabels = []\n\tpasid = []\n\tseq_len=0\n\n\twith open(data_path, 'r') as f:\n\t\tnext(f)\n\t\tfor cnt,line in enumerate(f):\n\t\t\tline = line.strip('\\n')\n\t\t\tarray = line.split('\\t')\n\t\t\tbase = list(array[7])\n\t\t\tseq_len = len(base)\n\t\t\tseq = np.array(base, dtype = '|U1').reshape(-1, 1)\n\t\t\tseq_data = (seq == alphabet).astype(np.float32)\n\t\t\tdata.append(seq_data)\n\t\t\tpasid.append(label[0:3]+array[0])\n\t\t\tif cnt > 20:\n\t\t\t\tbreak\n\tseq_dim = alphabet.shape[0]\n\tdata = np.stack(data).reshape([-1, seq_len, 1, seq_dim])\n\tpasid=np.array(pasid)\n\tif label == \"Positive\":\n\t\tlabels = np.zeros(data.shape[0])\n\telse:\n\t\tlabels = np.ones(data.shape[0])\n\tprint('Read %d %s sequences from %s'%(labels.shape[0],label, data_path))\n\tprint('Sequences length is %d,Sequences dimension is %d'%(seq_len,seq_dim))\n\treturn data, labels, pasid", "def read_target_and_labels_from_file(file_path):\n stemmer = SnowballStemmer(\"english\")\n dialog_acts = []\n utterances = []\n with open(file_path, \"r\") as file:\n for line in file.readlines():\n dialog_acts.append(line.split()[0])\n utterances.append(stemmer.stem(line[line.find(\" \"):].strip()))\n return dialog_acts, utterances", "def load_labeled_data(files):\n\tx = []\n\ty = []\n\tfor filename in files:\n\t\tdata = []\n\t\twith open(filename) as infile:\n\t\t\tlabel = int(infile.readline())\n\t\t\tfor line in infile:\t\n\t\t\t\tdata.append(dna_string_to_array(line.strip()))\n\t\ty += [label]*len(data)\n\t\tx += data\n\n\treturn (np.array(x), np.array(y))", "def read_data_in_sequences(filepath, sequence_length, shuffle=True, pca_dims=7):\n pca_dims_ = pca_dims\n x, y1, y2, y3 = read_data(filepath, shuffle=False, pca_dims=pca_dims_)\n\n x_seq = []\n\n n_features = x.shape[1]\n for i, features in enumerate(x, start=1):\n seq_features = np.zeros((sequence_length * n_features,))\n if i < sequence_length:\n rand_indices = list(map(int, x.shape[0] * np.random.random_sample((sequence_length-i,))))\n\n seq_features[:(sequence_length-i)*n_features] = np.concatenate([x[j] for j in rand_indices])\n\n # For the first inputs, this portion of the feature vector is empty\n try:\n seq_features[(sequence_length-i)*n_features:(sequence_length-1)*n_features] = x_seq[i-2][sequence_length:]\n except IndexError:\n pass\n except ValueError:\n pass\n\n seq_features[(sequence_length-1)*n_features:] = x[i-1]\n else:\n past = np.array(x_seq[-1])[n_features:]\n seq_features = np.concatenate([past, x[i-1]])\n\n x_seq.append(seq_features)\n\n x_seq = np.array(x_seq, dtype='float32')\n\n if shuffle:\n return parallel_shuffle(x_seq, y1, y2, y3)\n else:\n return x_seq, y1, y2, y3", "def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n elements = line.strip('\\n').split('\\t')\r\n\r\n # special handling for the first line only\r\n if i == 
0:\r\n # validating if the file has a header\r\n if elements[0] == '':\r\n for otu_id in elements[1:]:\r\n otu_ids.append(non_alphanum_mask.sub('_', otu_id))\r\n continue\r\n else:\r\n for j, otu_id in enumerate(elements[1:]):\r\n otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n\r\n # handling of all other lines\r\n current_row = []\r\n\r\n # converting each value in the row to int\r\n for count in elements[1:]:\r\n try:\r\n current_row.append(int(round(float(count), 0)))\r\n except ValueError:\r\n current_row.append(0)\r\n\r\n # if the sum of all the values is equial to 0 ignore line\r\n if sum(current_row) == 0:\r\n continue\r\n\r\n # adding sample header to list\r\n sample_ids.append(non_alphanum_mask.sub('.',\r\n dash_space_mask.sub('.', elements[0])))\r\n\r\n # validating the size of the headers to add missing columns\r\n # this is only valid when there is no header\r\n if len(current_row) > len(otu_ids):\r\n # modify header data\r\n extra_cols = []\r\n for j in range(len(otu_ids), len(current_row)):\r\n extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n # modify data\r\n for j in range(len(data)):\r\n data[j].extend([0] * (len(current_row) - len(otu_ids)))\r\n\r\n otu_ids.extend(extra_cols)\r\n elif len(current_row) < len(otu_ids):\r\n # modify data\r\n current_row.extend([0] * (len(otu_ids) - len(current_row)))\r\n\r\n data.append(current_row)\r\n\r\n return sample_ids, otu_ids, asarray(data).transpose()", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def decoding(file_path, id2spo, logits_all, seq_len_all,\n tok_to_orig_start_index_all, tok_to_orig_end_index_all):\n example_all = []\n with open(file_path, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n example_all.append(json.loads(line))\n\n formatted_outputs = []\n for (i, (example, logits, seq_len, tok_to_orig_start_index, tok_to_orig_end_index)) in \\\n enumerate(zip(example_all, logits_all, seq_len_all, tok_to_orig_start_index_all, tok_to_orig_end_index_all)):\n\n logits = logits[1:seq_len +\n 1] # slice between [CLS] and [SEP] to get valid logits\n logits[logits >= 0.5] = 1\n logits[logits < 0.5] = 0\n tok_to_orig_start_index = tok_to_orig_start_index[1:seq_len + 1]\n tok_to_orig_end_index = tok_to_orig_end_index[1:seq_len + 1]\n predictions = []\n for token in logits:\n predictions.append(np.argwhere(token == 1).tolist())\n\n # format predictions into example-style output\n formatted_instance = {}\n text_raw = example['text']\n complex_relation_label = [8, 10, 26, 32, 46]\n complex_relation_affi_label = [9, 11, 27, 28, 29, 33, 47]\n\n # flatten predictions then retrival all valid subject id\n flatten_predictions = []\n for layer_1 in predictions:\n for layer_2 in layer_1:\n flatten_predictions.append(layer_2[0])\n subject_id_list = []\n for cls_label in list(set(flatten_predictions)):\n if 1 < cls_label <= 56 and (cls_label + 55) in flatten_predictions:\n subject_id_list.append(cls_label)\n subject_id_list = list(set(subject_id_list))\n\n # fetch all valid spo by subject id\n spo_list = []\n for id_ in subject_id_list:\n if id_ in complex_relation_affi_label:\n continue # do this in the next \"else\" branch\n if id_ not in complex_relation_label:\n subjects = find_entity(text_raw, id_, predictions,\n 
tok_to_orig_start_index,\n tok_to_orig_end_index)\n objects = find_entity(text_raw, id_ + 55, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n for subject_ in subjects:\n for object_ in objects:\n spo_list.append({\n \"predicate\": id2spo['predicate'][id_],\n \"object_type\": {\n '@value': id2spo['object_type'][id_]\n },\n 'subject_type': id2spo['subject_type'][id_],\n \"object\": {\n '@value': object_\n },\n \"subject\": subject_\n })\n else:\n # traverse all complex relation and look through their corresponding affiliated objects\n subjects = find_entity(text_raw, id_, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n objects = find_entity(text_raw, id_ + 55, predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)\n for subject_ in subjects:\n for object_ in objects:\n object_dict = {'@value': object_}\n object_type_dict = {\n '@value': id2spo['object_type'][id_].split('_')[0]\n }\n if id_ in [8, 10, 32, 46\n ] and id_ + 1 in subject_id_list:\n id_affi = id_ + 1\n object_dict[id2spo['object_type'][id_affi].split(\n '_')[1]] = find_entity(text_raw, id_affi + 55,\n predictions,\n tok_to_orig_start_index,\n tok_to_orig_end_index)[0]\n object_type_dict[id2spo['object_type'][\n id_affi].split('_')[1]] = id2spo['object_type'][\n id_affi].split('_')[0]\n elif id_ == 26:\n for id_affi in [27, 28, 29]:\n if id_affi in subject_id_list:\n object_dict[id2spo['object_type'][id_affi].split('_')[1]] = \\\n find_entity(text_raw, id_affi + 55, predictions, tok_to_orig_start_index, tok_to_orig_end_index)[0]\n object_type_dict[id2spo['object_type'][id_affi].split('_')[1]] = \\\n id2spo['object_type'][id_affi].split('_')[0]\n spo_list.append({\n \"predicate\": id2spo['predicate'][id_],\n \"object_type\": object_type_dict,\n \"subject_type\": id2spo['subject_type'][id_],\n \"object\": object_dict,\n \"subject\": subject_\n })\n\n formatted_instance['text'] = example['text']\n formatted_instance['spo_list'] = spo_list\n formatted_outputs.append(formatted_instance)\n return formatted_outputs", "def parse_fasta(data):\n name, seq = None, []\n for line in data:\n line = line.rstrip()\n if line.startswith('>'):\n if name:\n yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name:\n yield (name, ''.join(seq))", "def _read_sequence_list(self, sequence_list):\n f = open(sequence_list).read().replace('\\t', ' ').replace(',', '')\n sequences = {}\n\n for line in f.split(\"\\r\\n\"):\n line = re.split(' +', line)\n sequences[line[0]] = []\n for clip_col in range(2, len(line)):\n clip_range = line[clip_col].split(\"-\")\n sequences[line[0]].append((int(clip_range[0]),int(clip_range[1])))\n return sequences", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint 
\"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def load_labels(label_file):\n\n label = []\n proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label", "def parse_file(self, infile, chardict, labeldict):\n examples = []\n fin = io.open(infile, 'r')\n # idx is for the index of the row in the \n # original file before shuffling and randomization\n idx = 0\n for line in fin: \n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n # print entity\n ent = map(lambda c:chardict[c], list(entity))\n lab = map(lambda l:labeldict[l] if l in labeldict else 0, label.split(','))\n examples.append((idx, ent, lab))\n idx += 1\n fin.close()\n print \"num_rows:\", len(examples), \" index\", idx\n return examples", "def read_data(filename, eos='#'):\n ### Exercise 6.1\n\n with open(filename) as f:\n utterances = []\n labels = []\n\n for line in f:\n # Get utterance output and length\n utter = line\n utter = utter.replace(\" \", \"\").replace(\"\\n\", \"\") + \"#\"\n utterances.append(utter)\n # Make empty sequence\n sequence = np.zeros(len(utter), dtype=int)\n sequence[0], sequence[len(utter) - 1] = 1, 1\n # Find indexes of beginning of words\n prev_char = \"\"\n count = 0\n new_word_indexs = []\n for char in line:\n if char == \" \":\n prev_char = char\n continue\n if prev_char == \" \":\n prev_char = char\n new_word_indexs.append(count)\n count += 1\n else:\n prev_char = char\n count += 1\n for index in new_word_indexs:\n sequence[index] = 1\n labels.append(sequence)\n\n return (utterances, labels)", "def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) 
# padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n valid_number=min(1000,number_examples-training_number)\n test = (X[training_number+ 1:training_number+valid_number+1], Y[training_number + 1:training_number+valid_number+1])\n return train,test", "def _load_seqs(file_format, filename, fmt, kw, parser_kw):\n fmt = fmt or file_format\n if not fmt:\n msg = \"could not determined file format, set using the format argument\"\n raise ValueError(msg)\n parser_kw = parser_kw or {}\n for other_kw in (\"constructor_kw\", \"kw\"):\n other_kw = kw.pop(other_kw, None) or {}\n kw.update(other_kw)\n return list(FromFilenameParser(filename, fmt, **parser_kw))", "def read_instructions_from_file(file):\n list_of_instructions = []\n with open(file) as sf:\n instructions = sf.readlines()\n for line in instructions:\n if 0 < len(line.split()) <= 3:\n # Read from file the required information\n try:\n fragment_pdb = line.split()[0]\n core_atom = line.split()[1]\n fragment_atom = line.split()[2]\n ID = \"{}{}{}\".format(os.path.splitext(fragment_pdb)[0], core_atom, fragment_atom)\n task = (fragment_pdb, core_atom, fragment_atom, ID)\n list_of_instructions.append(task)\n except IndexError:\n logger.critical(\"Check that the serie file {} contains: 'PDB_fragment_file'\\t'PDB_core_atom_name'\\t'PDB_fragment_atom_name' \".format(file))\n elif len(line.split()) > 3:\n growing_counter = len(line.split()) / 3\n successive_tasks = []\n for i in range(int(growing_counter)):\n try:\n fragment_pdb = line.split()[i * 3]\n core_atom = line.split()[(i * 3) + 1]\n if \"*\" in core_atom:\n # Get fragment number and remove part of string\n fragment_number = re.findall(r'[*]\\d[*]', core_atom)[0].strip(\"*\")\n core_atom = core_atom.replace(\"*{}*\".format(fragment_number), \"\")\n else:\n fragment_number = None\n fragment_atom = line.split()[(i * 3) + 2]\n ID = \"{}{}{}\".format(os.path.splitext(fragment_pdb)[0], core_atom, fragment_atom)\n task = (fragment_pdb, core_atom, fragment_atom, ID, fragment_number)\n successive_tasks.append(task)\n except IndexError:\n logger.critical(\"Check that the serie file {} contains: 'PDB_fragment_file'\\t'PDB_core_atom_name'\\t'PDB_fragment_atom_name' \".format(file))\n list_of_instructions.append(successive_tasks)\n return list_of_instructions", "def load_text_and_label(data_file):\n # load data from file\n\n # splite by word\n dfRaw = pd.read_csv(data_file)\n dfRec = dfRaw[['Review Text', 'Recommended IND']].dropna()\n pos_examples = dfRec[dfRec['Recommended IND'] == 1]['Review Text'].tolist()\n neg_examples = dfRec[dfRec['Recommended IND'] == 0]['Review Text'].tolist()\n\n x_text = pos_examples + neg_examples\n x_text = np.array([clean_str(sentence) for sentence in x_text])\n # generate label (y)\n pos_labels = [[0,1] for _ in pos_examples]\n neg_labels = [[1,0] for _ in neg_examples]\n y = np.array(pos_labels + neg_labels)\n return [x_text, y]", "def read_data(filename,label=None,preprocessor=space_tokenizer):\n df = pd.read_csv(filename)\n return [preprocessor(string) for string in df['sentences'].values]", "def readSeq(seqFile):\n line = seqFile.readline()\n seq1 = line.rstrip()\n line = seqFile.readline()\n seq2 = line.rstrip()\n return (seq1, seq2)", "def seqs_from_file(filename, exit_on_err=False, return_qual=False):\n # VALIDATE INPUT\n if not isinstance(filename, str):\n msg = 'Filename has to be a string.'\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n 
sys.exit(1)\n else: raise IOError(msg)\n if not os.path.exists(filename):\n msg = 'File \"%s\" does not exist.'%filename\n if exit_on_err:\n sys.stderr.write('Error: %s\\n'%msg)\n sys.exit(1)\n else: raise IOError(msg)\n \n # EXTRACT DATA\n with open_(filename,\"rt\") as f:\n query_seq_segments = []\n seq, name, desc, qual = '', '', '', ''\n add_segment = query_seq_segments.append\n for l in f:\n if len(l.strip()) == 0: continue\n #sys.stderr.write(\"%s\\n\"%line)\n fields=l.strip().split()\n if l.startswith(\">\"):\n # FASTA HEADER FOUND\n if query_seq_segments != []:\n # YIELD SEQUENCE AND RESET\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)\n seq, name, desc = '', '', ''\n del query_seq_segments[:]\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n \n elif l.startswith(\"@\"):\n # FASTQ HEADER FOUND\n name = fields[0][1:]\n desc = ' '.join(fields[1:])\n try:\n # EXTRACT FASTQ SEQUENCE\n seq = next(f).strip().split()[0]\n # SKIP SECOND HEADER LINE AND QUALITY SCORES\n l = next(f)\n qual = next(f).strip() # Qualities\n except:\n break\n else:\n # YIELD SEQUENCE AND RESET\n if return_qual:\n yield (seq, qual, name, desc)\n else:\n yield (seq, name, desc)\n seq, name, desc, qual = '', '', '', ''\n \n elif len(fields[0])>0:\n # EXTRACT FASTA SEQUENCE\n add_segment(fields[0])\n \n # CHECK FOR LAST FASTA SEQUENCE\n if query_seq_segments != []:\n # YIELD SEQUENCE\n seq = ''.join(query_seq_segments)\n yield (seq, name, desc)", "def read_examples(input_file):\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n \n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n \n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.split()\n words, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in words], tags)", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex", "def import_data(in_file):\n\n print '\\n\\tImport data'\n sentence = []\n concept = []\n sentences = []\n concepts = []\n for line in open(in_file, 'r'):\n if line != '\\n':\n sentence += [ line.split()[0] ]\n concept += [ line.split()[1] ]\n else:\n sentences += [ sentence ]\n concepts += [ concept ]\n sentence = [ ]\n concept = [ ]\n pos = []\n lemma = []\n poss = []\n lemmas = []\n for line in open(in_file.replace('.data', '.feats.txt'), 'r'):\n if line != '\\n':\n pos += [ line.split()[ 1 ] ]\n lemma += [ line.split()[ 2 ] ]\n else:\n poss += [ pos ]\n lemmas += [ lemma ]\n pos = [ ]\n lemma = [ ]\n print '\\t--done'\n return sentences, poss, lemmas, concepts", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = 
open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]" ]
[ "0.67697734", "0.6637289", "0.65705514", "0.65653694", "0.6479006", "0.64705706", "0.64496464", "0.6351888", "0.6281873", "0.6273862", "0.62096", "0.6180452", "0.6150285", "0.6150257", "0.61439294", "0.612267", "0.6109716", "0.6100851", "0.60979813", "0.60872257", "0.60840577", "0.6075221", "0.6075019", "0.60727876", "0.6065708", "0.6061248", "0.6037546", "0.60311437", "0.5999951", "0.5987098", "0.5982444", "0.59824085", "0.59764224", "0.59704494", "0.5956626", "0.5934553", "0.59218186", "0.5921218", "0.5914034", "0.58930254", "0.5885361", "0.58786905", "0.5875389", "0.58729345", "0.58620673", "0.58611256", "0.5844712", "0.5835011", "0.5821796", "0.5821321", "0.5816276", "0.5809211", "0.5803001", "0.5793592", "0.5786031", "0.5784276", "0.5780121", "0.57772523", "0.5768225", "0.5765742", "0.57651335", "0.5764105", "0.57627517", "0.57627517", "0.5755722", "0.5751623", "0.57502407", "0.57461315", "0.5739474", "0.57363266", "0.57232696", "0.57224053", "0.57195306", "0.5705683", "0.57054925", "0.5696607", "0.5694241", "0.5684552", "0.56828994", "0.5667301", "0.56641597", "0.5649403", "0.56385607", "0.56354547", "0.5621132", "0.56196684", "0.5617375", "0.5612839", "0.5610272", "0.56075364", "0.56008697", "0.55911195", "0.55881125", "0.5586902", "0.5586154", "0.55807084", "0.5580572", "0.5572477", "0.55684704", "0.55681103" ]
0.6663826
1
Parametrize tests for pytest to use a
def pytest_generate_tests_for_pyopencl_array_context(metafunc) -> None:
    from warnings import warn
    warn("pytest_generate_tests_for_pyopencl_array_context is deprecated. "
            "Use 'pytest_generate_tests = "
            "arraycontext.pytest_generate_tests_for_array_contexts"
            "([\"pyopencl-deprecated\"])' instead. "
            "pytest_generate_tests_for_pyopencl_array_context will stop working "
            "in 2022.",
            DeprecationWarning, stacklevel=2)

    pytest_generate_tests_for_array_contexts([
            "pyopencl-deprecated",
            ], factory_arg_name="actx_factory")(metafunc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_generate_tests(metafunc):\n for param in ['env', 'browser', 'logging_level', 'env_file', 'name', 'jenkins_url', 'slack', 'output', 'email_retries',\n 'email_search_errors']:\n option_value = getattr(metafunc.config.option, param)\n if param in metafunc.fixturenames:\n metafunc.parametrize(param, [option_value], scope='session')", "def pytest_generate_tests(metafunc):\n if \"retrospective\" in metafunc.fixturenames:\n metafunc.parametrize(\"retrospective\", [False, True])\n if \"test_type\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_type\", [FILES_TEST, STATE_TEST])\n if \"raise_error\" in metafunc.fixturenames:\n metafunc.parametrize(\"raise_error\", [False, True])", "def test_by_variable():\n pass", "def pytest_generate_tests(metafunc):\n if \"size1\" in metafunc.fixturenames and \"size2\" in metafunc.fixturenames:\n metafunc.parametrize(\n [\"size1\", \"size2\"], itertools.product([1, 4], [2, 8]))\n if \"lines\" in metafunc.fixturenames:\n metafunc.parametrize(\"lines\", [[], [\"line1\"], [\"line1\", \"line2\"]])", "def pytest_can_run_together(item1, item2):", "def pytest_generate_tests(metafunc):\n from datastructures.tests._test_trees_data import \\\n ids, \\\n inputs, \\\n expected_list, \\\n expected_items_list, \\\n expected_tree, \\\n expected_items_tree, \\\n expected_len, \\\n expected_valid_BST, \\\n shuffled_inputs, \\\n is_equal\n\n if 'get_test_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_list_data',\n list(zip(inputs, expected_list)),\n ids=ids)\n\n if 'get_test_items_as_list_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_list_data',\n list(zip(inputs, expected_items_list)),\n ids=ids)\n\n if 'get_test_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_as_tree_data',\n list(zip(inputs, expected_tree)),\n ids=ids)\n\n if 'get_test_items_as_tree_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_items_as_tree_data',\n list(zip(inputs, expected_items_tree)),\n ids=ids)\n\n if 'get_test_len_data' in metafunc.fixturenames:\n metafunc.parametrize('get_test_len_data',\n list(zip(inputs, expected_len)),\n ids=ids)\n\n if 'get_test_valid_BST_glassbox' in metafunc.fixturenames:\n metafunc.parametrize('get_test_valid_BST_glassbox',\n list(zip(inputs, expected_valid_BST)),\n ids=ids)\n\n if 'get_test_eq' in metafunc.fixturenames:\n metafunc.parametrize('get_test_eq',\n list(zip(inputs, shuffled_inputs, is_equal)),\n ids=ids)", "def test_T1():", "def test_T1():", "def pytest_generate_tests(metafunc):\n if \"maptype\" in metafunc.fixturenames:\n metafunc.parametrize(\"maptype\", ALL_ATTMAPS)", "def test_data_in_param(self):", "def pytest_generate_tests(metafunc):\n if \"worker_type\" in metafunc.fixturenames:\n test_params = [[\"thread\", 1, 1], [\"thread\", 2, 2]]\n # if the OS is not Windows / OS X and python version > 2.7 then also do the multiprocess workers testing.\n if platform.system() not in [\"Windows\", \"Darwin\"] and sys.version_info >= (\n 2,\n 7,\n ):\n test_params.extend([[\"process\", 1, 1], [\"process\", 2, 2]])\n\n metafunc.parametrize(\n \"worker_type, workers_count, worker_sessions_count\", test_params\n )", "def test_something():", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n 
return func", "def setup_fixtures(func):\n func = pytest.mark.usefixtures('smtp', 'mock_access_request', 'dummy_access_request')(func)\n func = pytest.mark.parametrize('mock_access_request',\n [{\n 'during_registration': True,\n 'during_registration_required': True,\n 'personal_data': PERSONAL_DATA\n }],\n indirect=True)(func)\n return func", "def pytest_generate_tests(self, metafunc):\n\n # function for pretty test name\n def id_func(x):\n return \"-\".join([f\"{k}={v}\" for k, v in x.items()])\n\n # get arguments for the test function\n funcarglist = metafunc.cls.params.get(metafunc.function.__name__, None)\n if funcarglist is None:\n return\n else:\n # equivalent of pytest.mark.parametrize applied on the metafunction\n metafunc.parametrize(\"fields\", funcarglist, ids=id_func)", "def tests():", "def pytest_generate_tests(metafunc):\n\n # test is setup or teardown - parametrize to all scenarios\n if metafunc.function.__name__ in [\"test_setup\", \"test_teardown\"]:\n metafunc.parametrize(\n \"scenario\", Scenario.scenarios.values())\n\n # parameterize test for each scenario it is included in\n else:\n metafunc.parametrize(\n \"scenario\", metafunc.cls._scenarios)", "def test_basic_execution(self):", "def pytest_generate_tests(metafunc):\n\t\n\tif not metafunc.cls:\n\t\treturn\n\t\n\tinst = metafunc.cls()\n\t\n\tif 'valid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('valid', inst.valid)\n\t\n\tif 'invalid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('invalid', inst.invalid)", "def test_T2():", "def test_T2():", "def spec_tests():\n pass", "def pytest_generate_tests_abstract(metafunc):\n if 'content' in metafunc.fixturenames:\n content = getattr(metafunc.function, '_content', None)\n if isinstance(content, list):\n metafunc.parametrize('content', [content])\n else:\n metafunc.parametrize('content', [[]])", "def inner_test(param: int):\n self.assertEqual(param, 42)", "def test_T01():", "def test_a():\n foo_do(4)\n foo_do(\"hello\")\n bar_do([1,2,3])", "def pytest_generate_tests(metafunc):\n def get_schema_name(schema_path):\n \"\"\"Helper function to return the informative part of a schema path\"\"\"\n print(schema_path)\n path = os.path.normpath(schema_path)\n return os.path.sep.join(path.split(os.path.sep)[-3:])\n\n def create_schema_example_id(argval):\n \"\"\"Helper function to create test ID for schema example validation\"\"\"\n if argval[0] == '/':\n # ID for the first argument is just the schema name\n return get_schema_name(argval)\n else:\n # This will cause pytest to create labels of the form:\n # SCHEMA_NAME-example\n # If there are multiple examples within a single schema, the\n # examples will be numbered automatically to distinguish them\n return \"example\"\n\n if metafunc.function is test_validate_schema:\n metafunc.parametrize(\n 'schema_path',\n generate_schema_list(),\n # just use the schema name as a test ID instead of full path\n ids=get_schema_name)\n elif metafunc.function is test_schema_example:\n metafunc.parametrize(\n 'filename,example',\n generate_example_schemas(),\n ids=create_schema_example_id)", "def inner_test(param: str):\n pass", "def test_1():", "def test_arguments(arg=TestClass): # [used-before-assignment]\n return arg", "def pytest_configure(config):\n config._metadata['Project Name'] = 'nop Commerce'\n config._metadata['Module Name'] = 'Customers'\n config._metadata['Tester'] = 'Tester'", "def parametrize_from(\n data: List[Tuple[str, str, Any]]\n) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n return mark.parametrize(\n 
[\"definition\", \"expected\"],\n [param(definition, expected, id=key) for key, definition, expected in data],\n )", "def test_dummy():", "def pytest_generate_tests(metafunc):\n if (\"solver\" in metafunc.fixturenames\n and \"coefficients\" in metafunc.fixturenames):\n _parametrize_solver_coefficients(metafunc)", "def parametrize(testcase_class, param=None):\n test_loader = unittest.TestLoader()\n test_names = test_loader.getTestCaseNames(testcase_class)\n suite = unittest.TestSuite()\n for name in test_names:\n suite.addTest(testcase_class(name, param))\n return suite", "def parametrize(testcase_class, param=None):\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_class)\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest(testcase_class(name, param=param))\n return suite", "def test_unnamed_parameterized_testcase(self):\n\n class ParameterizedTest(parameterized.TestCase):\n\n @parameterized.parameters(('a (b.c)',))\n def test_prefix(self, case):\n self.assertTrue(case.startswith('a'))\n\n start_time = 1000\n end_time = 1200\n result = self._make_result((start_time, start_time, end_time, end_time))\n test = ParameterizedTest(methodName='test_prefix0')\n result.startTestRun()\n result.startTest(test)\n result.addSuccess(test)\n result.stopTest(test)\n result.stopTestRun()\n result.printErrors()\n\n start_time_str = re.escape(self._iso_timestamp(start_time))\n run_time = end_time - start_time\n classname = xml_reporter._escape_xml_attr(\n unittest.util.strclass(test.__class__))\n expected_re = OUTPUT_STRING % {\n 'suite_name': 'ParameterizedTest',\n 'tests': 1,\n 'failures': 0,\n 'errors': 0,\n 'run_time': run_time,\n 'start_time': start_time_str,\n }\n expected_testcase_re = TESTCASE_STRING % {\n 'run_time': run_time,\n 'start_time': start_time_str,\n 'test_name': re.escape('test_prefix0&#x20;(&apos;a&#x20;(b.c)&apos;)'),\n 'classname': classname,\n 'status': 'run',\n 'result': 'completed',\n 'attributes': '',\n 'message': ''\n }\n (testcase,) = self._assert_match(expected_re, self.xml_stream.getvalue(),\n re.DOTALL)\n self._assert_match(expected_testcase_re, testcase)", "def test_T3():", "def test_T3():", "def test_test_case_name_step(self, pytester: pytest.Pytester, adaptavist_mock: AdaptavistMock):\n pytester.makepyfile(\n \"\"\"\n import pytest\n\n def test_TEST_T123_1():\n assert True\n \"\"\"\n )\n _, _, etss = adaptavist_mock\n pytester.runpytest(\"--adaptavist\")\n etss.assert_called_once_with(\n test_run_key=\"TEST-C1\",\n test_case_key=\"TEST-T123\",\n step=1,\n status=\"Pass\",\n comment=\"\",\n environment=None,\n executor=getpass.getuser().lower(),\n assignee=getpass.getuser().lower(),\n )", "def test_let(self):", "def pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"serial: Tests that will not execute with more than 1 MPI process\")\n config.addinivalue_line(\"markers\",\n \"gpu: Tests that should only run on the gpu.\")\n config.addinivalue_line(\n \"markers\",\n \"cupy_optional: tests that should pass with and without CuPy.\")\n config.addinivalue_line(\"markers\", \"cpu: Tests that only run on the CPU.\")\n config.addinivalue_line(\"markers\", \"gpu: Tests that only run on the GPU.\")", "def inner_test(param: int = 5):\n self.assertEqual(param, 5)", "def pytest_generate_tests(metafunc):\n testcases_file = metafunc.config._nrfu['testcase_dir'].joinpath(\n 'testcases-cabling.json')\n\n metafunc.parametrize('testcase',\n json.load(testcases_file.open()),\n ids=nrfu.name_test)", "def 
InjectTests(cls):\n param_test_names = [\n name for name in dir(cls) if name.startswith(\"ParamTest\")\n ]\n VERSIONS = (4, 6)\n TYPES = (SOCK_DGRAM, SOCK_STREAM)\n\n # Tests all combinations of auth & crypt. Mutually exclusive with aead.\n for crypt, auth, version, proto, name in itertools.product(\n CRYPT_ALGOS, AUTH_ALGOS, VERSIONS, TYPES, param_test_names):\n XfrmAlgorithmTest.InjectSingleTest(name, version, proto, crypt=crypt, auth=auth)\n\n # Tests all combinations of aead. Mutually exclusive with auth/crypt.\n for aead, version, proto, name in itertools.product(\n AEAD_ALGOS, VERSIONS, TYPES, param_test_names):\n XfrmAlgorithmTest.InjectSingleTest(name, version, proto, aead=aead)", "def test_test_group_parameters(self):\n pass", "def case(*ids):\n return pytest.mark.testrail(ids=ids)", "def inner_test(param: int):\n self.assertEqual(param, 15)", "def unitary_test():", "def pytest_generate_tests(metafunc):\n if \"expected_failure\" in metafunc.fixturenames:\n modpath = os.path.dirname(metafunc.module.__file__)\n pattern = os.path.join(modpath, \"RST???\", \"*.py\")\n metafunc.parametrize(\n \"expected_failure\",\n [os.path.relpath(p, modpath) for p in sorted(glob.glob(pattern))],\n )", "def test_3():", "def test_alien_data(self):", "def test_2():", "def setUp(self):\n data_types.Job(\n name='job1', environment_string=JOB1_ENVIRONMENT,\n platform='linux').put()\n\n data_types.Job(\n name='job2', environment_string=JOB2_ENVIRONMENT,\n platform='linux').put()\n\n data_types.Job(\n name='job3', environment_string=JOB3_ENVIRONMENT,\n platform='linux').put()\n\n data_types.Job(\n name='job4', environment_string=JOB4_ENVIRONMENT,\n platform='linux').put()\n\n testcase_args1 = {\n 'bug_information': '300',\n }\n\n testcase_args2 = {\n 'bug_information': '300',\n 'github_repo_id': GITHUB_REPO_ID,\n 'github_issue_num': GITHUB_ISSUE_NUM,\n }\n\n self.testcase1 = data_types.Testcase(job_type='job1', **testcase_args1)\n self.testcase1.put()\n\n self.testcase2 = data_types.Testcase(job_type='job2', **testcase_args1)\n self.testcase2.put()\n\n self.testcase3 = data_types.Testcase(job_type='job3', **testcase_args1)\n self.testcase3.put()\n\n self.testcase4 = data_types.Testcase(job_type='job1', **testcase_args2)\n self.testcase4.put()\n\n self.testcase5 = data_types.Testcase(job_type='job4', **testcase_args1)\n self.testcase5.put()\n\n test_helpers.patch(self, [\n 'clusterfuzz._internal.config.db_config.get_value',\n ])\n self.mock.get_value.return_value = GITHUB_ACCESS_TOKEN", "def testApi(self):", "def initTest(self, myargs):\n return", "def pytest(context):\n exec_cmd = \"pytest\"\n run_cmd(context, exec_cmd)", "def pytest_generate_tests(metafunc):\n # Launch EC2 mocking and env preparation\n mock_sqs.start()\n mock_sqs.create_env(queues, region)\n\n account = Account(region=region)\n\n checker = SQSPolicyChecker(account)\n checker.check()\n\n for sqs_queue in checker.queues:\n sqs_queue.restrict_policy()\n\n checker_remediated = SQSPolicyChecker(account)\n checker_remediated.check()\n\n sqs_queues = [(queue, False) for queue in checker.queues]\n sqs_queues += [(queue, True) for queue in checker_remediated.queues]\n\n # create test cases for each response\n metafunc.parametrize(\"queue,remediated\", sqs_queues, ids=ident_test)", "def pytest_generate_tests(metafunc):\n parent_conftest.pytest_generate_tests(metafunc, __file__)", "def test_decorated(*args):\n for i in args:\n yield i", "def test_get_scenarios(self):\n pass", "def inner_test(param: str):\n self.assertEqual(param, 'Test string.')", 
"def inner_test(param: int = 14):\n self.assertEqual(param, 14)", "def inner_test(param: dict):\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def parametrize(testcase_klass, param=None):\n\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_klass)\n suite = unittest.TestSuite()\n\n for name in testnames:\n suite.addTest(testcase_klass(name, param=param))\n return suite", "def test_generate_all_testing(self):\n pass", "def parametrize(testcase_klass, param=None):\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_klass)\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest(testcase_klass(name, param=param))\n return suite", "def parametrize(testcase_klass, param=None):\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_klass)\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest(testcase_klass(name, param=param))\n return suite", "def parametrize(testcase_klass, param=None):\n testloader = unittest.TestLoader()\n testnames = testloader.getTestCaseNames(testcase_klass)\n suite = unittest.TestSuite()\n for name in testnames:\n suite.addTest(testcase_klass(name, param=param))\n return suite", "def test_vargs(self):", "def test_main():\n # Setup\n # Exercise\n # Verify", "def inner_test(param: int):\n pass", "def inner_test(param: int):\n pass", "def inner_test(param: int):\n pass", "def inner_test(param: dict):\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})", "def test_5():", "def test_pytest():\n assert True", "def test_pytest():\n assert True", "def test_get_context_parameter(params, expected):\n assert get_context_parameter(params) == expected", "def test_single_test_case():\n pass", "def test_T4():", "def test_T4():", "def setUp(self):\n\n if self.id().split('.')[-1] == 'test_singleLine':\n self.testParams = [\n {'csvfile': 'F',\n 'expectedLinkograph': [({'F'},set(),set())],\n 'expectedLabels': ['F']},\n {'csvfile': 'F ',\n 'expectedLinkograph': [({'F'},set(),set())],\n 'expectedLabels': ['F']}]\n\n elif self.id().split('.')[-1] == 'test_twoLines':\n self.testParams = [\n {'csvfile': 'F,\\nBs',\n 'expectedLinkograph': [({'F'},set(),set()),\n ({'Bs'}, set(), set())],\n 'expectedLabels': ['Bs', 'F']},\n {'csvfile': 'F,1\\nBs',\n 'expectedLinkograph': [({'F'},set(),{1}),\n ({'Bs'}, {0}, set())],\n 'expectedLabels': ['Bs', 'F']}]\n\n elif self.id().split('.')[-1] == 'test_threeLines':\n self.testParams = [\n {'csvfile': 'F\\nBs,2\\nBe',\n 'expectedLinkograph': [({'F'},set(),set()),\n ({'Bs'},set(),{2}),\n ({'Be'},{1},set())],\n 'expectedLabels': ['Be', 'Bs', 'F']},\n {'csvfile': 'F,1,2\\nBs,2\\nBe',\n 'expectedLinkograph': [({'F'},set(),{1,2}),\n ({'Bs'}, {0}, {2}),\n ({'Be'}, {0,1}, set())],\n 'expectedLabels': ['Be', 'Bs', 'F']}]", "def pytest_generate_tests(metafunc):\n if 'browser' in metafunc.fixturenames:\n if os.environ.get('E2E', 'no').lower() != 'yes':\n pytest.skip(\n \"End-to-end tests skipped because E2E environment variable \"\n \"was not set to 'yes'.\")\n\n # Parameterize test based on list of browsers.\n browsers = os.environ.get('E2E_WEBDRIVER_BROWSERS', 'Chrome').split()\n metafunc.parametrize('browser', browsers, indirect=True)", "def pytest_generate_tests(metafunc):\n if not \"bpftrace_condition\" in metafunc.fixturenames:\n raise RuntimeError(\"Invalid test case.\")\n spec_file = metafunc.config.option.spec_file\n spec_dir = 
os.path.dirname(os.path.realpath(spec_file.name))\n spec = json.load(spec_file)\n conditions = []\n # Generate a list of conditions to evaluate\n for test_case in spec['cases']:\n bpftrace_vars = collect_test_results(test_case, spec_dir)\n for condition in test_case['conditions']:\n conditions.append((test_case['name'], condition, bpftrace_vars))\n\n # Parameterize the conditions so that the test function gets run for each condition\n # We also set the ids of the functions to be \"name: condition\" for better reporting\n metafunc.parametrize(\"bpftrace_condition\", conditions, ids=map(\n lambda c: f\"{c[0]}: {c[1]}\", conditions))", "def test():\n\t\treturn [\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def test_require():", "def test_method(self):", "def test_compare(self):", "def setUp(self):\n\n if self.id().split('.')[-1] == 'test_checkLinkoStructure':\n self.testParams = [\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, set(), {2}),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {1: ({0}, set())\n }\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, set()),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {1: (set(), {2})\n }\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': True,\n 'expectedErrors': {}\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2, 5}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, 
set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {\n 'missing': {5},\n 5: ({0}, set())\n }\n },\n ]", "def test(self, func):\r\n @wraps(func)\r\n def wrapper():\r\n with nested(self._contexts) as context:\r\n context = [c for c in context if c is not None]\r\n argc = len(inspect.getargspec(func)[0])\r\n args = []\r\n for arg in context:\r\n if type(arg) is tuple: # type() is intentional\r\n args.extend(arg)\r\n else:\r\n args.append(arg)\r\n func(*args[:argc])\r\n wrapper.__wrapped__ = func\r\n self._tests.append(wrapper)\r\n if self.replace_tests:\r\n return wrapper\r\n return func", "def test_run_all_searches(): # ***Incomplete test\n ##########################\n # Arrange.\n query_file_list = \"query_file_list\"\n\n ##########################\n # Act.\n #x = run_all_searches(query_file_list)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def test_function(arg_1):\n return arg_1 * 2", "def test_argparser():\n for template in templates:\n argv = [template] + list(example_xyz_files)\n\n parser = argparser()\n parser.parse_args(argv)", "def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"format_sql: mark format_sql tests.\")", "def test_filter():\n with pytest.raises(TypeError):\n Filter(description=\"some description\")", "def parameterized_test_case(cls):\n tests_to_remove = []\n tests_to_add = []\n for key, val in vars(cls).items():\n # Only process tests with build data on them\n if key.startswith('test_') and val.__dict__.get('build_data'):\n to_remove, to_add = process_parameterized_function(\n name=key,\n func_obj=val,\n build_data=val.__dict__.get('build_data')\n )\n tests_to_remove.extend(to_remove)\n tests_to_add.extend(to_add)\n\n # Add all new test functions\n [setattr(cls, name, func) for name, func in tests_to_add]\n\n # Remove all old test function templates (if they still exist)\n [delattr(cls, key) for key in tests_to_remove if hasattr(cls, key)]\n return cls", "def setUp(self):\n self.myfuncdesc = \"casper.demo.module.a_function_to_wrap\"\n self.mycloth = \"casper.demo.module.clothing\"\n self.mypipedesc = \"casper.demo.pipeline.xml\"\n self.myclothingdesc = \"casper.demo.clothing_pipeline.xml\"\n self.mypipexmldesc = \"casper.demo.xml_pipeline.xml\"\n self.mypyramiddesc = \"casper.demo.pyramid_pipeline.xml\"\n self.myswitchdesc = \"casper.demo.switch_pipeline.xml\"\n self.myiterativedesc = \"casper.demo.iterative_pipeline.xml\"\n self.myfile = os.path.abspath(__file__)\n self.mydir = os.path.dirname(self.myfile)", "def test_method(self, test, another_test, _): # noqa: D213, D407", "def test(func):\n register_tests(func, [func.__name__])" ]
[ "0.69219553", "0.68522805", "0.68168706", "0.6790184", "0.6683413", "0.6502182", "0.6483538", "0.6483538", "0.64723897", "0.6470273", "0.6441867", "0.63857716", "0.63678086", "0.63678086", "0.6332544", "0.62898874", "0.6277179", "0.62686515", "0.62585187", "0.62364167", "0.62364167", "0.6201438", "0.6188454", "0.6177081", "0.6124225", "0.61099887", "0.61097705", "0.60964096", "0.6085973", "0.6077167", "0.6062619", "0.60607344", "0.6039531", "0.60341096", "0.6032715", "0.6029468", "0.6017291", "0.6013932", "0.6013932", "0.59870505", "0.59858936", "0.5971755", "0.59643227", "0.59159714", "0.59086335", "0.5897647", "0.5887831", "0.5867182", "0.58449006", "0.58404595", "0.5839963", "0.5839215", "0.58388036", "0.5837982", "0.5836444", "0.58329946", "0.5832656", "0.5832248", "0.58203053", "0.5819221", "0.5806274", "0.5802379", "0.57907224", "0.5779849", "0.57696193", "0.5769267", "0.57639015", "0.5743002", "0.5743002", "0.5743002", "0.57407844", "0.57217884", "0.57195204", "0.57195204", "0.57195204", "0.57055074", "0.56994545", "0.5698815", "0.5698815", "0.5697992", "0.56719065", "0.56546223", "0.56546223", "0.5652865", "0.56525695", "0.5647086", "0.5640815", "0.56369954", "0.56357735", "0.5634455", "0.56242764", "0.56118566", "0.5609555", "0.55925035", "0.5587494", "0.5586308", "0.55848753", "0.55798614", "0.55780154", "0.55779207", "0.5566047" ]
0.0
-1