Update CNN.py: added map_location=torch.device('cpu')
@@ -61,7 +61,7 @@ class CNN(nn.Module):
         with open(os.path.join(input_path, 'cnn_config.json'), 'r') as fIn:
             config = json.load(fIn)
-        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
+        weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu'))
         model = CNN(**config)
         model.load_state_dict(weights)
         return model
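A minimal standalone sketch of the behavior this commit relies on (the file name here is illustrative, not the repository's code): without `map_location`, a checkpoint saved on a GPU machine fails to load on a CPU-only host.

```python
import torch

# Save a tiny state dict, then load it back forcing all storages onto
# the CPU -- this is what lets a CUDA-trained checkpoint load on a
# CPU-only machine instead of raising a deserialization error.
torch.save({"w": torch.zeros(2, 2)}, "pytorch_model.bin")
weights = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
print(weights["w"].device)  # cpu
```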
Fix test_clients_monasca failure: 'cafile', 'certfile', 'keyfile' and 'insecure' need a mock override. Closes-Bug:
@@ -65,6 +65,10 @@ class TestClients(base.TestCase):
         expected = {'username': 'foousername',
                     'password': 'foopassword',
                     'auth_url': 'http://server.ip:35357',
+                    'cafile': None,
+                    'certfile': None,
+                    'keyfile': None,
+                    'insecure': False,
                     'user_domain_id': 'foouserdomainid',
                     'project_domain_id': 'fooprojdomainid'}
docs: Use term operation instead of openapi in generate_curl_example. The term operation makes more sense than openapi: the OpenAPI spec defines a unique operation as a combination of a path and an HTTP method.
@@ -158,17 +158,18 @@ def generate_curl_example(endpoint: str, method: str,
         raise AssertionError("exclude and include cannot be set at the same time.")
 
     lines = ["```curl"]
-    openapi_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
-    openapi_params = openapi_entry.get("parameters", [])
-    openapi_request_body = openapi_entry.get("requestBody", None)
+    operation = endpoint + ":" + method.lower()
+    operation_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
+    operation_params = operation_entry.get("parameters", [])
+    operation_request_body = operation_entry.get("requestBody", None)
 
     if settings.RUNNING_OPENAPI_CURL_TEST:  # nocoverage
         from zerver.openapi.curl_param_value_generators import patch_openapi_example_values
-        openapi_params, openapi_request_body = patch_openapi_example_values(endpoint + ":" + method.lower(),
-                                                                            openapi_params, openapi_request_body)
+        operation_params, operation_request_body = patch_openapi_example_values(operation, operation_params,
+                                                                                operation_request_body)
 
     format_dict = {}
-    for param in openapi_params:
+    for param in operation_params:
         if param["in"] != "path":
             continue
         example_value = get_openapi_param_example_value_as_string(endpoint, method, param)
@@ -179,11 +180,11 @@ def generate_curl_example(endpoint: str, method: str,
                                             api_url)
     lines.append(" ".join(curl_first_line_parts))
 
-    authentication_required = openapi_entry.get("security", False)
+    authentication_required = operation_entry.get("security", False)
     if authentication_required:
         lines.append(" -u %s:%s" % (auth_email, auth_api_key))
 
-    for param in openapi_params:
+    for param in operation_params:
         if param["in"] == "path":
             continue
         param_name = param["name"]
@@ -198,8 +199,8 @@ def generate_curl_example(endpoint: str, method: str,
                                                                curl_argument=True)
             lines.append(example_value)
 
-    if "requestBody" in openapi_entry:
-        properties = openapi_entry["requestBody"]["content"]["multipart/form-data"]["schema"]["properties"]
+    if "requestBody" in operation_entry:
+        properties = operation_entry["requestBody"]["content"]["multipart/form-data"]["schema"]["properties"]
         for key, property in properties.items():
             lines.append(' -F "{}=@{}"'.format(key, property["example"]))
Pin torchvision version. Summary: Pull Request resolved: ghimport-source-id:
@@ -146,6 +146,7 @@ test_torchvision() {
   # PyTorch CI
   git clone https://github.com/pytorch/vision --quiet
   pushd vision
+  git checkout 2f64dd90e14fe5463b4e5bd152d56e4a6f0419de
   # python setup.py install with a tqdm dependency is broken in the
   # Travis Python nightly (but not in latest Python nightlies, so
   # this should be a transient requirement...)
Update generic.txt > ```cobaltstrike-1.txt```
@@ -10809,12 +10809,6 @@ http://151.80.220.125
 
 tennysondonehue.com
 
-# Reference: https://www.virustotal.com/gui/ip-address/104.207.140.218/relations
-# Reference: https://www.virustotal.com/gui/file/0906273884fdd14dfc89eea5c252fd46d5fcd000692e4af7e258048b5588b4d0/detection
-
-us-system3.com
-us-system89.com
-
 # Reference: https://twitter.com/FewAtoms/status/1326222282075811840
 
 hechiceriadeamoryprosperidadisrael.com/imagenes/amarres/
Allow developers to choose attachments

* Allow developers to choose attachments

This change allows developers to choose attachments, even if there is a cur_frm object.

* Update communication.js
* Merge this.attachments and form attachments
* fix codacy
@@ -354,11 +354,14 @@ frappe.views.CommunicationComposer = Class.extend({
		var fields = this.dialog.fields_dict;
		var attach = $(fields.select_attachments.wrapper).find(".attach-list").empty();
 
+		var files = [];
+		if (this.attachments && this.attachments.length) {
+			files = files.concat(this.attachments);
+		}
		if (cur_frm) {
-			var files = cur_frm.get_files();
-		}else {
-			var files = this.attachments
+			files = files.concat(cur_frm.get_files());
		}
+
		if(files.length) {
			$.each(files, function(i, f) {
				if (!f.file_name) return;
Update gcloud.sh: fix "$1: unbound variable". Removed while loop on empty $1.
 set -euo pipefail
 
+SETTING=${1:-""}
 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
 # Configure python path
@@ -47,8 +48,7 @@ function main {
   popd
 }
 
-while [ "$1" != "" ]; do
-  case $1 in
+case $SETTING in
     --teardown | --revert )
         shift
         teardown "$@"
@@ -59,4 +59,3 @@ while [ "$1" != "" ]; do
         exit 0
         ;;
 esac
-done
User Guide: don't link to tigris any more [ci skip] Troubleshooting section had a remaining link to tigris.org. Now points to scons website - not to github, because we don't encourage directly filing a bug before first discussing.
 odds are pretty good that someone else will run into
 the same problem, too.
 If so, please let the SCons development team know
-(preferably by filing a bug report
-or feature request at our project pages at tigris.org)
+using the contact information at
+<ulink url="https://scons.org/contact.html"/>
 so that we can use your feedback
 to try to come up with a better way to help you,
 and others, get the necessary insight into &SCons; behavior
Update DiyServo.py Updated so that pins are set before attaching
@@ -19,8 +19,8 @@ arduino.connect(port)
 # Start the MotorDualPwm. You can use also use a different type of Motor
 motor = Runtime.start("diyservo.motor","MotorDualPwm")
 # Tell the motor to attach to the Arduino and what pins to use
-motor.attach(arduino)
 motor.setPwmPins(10,11)
+motor.attach(arduino)
 # Start the DiyServo
 servo = Runtime.start("diyservo","DiyServo")
 servo.attach(arduino,A0) # Attach the analog pin 0
dep-update: bump deployment.updated_at Apparently this doesn't happen automatically!
 # * limitations under the License.
 
 import uuid
+from datetime import datetime
 
 from flask import request
 from flask_restful_swagger import swagger
@@ -284,6 +285,8 @@ class DeploymentUpdateId(SecuredResource):
         if params.get('node_instances'):
             dep_upd.deployment_update_node_instances = \
                 params['node_instances']
+        if dep_upd.state == STATES.SUCCESSFUL and not dep_upd.preview:
+            dep_upd.deployment.updated_at = datetime.utcnow()
         return dep_upd
Fix flush sync wrappers. Call completion function in case of allocation error.
@@ -245,6 +245,13 @@ static void _cache_mngt_cache_flush_complete(ocf_cache_t cache, void *priv,
	kfree(context);
 }
 
+/*
+ * Possible return values:
+ * 0 - completion was called and operation succeeded
+ * -KCAS_ERR_WAITING_INTERRUPTED - operation was canceled, caller must
+ *	propagate error, completion will be called asynchronously
+ * other values - completion was called and operation failed
+ */
 static int _cache_mngt_cache_flush_sync(ocf_cache_t cache, bool interruption,
		void (*compl)(ocf_cache_t cache))
 {
@@ -253,8 +260,11 @@ static int _cache_mngt_cache_flush_sync(ocf_cache_t cache, bool interruption,
	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
 
	context = kmalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
+	if (!context) {
+		if (compl)
+			compl(cache);
		return -ENOMEM;
+	}
 
	_cache_mngt_async_context_init(context);
	context->compl_func = compl;
@@ -291,6 +301,13 @@ static void _cache_mngt_core_flush_complete(ocf_core_t core, void *priv,
	kfree(context);
 }
 
+/*
+ * Possible return values:
+ * 0 - completion was called and operation succeeded
+ * -KCAS_ERR_WAITING_INTERRUPTED - operation was canceled, caller must
+ *	propagate error, completion will be called asynchronously
+ * other values - completion was called and operation failed
+ */
 static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption,
		void (*compl)(ocf_cache_t cache))
 {
@@ -300,8 +317,11 @@ static int _cache_mngt_core_flush_sync(ocf_core_t core, bool interruption,
	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
 
	context = kmalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
+	if (!context) {
+		if (compl)
+			compl(cache);
		return -ENOMEM;
+	}
 
	_cache_mngt_async_context_init(context);
	context->compl_func = compl;
Removed paired option. ATAC-seq natively supports only paired-end sequencing.
@@ -39,7 +39,6 @@ onstart:
     if "verbose" in config and config["verbose"]:
         print("--- Workflow parameters --------------------------------------------------------")
         print("samples:", samples)
-        print("paired:", paired)
         print("ATAC fragment cutoff: ", atac_fragment_cutoff)
         print("-" * 80, "\n")
Adalog: fix a memory leak in N_Propagate equations TN:
@@ -1837,6 +1837,7 @@ package body Langkit_Support.Adalog.Solver is
               Free (Self.Conv);
 
            when N_Propagate =>
+              Self.Comb_Vars.Destroy;
               Destroy (Self.Comb.all);
               Free (Self.Comb);
Move helper functions to unnamed namespace. Currently, the helper functions in this file are in the global namespace. I am guessing the purpose was to keep them local.
 using namespace nom;
 
+namespace {
+
 std::map<std::string, caffe2::Argument>
 getArgumentsFromOperator(caffe2::OperatorDef op) {
   std::map<std::string, caffe2::Argument> argMap;
@@ -83,6 +85,8 @@ std::vector<int> getDilations(std::map<std::string, caffe2::Argument> argMap) {
   return dilations;
 }
 
+} // namespace
+
 namespace caffe2 {
 
 std::unique_ptr<repr::NeuralNetOperator>
Remove use of single letter variable A single letter variable name of 'f' causes pylint to throw a coding style convention warning: C0103: Variable name "f" doesn't conform to snake_case naming style (invalid-name)
@@ -179,9 +179,9 @@ class Metadata():
             The file cannot be written.
 
         """
-        with tempfile.TemporaryFile() as f:
-            f.write(self.to_json(compact).encode('utf-8'))
-            persist_temp_file(f, filename, storage_backend)
+        with tempfile.TemporaryFile() as temp_file:
+            temp_file.write(self.to_json(compact).encode('utf-8'))
+            persist_temp_file(temp_file, filename, storage_backend)
 
     # Signatures.
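A runnable sketch of the renamed-variable pattern (payload and names are illustrative): a descriptive name like `temp_file` satisfies pylint's snake_case check (C0103), where a single-letter `f` does not.

```python
import tempfile

# Write to an anonymous temporary file and read it back; the behavior
# is identical to the single-letter version, only the name changes.
with tempfile.TemporaryFile() as temp_file:
    temp_file.write(b"example payload")
    temp_file.seek(0)
    print(temp_file.read())  # b'example payload'
```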
make rebuildstaging --deploy work if you have commcare-cloud on the path
@@ -67,8 +67,11 @@ fi
 
 if [[ $deploy = 'y' && $no_push != 'y' ]]
 then
-    rebuildstaging $args && \
-    echo 'rebuildstaging will no longer deploy for you. From commcarehq-ansible, run `fab staging deploy`'
+    rebuildstaging $args && {
+        which commcare-cloud \
+        && commcare-cloud staging fab deploy \
+        || echo 'Could not auto-deploy for you. Run `commcare-cloud staging fab deploy` to deploy.'
+    }
 else
     rebuildstaging $args
 fi
Remove redundant text. 'Full Examples: Bucket policy Operations' is repeated twice.
@@ -162,11 +162,6 @@ The full API Reference is available here.
 * [set_bucket_policy.py](https://github.com/minio/minio-py/blob/master/examples/set_bucket_policy.py)
 * [get_bucket_policy.py](https://github.com/minio/minio-py/blob/master/examples/get_bucket_policy.py)
 
-#### Full Examples: Bucket policy Operations
-
-* [set_bucket_policy.py](https://github.com/minio/minio-py/blob/master/examples/set_bucket_policy.py)
-* [get_bucket_policy.py](https://github.com/minio/minio-py/blob/master/examples/get_bucket_policy.py)
-
 #### Full Examples: Bucket notification Operations
 
 * [set_bucket_notification.py](https://github.com/minio/minio-py/blob/master/examples/set_bucket_notification.py)
Update default.py Having the sha1 & sha256 hashes by default would actually be pretty useful I think. This change would add those.
@@ -21,6 +21,8 @@ def width(s, character_count):
 def render_meta(doc, ostream):
     rows = [
         (width("md5", 22), width(doc["meta"]["sample"]["md5"], 82)),
+        (width("sha1", 22), width(doc["meta"]["sample"]["sha1"], 82)),
+        (width("sha256", 22), width(doc["meta"]["sample"]["sha256"], 82)),
         ("path", doc["meta"]["sample"]["path"]),
     ]
Fix QATConv3D: update to optional Conv3D import.
@@ -18,6 +18,7 @@ Utility / helper functions
 
 import random
 import re
+import warnings
 from collections import OrderedDict, namedtuple
 from contextlib import contextmanager
 from copy import deepcopy
@@ -35,7 +36,6 @@ from torch.utils.data import DataLoader
 try:
     quant_err = None
     from torch.nn.qat import Conv2d as QATConv2d
-    from torch.nn.qat import Conv3d as QATConv3d
     from torch.nn.qat import Linear as QATLinear
     from torch.quantization import QuantWrapper
 except Exception as _err:
@@ -43,6 +43,11 @@ except Exception as _err:
     QuantWrapper = None
     QATLinear = None
     QATConv2d = None
+
+try:
+    from torch.nn.qat import Conv3d as QATConv3d
+except Exception as _err:
+    quant_conv3d_err = _err
     QATConv3d = None
 
 from sparseml.utils import create_dirs, save_numpy
@@ -791,7 +796,7 @@ def get_quantizable_layers(module: Module) -> List[Tuple[str, Module]]:
         if (
             isinstance(mod, Linear)
             or isinstance(mod, Conv2d)
-            or isinstance(mod, Conv3d)
+            or (QATConv3d and isinstance(mod, Conv3d))
         )
     ]
 
@@ -808,15 +813,23 @@ def get_quantized_layers(module: Module) -> List[Tuple[str, Module]]:
             "Please install a QAT compatible version of PyTorch"
         )
 
-    return [
-        (name, mod)
-        for (name, mod) in module.named_modules()
+    quantized_layers = []
+    for (name, mod) in module.named_modules():
         if (
             (QATLinear and isinstance(mod, QATLinear))
             or (QATConv2d and isinstance(mod, QATConv2d))
            or (QATConv3d and isinstance(mod, QATConv3d))
+        ):
+            quantized_layers.append((name, mod))
+
+        elif isinstance(mod, QATConv3d) and not QATConv3d:
+            warnings.warn(
+                "Pytorch version is not setup for Conv3D Quantization. "
+                "Quantization of Conv3D layers will be skipped",
                UserWarning,
            )
-    ]
+
+    return quantized_layers
 
 
 def get_layer_param(param: str, layer: str, module: Module) -> Parameter:
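The core of this fix is the optional-import pattern. A minimal sketch of it in isolation (the surrounding sparseml plumbing is omitted; this is not the repository's code):

```python
# Try the import, stash the failure, and fall back to None so later
# code can feature-test with a plain truthiness check instead of
# crashing at import time on older torch builds.
try:
    from torch.nn.qat import Conv3d as QATConv3d
except Exception as err:  # torch absent, or too old to ship QAT Conv3d
    quant_conv3d_err = err
    QATConv3d = None

if QATConv3d is None:
    print("QAT Conv3d unavailable; Conv3d quantization will be skipped")
```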
Mark chaos_dataset_shuffle_push_based_sort_1tb and chaos_dataset_shuffle_sort_1tb stable. They passed for the past 7 runs.
 test_name: chaos_dataset_shuffle_push_based_sort_1tb
 test_suite: chaos_test
-stable: false
-frequency: nightly
 team: core
 cluster:
 
 test_name: chaos_dataset_shuffle_sort_1tb
 test_suite: chaos_test
-stable: false
-frequency: nightly
 team: core
 cluster:
client: fix format string in ValueError. This fixes output like
@@ -582,7 +582,7 @@ def _fetch_and_map_with_go(isolated_hash, storage, outdir, go_cache_dir,
       proc.kill()
       proc.wait()
       # Raise unconditionally, because |proc| was forcefully terminated.
-      raise ValueError("timedout after %d seconds (cmd=%s)",
+      raise ValueError("timedout after %d seconds (cmd=%s)" %
                        (check_period_sec * max_checks, cmd_str))
 
     with open(result_json_path) as json_file:
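A runnable sketch of the bug being fixed (values are illustrative): the comma form works for logging calls, but for exceptions the extra items just become part of the args tuple, so the message is never interpolated.

```python
timeout, cmd = 30, "ls -la"

# Broken: str(err) prints the raw tuple.
broken = ValueError("timedout after %d seconds (cmd=%s)", timeout, cmd)
# Fixed: the % operator interpolates before the exception is built.
fixed = ValueError("timedout after %d seconds (cmd=%s)" % (timeout, cmd))

print(broken)  # ('timedout after %d seconds (cmd=%s)', 30, 'ls -la')
print(fixed)   # timedout after 30 seconds (cmd=ls -la)
```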
test: Make ephemeral sagemaker component tests more stable

* increase timeout

Increase timeout to make canaries less flaky

* Increase minio timeout

Make canaries less flaky

* Update run_integration_tests
* correct sleep
* remove unnecessary wait
@@ -164,6 +164,8 @@ function install_kfp() {
   echo "[Installing KFP] Minio port-forwarded to ${MINIO_LOCAL_PORT}"
 
   echo "[Installing KFP] Waiting for pods to stand up"
+  #TODO: In the future, modify kubectl wait to end when only one pod becomes ready.
+  sleep 3m
   kubectl wait --for=condition=ready -n "${KFP_NAMESPACE}" pod -l app=ml-pipeline --timeout=5m
Fix toObject() r-value version. Summary: Pull Request resolved: It should use moveToIntrusivePtr. This function is a very hot one and is used a lot in the interpreter loop, e.g. GET_ATTR, SET_ATTR. Making a copy and doing incref/decref caused big overhead.
@@ -76,7 +76,7 @@ inline c10::intrusive_ptr<ivalue::ConstantString> IValue::toString() const & {
 }
 inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() && {
   AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
-  return toIntrusivePtr<ivalue::Object>();
+  return moveToIntrusivePtr<ivalue::Object>();
 }
 inline c10::intrusive_ptr<ivalue::Object> IValue::toObject() const & {
   AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
run `add_filesystem` after `inject_yum_repos`. This is to avoid issues with the baseimage special case conflicting with scratch images. *
@@ -37,19 +37,19 @@ class BinaryPreBuildTask(plugin_based.PluginBasedTask[PreBuildTaskParams]):
         {"name": "check_base_image"},
         {"name": "koji_parent"},
         {"name": "resolve_composes"},
-        {"name": "add_filesystem"},
         {"name": "flatpak_update_dockerfile"},
         {"name": "bump_release"},
         {"name": "add_flatpak_labels"},
         {"name": "add_labels_in_dockerfile"},
         {"name": "resolve_remote_source"},
         {"name": "pin_operator_digest"},
-        {"name": "change_from_in_dockerfile"},
         {"name": "add_help"},
         {"name": "fetch_maven_artifacts"},
         {"name": "add_image_content_manifest"},
         {"name": "add_dockerfile"},
         {"name": "inject_yum_repos"},
+        {"name": "add_filesystem"},
+        {"name": "change_from_in_dockerfile"},
         {"name": "hide_files"},
         {"name": "distribution_scope"},
         {"name": "add_buildargs_in_dockerfile"},
Unpin flake8. The latest version requires entrypoint >= 3, which is already satisfied in the base image, so it doesn't trigger a distutils reinstall.
@@ -62,10 +62,7 @@ RUN apt-get install -y libfreetype6-dev && \
     pip install keras-rl && \
     #keras-rcnn
     pip install git+https://github.com/broadinstitute/keras-rcnn && \
-    # version 3.7.1 adds a dependency on entrypoints > 3. This causes a reinstall but fails because
-    # it is a distutils package and can't be uninstalled. Once the anaconda image in updated, this
-    # pin should be removed.
-    pip install flake8==3.6.0 && \
+    pip install flake8 && \
    #neon
     cd /usr/local/src && \
     git clone --depth 1 https://github.com/NervanaSystems/neon.git && \
Fix of maggroups: change matrix reading from row- to column-based and adjust the start of hexagonal magnetic spacegroups to 143.
@@ -124,7 +124,7 @@ class MagneticSpaceGroup(SymmetryGroup):
 
         def _get_point_operator(idx):
             '''Retrieve information on point operator (rotation matrix and Seitz label).'''
-            hex = self._data['bns_number'][0] >= 168 and self._data['bns_number'][0] <= 194
+            hex = self._data['bns_number'][0] >= 143 and self._data['bns_number'][0] <= 194
             c.execute('SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;', (idx-1, hex))
             op = c.fetchone()
             op = {'symbol': op[0], 'matrix': np.array(op[1].split(','), dtype='f').reshape(3, 3)}
@@ -179,12 +179,12 @@ class MagneticSpaceGroup(SymmetryGroup):
             for j in range(m):
                 s = b[3+o+(j*22):3+o+(j*22)+22]  # data corresponding to specific Wyckoff position
                 translation_vec = [s[0]/s[3], s[1]/s[3], s[2]/s[3]]
-                matrix = [[s[4], s[5], s[6]],
-                          [s[7], s[8], s[9]],
-                          [s[10], s[11], s[12]]]
-                matrix_magmom = [[s[13], s[14], s[15]],
-                                 [s[16], s[17], s[18]],
-                                 [s[19], s[20], s[21]]]
+                matrix = [[s[4], s[7], s[10]],
+                          [s[5], s[8], s[11]],
+                          [s[6], s[9], s[12]]]
+                matrix_magmom = [[s[13], s[16], s[19]],
+                                 [s[14], s[17], s[20]],
+                                 [s[15], s[18], s[21]]]
                 # store string representation, e.g. (x,y,z;mx,my,mz)
                 wyckoff_str = "({};{})".format(transformation_to_string(matrix, translation_vec),
                                                transformation_to_string(matrix_magmom, c='m'))
@@ -224,9 +224,9 @@ class MagneticSpaceGroup(SymmetryGroup):
                 return None
             # capital letters used here by convention,
             # IUCr defines P and p specifically
-            P = [[b[0], b[1], b[2]],
-                 [b[3], b[4], b[5]],
-                 [b[6], b[7], b[8]]]
+            P = [[b[0], b[3], b[6]],
+                 [b[1], b[4], b[7]],
+                 [b[2], b[5], b[8]]]
             p = [b[9]/b[12], b[10]/b[12], b[11]/b[12]]
             P = np.array(P).transpose()
             P_string = transformation_to_string(P, components=('a', 'b', 'c'))
fix(astro): missing snake_name caused runtime issues with fits datasets

* Update fits.py

Fix bug while plotting and manipulating df coming from FITS files

* Update packages/vaex-astro/vaex/astro/fits.py
@@ -18,6 +18,7 @@ logger = logging.getLogger("vaex.astro.fits")
 
 
 class FitsBinTable(DatasetMemoryMapped):
+    snake_name='fits'
     def __init__(self, filename, write=False, fs_options={}, fs=None):
         super(FitsBinTable, self).__init__(filename, write=write)
         self.ucds = {}
Add Chinese and Portuguese moves. Also add 10.3, 10.3.1, and 10.3.2 to iOS list.
@@ -148,7 +148,7 @@ def get_device_info(account):
 def generate_device_info(account):
     ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1', '8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1')
     ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
-    ios10 = ('10.0', '10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1')
+    ios10 = ('10.0', '10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2')
 
     devices = tuple(IPHONES.keys())
     account['model'] = choice(devices)
events: Remove code for settings which are included in property_types. These lines in fetch_initial_state_data are redundant now since these settings are already included in property_types after
@@ -269,11 +269,6 @@ def fetch_initial_state_data(
             Realm.POLICY_ADMINS_ONLY if user_profile is None else realm.delete_own_message_policy
         )
 
-        # TODO: Can we delete these lines? They seem to be in property_types...
-        state["realm_message_content_edit_limit_seconds"] = realm.message_content_edit_limit_seconds
-        state[
-            "realm_message_content_delete_limit_seconds"
-        ] = realm.message_content_delete_limit_seconds
         state[
             "realm_community_topic_editing_limit_seconds"
         ] = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS
Add some more options for modal forms: ability to display info or warning panels before the form.
+<div>
+{% if form.pre_form_info %}
+<div class='alert alert-info' role='alert' style='display: block;'>
+{{ form.pre_form_info }}
+</div>
+{% endif %}
+{% if form.pre_form_warning %}
+<div class='alert alert-warning' role='alert' style='display: block;'>
+{{ form.pre_form_warning }}
+</div>
+{% endif %}
 {% block non_field_error %}
 {% if form.non_field_errors %}
 <div class='alert alert-danger' role='alert' style='display: block;'>
 </div>
 {% endif %}
 {% endblock %}
+</div>
 
 {% block pre_form_content %}
 {% endblock %}
Change type check to use isinstance instead of str compare

Authors:
Approvers:
  - Adam Thompson (https://github.com/awthomp)

URL:
@@ -170,7 +170,7 @@ class _UpFIRDn(object):
     def __init__(self, h, x_dtype, up, down):
         """Helper for resampling"""
 
-        if str(type(h)) == "<class 'cupy._core.core.ndarray'>":
+        if isinstance(h, cp.ndarray):
             pp = cp
         else:
            pp = np
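A sketch of why this matters, using numpy in place of cupy (an assumption, so the example runs without a GPU): `isinstance` survives internal module moves such as `cupy.core` becoming `cupy._core`, while a hard-coded string compare breaks the moment the class's repr changes.

```python
import numpy as np

h = np.zeros(3)
# Brittle: tied to the exact module path in the class repr.
print(str(type(h)) == "<class 'numpy.ndarray'>")
# Robust: unaffected by where the class actually lives.
print(isinstance(h, np.ndarray))
```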
eggroll does not support iterators in the put_all api anymore; convert to list.
@@ -28,7 +28,7 @@ def _save_as_func(rdd: RDD, name, namespace, partition, persistent):
 
     def _func(_, it):
         eggroll_util.maybe_create_eggroll_client()
-        dup.put_all(it)
+        dup.put_all(list(it))
         return 1,
 
     rdd.mapPartitionsWithIndex(_func, preservesPartitioning=False).collect()
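A runnable sketch of the underlying issue: a one-shot iterator is exhausted after the first pass, so an API that iterates (or serializes) the payload more than once silently sees nothing the second time; materializing with `list()` sidesteps this.

```python
it = iter([1, 2, 3])
print(list(it))  # [1, 2, 3]
print(list(it))  # [] -- the iterator is already consumed
```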
muting_ui: Fix bug with same name of function parameter and a file. The parameter passed to 'handle_topic_updates' is 'muted_topics' and there is also a javascript file with the same name. So 'muted_topics.get_muted_topics' gives an error, and this commit fixes this by changing the parameter name to 'muted_topics_list'. This was introduced in
@@ -45,9 +45,9 @@ export function rerender_for_muted_topic(old_muted_topics) {
     }
 }
 
-export function handle_topic_updates(muted_topics) {
+export function handle_topic_updates(muted_topics_list) {
     const old_muted_topics = muted_topics.get_muted_topics();
-    muted_topics.set_muted_topics(muted_topics);
+    muted_topics.set_muted_topics(muted_topics_list);
     stream_popover.hide_topic_popover();
     unread_ui.update_unread_counts();
     rerender_for_muted_topic(old_muted_topics);
Update MAKE-RELEASE.md Add an entry to make sure changelog gets one last look before a release is made
 ## For the Pull Request
 
 - Update CHANGELOG.md, add entry inbetween `## Unversioned` and any changelog entries with `## YOUR.VERSION`
+- Look through the changelog entries of this version, and reorder any entries so the most important changes are at the top of each category
 - Update pajbot/constants.py
 
 ## After the Pull Request has been accepted
Adds a placeholder for the 'mul' operator. Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -40,6 +40,14 @@ class FloatFunctional(torch.nn.Module):
         self.observer(r)
         return r
 
+    r"""Operation equivalent to ``torch.mul``"""
+    def mul(self, x, y):
+        # type: (Tensor, Tensor) -> Tensor
+        r = torch.mul(x, y)
+        # TODO: Fix for QAT.
+        self.observer(r)
+        return r
+
     r"""Operation equivalent to ``torch.cat``"""
     def cat(self, x, dim=0):
         # type: (List[Tensor], int) -> Tensor
@@ -83,6 +91,10 @@ class QFunctional(torch.nn.Module):
         return ops.quantized.add(x, y, scale=self.scale,
                                  zero_point=self.zero_point)
 
+    r"""Operation equivalent to ``torch.ops.quantized.mul``"""
+    def mul(self, x, y):
+        raise NotImplementedError("Implementation of 'mul' is in progress...")
+
     r"""Operation equivalent to ``torch.ops.quantized.cat``"""
     def cat(self, x, dim=0):
         # type: (List[Tensor], int) -> Tensor
Alter the notification progress initializer to take a resource. Before this, notifications had no way of telling which specific resource they derived from.
@@ -92,7 +92,8 @@ class Notification(Model):
         return self.save(doc)
 
     def initProgress(self, user, title, total=0, state=ProgressState.ACTIVE,
-                     current=0, message='', token=None, estimateTime=True):
+                     current=0, message='', token=None, estimateTime=True, resource=None,
+                     resourceName=None):
         """
         Create a "progress" type notification that can be updated anytime there
         is progress on some task. Progress records that are not updated for more
@@ -120,6 +121,9 @@ class Notification(Model):
         :param estimateTime: if True, generate an estimate of the total time
             the task will take, if possible. If False, never generate a time
             estimate.
+        :param resource: a partial or complete resource that the notification is
+            associated with. This must at a minimum include the id of the resource.
+        :param resourceName: the type of resource the notification is associated with.
         """
         data = {
             'title': title,
@@ -127,7 +131,9 @@ class Notification(Model):
             'current': current,
             'state': state,
             'message': message,
-            'estimateTime': estimateTime
+            'estimateTime': estimateTime,
+            'resource': resource,
+            'resourceName': resourceName
         }
 
         expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
Change to Iterable as Python 3.5 doesn't support Collection. We don't really need the getitem and len attributes anyway.
 import re
 
 from collections import defaultdict, namedtuple
-from collections.abc import Collection
+from collections.abc import Iterable
 from functools import lru_cache
 
 from .exceptions import NotFound, InvalidUsage
 from .views import CompositionView
@@ -112,8 +112,8 @@ class Router:
                 self.hosts.add(host)
 
             else:
-                if not isinstance(host, Collection):
-                    raise ValueError("Expected either string of Collection of "
+                if not isinstance(host, Iterable):
+                    raise ValueError("Expected either string or Iterable of "
                                      "host strings, not {!r}".format(host))
                 for host_ in host:
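A runnable sketch of the distinction (host values are illustrative): every `Collection` is an `Iterable`, but not vice versa, and `Iterable` has the advantage of existing in `collections.abc` on Python 3.5, where `Collection` does not.

```python
from collections.abc import Iterable

# Sets, tuples, and even generators all pass the looser check, which is
# all the loop over hosts actually needs.
for host in ({"a.com", "b.com"}, (h for h in ["c.com"])):
    print(isinstance(host, Iterable))  # True, True
```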
[cleanup] Remove T249090 debugging stuff. Solving the T249090 issue was declined because Python 2.7 is to be dropped soon and the related tests are done with Python 3.5 now. Therefore we can remove this debugging code.
@@ -17,7 +17,6 @@ import pywikibot.login
 import pywikibot.page
 import pywikibot.site
 
-from pywikibot import config
 from pywikibot.throttle import Throttle
 from pywikibot.tools import (
     suppress_warnings,
@@ -25,13 +24,13 @@ from pywikibot.tools import (
     UnicodeType,
 )
 
-from tests import patch, unittest_print
 from tests.aspects import (
     unittest,
     TestCase,
     DefaultSiteTestCase,
     DefaultDrySiteTestCase,
 )
+from tests import patch
 from tests.utils import FakeLoginManager, PatchedHttp
 
 if not PY2:
@@ -137,25 +136,12 @@ class TestApiFunctions(DefaultSiteTestCase):
 
     """API Request object test class."""
 
-    # @suppress_warnings(r'Request\(\) invoked without a site', RuntimeWarning)
+    @suppress_warnings(r'Request\(\) invoked without a site', RuntimeWarning)
     def testObjectCreation(self):
         """Test api.Request() constructor with implicit site creation."""
-        unittest_print('\n\n>>> Debugging stuff (T249090) ----')
-        unittest_print('config at method start:', config.mylang, config.family)
-        unittest_print('<<< ------------------------------\n')
-
         req = api.Request(parameters={'action': 'test', 'foo': '', 'bar': 'test'})
         self.assertTrue(req)
-
-        unittest_print('>>> Debugging stuff (T249090) ----')
-        unittest_print('config:', config.mylang, config.family)
-        unittest_print('Request:', req)
-        unittest_print(req.__dict__)
-        unittest_print('config before asserting:',
-                       config.mylang, config.family)
-        unittest_print('<<< ------------------------------')
-
         self.assertEqual(req.site, self.get_site())
Check for empty list in matplotlib unit conversion. Sometimes matplotlib >= 3.1 can send an empty list for conversion. To prevent an exception, we must check that the list contains something before checking the first element.
@@ -98,7 +98,7 @@ def quantity_support(format='latex_inline'):
         def convert(val, unit, axis):
             if isinstance(val, u.Quantity):
                 return val.to_value(unit)
-            elif isinstance(val, list) and isinstance(val[0], u.Quantity):
+            elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
                 return [v.to_value(unit) for v in val]
             else:
                 return val
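A standalone sketch of the guard (plain floats stand in for Quantity objects): `val and ...` short-circuits on an empty list, so `val[0]` is never evaluated and no IndexError is raised.

```python
def convert(val):
    # The truthiness check in the middle is the whole fix.
    if isinstance(val, list) and val and isinstance(val[0], float):
        return [int(v) for v in val]
    return val

print(convert([]))          # [] -- previously raised IndexError
print(convert([1.5, 2.5]))  # [1, 2]
```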
Update rogue_dns.txt (```/``` means period here).
 171.244.33.116:53
 220.136.110.179:53
 42.112.35.45:53
+42.112.35.46:53
+42.112.35.47:53
+42.112.35.48:53
+42.112.35.49:53
+42.112.35.50:53
+42.112.35.51:53
+42.112.35.52:53
+42.112.35.53:53
+42.112.35.54:53
 42.112.35.55:53
 
 # Reference: https://twitter.com/bad_packets/status/1079251375987425280
Use create_astnode for EnumNode subclasses TN:
@@ -2642,30 +2642,22 @@ def create_enum_node_classes(cls):
     is_bool_node = bool(cls._qualifier)
 
     fields = list(cls._fields)
-    base_enum_dct = {
-        'alternatives': cls._alternatives,
-        'is_enum_node': True,
-        'is_bool_node': is_bool_node,
-        'is_type_resolved': True,
-
-        '_doc': cls._doc,
-        '_fields': fields,
-        '_is_abstract': True,
-
-        # List of `base_enum_node` subclass we create here, one for each
-        # alternative.
-        '_alternatives': [],
-    }
     if is_bool_node:
         prop = AbstractProperty(type=BoolType, public=True)
         prop.location = cls._location
         fields.append(('as_bool', prop))
 
-    # Add other supplied fields to the base class dict
-    base_enum_dct.update(dict(cls._fields))
-
     # Generate the abstract base node type
-    base_enum_node = type(cls._name.camel, (T.root_node, ), base_enum_dct)
+    base_enum_node = create_astnode(
+        name=cls._name, location=cls._location, doc=cls._doc, base=T.root_node,
+        fields=fields,
+        is_abstract=True
+    )
+    base_enum_node.alternatives = cls._alternatives
+    base_enum_node.is_enum_node = True
+    base_enum_node.is_bool_node = is_bool_node
+    base_enum_node.is_type_resolved = True
+    base_enum_node._alternatives = []
     cls._type = base_enum_node
 
     for alt in cls._alternatives:
@@ -2673,13 +2665,16 @@ def create_enum_node_classes(cls):
 
         # Generate the derived class corresponding to this alternative
         fields = []
-        dct = {'_fields': fields}
         if is_bool_node:
             prop = Property(alt.name.lower == 'present')
             prop.location = cls._location
             fields.append(('as_bool', prop))
 
-        alt_type = type(alt_name.camel, (base_enum_node, ), dct)
+        alt_type = create_astnode(
+            name=alt_name, location=None, doc=None,
+            base=base_enum_node,
+            fields=fields
+        )
         alt._type = alt_type
 
         # Make the alternative derived class accessible from the root node for
compose-validate: Use settings config value for policy check. Updates the check in compose validate for the organization's policy on sending private messages to use the code/value in settings_config, instead of the number value.
@@ -483,7 +483,8 @@ function validate_private_message() {
     const user_ids = compose_pm_pill.get_user_ids();
 
     if (
-        page_params.realm_private_message_policy === 2 && // Frontend check for for PRIVATE_MESSAGE_POLICY_DISABLED
+        page_params.realm_private_message_policy ===
+            settings_config.private_message_policy_values.disabled.code &&
         (user_ids.length !== 1 || !people.get_by_user_id(user_ids[0]).is_bot)
     ) {
         // Unless we're composing to a bot
Update filter-dev-guide.md: made small changes to md syntax.
@@ -9,8 +9,6 @@ Sometimes you may want Ambassador Edge Stack to manipulate an incoming request.
 
 Ambassador Edge Stack supports these use cases by allowing you to execute custom logic in `Filters`. Filters are written in Golang, and managed by Ambassador Edge Stack.
 
-
-
 ## Prerequisites
 
 `Plugin` `Filter`s are built as [Go plugins](https://golang.org/pkg/plugin/) and loaded directly into the Ambassador Pro container so they can run in-process with the rest of Ambassador Pro.
@@ -99,6 +97,3 @@ Now, you can quickly test and develop your filter.
 ## Further reading
 
 For more details about configuring filters and the `plugin` interface, see the [filter reference](/reference/filter-reference).
-
-
-
Translated using Weblate (Russian)

Currently translated at 84.4% (103 of 122 strings)

Translation: udiskie/udiskie
Translate-URL:
@@ -8,16 +8,17 @@ msgstr ""
 "Project-Id-Version: \n"
 "Report-Msgid-Bugs-To: \n"
 "POT-Creation-Date: 2022-03-01 16:22+0000\n"
-"PO-Revision-Date: 2019-02-16 21:18+0300\n"
-"Last-Translator: mr-GreyWolf <[email protected]>\n"
-"Language-Team: Russian\n"
+"PO-Revision-Date: 2022-04-17 13:10+0000\n"
+"Last-Translator: Weblate Admin <[email protected]>\n"
+"Language-Team: Russian <http://weblate.coldfix.de/projects/udiskie/udiskie/"
+"ru/>\n"
 "Language: ru_RU\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"X-Generator: Poedit 1.8.11\n"
-"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
-"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+"Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
+"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n"
+"X-Generator: Weblate 4.11.2\n"
 
 #: ../udiskie/cli.py:46
 #, python-brace-format
Apply automatic formatting to _ecg_findpeaks_peakdetect As outlined in
@@ -1039,6 +1039,7 @@ def _ecg_findpeaks_peakdetect(detection, sampling_rate=1000):
     """Based on https://github.com/berndporr/py-ecg-detectors/
 
     Optimized for vectorized computation.
+
     """
     min_peak_distance = int(0.3 * sampling_rate)
     min_missed_distance = int(0.25 * sampling_rate)
@@ -1068,8 +1069,9 @@ def _ecg_findpeaks_peakdetect(detection, sampling_rate=1000):
                 RR_missed = int(1.66 * RR_ave)
                 if peak - last_peak > RR_missed:
                     missed_peaks = peaks[last_index + 1 : index]
-                    missed_peaks = missed_peaks[(missed_peaks > last_peak + min_missed_distance) &
-                                                (missed_peaks < peak - min_missed_distance)]
+                    missed_peaks = missed_peaks[
+                        (missed_peaks > last_peak + min_missed_distance) & (missed_peaks < peak - min_missed_distance)
+                    ]
                     threshold_I2 = 0.5 * threshold_I1
                     missed_peaks = missed_peaks[detection[missed_peaks] > threshold_I2]
                     if len(missed_peaks) > 0:
[IMPROV] Link: Normalize before assert. Instead of checking all different variants in the assert, just normalize it first and then assert that the normalization worked.
@@ -4619,14 +4619,14 @@ class Link(ComparableMixin):
         """
         source_is_page = isinstance(source, BasePage)
 
-        assert source is None or source_is_page or isinstance(source, pywikibot.site.BaseSite), \
-            "source parameter should be either a Site or Page object"
-
         if source_is_page:
             self._source = source.site
         else:
             self._source = source or pywikibot.Site()
 
+        assert isinstance(self._source, pywikibot.site.BaseSite), \
+            "source parameter should be either a Site or Page object"
+
         self._text = text
         self._defaultns = defaultNamespace
Add minimal and nice-to-have functionality. Maybe add these functionality parts in the template to make a feature request more precise/clearer.
@@ -15,6 +15,14 @@ A clear and concise description of what the problem is, followed by the solution
 
 Who is affected by the change (Users, Managers, Admins)?
 
+### Minimal functionality
+
+What functionality would you like to have?
+
+### Nice-to-have functionality
+
+What sort of related functionality would you like to see in addition?
+
 ### References / Verweise
 
 *
Fixed base_invalid_option unit test. argparse output for an invalid option has different text and goes to stderr.
@@ -54,10 +54,9 @@ def test_base_invalid_option(base_app, capsys):
     run_cmd(base_app, 'show -z')
     out, err = capsys.readouterr()
     show_help = run_cmd(base_app, 'help show')
-    expected = ['no such option: -z']
-    expected.extend(show_help)
+    expected = ['usage: show [-h] [-l] [param]', 'show: error: unrecognized arguments: -z']
     # 'show -h' is the same as 'help show', other than whitespace differences of an extra newline present in 'help show'
-    assert normalize(str(out)) == expected
+    assert normalize(str(err)) == expected
 
 def test_base_shortcuts(base_app):
     out = run_cmd(base_app, 'shortcuts')
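A runnable sketch of the behavior the updated test accounts for (the parser here is illustrative): argparse exits with "error: unrecognized arguments" written to stderr, whereas the old optparse-based parser printed "no such option" to stdout.

```python
import argparse

parser = argparse.ArgumentParser(prog="show")
parser.add_argument("-l", action="store_true")
try:
    parser.parse_args(["-z"])
except SystemExit:
    # "show: error: unrecognized arguments: -z" went to stderr, not stdout,
    # which is why the assertion had to switch from `out` to `err`.
    pass
```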
Enable linkcheck on CI. Probably, we should do this on deployment only.
@@ -46,6 +46,7 @@ matrix:
       script:
         - python setup.py flake8
         - sphinx-build -W -b html docs build/sphinx/html
+        - sphinx-build -W -b linkcheck docs build/sphinx/linkcheck
         - py.test --cov diofant diofant/polys
     - python: 3.5
       env: COVERAGE='on' EXTRA='on' DIOFANT_GROUND_TYPES='gmpy'
feat: add polygon matic + mumbai networks. Added polygon's matic and mumbai networks to the default network config, now that polygonscan is available.
@@ -62,6 +62,18 @@ live:
       id: ftm-main
       host: https://rpcapi.fantom.network
       explorer: https://api.ftmscan.com/api
+- name: Polygon
+  networks:
+    - name: Mainnet (Infura)
+      chainid: 137
+      id: polygon-main
+      host: https://polygon-mainnet.infura.io/v3/$WEB3_INFURA_PROJECT_ID
+      explorer: https://api.polygonscan.com/api
+    - name: Mumbai Testnet (Infura)
+      chainid: 80001
+      id: polygon-test
+      host: https://polygon-mumbai.infura.io/v3/$WEB3_INFURA_PROJECT_ID
+      explorer: https://api-testnet.polygonscan.com/api
 
 development:
   - name: Ganache-CLI
@@ -116,3 +128,15 @@ development:
       evm_version: istanbul
       mnemonic: brownie
       fork: ftm-main
+  - name: Ganache-CLI (Polygon-Mainnet Fork)
+    id: polygon-main-fork
+    cmd: ganache-cli
+    host: http://127.0.0.1
+    timeout: 120
+    cmd_settings:
+      port: 8545
+      gas_limit: 20000000
+      accounts: 10
+      evm_version: istanbul
+      mnemonic: brownie
+      fork: polygon-main
Fixed two links to old docs.
@@ -298,8 +298,8 @@ class KeyTable(object):
         The scope for both ``key_expr`` and ``agg_expr`` is all column names
         in the input :class:`KeyTable`.
 
-        For more information, see the documentation on writing `expressions <../overview.html#expressions>`_
-        and using the `Hail Expression Language <../reference.html#HailExpressionLanguage>`_.
+        For more information, see the documentation on writing :ref:`expressions <overview-expressions>`
+        and using the `Hail Expression Language <https://hail.is/expr_lang.html>`_
 
         :param key_expr: Named expression(s) for how to compute the keys of the new key table.
         :type key_expr: str or list of str
user status: Change Last seen to Last online. Change "Last seen" to "Last online" in the full user profile.
         <span class="value">{{user_type}}</span>
     </div>
     <div class="default-field">
-        <span class="name">{{#tr this}}Last seen{{/tr}}</span>
+        <span class="name">{{#tr this}}Last online{{/tr}}</span>
         <span class="value">{{last_seen}}</span>
     </div>
 {{#if user_time}}
MAINT: Remove unused import. Remove import of assert_array_almost_equal_nulp.
 from numpy.testing import (
     assert_, assert_equal, assert_raises, assert_array_equal,
     assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
-    assert_allclose, assert_array_almost_equal_nulp
+    assert_allclose,
     )
 from numpy.compat import pickle
Windows Canvas should always use GraphicsPath. This solves a bug with rendering text with stroke.
@@ -30,12 +30,11 @@ class WinformContext(Context):
 
     @property
     def current_path(self):
         if len(self.paths) == 0:
-            return None
+            self.add_path()
         return self.paths[-1]
 
-    @current_path.setter
-    def current_path(self, current_path):
-        self.paths.append(current_path)
+    def add_path(self):
+        self.paths.append(GraphicsPath())
 
 
 class Canvas(Box):
@@ -78,13 +77,13 @@ class Canvas(Box):
     # Basic paths
 
     def new_path(self, draw_context, *args, **kwargs):
-        draw_context.current_path = GraphicsPath()
+        draw_context.add_path()
 
     def closed_path(self, x, y, draw_context, *args, **kwargs):
         self.line_to(x, y, draw_context, *args, **kwargs)
 
     def move_to(self, x, y, draw_context, *args, **kwargs):
-        draw_context.current_path = GraphicsPath()
+        draw_context.add_path()
         draw_context.last_point = (x, y)
 
     def line_to(self, x, y, draw_context, *args, **kwargs):
@@ -150,24 +149,11 @@ class Canvas(Box):
             *args, **kwargs):
         rect = RectangleF(float(x - radiusx), float(y - radiusy),
                           float(2 * radiusx), float(2 * radiusy))
-        if draw_context.current_path is not None:
             draw_context.current_path.AddArc(
                 rect,
                 math.degrees(startangle),
                 math.degrees(endangle - startangle)
             )
-        else:
-            pen = self.create_pen(
-                color=kwargs.get("stroke_color", None),
-                line_width=kwargs.get("text_line_width", None),
-                line_dash=kwargs.get("text_line_dash", None)
-            )
-            draw_context.graphics.DrawArc(
-                pen,
-                rect,
-                math.degrees(startangle),
-                math.degrees(endangle - startangle)
-            )
         draw_context.last_point = (
             x + radiusx * math.cos(endangle),
             y + radiusy * math.sin(endangle)
         )
 
     def rect(self, x, y, width, height, draw_context, *args, **kwargs):
         rect = RectangleF(float(x), float(y), float(width), float(height))
-        if draw_context.current_path is not None:
             draw_context.current_path.AddRectangle(rect)
-        else:
-            pen = self.create_pen(
-                color=kwargs.get("stroke_color", None),
-                line_width=kwargs.get("text_line_width", None),
-                line_dash=kwargs.get("text_line_dash", None)
-            )
-            draw_context.graphics.DrawRectangles(pen, [rect])
 
     # Drawing Paths
 
     def write_text(self, text, x, y, font, draw_context, *args, **kwargs):
         width, height = font.measure(text)
         origin = PointF(x, y - height)
-        if draw_context.current_path is not None:
             font_family = win_font_family(font.family)
             font_style = win_font_style(font.weight, font.style, font_family)
             draw_context.current_path.AddString(
                 text, font_family, font_style, float(height), origin, StringFormat()
             )
-        else:
-            brush = self.create_brush(
-                color=kwargs.get("stroke_color", None),
-            )
-            draw_context.graphics.DrawString(
-                text, font._impl.native, brush, origin
-            )
 
     def measure_text(self, text, font, draw_context, *args, **kwargs):
         self.interface.factory.not_implemented('Canvas.measure_text()')
[pymtl/Aerodactyl/test/Host_test] A copy for HostAerodactyl_test

* This file, like the non-host version, will scan all files that share the same module name (i.e., 'HostAerodactyl') and same ending (i.e., '_test') with the file containing the code
* Then, the file will dynamically load all these files into the globals() space
 # HostAerodactyl_test
 #=========================================================================
 
+import os
 import importlib
 import pytest
 
 from pymtl import *
-from pclib.test import run_sim
-from fpga import SwShim
-
-# Import designs
-from CompAerodactyl.Aerodactyl import Aerodactyl
-from CompAerodactyl.HostAerodactyl import HostAerodactyl
 
 #-------------------------------------------------------------------------
-# Redefining run_test for Hosted version
+# Import all _test that matches the file name
 #-------------------------------------------------------------------------
 
-def run_test( test, dump_vcd, test_verilog,
-              src_delay=0, sink_delay=0, mem_stall_prob=0, mem_latency=0 ):
-
-  asynch_bitwidth = 8
+# Get filename and directory
+filename = os.path.basename(__file__).rsplit('.', 1)[0]
+dirname = os.path.dirname(os.path.realpath(__file__))
 
-  dut            = Aerodactyl()
-  hwshim_and_dut = HostAerodactyl( asynch_bitwidth )
-  swshim         = SwShim( dut, hwshim_and_dut, asynch_bitwidth,
-                           dump_vcd, test_verilog )
+# Get base design name
+design_name = filename.rsplit('_', 1)
 
-  # Set explicit name
-  swshim.explicit_modulename = swshim.__class__.__name__
+# Checks
+assert design_name[-1] == 'test'
 
-  num_cores       = 4
-  cacheline_nbits = 128
+# List all files in current directory
+for root, dirs, files in os.walk(dirname):
 
-  run( swshim, test, num_cores, cacheline_nbits,
-       dump_vcd, test_verilog, src_delay, sink_delay,
-       mem_stall_prob, mem_latency )
+  # Loop over all files in the directory
+  for f in files:
+    mod_name = f.rsplit('.', 1)[0]
 
-#-------------------------------------------------------------------------
-# Override old run_test
-#-------------------------------------------------------------------------
+    load_mod  = mod_name.endswith(design_name[-1])
+    load_mod &= mod_name.startswith('_'.join(design_name[:-1]))
+    load_mod &= (mod_name != filename)
 
-import Aerodactyl_asm_test
-import Aerodactyl_mdu_test
+    if load_mod:
 
-Aerodactyl_asm_test.run_test = run_test
-Aerodactyl_mdu_test.run_test = run_test
-
-#-------------------------------------------------------------------------
-# Import everything inside Aerodactyl test infrastructure
-#-------------------------------------------------------------------------
-# Reuse tests from non-host version
+      # Load the module
+      f_path = os.path.join(root, f)
 
-from Aerodactyl_test import *
+      module = importlib.import_module(mod_name)
 
-#for x in dir(Aerodactyl_test):
-#  if not x in globals():
-#    print x
-#    globals()[x] = getattr(Aerodactyl_test, x)
+      for func in dir(module):
+        # If there is no conflict, load the module to globals
+        if not func in globals():
+          globals()[func] = getattr(module, func)
Remove unnecessary string concatenations. Python joins string literals spanning multiple lines anyway, and getting rid of the pluses removes unnecessary noise.
@@ -125,10 +125,10 @@ class Grouping:
         Return :obj:`None` if you don't want to store :obj:`e` in a group.
         """
         raise NotImplementedError("\n\n"
-                "There is no default implementation for `Groupings.key`.\n" +
-                "Congratulations, you managed to execute supposedly " +
-                "unreachable code.\n" +
-                "Please let us know by filing a bug at:\n\n " +
+                "There is no default implementation for `Groupings.key`.\n"
+                "Congratulations, you managed to execute supposedly "
+                "unreachable code.\n"
+                "Please let us know by filing a bug at:\n\n "
                 "https://github.com/oemof/oemof/issues\n")
 
     def value(self, e):
@@ -159,7 +159,7 @@ class Grouping:
         """
         if old is new:
             return old
-        raise ValueError("\nGrouping \n " +
+        raise ValueError("\nGrouping \n "
                          "{}:{}\nand\n {}:{}\ncollides.\n".format(
                              id(old), old, id(new), new) +
                          "Possibly duplicate uids/labels?")
@@ -180,10 +180,10 @@ class Grouping:
 
         """
         raise NotImplementedError("\n\n"
-            "`Groupings.filter` called without being overridden.\n" +
-            "Congratulations, you managed to execute supposedly " +
-            "unreachable code.\n" +
-            "Please let us know by filing a bug at:\n\n " +
+            "`Groupings.filter` called without being overridden.\n"
+            "Congratulations, you managed to execute supposedly "
+            "unreachable code.\n"
+            "Please let us know by filing a bug at:\n\n "
            "https://github.com/oemof/oemof/issues\n")
 
     def __call__(self, e, d):
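A two-line demonstration of the language rule this cleanup relies on: adjacent string literals are concatenated by the parser itself, so the `+` operators carried no meaning.

```python
# The parenthesized literals fuse at parse time, no operator needed.
message = ("line one\n"
           "line two\n")
assert message == "line one\n" + "line two\n"
```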
update pymysql.constants.CR values from
@@ -65,4 +65,15 @@ CR_ALREADY_CONNECTED = 2058
 CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
 CR_DUPLICATE_CONNECTION_ATTR = 2060
 CR_AUTH_PLUGIN_ERR = 2061
-CR_ERROR_LAST = 2061
+CR_INSECURE_API_ERR = 2062
+CR_FILE_NAME_TOO_LONG = 2063
+CR_SSL_FIPS_MODE_ERR = 2064
+CR_DEPRECATED_COMPRESSION_NOT_SUPPORTED = 2065
+CR_COMPRESSION_WRONGLY_CONFIGURED = 2066
+CR_KERBEROS_USER_NOT_FOUND = 2067
+CR_LOAD_DATA_LOCAL_INFILE_REJECTED = 2068
+CR_LOAD_DATA_LOCAL_INFILE_REALPATH_FAIL = 2069
+CR_DNS_SRV_LOOKUP_FAILED = 2070
+CR_MANDATORY_TRACKER_NOT_FOUND = 2071
+CR_INVALID_FACTOR_NO = 2072
+CR_ERROR_LAST = 2072
[internal] jvm: limit caching of JDK setup processes. As per the `Process`es used to obtain information on the JDK should not be cached permanently, especially for use of the system JVM. This was originally present in the code refactored by but was lost in a rebase. [ci skip-rust] [ci skip-build-wheels]
@@ -8,7 +8,7 @@ from dataclasses import dataclass
 from pants.backend.java.compile.javac_subsystem import JavacSubsystem
 from pants.engine.fs import Digest
 from pants.engine.internals.selectors import Get
-from pants.engine.process import FallibleProcessResult, Process, ProcessResult
+from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope, ProcessResult
 from pants.engine.rules import collect_rules, rule
 from pants.jvm.resolve.coursier_setup import Coursier
 
@@ -37,6 +37,7 @@ async def setup_jdk(coursier: Coursier, javac: JavacSubsystem) -> JdkSetup:
             ),
             input_digest=coursier.digest,
             description=f"Ensure download of JDK {coursier_jdk_option}.",
+            cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
         ),
     )
 
@@ -55,6 +56,7 @@ async def setup_jdk(coursier: Coursier, javac: JavacSubsystem) -> JdkSetup:
                 "-version",
             ),
             description=f"Extract version from JDK {coursier_jdk_option}.",
+            cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
         ),
     )
Update brocade_fastiron_telnet.py Removed debug = True line
@@ -145,7 +145,7 @@ class BrocadeFastironTelnet(CiscoBaseConnection):
 
     def check_config_mode(self, check_string=')#', pattern=''):
         """Checks if the device is in configuration mode or not."""
-        debug = True
+        debug = False
         if not pattern:
             pattern = re.escape(self.base_prompt)
         if debug:
Update gcloud-tasks-emulator to 0.5.1. 0.5.0 had a bug that would prevent process_task_queues from submitting tasks correctly.
@@ -10,7 +10,7 @@ deps =
     30: Django >= 3.0, < 3.1
 commands =
     pip install beautifulsoup4  # Test requirements
-    pip install gcloud-tasks-emulator>=0.4.0
+    pip install gcloud-tasks-emulator>=0.5.1
     pip install gcloud-storage-emulator>=0.2.2
     pip install requests-oauthlib
     pip install google-auth-oauthlib
Support using styles from Pygments plugins `pygments.styles.STYLE_MAP` contains only styles built directly into Pygments library. To list all available styles (including styles registered by plugins), one should use `get_all_styles` generator. For respective Pygments documentation, see:
@@ -16,7 +16,7 @@ from httpie.compat import is_windows
 from httpie.plugins import FormatterPlugin
 
 
-AVAILABLE_STYLES = set(pygments.styles.STYLE_MAP.keys())
+AVAILABLE_STYLES = set(pygments.styles.get_all_styles())
 AVAILABLE_STYLES.add('solarized')
 
 # This is the native style provided by the terminal emulator color scheme
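A quick sketch of the difference (requires pygments installed): `get_all_styles()` also yields styles registered by entry-point plugins, so its result is a superset of the built-in `STYLE_MAP` keys.

```python
import pygments.styles

builtin = set(pygments.styles.STYLE_MAP)
all_styles = set(pygments.styles.get_all_styles())
# Plugin-provided styles appear only in the generator-based listing.
print(all_styles >= builtin)  # True
print(all_styles - builtin)   # any styles contributed by plugins
```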
background_subtraction_test: force the test to face the situation where bg_img.shape > fg_img.shape to cover an if statement.
@@ -1983,6 +1983,8 @@ def test_plantcv_background_subtraction():
     pcv.params.debug = None
     fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
     truths.append(np.sum(fgmask) > 0)
+    fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=bg_img)
+    truths.append(np.sum(fgmask) > 0)
     # The same foreground subtracted from itself should be 0
     fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)
     truths.append(np.sum(fgmask) == 0)
Format-The-Codebase: rename a test.
@@ -243,7 +243,7 @@ class TestLoadInterface(unittest.TestCase):
 
 
 class TestLoadFromPipeline(unittest.TestCase):
-    def test_question_answering(self):
+    def test_text_to_text_model_from_pipeline(self):
         pipe = transformers.pipeline(model="sshleifer/bart-tiny-random")
         output = pipe("My name is Sylvain and I work at Hugging Face in Brooklyn")
         self.assertIsNotNone(output)
Add test cases for normalize(). Reorder the reorder rules.
@@ -22,6 +22,10 @@ _NORMALIZE_REPETITION = list(
 
 _NORMALIZE_REORDER = [
     ("\u0e40\u0e40", "\u0e41"),  # Sara E + Sara E -> Sara Ae
+    (
+        f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
+        "\\2\\1",
+    ),  # TONE/Thanthakhat+ + A/BVOWELV+ -> A/BVOWEL+ + TONE/Thanthakhat+
     (
         f"\u0e4d([{tonemarks}]*)\u0e32",
         "\\1\u0e33",
@@ -30,10 +34,6 @@ _NORMALIZE_REORDER = [
         f"([{follow_v}]+)([{tonemarks}]+)",
         "\\2\\1",
     ),  # FOLLOWVOWEL+ + TONEMARK+ -> TONEMARK+ + FOLLOWVOWEL+
-    (
-        f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
-        "\\2\\1",
-    ),  # TONE/Thanthakhat+ + A/BVOWELV+ -> A/BVOWEL+ + TONE/Thanthakhat+
 ]
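Each reorder rule is a regex substitution that swaps two captured groups. A sketch of the mechanism with ASCII markers standing in for the Thai tone marks and vowels (an assumption made purely for readability):

```python
import re

# "T"/"K" play the role of tone marks, "A"/"B" of above/below vowels:
# the backreference substitution moves the vowels in front of the marks.
rule = (r"([TK]+)([AB]+)", r"\2\1")
print(re.sub(*rule, "xTKABx"))  # xABTKx
```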
Fixing cron.yaml attempt. Gcloud needs a way to lint this file.
@@ -76,7 +76,7 @@ cron:
 
 - description: District Rankings Calculation
   url: /tasks/math/enqueue/district_rankings_calc/2018
-  schedule: every tuesday
+  schedule: every tuesday 1:00
   timezone: America/Los_Angeles
 
 - description: Upcoming match notification sending
Fix run launcher lint

### Summary & Motivation

### How I Tested These Changes
@@ -167,8 +167,10 @@ def cleanup_test_instance(instance):
     # To avoid filesystem contention when we close the temporary directory, wait for
     # all runs to reach a terminal state, and close any subprocesses or threads
     # that might be accessing the run history DB.
-    if instance._run_launcher:
-        instance._run_launcher.join()
+
+    # Since launcher is lazy loaded, we don't need to do anything if it's None
+    if instance._run_launcher:  # pylint: disable=protected-access
+        instance._run_launcher.join()  # pylint: disable=protected-access
 
 
 TEST_PIPELINE_NAME = "_test_pipeline_"
[DOC] Update README.md Do not recommend using `sudo` or `-e` during package installation (for users)
@@ -27,7 +27,7 @@ Donwload the package as zip from github and uncompress or if you have ``git`` us
 
 open a terminal in the phy2bids folder and execute the command:
 
-``sudo pip3 install -e .``
+``pip3 install .``
 
 type the command:
Remove ssh_key_path validation. Follow-up to fix test.
@@ -163,7 +163,6 @@ def test_do_validate_config(tmpdir, monkeypatch):
     expected_output = {
         'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
         'master_list': 'Must set master_list, no way to calculate value.',
-        'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
     }
     with tmpdir.as_cwd():
         assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
Update EPS_Screen.kv Removed purple centerline hyphens.
             origin: self.center
         canvas.after:
             PopMatrix
-    Label:
-        pos_hint: {"center_x": 0.12, "center_y": 0.5}
-        text: '--------------'
-        markup: True
-        color: 1,0,1
-        font_size: 20
     Label:
         id: beta4b_label
         pos_hint: {"center_x": 0.07, "center_y": 0.31}
[core/output] Add prefix/suffix metadata see
@@ -397,6 +397,7 @@ class WidgetDrawer(object):
 
         if self._prefix:
             self._full_text = u"{}{}".format(self._prefix, self._full_text)
+        return self._prefix
 
     def add_suffix_iconmarkup(self, widget):
         """add custom Pango markup for suffix"""
@@ -412,6 +413,7 @@ class WidgetDrawer(object):
 
         if self._suffix:
             self._full_text = u"{}{}".format(self._full_text, self._suffix)
+        return self._suffix
 
     def escape_amp(self):
         """escape & in full_text, because pango requires it"""
@@ -441,10 +443,8 @@ class WidgetDrawer(object):
         raw = self._full_text
 
         padding = self._theme.padding(widget)
-
-        self.add_prefix(widget, padding)
-
-        self.add_suffix(widget, padding)
+        prefix = self.add_prefix(widget, padding)
+        suffix = self.add_suffix(widget, padding)
 
         width = self._theme.minwidth(widget)
 
@@ -466,6 +466,8 @@ class WidgetDrawer(object):
             "name": module.id,
             "markup": self._markup,
             "_raw": raw,
+            "_prefix": prefix,
+            "_suffix": suffix,
         })
 
         return self._widgets
docstring update [skip ci]
@@ -267,6 +267,8 @@ class Event(BaseNeo, pq.Quantity):
 
         1. By default, an array of `n` event times will be transformed into
            `n-1` epochs, where the end of one epoch is the beginning of the next.
+           This assumes that the events are ordered in time; it is the
+           responsibility of the caller to check this is the case.
         2. If `pairwise` is True, then the event times will be taken as pairs
            representing the start and end time of an epoch. The number of
            events must be even, otherwise a ValueError is raised.
BUILDTEST_ROOT needs to be set in environment when building documentation. The error in rtd build for apidocs is due to the fact that BUILDTEST_ROOT is not set upon build.
@@ -17,8 +17,8 @@ import sys
 
 BUILDTEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+os.environ["BUILDTEST_ROOT"] = BUILDTEST_ROOT
 sys.path.insert(0, os.path.join(BUILDTEST_ROOT,'src'))
-print (sys.path)
 
 # -- Project information -----------------------------------------------------
 project = 'buildtest'
Compute metrics under distributed strategies. Removed the conditional over distributed strategies when computing metrics. Metrics are now computed even when distributed strategies are used.
@@ -300,12 +300,7 @@ def resnet_model_fn(features, labels, mode, model_class,
   else:
     train_op = None
 
-  if not tf.contrib.distribute.has_distribution_strategy():
     accuracy = tf.metrics.accuracy(labels, predictions['classes'])
-  else:
-    # Metrics are currently not compatible with distribution strategies during
-    # training. This does not affect the overall performance of the model.
-    accuracy = (tf.no_op(), tf.constant(0))
 
   metrics = {'accuracy': accuracy}
controls: remove noTarget event. Remove noTarget alert.
@@ -420,16 +420,6 @@ class Controls:
       if self.sm['liveLocationKalman'].excessiveResets:
         self.events.add(EventName.localizerMalfunction)
 
-    # Only allow engagement with brake pressed when stopped behind another stopped car
-    speeds = self.sm['longitudinalPlan'].speeds
-    if len(speeds) > 1:
-      v_future = speeds[-1]
-    else:
-      v_future = 100.0
-    if CS.brakePressed and v_future >= self.CP.vEgoStarting \
-      and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
-      self.events.add(EventName.noTarget)
-
   def data_sample(self):
     """Receive data from sockets and update carState"""
Updated rtol to order function Compute eccentricity for recseries propagation and use it in the rtol-to-order estimate.
@@ -471,8 +471,15 @@ def recseries(k, r, v, tofs, rtol=1e-6):
     v0 = v.to_value(u.m / u.s)
     tofs = tofs.to_value(u.s)

+    # angular momentum vector
+    h = np.cross(r0, v0)
+    # eccentricity vector
+    e = np.cross(v0, h) / k - r0 / np.linalg.norm(r0)
+    # eccentricity magnitude
+    ecc = np.linalg.norm(e)
+
     # rough estimate of order from tolerance
-    order = 2 * int(-np.log10(rtol))
+    order = int(-2 * np.log10(rtol) + 10 * np.tanh(ecc))

     results = np.array([recseries_fast(k, r0, v0, tof, order) for tof in tofs])
     return results[:, 0] << u.m, results[:, 1] << u.m / u.s
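To see how the new mapping behaves, here is a minimal standalone sketch (the helper name is hypothetical, not part of the library's API) comparing the old and new order estimates:

```python
import numpy as np

def estimate_order(rtol, ecc):
    # tighter tolerances and higher eccentricities both demand more series terms;
    # tanh caps the eccentricity contribution at +10
    return int(-2 * np.log10(rtol) + 10 * np.tanh(ecc))

print(estimate_order(1e-6, 0.0))  # 12, matching the old 2 * int(-log10(rtol))
print(estimate_order(1e-6, 0.5))  # 16, extra terms for an eccentric orbit
```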
Pass --concurrent flag to ebtables calls This flag will force ebtables to acquire a lock so we don't have to worry about ebtables errors occurring if something else on the system is trying to use ebtables as well. Closes-Bug:
@@ -191,4 +191,4 @@ NAMESPACE = None

 def ebtables(comm):
     execute = ip_lib.IPWrapper(NAMESPACE).netns.execute
-    return execute(['ebtables'] + comm, run_as_root=True)
+    return execute(['ebtables', '--concurrent'] + comm, run_as_root=True)
fix: Clear class_doctypes cache on doctype rename and delete, for all sites
@@ -396,9 +396,18 @@
             frappe.db.commit()

         # Do not rename and move files and folders for custom doctype
-        if not self.custom and not frappe.flags.in_patch:
+        if not self.custom:
+            if not frappe.flags.in_patch:
                 self.rename_files_and_folders(old, new)

+            for site in frappe.utils.get_sites():
+                frappe.cache().delete(f"{site}:doctype_classes", old)
+
+    def after_delete(self):
+        if not self.custom:
+            for site in frappe.utils.get_sites():
+                frappe.cache().delete(f"{site}:doctype_classes", self.name)
+
     def rename_files_and_folders(self, old, new):
         # move files
         new_path = get_doc_path(self.module, 'doctype', new)
portico: Auto-detect field to focus for registration page. This replaces the manually-curated logic for which field to focus.
@@ -45,14 +45,13 @@ $(function () {
     }

     if ($("#registration").length > 0) {
-        if ($("#id_team_name").length === 1) {
-            common.autofocus('#id_team_name');
-        } else if ($('#id_email').length === 1 && !$('#id_email').attr('disabled')) {
-            common.autofocus('#id_email');
-        } else if ($("#source_realm_select").length === 1) {
-            common.autofocus('#source_realm_select');
-        } else {
-            common.autofocus('#id_full_name');
+        // Check if there is no input field with errors.
+        if ($('.help-inline:not(:empty)').length === 0) {
+            // Find the first input field present in the form that is
+            // not hidden and disabled and store it in a variable.
+            var firstInputElement = $("input:not(:hidden, :disabled):first");
+            // Focus on the first input field in the form.
+            common.autofocus(firstInputElement);
         }

         // reset error message displays
Set n_estimators of GradientBoostingClassifier explicitly so it does not change with sklearn 0.22
@@ -154,7 +154,7 @@ class TestScikitlearnGradientBoostingClassifier(unittest.TestCase):
     def setUpClass(cls):
         np.random.seed(seed=1234)

-        cls.sklearn_model = GradientBoostingClassifier(n_estimators=10)
+        cls.sklearn_model = GradientBoostingClassifier(n_estimators=100)
         cls.classifier = ScikitlearnGradientBoostingClassifier(model=cls.sklearn_model)
         cls.classifier.fit(x=x_train, y=y_train)
Should this example be corrected? Assuming that matrix is represented like in numpy, the example looks confusing. (Unless I am overlooking something.)
@@ -291,15 +291,15 @@ instances
 ```python
 import random
 def rand2d(rows, cols):
-    return [[random.choice([+1, -1]) for _ in range(rows)] for _ in range(cols)]
+    return [[random.choice([+1, -1]) for _ in range(cols)] for _ in range(rows)]

 def random_instance(length):
     # transverse field terms
     h = rand2d(length, length)
     # links within a row
-    jr = rand2d(length, length - 1)
+    jr = rand2d(length - 1, length)
     # links within a column
-    jc = rand2d(length - 1, length)
+    jc = rand2d(length, length - 1)
     return (h, jr, jc)

 h, jr, jc = random_instance(3)
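As a quick sanity check of the fix, a standalone run of the corrected rand2d (reproduced here for illustration) shows it now follows the numpy row-major convention, where len gives rows and the length of a row gives columns:

```python
import random

def rand2d(rows, cols):
    # corrected version: outer list iterates rows, inner list iterates columns
    return [[random.choice([+1, -1]) for _ in range(cols)] for _ in range(rows)]

m = rand2d(2, 3)
print(len(m), len(m[0]))  # 2 3 -- two rows of three columns, like np.zeros((2, 3))
```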
Use RABBIT_USER for the rabbit user in the documentation. Closes-Bug:
@@ -28,7 +28,7 @@ the ``auth_strategy`` field:
     verbose = True
     log_dir = /var/log/cloudkitty
     # oslo_messaging_rabbit is deprecated
-    transport_url = rabbit://openstack:RABBIT_PASSWORD@RABBIT_HOST
+    transport_url = rabbit://RABBIT_USER:RABBIT_PASSWORD@RABBIT_HOST
     auth_strategy = noauth

@@ -46,7 +46,7 @@ For keystone (identity) API v2 (deprecated)
     verbose = True
     log_dir = /var/log/cloudkitty
     # oslo_messaging_rabbit is deprecated
-    transport_url = rabbit://openstack:RABBIT_PASSWORD@RABBIT_HOST/
+    transport_url = rabbit://RABBIT_USER:RABBIT_PASSWORD@RABBIT_HOST/
     auth_strategy = keystone

 [auth]
@@ -107,7 +107,7 @@ The following shows the basic configuration items:
     verbose = True
     log_dir = /var/log/cloudkitty
     # oslo_messaging_rabbit is deprecated
-    transport_url = rabbit://openstack:RABBIT_PASSWORD@RABBIT_HOST/
+    transport_url = rabbit://RABBIT_USER:RABBIT_PASSWORD@RABBIT_HOST/
     auth_strategy = keystone

 [ks_auth]
Fixes authentication error when using Oracle This commit applies for the Oracle dialect the same query strategy already implemented for MSSQL in the security manager when checking for authentication/permissions, since a query like "SELECT EXISTS(1) FROM DUAL" in Oracle databases would raise a missing expression error (observed from version 18c on)
@@ -311,8 +311,8 @@ class SecurityManager(BaseSecurityManager):
             )
             .exists()
         )
-        # Special case for MSSQL (works on PG and MySQL > 8)
-        if self.appbuilder.get_session.bind.dialect.name == "mssql":
+        # Special case for MSSQL/Oracle (works on PG and MySQL > 8)
+        if self.appbuilder.get_session.bind.dialect.name in ("mssql", "oracle"):
             return self.appbuilder.get_session.query(literal(True)).filter(q).scalar()
         return self.appbuilder.get_session.query(q).scalar()
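A runnable toy of the two query shapes, using an in-memory SQLite database (which happens to accept both); the table and column names are illustrative only. On Oracle the second form would render as a bare SELECT EXISTS(...) and fail with a missing expression error, which is why the dialect check routes it to the first form:

```python
from sqlalchemy import Column, Integer, create_engine, literal
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "ab_user"  # illustrative table, not Flask-AppBuilder's schema
    id = Column(Integer, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = Session(engine)

q = session.query(User).filter(User.id == 1).exists()
# MSSQL/Oracle-safe shape: SELECT 1 WHERE EXISTS (...) -- concrete projection
print(session.query(literal(True)).filter(q).scalar())  # None (no matching row)
# PG/MySQL shape: SELECT EXISTS (...) -- invalid on Oracle
print(session.query(q).scalar())  # False
```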
Correct output format of kron If nnz of the input matrices is zero, the output of kron defaulted to the "coo" format even if another format was explicitly given (see issue
@@ -330,7 +330,7 @@ def kron(A, B, format=None):

     if A.nnz == 0 or B.nnz == 0:
         # kronecker product is the zero matrix
-        return coo_matrix(output_shape)
+        return coo_matrix(output_shape).asformat(format)

     # expand entries of a into blocks
     row = A.row.repeat(B.nnz)
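The fix can be checked directly against scipy; asformat is a no-op when format is None, so the default behaviour is unchanged:

```python
from scipy.sparse import coo_matrix

z = coo_matrix((2, 2))           # an all-zero sparse matrix (nnz == 0)
print(z.asformat("csr").format)  # csr -- requested format now honoured
print(z.asformat(None).format)   # coo -- None leaves the format as-is
```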
Issue proposal for dealing with repeats If allow_repeats = False after a period where it was True, then use the highest status associated with that worker as the exclusion criterion.
@@ -186,6 +186,8 @@ def check_worker_status():
     else:
         worker_id = request.args['workerId']
         assignment_id = request.args['assignmentId']
+        allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats')
+        if allow_repeats: # if you allow repeats focus on current worker/assignment combo
         try:
             part = Participant.query.\
                 filter(Participant.workerid == worker_id).\
@@ -193,6 +195,17 @@ def check_worker_status():
             status = part.status
         except exc.SQLAlchemyError:
             status = NOT_ACCEPTED
+        else: # if you disallow repeats search for highest status of anything by this worker
+            try:
+                matches = Participant.query.\
+                    filter(Participant.workerid == worker_id).all()
+                numrecs = len(matches)
+                if numrecs==0: # this should be caught by exception, but just to be safe
+                    status = NOT_ACCEPTED
+                else:
+                    status = max([record.status for record in matches])
+            except exc.SQLAlchemyError:
+                status = NOT_ACCEPTED
     resp = {"status" : status}
     return jsonify(**resp)
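The exclusion rule in this proposal relies on status codes being ordered integers, so max() over a worker's records picks the furthest-progressed attempt. A toy illustration with made-up codes (psiTurk's actual constants may differ):

```python
NOT_ACCEPTED, ALLOCATED, STARTED, COMPLETED = 0, 1, 2, 4  # hypothetical values

records = [ALLOCATED, COMPLETED, STARTED]  # three prior attempts by one worker
status = max(records) if records else NOT_ACCEPTED
print(status)  # 4 -- treated as having completed, so the worker is excluded
```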
Correctly encode user-supplied name in mail From header This broke initially because someone put their address (containing commas) as their name.
+from email.utils import formataddr
+
 from django.conf import settings
 from django.core.mail import get_connection, EmailMultiAlternatives
 from django.template.loader import get_template
@@ -19,7 +21,7 @@ def send_feedback_mail(user_name, user_email_addr, subject, message, url):
     email = EmailMultiAlternatives(
         subject='OpenPrescribing Feedback: {}'.format(subject),
         body=body,
-        from_email='{} <{}>'.format(user_name, settings.SUPPORT_FROM_EMAIL),
+        from_email=formataddr((user_name, settings.SUPPORT_FROM_EMAIL)),
         to=[settings.SUPPORT_TO_EMAIL],
         reply_to=[user_email_addr],
         headers={'X-Mailgun-Track': 'no'},
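The failure mode is easy to reproduce with the standard library alone: a display name containing commas must be quoted in the From header, which formataddr does and naive interpolation does not (addresses here are made up):

```python
from email.utils import formataddr, getaddresses

name, addr = "1 High St, Anytown", "feedback@example.com"
naive = '{} <{}>'.format(name, addr)
print(naive)                     # 1 High St, Anytown <feedback@example.com>
print(getaddresses([naive]))     # parsed as two bogus addresses
print(formataddr((name, addr)))  # "1 High St, Anytown" <feedback@example.com>
```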
Update config.yml update content versions
@@ -5,7 +5,7 @@ jobs:
       - image: devdemisto/content-build:3.0.0.3368 # disable-secrets-detection
         resource_class: medium+
         environment:
-          CONTENT_VERSION: "19.11.0"
+          CONTENT_VERSION: "19.11.1"
           SERVER_VERSION: "5.0.0"
           GIT_SHA1: "93fab15da3ae0b427e63510836018f8efd6b7b5e" # guardrails-disable-line disable-secrets-detection
     steps:
only load unloadable sensors on sensor page ### How I Tested These Changes load the sensor page with unloadable schedules - see they are no longer fetched and filtered client side
@@ -10,7 +10,6 @@ import {UnloadableSensors} from '../instigation/Unloadable';
 import {SENSOR_FRAGMENT} from '../sensors/SensorFragment';
 import {SensorInfo} from '../sensors/SensorInfo';
 import {SensorsTable} from '../sensors/SensorsTable';
-import {InstigationType} from '../types/globalTypes';
 import {Loading} from '../ui/Loading';
 import {REPOSITORY_INFO_FRAGMENT} from '../workspace/RepositoryInformation';
 import {buildRepoPath, buildRepoAddress} from '../workspace/buildRepoAddress';
@@ -85,11 +84,8 @@ const AllSensors: React.FC<{data: InstanceSensorsQuery}> = ({data}) => {
     </>
   ) : null;

-  const unloadableSensors = unloadable.filter(
-    (state) => state.instigationType === InstigationType.SENSOR,
-  );
-  const unloadableSensorsSection = unloadableSensors.length ? (
-    <UnloadableSensors sensorStates={unloadableSensors} />
+  const unloadableSensorsSection = unloadable.length ? (
+    <UnloadableSensors sensorStates={unloadable} />
   ) : null;

   if (!sensorDefinitionsSection && !unloadableSensorsSection) {
@@ -144,7 +140,7 @@ const INSTANCE_SENSORS_QUERY = gql`
       }
       ...PythonErrorFragment
     }
-    unloadableInstigationStatesOrError {
+    unloadableInstigationStatesOrError(instigationType: SENSOR) {
       ... on InstigationStates {
         results {
           id
Add test to make sure on_node_updated task is called properly Test that it is called on title change with the correct kwargs; test that it is only called once when both the title and contributors are changed
@@ -3383,18 +3383,35 @@ class TestOnNodeUpdate:
     def teardown_method(self, method):
         handlers.celery_before_request()

-    @mock.patch('osf.models.node.enqueue_task')
-    def test_enqueue_called(self, enqueue_task, node, user, request_context):
+    def test_on_node_updated_called(self, node, user, request_context):
         node.title = 'A new title'
         node.save()

-        (task, ) = enqueue_task.call_args[0]
+        task = handlers.get_task_from_queue('website.project.tasks.on_node_updated', predicate=lambda task: task.kwargs['node_id'] == node._id)

         assert task.task == 'website.project.tasks.on_node_updated'
-        assert task.args[0] == node._id
-        assert task.args[1] == user._id
-        assert task.args[2] is False
-        assert 'title' in task.args[3]
+        assert task.kwargs['node_id'] == node._id
+        assert task.kwargs['user_id'] == user._id
+        assert task.kwargs['first_save'] is False
+        assert 'title' in task.kwargs['saved_fields']
+
+    @mock.patch('osf.models.identifiers.IdentifierMixin.request_identifier_update')
+    def test_queueing_on_node_updated(self, mock_request_update, node, user):
+        node.set_identifier_value(category='doi', value=settings.DOI_FORMAT.format(prefix=settings.DATACITE_PREFIX, guid=node._id))
+        node.title = 'Something New'
+        node.save()
+
+        # make sure on_node_updated is in the queue
+        assert handlers.get_task_from_queue('website.project.tasks.on_node_updated', predicate=lambda task: task.kwargs['node_id'] == node._id)
+
+        # adding a contributor to the node will also trigger on_node_updated
+        new_person = UserFactory()
+        node.add_contributor(new_person)
+
+        # Make sure there's just one on_node_updated task, and that it has contributors in the kwargs
+        task = handlers.get_task_from_queue('website.project.tasks.on_node_updated', predicate=lambda task: task.kwargs['node_id'] == node._id)
+        assert 'contributors' in task.kwargs['saved_fields']
+        mock_request_update.assert_called_once()

     @mock.patch('website.project.tasks.settings.SHARE_URL', 'https://share.osf.io')
     @mock.patch('website.project.tasks.settings.SHARE_API_TOKEN', 'Token')
Update installation_and_setup.rst Don't know if (solph) is correct. Only tried with cbc
@@ -231,8 +231,9 @@ You can choose from the list of examples

     * storage_investment (solph)
     * simple_dispatch (solph)
     * csv_reader_investment (solph)
-    * flexible_modelling (solph)
     * csv_reader_dispatch (solph)
+    * add_constraints (solph)
+    * variable_chp (solph)

 Test the installation and the installed solver:
Fixed attribute decoding for elements with simpleType - Validate xml and xsi admitted attributes (TODO: tests for this)
@@ -346,8 +346,19 @@ class XsdElement(Sequence, XsdAnnotated, ValidatorMixin, ParticleMixin, XPathMix
                     yield element_decode_hook(ElementData(elem.tag, *result), self)
                     del result
         else:
-            if elem.attrib:
-                yield self._validation_error("a simpleType element can't has attributes.", validation, elem)
+            # simpleType
+            if not elem.attrib:
+                attributes = None
+            else:
+                # Decode with an empty XsdAttributeGroup validator (only XML and XSD default attrs)
+                for result in self.attributes.iter_decode(elem.attrib, validation, **kwargs):
+                    if isinstance(result, XMLSchemaValidationError):
+                        yield self._validation_error(result, validation, elem)
+                    else:
+                        attributes = result
+                        break
+                else:
+                    attributes = None

             if len(elem):
                 yield self._validation_error("a simpleType element can't has child elements.", validation, elem)
@@ -360,7 +371,7 @@ class XsdElement(Sequence, XsdAnnotated, ValidatorMixin, ParticleMixin, XPathMix
                 if isinstance(result, XMLSchemaValidationError):
                     yield self._validation_error(result, validation, elem)
                 else:
-                    yield element_decode_hook(ElementData(elem.tag, result, None, None), self)
+                    yield element_decode_hook(ElementData(elem.tag, result, None, attributes), self)
                     del result

         if validation != 'skip':
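The attribute-decoding loop above leans on Python's for/else: the else branch runs only when the loop completes without break, i.e. when iter_decode yields no usable result. In isolation, with a toy generator standing in for iter_decode:

```python
def iter_results(values):
    # stand-in for iter_decode: yields errors and, possibly, one decoded result
    for v in values:
        yield v

for result in iter_results(["error", "error"]):  # no usable result here
    if result == "error":
        continue  # in the real code, the error is reported and iteration continues
    attributes = result
    break
else:
    attributes = None  # loop exhausted without break

print(attributes)  # None
```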
updates installation link closes
@@ -45,7 +45,7 @@ IceVision is the first agnostic computer vision framework to offer a curated col
 pip install icevision[all]
 ```

-For more installation options, check our [docs](https://airctic.github.io/icevision/install/).
+For more installation options, check our [docs](https://airctic.com/0.7.0/install/).

 **Important:** We currently only support Linux/MacOS.
 <!-- Not included in docs - end -->
Update environmental_inhalers.json I have added codes for generic respimat
" WHERE form_route IN ('pressurizedinhalation.inhalation', 'powderinhalation.inhalation')", " AND bnf_code LIKE '03%' ", " AND bnf_code NOT LIKE '0301011R0%' ", + " AND bnf_name NOT LIKE '%Respimat%' ", + " AND bnf_code NOT LIKE '0301040X0AA%' ", + " AND bnf_code NOT LIKE '0301011Z0AA%' ", + " AND bnf_code NOT LIKE '0301020Q0AAACAC' ", + " AND bnf_code NOT LIKE '0301020Q0AAAEAE' ", ")" ], "date_reviewed": [ - "2019-10-09" + "2019-11-14" ], "next_review": [ "2020-10-09"
tests: handle `-` in the sfdisk version test When a `-` is in the version (meaning a version such as: `2.38-rc1`), take only the part before the dash. This closes
@@ -33,6 +33,10 @@ def have_sfdisk_with_json():

     data = r.stdout.strip()
     vstr = data.split(" ")[-1]
+
+    if "-" in vstr:
+        vstr = vstr.split("-")[0]
+
     ver = list(map(int, vstr.split(".")))

     return ver[0] >= 2 and ver[1] >= 27
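A standalone trace of the fixed parsing with a pre-release version string (the output line is made up for illustration):

```python
data = "sfdisk from util-linux 2.38-rc1"  # hypothetical sfdisk --version output
vstr = data.split(" ")[-1]                # "2.38-rc1"
if "-" in vstr:
    vstr = vstr.split("-")[0]             # "2.38" -- int("38-rc1") would raise
ver = list(map(int, vstr.split(".")))     # [2, 38]
print(ver[0] >= 2 and ver[1] >= 27)       # True
```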
Use domain with tracker store factory Make sure domain is passed to tracker store factory for proper validation (closes )
@@ -168,7 +168,9 @@ def _run_markers(

     telemetry.track_markers_parsed_count(num_markers, max_depth, branching_factor)

-    tracker_loader = _create_tracker_loader(endpoint_config, strategy, count, seed)
+    tracker_loader = _create_tracker_loader(
+        endpoint_config, strategy, domain, count, seed
+    )

     def _append_suffix(path: Optional[Path], suffix: Text) -> Optional[Path]:
         return path.parent / (path.name + suffix) if path else None
@@ -185,7 +187,11 @@ def _run_markers(


 def _create_tracker_loader(
-    endpoint_config: Text, strategy: Text, count: Optional[int], seed: Optional[int]
+    endpoint_config: Text,
+    strategy: Text,
+    domain: Domain,
+    count: Optional[int],
+    seed: Optional[int],
 ) -> MarkerTrackerLoader:
     """Create a tracker loader against the configured tracker store.

@@ -193,6 +199,7 @@ def _create_tracker_loader(
         endpoint_config: Path to the endpoint configuration defining the tracker
             store to use.
         strategy: Strategy to use when selecting trackers to extract from.
+        domain: The domain to use when connecting to the tracker store.
         count: (Optional) Number of trackers to extract from (for any strategy
             except 'all').
         seed: (Optional) The seed to initialise the random number generator for
@@ -203,5 +210,5 @@ def _create_tracker_loader(
         the configured tracker store.
     """
     endpoints = AvailableEndpoints.read_endpoints(endpoint_config)
-    tracker_store = TrackerStore.create(endpoints.tracker_store)
+    tracker_store = TrackerStore.create(endpoints.tracker_store, domain=domain)
     return MarkerTrackerLoader(tracker_store, strategy, count, seed,)
Uncast data before writing out Copied from Reverts the str() cast introduced in
@@ -22,8 +22,9 @@ import re
 import sys
 from io import open
 import logging
+from copy import deepcopy

-from .casting import cast_data
+from .casting import cast_data, uncast_data

 __all__ = [
     "load", "loads", "dump", "dumps",  # TODO Add GlyphsEncoder / GlyphsDecoder ala json module
@@ -218,7 +219,8 @@ class Writer(object):
                 out.write(' ' * self.curindent)
             out.write(')')

-    _escape_re = re.compile('([^\u0020-\u007e])|"')
+    # escape DEL and controls except for TAB
+    _escape_re = re.compile('([^\u0020-\u007e\u0009])|"')

     @staticmethod
     def _escape_fn(m):
@@ -230,7 +232,7 @@ class Writer(object):
             return r'\"'

     def _write_atom(self, data):
-        data = Writer._escape_re.sub(self._escape_fn, str(data))
+        data = Writer._escape_re.sub(self._escape_fn, data)
         out = self.out
         if Writer._sym_re.match(data):
             out.write(data)
@@ -261,6 +263,10 @@ def loads(s):

 def dump(obj, fp, **kwargs):
     """Write object tree to a .glyphs file. 'fp' should be a (writable) file object.
     """
+    logger.info('Making copy of values')
+    obj = deepcopy(obj)
+    logger.info('Uncasting values')
+    uncast_data(obj)
     w = Writer(out=fp, **kwargs)
     logger.info('Writing .glyphs file')
     w.write(obj)
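The deepcopy-before-mutate step matters because uncasting is destructive; copying first keeps the caller's object tree intact. In isolation, with a toy stand-in for uncast_data:

```python
from copy import deepcopy

def uncast(node):
    node["value"] = str(node["value"])  # destructive, like uncast_data

obj = {"value": 42}
copy_ = deepcopy(obj)
uncast(copy_)
print(repr(obj["value"]), repr(copy_["value"]))  # 42 '42' -- original untouched
```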
Update malicious_js.txt Added description for specific trails.
@@ -10,6 +10,9 @@ ejyoklygase.tk
 examhome.net
 mp3menu.org
 uustoughtonma.org
+
+# Generic detection for compromised Bitrix CMS
+
 /lib/crypta.js
 /bitrix/js/main/core/core_loader.js
 /bitrix/js/main/core/core_tasker.js
Fix workspace property on WorkspaceProcessContext Summary: This property was returning the wrong value. It is not used anywhere, so this diff just removes it. Test Plan: unit Reviewers: dgibson
@@ -197,10 +197,6 @@ def create_request_context(self):
     @property
     def instance(self):
         return self._instance

-    @property
-    def workspace(self):
-        return self._instance
-
     @property
     def repository_locations(self):
         return list(self._repository_locations.values())
Add timing outputs for decoding phase to know how much time is spent in decoder fetching and post processing.
@@ -1045,6 +1045,7 @@ class Decoder(base_runner.BaseRunner):
       start_time = time.time()
      while num_examples_metric.total_value < samples_per_summary:
        tf.logging.info('Fetching dec_output.')
+        fetch_start = time.time()
        run_options = config_pb2.RunOptions(
            report_tensor_allocations_upon_oom=False)
        if self._summary_op is None:
@@ -1054,12 +1055,17 @@ class Decoder(base_runner.BaseRunner):
          dec_out, summary = sess.run([self._dec_output, self._summary_op],
                                      options=run_options)
          self._summary_writer.add_summary(summary, global_step)
-        tf.logging.info('Done fetching.')
+        post_process_start = time.time()
+        tf.logging.info(
+            'Done fetching (%f seconds)' % (post_process_start - fetch_start))
        decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)
        if decode_out:
          buffered_decode_out.extend(decode_out)
-        tf.logging.info('Total examples done: %d/%d',
-                        num_examples_metric.total_value, samples_per_summary)
+        tf.logging.info(
+            'Total examples done: %d/%d '
+            '(%f seconds decode postprocess)', num_examples_metric.total_value,
+            samples_per_summary,
+            time.time() - post_process_start)

      summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
      elapsed_secs = time.time() - start_time
Incorporated dwmcqueen fix for DVD calls This may not be useful, but incorporating anyway. I think modifications to the UI OMDb search would be good.
@@ -243,6 +243,9 @@ def get_video_details(job):
     logging.debug("Trying title: " + title)
     response = callwebservice(job, omdb_api_key, title, year)
     logging.debug("response: " + response)
+    if response == "fail":
+        logging.debug("Removing year...")
+        response = callwebservice(job, omdb_api_key, title, "")


 def callwebservice(job, omdb_api_key, dvd_title, year=""):
Ignore empty material slot for displacement The displacement shader slot parser attempted to parse an empty material slot, causing an exception. Changes made: added the forgotten check for an empty material slot; added explicit displacement removal for correct viewport update.
@@ -165,9 +165,11 @@ def assign_materials(rpr_context: RPRContext, rpr_shape: pyrpr.Shape,
         rpr_shape.set_material(None)

     # sync displacement for single material shape only
-    if len(material_slots) == 1:
+    if len(material_slots) == 1 and material_slots[0].material:
         rpr_displacement = material.sync(rpr_context, material_slots[0].material, 'Displacement')
         rpr_shape.set_displacement_material(rpr_displacement)
+    else:
+        rpr_shape.set_displacement_material(None)

     return True