Dataset columns: message (string, length 13 to 484) and diff (string, length 38 to 4.63k).
Tweak SConscript() docstrings a little more. Also handle_missing_SConscript(), an internal interface added by this patch series.
@@ -156,8 +156,16 @@ stack_bottom = '% Stack boTTom %' # hard to define a variable w/this name :) def handle_missing_SConscript(f, must_exist=None): """Take appropriate action on missing file in SConscript() call. - The action may be to raise an exception, or print a warning. - On first warning, also print a deprecation warning. + Print a warning or raise an exception on missing file. + On first warning, print a deprecation message. + + Args: + f (str): path of missing configuration file + must_exist (bool): raise exception if file does not exist + + Raises: + UserError if 'must_exist' is True or if global + SCons.Script._no_missing_sconscript is True. """ if must_exist or (SCons.Script._no_missing_sconscript and must_exist is not False): @@ -550,22 +558,22 @@ class SConsEnvironment(SCons.Environment.Base): Keyword arguments: dirs (list): execute SConscript in each listed directory. - name (str): execute script 'name' (used with 'dirs'). - exports (list or dict): locally export variables the script(s) - can import. - variant_dir (str): mirror sources needed for build to variant_dir - to allow building there. - duplicate (bool): pysically duplicate sources instead of just + name (str): execute script 'name' (used only with 'dirs'). + exports (list or dict): locally export variables the + called script(s) can import. + variant_dir (str): mirror sources needed for the build in + a variant directory to allow building in it. + duplicate (bool): physically duplicate sources instead of just adjusting paths of derived files (used only with 'variant_dir') (default is True). must_exist (bool): fail if a requested script is missing (default is False, default is deprecated). Returns: - variables returned by the called script + list of variables returned by the called script Raises: - UserError if a script is not found and such exceptions are enabled. + UserError: a script is not found and such exceptions are enabled. """ if 'build_dir' in kw:
Putting proto numbers back as they were [skip CI]
@@ -40,7 +40,7 @@ service Requests { } message CreateHostRequestReq { - int64 host_id =5; + int64 host_id =1; // dates as "yyyy-mm-dd", in the timezone of the host string from_date = 2; string to_date = 3; @@ -49,8 +49,8 @@ message CreateHostRequestReq { message HostRequest { int64 host_request_id = 1; - int64 surfer_id = 10; - int64 host_id = 11; + int64 surfer_id = 2; + int64 host_id = 3; org.couchers.api.conversations.HostRequestStatus status = 4; google.protobuf.Timestamp created = 5;
Remove raw data with invalid committee_id values. Fixes more of
@@ -103,6 +103,14 @@ c.execute("DELETE FROM raw_table WHERE LENGTH(date_recieved) < 10") # set empty, non-zero, strings in date columns to null c.execute("UPDATE raw_table SET report_period_begin = NULL WHERE LENGTH(report_period_begin) < 10") c.execute("UPDATE raw_table SET report_period_end = NULL WHERE LENGTH(report_period_end) < 10") + +#committee ID is requred. Remove the 2 rows that don't have it. +c.execute("DELETE FROM raw_table WHERE committee_id=''"); + +# There's a record with a date stuck in the committee_id column, which causes +# problems when inserting into the contributions table below. Get rid of it this +# way. +c.execute("DELETE FROM raw_table WHERE LENGTH( committee_id ) > 9") conn.commit()
Move pip fix to top of test-make-requirements.sh. Previously I had put it below `make requirements`, which defeats the purpose.
#!/usr/bin/env bash set -e -make requirements -git --no-pager diff -git update-index -q --refresh # 19.3.1 causes locally reproduceable test failure # todo: remove this line once that's no longer a problem pip install 'pip<19.3.0' + +make requirements +git --no-pager diff +git update-index -q --refresh if git diff-index --quiet HEAD --; then # No changes echo "requirements ok"
Update Aparecida de Goiania spider. Update spider code to follow the latest best practices: set a default `start_date` value, avoid the usage of `dateparser` when built-in `datetime` functions accomplish the same, and remove an unneeded log.
-import json - -from dateparser import parse +import datetime from gazette.items import Gazette from gazette.spiders.base import BaseGazetteSpider @@ -11,26 +9,22 @@ class GoAparecidaDeGoianiaSpider(BaseGazetteSpider): name = "go_aparecida_de_goiania" allowed_domains = ["aparecida.go.gov.br"] start_urls = ["https://webio.aparecida.go.gov.br/api/diof/lista"] + start_date = datetime.date(2014, 8, 1) def parse(self, response): download_url = "https://webio.aparecida.go.gov.br/diariooficial/download/{}" - records = json.loads(response.text)["records"] + records = response.json()["records"] for record in records: - url = download_url.format(record["numero"]) - power = "executive_legislative" - date = parse(record["publicado"], languages=["en"]).date() + gazette_url = download_url.format(record["numero"]) + gazette_date = datetime.datetime.strptime( + record["publicado"], "%Y-%m-%d %H:%M:%S" + ).date() - self.logger.info( - "Start Date: %s End Date: %s Date: %s", - self.start_date.strftime("%d/%m/%Y"), - self.end_date.strftime("%d/%m/%Y"), - date.strftime("%d/%m/%Y"), - ) - if date >= self.start_date and date <= self.end_date: + if gazette_date >= self.start_date and gazette_date <= self.end_date: yield Gazette( - date=date, - file_urls=[url], + date=gazette_date, + file_urls=[gazette_url], is_extra_edition=False, - power=power, + power="executive", )
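For context, a minimal sketch (not the spider code itself) of the datetime-only parsing the commit describes; the sample record value is hypothetical:

# Minimal sketch: parse a fixed-format timestamp with the standard library
# instead of dateparser, then filter by a date window.
import datetime

record = {"publicado": "2021-03-15 00:00:00"}  # hypothetical record

gazette_date = datetime.datetime.strptime(
    record["publicado"], "%Y-%m-%d %H:%M:%S"
).date()

start_date = datetime.date(2014, 8, 1)
end_date = datetime.date.today()
if start_date <= gazette_date <= end_date:
    print(gazette_date)  # 2021-03-15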
allow compile_go to execute on Windows. Does some contortions to allow executing go build while also not blocking the caldera server's event loop.
-import asyncio.subprocess +import asyncio import base64 import copy import os +import subprocess from aiohttp import web from cryptography.fernet import Fernet @@ -159,7 +160,7 @@ class FileSvc(BaseService): return buf async def compile_go(self, platform, output, src_fle, arch='amd64', ldflags='-s -w', cflags='', buildmode='', - build_dir='.'): + build_dir='.', loop=None): """ Dynamically compile a go file @@ -189,15 +190,11 @@ class FileSvc(BaseService): args.extend(['-o', output, src_fle]) + loop = loop if loop else asyncio.get_event_loop() try: - process = await asyncio.subprocess.create_subprocess_exec(*args, cwd=build_dir, env=env, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE) - command_output = await process.communicate() - if process.returncode != 0: - self.log.warning('Problem building golang executable {}: {}'.format(src_fle, command_output)) - except NotImplementedError: - self.log.warning("You are running this on Windows. Compiling GO currently doesn't work on Windows.") + await loop.run_in_executor(None, lambda: subprocess.check_output(args, cwd=build_dir, env=env)) + except subprocess.CalledProcessError as e: + self.log.warning('Problem building golang executable {}: {} '.format(src_fle, e)) def get_payload_name_from_uuid(self, payload): for t in ['standard_payloads', 'special_payloads']:
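A minimal, self-contained sketch of the pattern this change relies on, offloading a blocking subprocess call to an executor so the asyncio event loop keeps serving other tasks; the function name and the go command shown are illustrative assumptions, not the caldera API:

# Run a blocking subprocess call in the default thread pool so the
# event loop is not blocked while the build runs.
import asyncio
import subprocess

async def compile_in_background(args, cwd=None, env=None, loop=None):
    loop = loop if loop else asyncio.get_event_loop()
    try:
        return await loop.run_in_executor(
            None, lambda: subprocess.check_output(args, cwd=cwd, env=env)
        )
    except subprocess.CalledProcessError as e:
        print('build failed: {}'.format(e))

# usage (hypothetical command):
# asyncio.get_event_loop().run_until_complete(
#     compile_in_background(['go', 'build', '-o', 'out.exe', 'main.go']))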
Add 'indent' option to CLI commands xml2json/json2xml - Option 'converter' is now case-insensitive
@@ -24,12 +24,12 @@ from xmlschema.etree import etree_tostring PROGRAM_NAME = os.path.basename(sys.argv[0]) CONVERTERS_MAP = { - 'Unordered': xmlschema.UnorderedConverter, - 'Parker': xmlschema.ParkerConverter, - 'BadgerFish': xmlschema.BadgerFishConverter, - 'Abdera': xmlschema.AbderaConverter, - 'JsonML': xmlschema.JsonMLConverter, - 'Columnar': xmlschema.ColumnarConverter, + 'unordered': xmlschema.UnorderedConverter, + 'parker': xmlschema.ParkerConverter, + 'badgerfish': xmlschema.BadgerFishConverter, + 'abdera': xmlschema.AbderaConverter, + 'jsonml': xmlschema.JsonMLConverter, + 'columnar': xmlschema.ColumnarConverter, } @@ -57,11 +57,11 @@ def get_loglevel(verbosity): def get_converter(name): - if name is None: - return + if not isinstance(name, str): + return None try: - return CONVERTERS_MAP[name] + return CONVERTERS_MAP[name.lower()] except KeyError: raise ValueError("--converter must be in {!r}".format(tuple(CONVERTERS_MAP))) @@ -84,6 +84,9 @@ def xml2json(): help="use a different XML to JSON convention instead of " "the default converter. Option value can be one of " "{!r}.".format(tuple(CONVERTERS_MAP))) + parser.add_argument('--indent', type=int, default=None, + help="indentation for a pretty-printed JSON output " + "(default is the most compact representation)") parser.add_argument('--lazy', action='store_true', default=False, help="use lazy decoding mode (slower but use less memory).") parser.add_argument('--defuse', metavar='(always, remote, never)', @@ -106,6 +109,10 @@ def xml2json(): else: schema = None + json_options = {} + if args.indent is not None and args.indent >= 0: + json_options['indent'] = args.indent + base_path = pathlib.Path(args.output) if not base_path.exists(): base_path.mkdir() @@ -130,6 +137,7 @@ def xml2json(): lazy=args.lazy, defuse=args.defuse, validation='lax', + json_options=json_options, ) except (xmlschema.XMLSchemaException, URLError) as err: tot_errors += 1 @@ -165,6 +173,8 @@ def json2xml(): help="use a different XML to JSON convention instead of " "the default converter. Option value can be one of " "{!r}.".format(tuple(CONVERTERS_MAP))) + parser.add_argument('--indent', type=int, default=4, + help="indentation for XML output (default is 4 spaces)") parser.add_argument('-o', '--output', type=str, default='.', help="where to write the encoded XML files, current dir by default.") parser.add_argument('-f', '--force', action="store_true", default=False, @@ -199,6 +209,7 @@ def json2xml(): schema=schema, converter=converter, validation='lax', + indent=args.indent, ) except (xmlschema.XMLSchemaException, URLError) as err: tot_errors += 1
Fix local `clusterfuzz` server error. Fix by upgrading `openjdk-8-jdk` to `openjdk-11-jdk`.
@@ -111,7 +111,7 @@ sudo apt-get update sudo apt-get install -y \ docker-ce \ google-cloud-sdk \ - openjdk-8-jdk \ + openjdk-11-jdk \ liblzma-dev # Install patchelf - latest version not available on some older distros so we
Update of readme_template.md. Please note that this patch explicitly sets which `pip` version to use according to the user's Python version.
@@ -43,13 +43,15 @@ To run unit tests, in the top level directory, just run: python testUpdateHostsFile.py -**Note** if you are using Python 2, please install the dependencies with: +**Note** if you are using Python 3, please install the dependencies with: - pip install -r requirements_python2.txt + pip3 install --user -r requirements.txt -**Note** if you are using Python 3, please install the dependencies with: +**Note** if you are using Python 2, please install the dependencies with: + + pip2 install --user -r requirements_python2.txt - pip install -r requirements.txt +**Note** we recommend the `--user` flag which installs the required dependencies at the user level. More information about it can be found on pip [documentation](https://pip.pypa.io/en/stable/reference/pip_install/?highlight=--user#cmdoption-user). The `updateHostsFile.py` script, which is Python 2.7 and Python 3-compatible, will generate a unified hosts file based on the sources in the local `data/`
Add logging filter for AmpConnectionRetry exception. We do not need to log exception info about AmpConnectionRetry, as this is an expected exception which enables the retry process for the AmphoraComputeConnectivityWait task. Story:
@@ -21,6 +21,7 @@ from sqlalchemy.orm import exc as db_exceptions from taskflow.listeners import logging as tf_logging import tenacity +from octavia.amphorae.driver_exceptions import exceptions from octavia.api.drivers import utils as provider_utils from octavia.common import base_taskflow from octavia.common import constants @@ -44,6 +45,17 @@ RETRY_BACKOFF = 1 RETRY_MAX = 5 +# We do not need to log retry exception information. Warning "Could not connect +# to instance" will be logged as usual. +def retryMaskFilter(record): + if record.exc_info is not None and isinstance( + record.exc_info[1], exceptions.AmpConnectionRetry): + return False + return True + +LOG.logger.addFilter(retryMaskFilter) + + def _is_provisioning_status_pending_update(lb_obj): return not lb_obj.provisioning_status == constants.PENDING_UPDATE
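As a standalone illustration of the technique (not the octavia code), a logging filter can drop records whose exc_info carries an expected exception type; ExpectedRetryError here is a hypothetical stand-in for AmpConnectionRetry:

# A filter attached to a logger drops records before they reach any handler.
import logging

class ExpectedRetryError(Exception):
    pass  # stand-in for the expected retry exception

def retry_mask_filter(record):
    if record.exc_info is not None and isinstance(
            record.exc_info[1], ExpectedRetryError):
        return False  # suppress this record
    return True

log = logging.getLogger("demo")
log.addFilter(retry_mask_filter)

try:
    raise ExpectedRetryError("will retry")
except ExpectedRetryError:
    log.warning("retrying", exc_info=True)  # filtered out, not emitted
log.warning("still logged")                 # passes the filter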
Adds specification of gauge group to make_qutrit_gateset. Previously, make_qutrit_gateset would create a GateSet of fully parameterized gates without specifying a gauge group. Now this function sets a default full-gauge-group as it should.
@@ -151,5 +151,6 @@ def make_qutrit_gateset(errorScale, Xangle = _np.pi/2, Yangle = _np.pi/2, qutritGS['Gy'] = _objs.FullyParameterizedGate(arrType(gateYSOfinal)) qutritGS['Gm'] = _objs.FullyParameterizedGate(arrType(gateMSOfinal)) qutritGS.set_basis(basis,3) + qutritGS.default_gauge_group = _objs.gaugegroup.FullGaugeGroup(qutritGS.dim) return qutritGS
move git-diff and log commands to subshell. This ensures the redirection is made to the correct file. closes
@@ -130,10 +130,13 @@ def main() -> None: # it does not matter for git. repodir = os.path.dirname(os.path.realpath(__file__)) - os.system("cd {}/..; git log -1 --format=%H > {}" + # we need to execute the git log command in subshell, because if + # the log file is specified via relative path, we need to do the + # redirection of the git-log output to the right file + os.system("(cd {}; git log -1 --format=%H) > {}" .format(repodir, git_commit_file)) - os.system("cd {}/..; git --no-pager diff --color=always > {}" + os.system("(cd {}; git --no-pager diff --color=always) > {}" .format(repodir, git_diff_file)) link_best_vars = "{}.best".format(variables_file_prefix)
fix: renamed_argument decorator error. Also, I removed the hidden mutation of the input in the _handling function.
@@ -102,12 +102,16 @@ def renamed_argument(old_name: str, new_name: str, until_version: str, stackleve is_coroutine = asyncio.iscoroutinefunction(func) def _handling(kwargs): + """ + Returns updated version of kwargs. + """ routine_type = 'coroutine' if is_coroutine else 'function' if old_name in kwargs: warn_deprecated(f"In {routine_type} '{func.__name__}' argument '{old_name}' " f"is renamed to '{new_name}' " f"and will be removed in aiogram {until_version}", stacklevel=stacklevel) + kwargs = kwargs.copy() kwargs.update({new_name: kwargs.pop(old_name)}) return kwargs
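A small illustration of the hidden-mutation problem the commit removes, using hypothetical helper names: renaming a key in place mutates the caller's dict, while copying first does not.

def rename_in_place(kwargs, old, new):
    kwargs.update({new: kwargs.pop(old)})   # mutates the caller's dict
    return kwargs

def rename_copy(kwargs, old, new):
    kwargs = kwargs.copy()                  # work on a copy instead
    kwargs.update({new: kwargs.pop(old)})
    return kwargs

opts = {"old_name": 1}
rename_in_place(opts, "old_name", "new_name")
print(opts)            # {'new_name': 1}  -- caller's dict changed

opts = {"old_name": 1}
updated = rename_copy(opts, "old_name", "new_name")
print(opts, updated)   # {'old_name': 1} {'new_name': 1}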
Create checksum files with CI archives. Create a sha256 file for use with the sha256sum tool to verify the integrity of artifacts created by the CI jobs. Make these files available in the Jenkins UI alongside the tarballs, and also upload them over SSH (where applicable).
@@ -362,10 +362,14 @@ parameters = [ 'builder_shell', script='\n'.join([ 'echo "# BEGIN SECTION: Compress install space"', - 'tar -cjf $WORKSPACE/ros%d-%s-linux-%s-%s-ci.tar.bz2 ' % (ros_version, rosdistro_name, os_code_name, arch) + - ' -C $WORKSPACE/ws' + + 'cd $WORKSPACE', + 'tar -cjf ros%d-%s-linux-%s-%s-ci.tar.bz2' % (ros_version, rosdistro_name, os_code_name, arch) + + ' -C ws' + ' --transform "s/^install_isolated/ros%d-linux/"' % (ros_version) + ' install_isolated', + 'sha256sum -b ros%d-%s-linux-%s-%s-ci.tar.bz2' % (ros_version, rosdistro_name, os_code_name, arch) + + ' > ros%d-%s-linux-%s-%s-ci-CHECKSUM' % (ros_version, rosdistro_name, os_code_name, arch), + 'cd -', 'echo "# END SECTION"', ]), ))@ @@ -457,6 +461,7 @@ parameters = [ 'archive_artifacts', artifacts=[ 'ros%d-%s-linux-%s-%s-ci.tar.bz2' % (ros_version, rosdistro_name, os_code_name, arch), + 'ros%d-%s-linux-%s-%s-ci-CHECKSUM' % (ros_version, rosdistro_name, os_code_name, arch), ] + archive_files + [ image for images in show_images.values() for image in images ], @@ -468,6 +473,7 @@ parameters = [ remote_directory=upload_directory, source_files=[ 'ros%d-%s-linux-%s-%s-ci.tar.bz2' % (ros_version, rosdistro_name, os_code_name, arch), + 'ros%d-%s-linux-%s-%s-ci-CHECKSUM' % (ros_version, rosdistro_name, os_code_name, arch), ], remove_prefix=None, ))@
use mimetype to see if it's a GIF. Thanks to a good idea
@@ -938,7 +938,7 @@ class Client(object): return self._doSendRequest(data) - def _uploadImage(self, image_path, data, mimetype, is_gif=False): + def _uploadImage(self, image_path, data, mimetype): """Upload an image and get the image_id for sending in a message""" j = self._postFile(self.req_url.UPLOAD, { @@ -949,7 +949,7 @@ class Client(object): ) }, fix_request=True, as_json=True) # Return the image_id - if not is_gif: + if not mimetype == 'image/gif': return j['payload']['metadata'][0]['image_id'] else: return j['payload']['metadata'][0]['gif_id'] @@ -983,7 +983,7 @@ class Client(object): return self._doSendRequest(data) - def sendRemoteImage(self, image_url, message=None, thread_id=None, thread_type=ThreadType.USER, is_gif=False): + def sendRemoteImage(self, image_url, message=None, thread_id=None, thread_type=ThreadType.USER): """ Sends an image from a URL to a thread @@ -991,18 +991,18 @@ class Client(object): :param message: Additional message :param thread_id: User/Group ID to send to. See :ref:`intro_threads` :param thread_type: See :ref:`intro_threads` - :param is_gif: if sending GIF, True, else False :type thread_type: models.ThreadType :return: :ref:`Message ID <intro_message_ids>` of the sent image :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, thread_type) mimetype = guess_type(image_url)[0] + is_gif = (mimetype == 'image/gif') remote_image = requests.get(image_url).content - image_id = self._uploadImage(image_url, remote_image, mimetype, is_gif) + image_id = self._uploadImage(image_url, remote_image, mimetype) return self.sendImage(image_id=image_id, message=message, thread_id=thread_id, thread_type=thread_type, is_gif=is_gif) - def sendLocalImage(self, image_path, message=None, thread_id=None, thread_type=ThreadType.USER, is_gif=False): + def sendLocalImage(self, image_path, message=None, thread_id=None, thread_type=ThreadType.USER): """ Sends a local image to a thread @@ -1010,14 +1010,14 @@ class Client(object): :param message: Additional message :param thread_id: User/Group ID to send to. See :ref:`intro_threads` :param thread_type: See :ref:`intro_threads` - :param is_gif: if sending GIF, True, else False :type thread_type: models.ThreadType :return: :ref:`Message ID <intro_message_ids>` of the sent image :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, thread_type) mimetype = guess_type(image_path)[0] - image_id = self._uploadImage(image_path, open(image_path, 'rb'), mimetype, is_gif) + is_gif = (mimetype == 'image/gif') + image_id = self._uploadImage(image_path, open(image_path, 'rb'), mimetype) return self.sendImage(image_id=image_id, message=message, thread_id=thread_id, thread_type=thread_type, is_gif=is_gif) def addUsersToGroup(self, user_ids, thread_id=None):
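A quick standalone illustration of deriving the GIF check from the guessed mimetype instead of passing a separate is_gif flag (the file names are made up):

# The mimetype already tells us whether the file is a GIF.
from mimetypes import guess_type

for path in ("photo.jpg", "animation.gif"):
    mimetype = guess_type(path)[0]
    is_gif = (mimetype == 'image/gif')
    print(path, mimetype, is_gif)
# photo.jpg image/jpeg False
# animation.gif image/gif True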
website: fixed "Check your credentials" after login. In the reactContext JSON content of userdata, the tag 'altText' contained a tab char.
@@ -164,6 +164,7 @@ def extract_json(content, name): json_str = json_str.replace('\"', '\\"') # Escape double-quotes json_str = json_str.replace('\\s', '\\\\s') # Escape \s json_str = json_str.replace('\\n', '\\\\n') # Escape line feed + json_str = json_str.replace('\\t', '\\\\t') # Escape tab json_str = json_str.decode('unicode_escape') # finally decoding... return json.loads(json_str) except Exception:
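A simplified, standalone illustration of why the extra escaping helps: an unescaped tab control character inside a JSON string makes json.loads fail, and escaping it first lets the document parse. This only approximates the plugin's escaping chain:

# A raw tab is an invalid control character for json.loads.
import json

raw = '{"altText": "line one\tline two"}'   # contains a literal tab

try:
    json.loads(raw)
except json.JSONDecodeError as err:
    print("parse failed:", err)

fixed = raw.replace('\t', '\\t')            # escape the tab first
print(json.loads(fixed)["altText"])         # now parses fine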
Fix handling of default values of hypervisor. Avoid showing the deprecation warning for libvirt:hypervisor property if the user provided a value to the hypervisor parameter in virt.init.
@@ -1368,7 +1368,7 @@ def init(name, caps = capabilities(**kwargs) os_types = sorted({guest['os_type'] for guest in caps['guests']}) arches = sorted({guest['arch']['name'] for guest in caps['guests']}) - hypervisors = sorted({x for y in [guest['arch']['domains'].keys() for guest in caps['guests']] for x in y}) + if not hypervisor: hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor) if hypervisor is not None: salt.utils.versions.warn_until( @@ -1381,12 +1381,13 @@ def init(name, else: # Use the machine types as possible values # Prefer 'kvm' over the others if available + hypervisors = sorted({x for y in [guest['arch']['domains'].keys() for guest in caps['guests']] for x in y}) hypervisor = 'kvm' if 'kvm' in hypervisors else hypervisors[0] # esxi used to be a possible value for the hypervisor: map it to vmware since it's the same hypervisor = 'vmware' if hypervisor == 'esxi' else hypervisor - log.debug('Using hyperisor %s', hypervisor) + log.debug('Using hypervisor %s', hypervisor) # the NICs are computed as follows: # 1 - get the default NICs from the profile
Updating my Bhagavad Gita API. `url`: new domain. `auth`: apiKey.
@@ -124,6 +124,7 @@ API | Description | Auth | HTTPS | CORS | ### Books API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| +| [Bhagavad Gita](https://docs.bhagavadgitaapi.in) | Open Source Shrimad Bhagavad Gita API including 21+ authors translation in Sanskrit/English/Hindi | `apiKey` | Yes | Yes | | [Bhagavad Gita](https://bhagavadgita.io/api) | Bhagavad Gita text | `OAuth` | Yes | Yes | | [Bible](https://bibleapi.co/) | RESTful Bible API with 7 versions, 4 languages and multiple features | `apiKey` | Yes | Unknown | | [British National Bibliography](http://bnb.data.bl.uk/) | Books | No | No | Unknown | @@ -135,7 +136,6 @@ API | Description | Auth | HTTPS | CORS | | [Penguin Publishing](http://www.penguinrandomhouse.biz/webservices/rest/) | Books, book covers and related data | No | Yes | Yes | | [Quran](https://quran.api-docs.io/) | RESTful Quran API with multiple languages | No | Yes | Yes | | [Rig Veda](https://aninditabasu.github.io/indica/html/rv.html) | Gods and poets, their categories, and the verse meters, with the mandal and sukta number | No | Yes | Unknown | -| [Shrimad Bhagavad Gita](https://vedicscriptures.github.io/) | Open Source Shrimad Bhagavad Gita API including 21+ authors translation in Sanskrit/English/Hindi | No | Yes | Yes | | [Thirukkural](https://api-thirukkural.web.app/) | 1330 Thirukkural poems and explanation in Tamil and English | No | Yes | Yes | | [Vedic Society](https://aninditabasu.github.io/indica/html/vs.html) | Descriptions of all nouns (names, places, animals, things) from vedic literature | No | Yes | Unknown |
Update main.py documentation
@@ -45,12 +45,12 @@ def multi_criteria_main(locator, config): generation = config.multi_criteria.generations category = "optimization-detailed" + # TODO: this part is redundant for DH, check if that is true for DC # get path to data of the generation specified # if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)): # data_address = locating_individuals_in_generation_script(generation, locator) # else: # data_address = pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation)) - data_address = create_data_address_file(locator, generation) # initialize class @@ -80,14 +80,14 @@ def multi_criteria_main(locator, config): compiled_data_df.loc[i][name] = data_processed[name][0] compiled_data_df = compiled_data_df.assign(individual=individual_list) - ## normalize data + # normalize data compiled_data_df = normalize_compiled_data(compiled_data_df) - + # rank data compiled_data_df['TAC_rank'] = compiled_data_df['normalized_TAC'].rank(ascending=True) compiled_data_df['emissions_rank'] = compiled_data_df['normalized_emissions'].rank(ascending=True) compiled_data_df['prim_rank'] = compiled_data_df['normalized_prim'].rank(ascending=True) - # user defined mcda + ## user defined mcda compiled_data_df['user_MCDA'] = compiled_data_df['normalized_Capex_total'] * config.multi_criteria.capextotal * config.multi_criteria.economicsustainability + \ compiled_data_df['normalized_Opex'] * config.multi_criteria.opex * config.multi_criteria.economicsustainability + \ compiled_data_df['normalized_TAC'] * config.multi_criteria.annualizedcosts * config.multi_criteria.economicsustainability + \
Display classes on home page to learners who can only access assigned content but are not enrolled in any classes.
<div> <YourClasses - v-if="isUserLoggedIn && classes.length" + v-if="displayClasses" class="section" :classes="classes" data-test="classes" ); }); + const displayClasses = computed(() => { + return get(isUserLoggedIn) && (get(classes).length || !get(canAccessUnassignedContent)); + }); + return { isUserLoggedIn, channels, continueLearningFromClasses, continueLearningOnYourOwn, displayExploreChannels, + displayClasses, }; }, };
demos/CMakeLists.txt: make header files belong to their respective demo. This enables IDEs to show them as part of the project. The code to do it was already there, but it was broken.
@@ -159,13 +159,13 @@ macro(ie_add_sample) # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj - source_group("src" FILES ${IE_SAMPLES_SOURCES}) - if(IE_SAMPLES_HEADERS) - source_group("include" FILES ${IE_SAMPLES_HEADERS}) + source_group("src" FILES ${IE_SAMPLE_SOURCES}) + if(IE_SAMPLE_HEADERS) + source_group("include" FILES ${IE_SAMPLE_HEADERS}) endif() # Create executable file from sources - add_executable(${IE_SAMPLE_NAME} ${IE_SAMPLE_SOURCES} ${IE_SAMPLES_HEADERS}) + add_executable(${IE_SAMPLE_NAME} ${IE_SAMPLE_SOURCES} ${IE_SAMPLE_HEADERS}) if(WIN32) set_target_properties(${IE_SAMPLE_NAME} PROPERTIES COMPILE_PDB_NAME ${IE_SAMPLE_NAME})
reference: minor reformatting (no-tn-check)
@@ -53,15 +53,14 @@ def reference(nodes, through, transitive=False, visible_to_children=False): :param AbstractExpression nodes: An expression that yields a list of nodes. :param PropertyDef through: A property reference. - :param bool visible_to_children: If true, then the referenced - environment will be visible to the node, and the children of the - node on which reference acts. + :param bool visible_to_children: If true, then the referenced environment + will be visible to the node, and the children of the node on which + the reference acts. By default this is false, to prevent infinite recursions that can - happen if any children of node does an env-lookup as part of its - env spec. Use this flag if you need this reference to be visible to - children of node, and are sure that it can cause no infinite - recursion. + happen if any children of node does an env-lookup as part of its env + spec. Use this flag if you need this reference to be visible to + children of node, and are sure that it can cause no infinite recursion. :rtype: RefEnvs """
add bbknn to scAEspy It's a dimensionality reduction technique that was shown to work with BBKNN, rather than alone
@@ -18,7 +18,7 @@ Tools to be compared include: - [Harmony](https://github.com/immunogenomics/harmony) - [scMerge](https://github.com/SydneyBioX/scMerge) - [scAlign](https://github.com/quon-titative-biology/scAlign) -- [scAEspy](https://gitlab.com/cvejic-group/scaespy) +- BBKNN + [scAEspy](https://gitlab.com/cvejic-group/scaespy)? ## Data
Update Akinator.py This is in reference to issue
@@ -85,7 +85,7 @@ def main_game(jarvis): subprocess.run([imageViewerFromCommandLine, aki.picture]) # display image of answer except Exception: pass - correct = jarvis.input(f"It's {aki.name} ({aki.description})! Was I correct?\n\t") + correct = jarvis.input(f"It's {aki.first_guess['name']} ({aki.first_guess['description']})! Was I correct?\n\t") if correct.lower() == "yes" or correct.lower() == "y": jarvis.say("Yay !!! :D", Fore.GREEN) else:
Also check if file_writer_config['upto_animation_number'] is not infinity before not removing files of greater indices.
@@ -441,7 +441,7 @@ class SceneFileWriter(object): } if file_writer_config['from_animation_number'] is not None: kwargs["min_index"] = file_writer_config['from_animation_number'] - if file_writer_config['upto_animation_number'] is not None: + if file_writer_config['upto_animation_number'] not in [None, np.inf]: kwargs["max_index"] = file_writer_config['upto_animation_number'] else: kwargs["remove_indices_greater_than"] = self.scene.num_plays - 1
(#10167) libbacktrace: Bug when using Conan 2 profile mode * libbacktrace: Bug when using Conan 2 profile mode Make libbacktrace build work in Conan's 2 profile mode (host and build profile). * Removed comments * getattr change
@@ -65,15 +65,19 @@ class LibbacktraceConan(ConanFile): tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) + @property + def _user_info_build(self): + return getattr(self, "user_info_build", self.deps_user_info) + @contextlib.contextmanager def _build_context(self): if self._is_msvc: with tools.vcvars(self): env = { - "CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)), - "CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)), - "LD": "{} link -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)), - "AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)), + "CC": "{} cl -nologo".format(tools.unix_path(self._user_info_build["automake"].compile)), + "CXX": "{} cl -nologo".format(tools.unix_path(self._user_info_build["automake"].compile)), + "LD": "{} link -nologo".format(tools.unix_path(self._user_info_build["automake"].compile)), + "AR": "{} lib".format(tools.unix_path(self._user_info_build["automake"].ar_lib)), } with tools.environment_append(env): yield
Configure travis pycodestyle to ignore pep8 error E402. Because we have now fixed all the detected pep8 errors, this commit configures the codestyle test to ignore pep8 error E402 (module level import not at top of file). This way the tests should work as expected.
@@ -16,7 +16,7 @@ install: - pip install pylint - pip install -e .[test] script: - - pycodestyle coherence --ignore=E122,E303,E501, + - pycodestyle coherence --ignore=E402 - pylint -E coherence - nosetests --with-coverage --cover-erase --cover-package=coherence --cover-html after_success:
Remove erroneous data keying. Add with_fetch_all to most list endpoint requests.
@@ -141,7 +141,9 @@ def list_files(project_id, branch_id): List all Source Files for a given branch """ with handle_api_exception("Listing files"): - response = crowdin_client.source_files.list_files(project_id, branch_id) + response = crowdin_client.source_files.with_fetch_all().list_files( + project_id, branch_id + ) return response["data"] @@ -413,7 +415,9 @@ GLOSSARY_XML_FILE = "glossary.tbx" def _get_glossary_id(project): # Currently we only support handling a single glossary file for a project with handle_api_exception("Listing glossaries"): - glossaries_response = crowdin_client.glossaries.list_glossaries() + glossaries_response = ( + crowdin_client.glossaries.with_fetch_all().list_glossaries() + ) glossary = next( filter( @@ -542,7 +546,7 @@ def upload_sources(branch, project, locale_data_folder): # currently on crowdin. crowdin_files = { file["data"]["name"]: file["data"] - for file in list_files(project["id"], branch["id"])["data"] + for file in list_files(project["id"], branch["id"]) } for file_name in source_files:
[ATen] Exclude CUDA tests when running `basic` under valgrind Summary: Pull Request resolved: Test Plan: CI
@@ -53,7 +53,7 @@ if [[ -x ./cuda_tensor_interop_test ]]; then fi if [ "$VALGRIND" == "ON" ] then - valgrind --suppressions="$VALGRIND_SUP" --error-exitcode=1 ./basic "[cpu]" + valgrind --suppressions="$VALGRIND_SUP" --error-exitcode=1 ./basic --gtest_filter='-*CUDA' valgrind --suppressions="$VALGRIND_SUP" --error-exitcode=1 ./tensor_interop_test fi
[benchmark] Expose running benchmarks with lowering
@@ -79,6 +79,12 @@ if __name__ == '__main__': random.shuffle(task_fs) + benchmark_lower_env_var = '' + if os.environ.get('BENCHMARK_LOWER'): + benchmark_lower_env_var = f'HAIL_DEV_LOWER="1" ' + if os.environ.get('BENCHMARK_LOWER_ONLY'): + benchmark_lower_env_var = f'{benchmark_lower_env_var} HAIL_DEV_LOWER_ONLY="1" ' + for name, replicate, groups in task_fs: j = b.new_job(name=f'{name}_{replicate}') j.command('mkdir -p benchmark-resources') @@ -90,6 +96,7 @@ if __name__ == '__main__': f'OPENBLAS_NUM_THREADS=1' f'OMP_NUM_THREADS=1' f'VECLIB_MAXIMUM_THREADS=1' + f'{benchmark_lower_env_var}' f'PYSPARK_SUBMIT_ARGS="--driver-memory 6G pyspark-shell" ' f'hail-bench run -o {j.ofile} -n {N_ITERS} --data-dir benchmark-resources -t {name}') all_output.append(j.ofile)
Update whatsappMessages.py. Fixes issue with the thumbnail variable.
@@ -44,6 +44,7 @@ def get_whatsappMessages(files_found, report_folder, seeker, wrap_text): ''') all_rows = cursor.fetchall() usageentries = len(all_rows) + thumb = '' if usageentries > 0: for row in all_rows:
Fix a crash with calibration missing on OAK-D-xx boards, use defaults for baseline, mono FOV, focal distance
@@ -166,10 +166,18 @@ class PreviewManager: if dai.CameraBoardSocket.LEFT in device.getConnectedCameras(): calib = device.readCalibration() eeprom = calib.getEepromData() - cam_info = eeprom.cameraData[calib.getStereoLeftCameraId()] + left_cam = calib.getStereoLeftCameraId() + if left_cam != dai.CameraBoardSocket.AUTO: + cam_info = eeprom.cameraData[left_cam] self.baseline = abs(cam_info.extrinsics.specTranslation.x * 10) # cm -> mm self.fov = calib.getFov(calib.getStereoLeftCameraId()) self.focal = (cam_info.width / 2) / (2. * math.tan(math.radians(self.fov / 2))) + print(self.baseline, self.fov, self.focal) + else: + print("Warning: calibration data missing, using OAK-D defaults") + self.baseline = 75 + self.fov = 71.86 + self.focal = 440 self.dispScaleFactor = self.baseline * self.focal self.output_queues = [] for name in self.display:
fix Brocade.ADX.get_arp HG-- branch : feature/microservices
@@ -33,11 +33,11 @@ class Script(BaseScript): "ip": match.group("ip"), "mac": None, "interface": None - }) + }] else: r += [{ "ip": match.group("ip"), "mac": match.group("mac"), "interface": match.group("interface") - }) + }] return r
plugin: removing virtual references from meta. Remove references to "virtual" methods (that no longer exist) from the plugin metaclass.
@@ -252,9 +252,6 @@ class PluginMeta(type): The assumption is that the values of the attributes specified in the class are iterable; if that is not met, Bad Things (tm) will happen. - This also provides virtual method implementation, similar to those in - C-derived OO languages, and alias specifications. - """ to_propagate = [ @@ -262,9 +259,6 @@ class PluginMeta(type): ('artifacts', Artifact, AttributeCollection), ] - virtual_methods = ['validate', 'initialize', 'finalize'] - global_virtuals = ['initialize', 'finalize'] - def __new__(mcs, clsname, bases, attrs): mcs._propagate_attributes(bases, attrs, clsname) cls = type.__new__(mcs, clsname, bases, attrs)
update runners * update runners * add SCHEDULER_PARAMETERS which is required to submit job * error in CI file
+variables: + SCHEDULER_PARAMETERS: "-N 1 -M escori -q compile -t 30" + stages: - validate - regression validate_cori_testsuite: - tags: ["cori20-siddiq90"] + tags: ["cori"] stage: validate rules: - if: '$CI_PIPELINE_SOURCE == "push" || $CI_PIPELINE_SOURCE == "web"' @@ -23,7 +26,7 @@ validate_cori_testsuite: - conda env remove -n buildtest -y cori_pr_regression_test: - tags: ["cori20-siddiq90"] + tags: ["cori"] stage: regression rules: - if: '$CI_PIPELINE_SOURCE == "external_pull_request_event" || $CI_PIPELINE_SOURCE == "push" || $CI_PIPELINE_SOURCE == "web"'
restrict Tuple items to instances of Evaluable. With the recently added `evaluable.EvaluableConstant` class, it is now possible to pass unevaluable values as evaluable values to `evaluable.Tuple`. This commit removes the (unused) possibility to pass unevaluable values directly to `evaluable.Tuple`, in favor of explicitly wrapping those values in an `evaluable.EvaluableConstant`.
@@ -526,27 +526,15 @@ class EvaluableConstant(Evaluable): class Tuple(Evaluable): - __slots__ = 'items', 'indices' + __slots__ = 'items' @types.apply_annotations - def __init__(self, items:tuple): # FIXME: shouldn't all items be Evaluable? + def __init__(self, items: types.tuple[strictevaluable]): self.items = items - args = [] - indices = [] - for i, item in enumerate(self.items): - if isevaluable(item): - args.append(item) - indices.append(i) - self.indices = tuple(indices) - super().__init__(args) + super().__init__(items) def evalf(self, *items): - 'evaluate' - - T = list(self.items) - for index, item in zip(self.indices, items): - T[index] = item - return tuple(T) + return items def __iter__(self): 'iterate'
Interaction.channel can be a PartialMessageable rather than Object This allows it to work just fine in DMs
@@ -31,6 +31,7 @@ import asyncio from . import utils from .enums import try_enum, InteractionType, InteractionResponseType from .errors import InteractionResponded, HTTPException, ClientException +from .channel import PartialMessageable, ChannelType from .user import User from .member import Member @@ -57,10 +58,12 @@ if TYPE_CHECKING: from aiohttp import ClientSession from .embeds import Embed from .ui.view import View - from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel + from .channel import VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel, PartialMessageable from .threads import Thread - InteractionChannel = Union[VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel, Thread] + InteractionChannel = Union[ + VoiceChannel, StageChannel, TextChannel, CategoryChannel, StoreChannel, Thread, PartialMessageable + ] MISSING: Any = utils.MISSING @@ -111,6 +114,7 @@ class Interaction: '_original_message', '_cs_response', '_cs_followup', + '_cs_channel', ) def __init__(self, *, data: InteractionPayload, state: ConnectionState): @@ -129,10 +133,9 @@ class Interaction: self.guild_id: Optional[int] = utils._get_as_snowflake(data, 'guild_id') self.application_id: int = int(data['application_id']) - channel = self.channel or Object(id=self.channel_id) # type: ignore self.message: Optional[Message] try: - self.message = Message(state=self._state, channel=channel, data=data['message']) # type: ignore + self.message = Message(state=self._state, channel=self.channel, data=data['message']) # type: ignore except KeyError: self.message = None @@ -160,15 +163,21 @@ class Interaction: """Optional[:class:`Guild`]: The guild the interaction was sent from.""" return self._state and self._state._get_guild(self.guild_id) - @property + @utils.cached_slot_property('_cs_channel') def channel(self) -> Optional[InteractionChannel]: - """Optional[Union[:class:`abc.GuildChannel`, :class:`Thread`]]: The channel the interaction was sent from. + """Optional[Union[:class:`abc.GuildChannel`, :class:`PartialMessageable`, :class:`Thread`]]: The channel the interaction was sent from. Note that due to a Discord limitation, DM channels are not resolved since there is - no data to complete them. + no data to complete them. These are :class:`PartialMessageable` instead. """ guild = self.guild - return guild and guild._resolve_channel(self.channel_id) + channel = guild and guild._resolve_channel(self.channel_id) + if channel is None: + if self.channel_id is not None: + type = ChannelType.text if self.guild_id is not None else ChannelType.private + return PartialMessageable(state=self._state, id=self.channel_id, type=type) + return None + return channel @property def permissions(self) -> Permissions:
refactor: updated boilerplate. Moved from regex + ast eval to just using import.
@@ -269,17 +269,12 @@ def get_data(): setup_template = """# -*- coding: utf-8 -*- from setuptools import setup, find_packages -import re, ast with open('requirements.txt') as f: install_requires = f.read().strip().split('\\n') # get version from __version__ variable in {app_name}/__init__.py -_version_re = re.compile(r'__version__\s+=\s+(.*)') - -with open('{app_name}/__init__.py', 'rb') as f: - version = str(ast.literal_eval(_version_re.search( - f.read().decode('utf-8')).group(1))) +from {app_name} import __version__ as version setup( name='{app_name}',
preparation for attributes selection with boolean AND. Work in progress.
@@ -1081,6 +1081,7 @@ class DialogReportCodes(QtWidgets.QDialog): self.ui.pushButton_attributeselect.setIcon(QtGui.QIcon(pm)) return self.attributes = ui.parameters + print("Attributes after GUI\n", self.attributes) if not self.attributes: pm = QtGui.QPixmap() pm.loadFromData(QtCore.QByteArray.fromBase64(attributes_icon), "png") @@ -1095,10 +1096,11 @@ class DialogReportCodes(QtWidgets.QDialog): cur = self.app.conn.cursor() # Run a series of sql based on each selected attribute # Apply a set to the resulting ids to determine the final list of ids + boolean_and_or = self.attributes[0] for a in self.attributes: # File attributes file_sql = "select id from attribute where " - if a[1] == 'file': + if len(a) > 1 and a[1] == 'file': file_sql += "attribute.name = '" + a[0] + "' " file_sql += " and attribute.value " + a[3] + " " if a[3] == 'between': @@ -1115,7 +1117,7 @@ class DialogReportCodes(QtWidgets.QDialog): for i in result: file_ids.append(i[0]) # Case attributes - if a[1] == 'case': + if len(a) > 1 and a[1] == 'case': # Case text table also links av and images case_sql = "select distinct case_text.fid from cases " case_sql += "join case_text on case_text.caseid=cases.caseid " @@ -1159,12 +1161,12 @@ class DialogReportCodes(QtWidgets.QDialog): file_msg = "" case_msg = "" for a in self.attributes: - if a[1] == 'file': + if len(a) > 1 and a[1] == 'file': file_msg += " or " + a[0] + " " + a[3] + " " + ",".join(a[4]) if len(file_msg) > 4: file_msg = "(" + _("File: ") + file_msg[3:] + ")" for a in self.attributes: - if a[1] == 'case': + if len(a) > 1 and a[1] == 'case': case_msg += " or " + a[0] + " " + a[3] + " " + ",".join(a[4]) if len(case_msg) > 5: case_msg = "(" + _("Case: ") + case_msg[4:] + ")"
Make small changes to KFP DSL export template Fixes issue where exported generic pipeline file fails during submission due to missing authorization credentials
@@ -5,6 +5,7 @@ import kfp_tekton {% if kf_secured %} import requests import sys +import urllib {% endif %} {% if cos_secret %} from kfp.aws import use_aws_secret @@ -108,7 +109,7 @@ def get_istio_auth_session(url: str, username: str, password: str) -> dict: ################ # Get Dex Login URL (that allows us to POST credentials) ################ - redirect_url_obj = urlsplit(auth_session["redirect_url"]) + redirect_url_obj = urllib.parse.urlsplit(auth_session["redirect_url"]) # we expect a "Dex Login" URL to have `/auth` in the HTTP path if "/auth" not in redirect_url_obj.path: @@ -166,6 +167,7 @@ if __name__ == "__main__": print('Trying to authenticate with Kubeflow server ...') api_endpoint = '{{ api_endpoint }}'.rstrip('/') + api_username, api_password = sys.argv[1], sys.argv[2] try: # attempt to create a session cookies using the provided `api_username` and `api_password`
[Core] [Hotfix] Change "task failed with unretryable exception" log statement to debug-level. Serve relies on being able to do quiet application-level retries, and this info-level logging is resulting in log spam hitting users. This PR demotes this log statement to debug-level to prevent this log spam.
@@ -748,7 +748,7 @@ cdef execute_task( core_worker.get_current_task_id()), exc_info=True) else: - logger.info("Task failed with unretryable exception:" + logger.debug("Task failed with unretryable exception:" " {}.".format( core_worker.get_current_task_id()), exc_info=True)
Use `extrapolation="last_value"` for custom signalflow used by HPA Without this, missing data can cause the signalfx metrics adapter to request 0 instances
@@ -176,8 +176,8 @@ setpoint = {setpoint} moving_average_window = '{moving_average_window_seconds}s' filters = filter('paasta_service', '{paasta_service}') and filter('paasta_instance', '{paasta_instance}') and filter('paasta_cluster', '{paasta_cluster}') -current_replicas = data('kube_hpa_status_current_replicas', filter=filters).sum(by=['paasta_cluster']) -load_per_instance = data('{signalfx_metric_name}', filter=filters) +current_replicas = data('kube_hpa_status_current_replicas', filter=filters, extrapolation="last_value").sum(by=['paasta_cluster']) +load_per_instance = data('{signalfx_metric_name}', filter=filters, extrapolation="last_value") desired_instances_at_each_point_in_time = (load_per_instance - offset).sum() / (setpoint - offset) desired_instances = desired_instances_at_each_point_in_time.mean(over=moving_average_window)
Add link to Contributing landing page to navbar This commit adds a link to the Contributing landing page to the navbar on our website.
<a class="navbar-item" href="{% url 'wiki:get' path="tools/" %}"> Tools </a> + <a class="navbar-item" href="{% url 'wiki:get' path="contributing/" %}"> + Contributing + </a> <a class="navbar-item" href="{% url 'wiki:get' path="frequently-asked-questions/" %}"> FAQ </a>
Update the version of pandas to be up-to-date for CI. As pandas 1.0.3 has been released, I think it would be better to match the version of pandas in our CI.
@@ -94,7 +94,7 @@ jobs: pyarrow-version: 0.14.1 - python-version: 3.7 spark-version: 2.4.5 - pandas-version: 1.0.2 + pandas-version: 1.0.3 pyarrow-version: 0.14.1 env: PYTHON_VERSION: ${{ matrix.python-version }}
Bugfix support SERVER_NAME configuration for the run method This matches Flask's functionality.
@@ -1247,8 +1247,8 @@ class Quart(Scaffold): def run( self, - host: str = "127.0.0.1", - port: int = 5000, + host: Optional[str] = None, + port: Optional[int] = None, debug: Optional[bool] = None, use_reloader: bool = True, loop: Optional[asyncio.AbstractEventLoop] = None, @@ -1308,6 +1308,18 @@ class Quart(Scaffold): except (AttributeError, NotImplementedError): pass + server_name = self.config.get("SERVER_NAME") + sn_host = None + sn_port = None + if server_name is not None: + sn_host, _, sn_port = server_name.partition(":") + + if host is None: + host = sn_host or "127.0.0.1" + + if port is None: + port = int(sn_port) or 5000 + task = self.run_task( host, port,
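A minimal sketch of the host/port fallback logic outside any framework; it guards the empty-port case slightly more defensively than the diff, and the function name is made up:

# Derive bind host/port from an explicit argument, then a SERVER_NAME-style
# "host:port" value, then hard-coded defaults.
def resolve_bind(host=None, port=None, server_name=None):
    sn_host = sn_port = None
    if server_name is not None:
        sn_host, _, sn_port = server_name.partition(":")
    if host is None:
        host = sn_host or "127.0.0.1"
    if port is None:
        port = int(sn_port) if sn_port else 5000
    return host, port

print(resolve_bind(server_name="example.com:8080"))  # ('example.com', 8080)
print(resolve_bind())                                 # ('127.0.0.1', 5000)
print(resolve_bind(host="0.0.0.0", server_name="example.com:8080"))  # ('0.0.0.0', 8080)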
Add some types to `equals_tester.py`. Adds more type hints to `equals_tester.py`.
@@ -22,7 +22,7 @@ equal to each other. It will also check that a==b implies hash(a)==hash(b). import collections -from typing import Any, Callable +from typing import Any, Callable, List, Tuple, Union import itertools @@ -30,8 +30,10 @@ import itertools class EqualsTester: """Tests equality against user-provided disjoint equivalence groups.""" - def __init__(self): - self._groups = [(_ClassUnknownToSubjects(),)] + def __init__(self) -> None: + self._groups: List[Tuple[Union[Any, _ClassUnknownToSubjects], ...]] = [ + (_ClassUnknownToSubjects(),) + ] def _verify_equality_group(self, *group_items: Any): """Verifies that a group is an equivalence group. @@ -134,10 +136,10 @@ class EqualsTester: class _ClassUnknownToSubjects: """Equality methods should be able to deal with the unexpected.""" - def __eq__(self, other): + def __eq__(self, other: object) -> bool: return isinstance(other, _ClassUnknownToSubjects) - def __ne__(self, other): + def __ne__(self, other: object) -> bool: return not self == other def __hash__(self): @@ -150,10 +152,10 @@ class _TestsForNotImplemented: This class is equal to a specific instance or delegates by returning NotImplemented. """ - def __init__(self, other): + def __init__(self, other: object) -> None: self.other = other - def __eq__(self, other): + def __eq__(self, other: object) -> bool: return True if other is self.other else NotImplemented
html: parsing: set node attributes using Node.attributes Previously Node.__init__ was used to set HTML attributes, but this clashes with keyword arguments like "nodes" or boolean attributes like "disabled"
@@ -108,23 +108,32 @@ class NodeHTMLParser(HTMLParser): return Node def handle_starttag(self, tag, attrs): - node_kwargs = {} - # node attributes + node_attributes = {} + for name, value in attrs: if value is None: - value = 'true' + value = '' - node_kwargs[name] = value + node_attributes[name] = value # tag overrides + node_kwargs = {} + node_kwargs['tag_name'] = tag node_kwargs['self_closing_tag'] = tag in SELF_CLOSING_TAGS # setup node + for key in ('id', 'class', 'style'): + if key in node_attributes: + node_kwargs[key] = node_attributes.pop(key) + node_class = self.get_node_class(tag, node_kwargs) node = node_class(**node_kwargs) + # set attributes + node.attributes.update(node_attributes) + # setup node self._node.append(node) self.set_current_node(node)
Hotfix: Recompiled TexText <= 0.11.x nodes flip vertically Resolves
@@ -701,14 +701,12 @@ class TexTextElement(inkex.Group): old_transform = Transform(ref_node.transform) - # Account for vertical flipping of pstoedit nodes when recompiled via pdf2svg and vice versa + # Account for vertical flipping of nodes created via pstoedit in TexText <= 0.11.x revert_flip = Transform("scale(1)") - if ref_node.get_meta("pdfconverter") == "pdf2svg": + if ref_node.get_meta("pdfconverter") == "pstoedit": revert_flip = Transform(matrix=((1, 0, 0), (0, -1, 0))) # vertical reflection - composition = old_transform * revert_flip - - composition = scale_transform * composition + composition = scale_transform * old_transform * revert_flip # keep alignment point of drawing intact, calculate required shift self.transform = composition
Add socketserver.UDPServer.max_packet_size It looks like an int in the source code: Stubtest flagged it as being missing in all supported Python versions, on all platforms:
@@ -55,6 +55,7 @@ class TCPServer(BaseServer): def close_request(self, request: _RequestType) -> None: ... # undocumented class UDPServer(BaseServer): + max_packet_size: ClassVar[int] def __init__( self, server_address: tuple[str, int],
Change cartesian_to_ellipsoidal singularity range Now treats any lat < abs(1e-18) as close to singularity instead of only lat == 0.0 within the cartesian_to_ellipsoidal. This was added since unit tests failed to produce a height of 5 meters when the input latitude was 5e-315.
@@ -191,7 +191,7 @@ def cartesian_to_ellipsoidal(a, c, x, y, z): v = a / np.sqrt(1 - e2 * np.sin(lat) ** 2) h = ( np.sqrt(x**2 + y**2) / np.cos(lat) - v - if lat == 0.0 + if lat < abs(1e-18) else z / np.sin(lat) - (1 - e2) * v )
Python API: wrap null entities as None TN:
@@ -144,6 +144,10 @@ class ${type_name}(${base_cls}): %>${copy}, % endfor ) + % if cls.is_entity_type: + if result.el is None: + return None + % endif if cls._inc_ref and inc_ref: cls._inc_ref(ctypes.byref(c_value)) return result
fixed saving of project settings # Conflicts: # pype/tools/settings/settings/widgets/base.py
@@ -687,8 +687,12 @@ class ProjectWidget(QtWidgets.QWidget): return data = {} + studio_overrides = bool(self.project_name is None) for item in self.input_fields: - value, _is_group = item.overrides() + if studio_overrides: + value, is_group = item.studio_overrides() + else: + value, is_group = item.overrides() if value is not lib.NOT_SET: data.update(value) @@ -714,7 +718,7 @@ class ProjectWidget(QtWidgets.QWidget): def _update_values(self): self.ignore_value_changes = True - default_values = default_values = lib.convert_data_to_gui_data( + default_values = lib.convert_data_to_gui_data( {"project": default_settings()} ) for input_field in self.input_fields:
Python3.9: Disable warning given with MSVC in debug mode. * We don't care about deprecation warnings, they still work and so we can continue to use them.
@@ -46,7 +46,6 @@ import re import subprocess import SCons # pylint: disable=import-error -from nuitka.Tracing import my_print, scons_logger from SCons.Script import ( # pylint: disable=import-error ARGUMENTS, CacheDir, @@ -57,6 +56,8 @@ from SCons.Script import ( # pylint: disable=import-error GetOption, ) +from nuitka.Tracing import my_print, scons_logger + from .SconsCaching import enableCcache, enableClcache from .SconsCompilerSettings import enableC11Settings, getDownloadedGccPath from .SconsHacks import ( @@ -747,6 +748,9 @@ if debug_mode: if python_version >= "3.4": env.Append(CCFLAGS=["/wd4512", "/wd4510", "/wd4610"]) + if python_version >= "3.9": + env.Append(CCFLAGS=["/wd4996"]) + if full_compat_mode: env.Append(CPPDEFINES=["_NUITKA_FULL_COMPAT"])
[docs] Add links to v0.8.0 docs This uses the new code from with a link to the v0.8.0 docs. We can update this in the future as we add more releases.
@@ -33,7 +33,7 @@ pip3 install --upgrade \ Pillow==9.1.0 \ psutil \ pytest \ - tlcpack-sphinx-addon==0.2.1 \ + git+https://github.com/tlc-pack/tlcpack-sphinx-addon.git@14906063f938b7569e40f3d47a0ca39c181fb6ea \ pytest-profiling \ pytest-xdist \ requests \
Misc. changes: - compile regex - readability improvements
@@ -226,13 +226,13 @@ def is_generator_with_return_value(callable): return value is None or isinstance(value, ast.NameConstant) and value.value is None if inspect.isgeneratorfunction(callable): - pattern = r"(^[\t ]+)" src = inspect.getsource(callable) - match = re.match(pattern, src) # Find indentation - code = re.sub(pattern, "", src) + pattern = re.compile(r"(^[\t ]+)") + code = pattern.sub("", src) + + match = pattern.match(src) # finds indentation if match: - # Remove indentation - code = re.sub(f"\n{match.group(0)}", "\n", code) + code = re.sub(f"\n{match.group(0)}", "\n", code) # remove indentation tree = ast.parse(code) for node in walk_callable(tree):
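A tiny self-contained example of the refactor's idea, compiling the indentation regex once and reusing the compiled object for both the substitution and the match (the sample source string is made up):

# Compile once, reuse for sub() and match().
import re

src = "    def gen():\n        yield 1\n"
pattern = re.compile(r"(^[\t ]+)")

code = pattern.sub("", src)      # strip the leading indentation
match = pattern.match(src)       # capture the indentation that was removed
if match:
    code = re.sub(f"\n{match.group(0)}", "\n", code)  # dedent the body too

print(code)
# def gen():
#     yield 1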
Remove transition on compose box height. The transition "all" by default also affected the transition on the height change of the compose box which ended up making the compose box appear to be laggy and choppy.
@@ -300,7 +300,7 @@ textarea.new_message_textarea, border: 1px solid #ddd; box-shadow: none; -webkit-box-shadow: none; - transition: all 0.2s ease; + transition: border 0.2s ease; } textarea.new_message_textarea:focus,
Added XLPLN25X objective. I added the XLPLN25X to the Olympus objectives.
@@ -43,3 +43,18 @@ class MVPlapo2XC(Objective): workingDistance=20, label='MVPlapo2XC', url="") + +class XLPLN25X(Objective): + """ Olympus XLPLN25X 1.05 NA + + Immersion not consided at this point + """ + + def __init__(self): + super(XLPLN25X, self).__init__(f=180/25, + NA=1.05, + focusToFocusLength=75, + backAperture=18, + workingDistance=2, + label='XLPLN25X', + url="https://www.olympus-lifescience.com/en/objectives/multiphoton/") \ No newline at end of file
change: update tests to version 1.1.0 Added missing test version comment using format for current test cases. Verified test content order and completion. Closes
@@ -2,6 +2,8 @@ import unittest from change import find_minimum_coins +# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0 + class ChangeTest(unittest.TestCase): def test_single_coin_change(self):
Removes the duplicate entry created by the add(). The add() created another entry; we don't want to create a dupe entry, just update the one we have.
@@ -387,7 +387,6 @@ def updatetitle(): job.poster_url_manual = poster_url job.poster_url = poster_url job.hasnicetitle = True - db.session.add(job) db.session.commit() flash('Title: {} ({}) was updated to {} ({})'.format(job.title_auto, job.year_auto, new_title, new_year), category='success') return redirect(url_for('home'))
fall back to 'ascii' locale in build (if needed). If locale.getpreferredencoding(False) returns None, the build fails, since commit  This seems not really necessary; here we provide a fallback to the 'ascii' locale if needed.
@@ -67,8 +67,10 @@ def filepath_from_subprocess_output(output): Inherited from `exec_command`, and possibly incorrect. """ - output = output.decode(locale.getpreferredencoding(False), - errors='replace') + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + output = output.decode(mylocale, errors='replace') output = output.replace('\r\n', '\n') # Another historical oddity if output[-1:] == '\n': @@ -278,9 +280,10 @@ def _exec_command(command, use_shell=None, use_tee = None, **env): return 127, '' text, err = proc.communicate() - text = text.decode(locale.getpreferredencoding(False), - errors='replace') - + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') text = text.replace('\r\n', '\n') # Another historical oddity if text[-1:] == '\n':
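A minimal sketch of the fallback logic on its own, assuming getpreferredencoding can report a falsy value as the commit message describes:

# Decode subprocess output with the preferred locale encoding,
# dropping back to 'ascii' if none is reported.
import locale

def decode_output(output: bytes) -> str:
    mylocale = locale.getpreferredencoding(False)
    if mylocale is None:
        mylocale = 'ascii'
    text = output.decode(mylocale, errors='replace')
    return text.replace('\r\n', '\n')

print(decode_output(b"hello\r\nworld\r\n"))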
[lambda] add a newline after each record for Firehose. Firehose does not separate each record in the batch in any meaningful way. If you do not append a newline character, all records sent in the batch will appear back to back.
@@ -195,7 +195,7 @@ class StreamAlert(object): resp = self.firehose_client.put_record_batch( DeliveryStreamName=stream_name, - Records=[{'Data': json.dumps(record, separators=(",", ":"))} + Records=[{'Data': json.dumps(record, separators=(",", ":")) + '\n'} for record in record_batch])
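A standalone sketch (no boto3 call) of why the trailing newline matters: without it the JSON records are concatenated back to back in the delivered batch; with it they stay line-delimited.

# Build a line-delimited batch payload.
import json

records = [{"id": 1}, {"id": 2}]
batch = [{'Data': json.dumps(r, separators=(",", ":")) + '\n'} for r in records]
print("".join(entry['Data'] for entry in batch))
# {"id":1}
# {"id":2}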
reveal links differently. Show the download-only link if the main view is not visible to the user. Show the main view only if the feature flag is set for both the domain and the user.
@@ -163,7 +163,9 @@ class ProjectReportsTab(UITab): 'url': reverse(UserConfigReportsHomeView.urlname, args=[self.domain]), 'icon': 'icon-tasks fa fa-wrench', }) - if toggles.LOCATION_REASSIGNMENT.enabled(self.domain, namespace=NAMESPACE_DOMAIN): + # show this link if feature flag enabled for the domain and not set for the user + if (toggles.LOCATION_REASSIGNMENT.enabled(self.domain, namespace=NAMESPACE_DOMAIN) + and not toggles.LOCATION_REASSIGNMENT.enabled(self.couch_user.username, namespace=NAMESPACE_USER)): from custom.icds.location_reassignment.views import LocationReassignmentDownloadOnlyView tools.append({ 'title': _(LocationReassignmentDownloadOnlyView.section_name), @@ -1499,7 +1501,9 @@ class ProjectUsersTab(UITab): 'show_in_dropdown': True, }) - if toggles.LOCATION_REASSIGNMENT.enabled(self.couch_user.username, namespace=NAMESPACE_USER): + # show this link if feature flag enabled for the domain and the user + if (toggles.LOCATION_REASSIGNMENT.enabled(self.domain, namespace=NAMESPACE_DOMAIN) + and toggles.LOCATION_REASSIGNMENT.enabled(self.couch_user.username, namespace=NAMESPACE_USER)): from custom.icds.location_reassignment.views import LocationReassignmentView menu.append({ 'title': _("Location Reassignment"),
integ tests: add missing resource dependency in vpc_builder When the route gets created before the VPCGatewayAttachment, stack creation fails because the RouteTable tries to route traffic through a gateway that is not attached to the VPC.
@@ -77,7 +77,7 @@ class VPCTemplateBuilder: def __build_template(self): vpc = self.__build_vpc() - internet_gateway = self.__build_internet_gateway(vpc) + internet_gateway, internet_gateway_attachment = self.__build_internet_gateway(vpc) nat_gateway = None subnet_refs = [] for subnet in self.__vpc_config.subnets: @@ -87,7 +87,9 @@ class VPCTemplateBuilder: nat_gateway = self.__build_nat_gateway(subnet, subnet_ref) for subnet, subnet_ref in zip(self.__vpc_config.subnets, subnet_refs): - self.__build_route_table(subnet, subnet_ref, vpc, internet_gateway, nat_gateway) + self.__build_route_table( + subnet, subnet_ref, vpc, internet_gateway, internet_gateway_attachment, nat_gateway + ) def __build_vpc(self): vpc_config = self.__vpc_config @@ -107,10 +109,10 @@ class VPCTemplateBuilder: internet_gateway = self.__template.add_resource( InternetGateway("InternetGateway", Tags=Tags(Name=Ref("AWS::StackName"), Stack=Ref("AWS::StackId"))) ) - self.__template.add_resource( + internet_gateway_attachment = self.__template.add_resource( VPCGatewayAttachment("VPCGatewayAttachment", VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway)) ) - return internet_gateway + return internet_gateway, internet_gateway_attachment def __build_subnet(self, subnet_config: SubnetConfig, vpc: VPC): subnet = Subnet( @@ -142,6 +144,7 @@ class VPCTemplateBuilder: subnet_ref: Subnet, vpc: VPC, internet_gateway: InternetGateway, + internet_gateway_attachment: VPCGatewayAttachment, nat_gateway: NatGateway, ): route_table = self.__template.add_resource( @@ -163,6 +166,7 @@ class VPCTemplateBuilder: RouteTableId=Ref(route_table), DestinationCidrBlock="0.0.0.0/0", GatewayId=Ref(internet_gateway), + DependsOn=internet_gateway_attachment, ) ) elif subnet_config.default_gateway == Gateways.NAT_GATEWAY:
ENH: Added sanity check to printoptions See issue
@@ -78,6 +78,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if legacy not in [None, False, '1.13']: warnings.warn("legacy printing option can currently only be '1.13' or " "`False`", stacklevel=3) + if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 if not isinstance(threshold, numbers.Number): @@ -85,6 +86,12 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if np.isnan(threshold): raise ValueError("threshold must be non-NAN, try " "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + if not isinstance(precision, int): + raise TypeError('precision must be an integer') + return options
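With this check in place, a non-integer precision is rejected up front instead of producing confusing failures later during formatting. Roughly (the exact error text may differ between numpy versions):

import numpy as np

np.set_printoptions(precision=3)           # fine: integer precision
print(np.array([1.23456789, 2.3456789]))   # -> [1.235 2.346]

try:
    np.set_printoptions(precision="3")     # caught by the new sanity check
except TypeError as exc:
    print(exc)                             # -> precision must be an integer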
Update 2.6.0a.rst Announce future dropping of Python 3.3 support too
@@ -13,7 +13,7 @@ Wagtailmenus 2.6.0a release notes .. NOTE :: - Wagtailmenus 2.6 will be the last LTS release to support Python 2. + Wagtailmenus 2.6 will be the last LTS release to support Python 2 or Python 3.3. .. NOTE ::
chore: Update code coverage badge coveralls badge -> codecov badge
<a href='https://www.codetriage.com/frappe/frappe'> <img src='https://www.codetriage.com/frappe/frappe/badges/users.svg'> </a> - <a href='https://coveralls.io/github/frappe/frappe?branch=develop'> - <img src='https://coveralls.io/repos/github/frappe/frappe/badge.svg?branch=develop'> + <a href="https://codecov.io/gh/frappe/frappe"> + <img src="https://codecov.io/gh/frappe/frappe/branch/develop/graph/badge.svg?token=XoTa679hIj"/> </a> </div>
Update ua.txt [0] [1]
@@ -520,3 +520,8 @@ zeroup # Reference: https://blog.sucuri.net/2015/12/remote-command-execution-vulnerability-in-joomla.html JDatabaseDriverMysqli + +# Reference: https://twitter.com/Racco42/status/1053336574753148928 +# Reference: https://www.hybrid-analysis.com/sample/f65ba1cc50b29dd05ddaa83242f4b7bd0429841bfc4befa9e203cb6621d2389b?environmentId=100 + +4RR0B4R 4 X0T4 D4 TU4 M4E
Lexical envs: make implementation of Env_Getter public TN:
@@ -58,7 +58,21 @@ package Langkit_Support.Lexical_Env is type Getter_Fn_T is access function (Elt : Element_T) return Lexical_Env; - type Env_Getter is private; + type Env_Getter (Dynamic : Boolean := False) is record + case Dynamic is + when True => + Elt : Element_T; + Getter_Fn : Getter_Fn_T; + when False => + Is_Refcounted : Boolean; + -- Whether Env is ref-counted. When it's not, we can avoid calling + -- Dec_Ref at destruction time: This is useful because at analysis + -- unit destruction time, this may be a dangling access to an + -- environment from another unit. + + Env : Lexical_Env; + end case; + end record; -- Link to an environment. It can be either a simple link (just a pointer) -- or a dynamic link (a function that recomputes the link when needed). See -- tho two constructors below. @@ -407,22 +421,6 @@ package Langkit_Support.Lexical_Env is private - type Env_Getter (Dynamic : Boolean := False) is record - case Dynamic is - when True => - Elt : Element_T; - Getter_Fn : Getter_Fn_T; - when False => - Is_Refcounted : Boolean; - -- Whether Env is ref-counted. When it's not, we can avoid calling - -- Dec_Ref at destruction time: This is useful because at analysis - -- unit destruction time, this may be a dangling access to an - -- environment from another unit. - - Env : Lexical_Env; - end case; - end record; - type Env_Rebindings_Type (Size : Natural) is record Ref_Count : Natural := 1; Bindings : Env_Rebindings_Array (1 .. Size);
[mypy][core] decorators/pipeline.py mypy Test Plan: mypy Reviewers: alangenfeld
from functools import update_wrapper +from typing import Any, Callable, Dict, List, Optional, Set, Union from dagster import check from dagster.utils.backcompat import experimental_arg_warning class _Pipeline: def __init__( self, - name=None, - mode_defs=None, - preset_defs=None, - description=None, - tags=None, - hook_defs=None, - input_defs=None, - output_defs=None, - config_schema=None, - config_fn=None, + name: Optional[str] = None, + mode_defs: Optional[List[ModeDefinition]] = None, + preset_defs: Optional[List[PresetDefinition]] = None, + description: Optional[str] = None, + tags: Optional[Dict[str, Any]] = None, + hook_defs: Optional[Set[HookDefinition]] = None, + input_defs: Optional[List[InputDefinition]] = None, + output_defs: Optional[List[OutputDefinition]] = None, + config_schema: Optional[Dict[str, Any]] = None, + config_fn: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None, ): self.name = check.opt_str_param(name, "name") self.mode_definitions = check.opt_list_param(mode_defs, "mode_defs", ModeDefinition) @@ -41,7 +42,7 @@ def __init__( self.config_schema = config_schema self.config_fn = check.opt_callable_param(config_fn, "config_fn") - def __call__(self, fn): + def __call__(self, fn: Callable[..., Any]) -> PipelineDefinition: check.callable_param(fn, "fn") if not self.name: @@ -86,17 +87,17 @@ def __call__(self, fn): def pipeline( - name=None, - description=None, - mode_defs=None, - preset_defs=None, - tags=None, - hook_defs=None, - input_defs=None, - output_defs=None, - config_schema=None, - config_fn=None, -): + name: Union[Callable[..., Any], Optional[str]] = None, + description: Optional[str] = None, + mode_defs: Optional[List[ModeDefinition]] = None, + preset_defs: Optional[List[PresetDefinition]] = None, + tags: Optional[Dict[str, Any]] = None, + hook_defs: Optional[Set[HookDefinition]] = None, + input_defs: Optional[List[InputDefinition]] = None, + output_defs: Optional[List[OutputDefinition]] = None, + config_schema: Optional[Dict[str, Any]] = None, + config_fn: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None, +) -> Union[PipelineDefinition, _Pipeline]: """Create a pipeline with the specified parameters from the decorated composition function. Using this decorator allows you to build up the dependency graph of the pipeline by writing a
Fix hq commcare names for scheduled reports This test fails without this commit: corehq.apps.reports.tests.test_scheduled_reports:ScheduledReportSendingTest.test_get_scheduled_report_response
@@ -163,7 +163,16 @@ def _get_cc_name(request, var): value = getattr(settings, var) if isinstance(value, six.string_types): return value - return value.get(request.get_host()) or value['default'] + try: + host = request.get_host() + except KeyError: + # In reporting code we create an HttpRequest object inside python which + # does not have an HTTP_HOST attribute. Its unclear what host would be + # expected in that scenario, so we're showing the default. + # The true fix for this lies in removing fake requests from scheduled reports + host = 'default' + + return value.get(host) or value.get('default') def mobile_experience(request):
Change test_no_js_console to work in later node versions In the latest node version, `console.log` is defined in `runInThisContext`, so this test would fail
@@ -555,7 +555,8 @@ class TestJsConsole(unittest.TestCase): output = pipe.getvalue() pipe.close() - self.assertIn("ReferenceError: console is not defined", output) + self.assertNotIn("[log] Log message", output) + self.assertNotIn("[err] Error message", output) if __name__ == '__main__': unittest.main()
Fix argument handling in atvremote Passing additional arguments to a command, e.g. command=1,2,3 would drop the first argument. This fixes that.
@@ -351,27 +351,31 @@ def _handle_device_command(args, cmd, atv, loop): cmd, cmd_args = _extract_command_with_args(cmd) if cmd in device: return (yield from _exec_command( - DeviceCommands(atv, loop), cmd, print_result=False, *cmd_args)) + DeviceCommands(atv, loop), cmd, False, *cmd_args)) elif cmd in ctrl: - return (yield from _exec_command(atv.remote_control, cmd, *cmd_args)) + return (yield from _exec_command( + atv.remote_control, cmd, True, *cmd_args)) elif cmd in metadata: - return (yield from _exec_command(atv.metadata, cmd, *cmd_args)) + return (yield from _exec_command( + atv.metadata, cmd, True, *cmd_args)) elif cmd in playing: playing_resp = yield from atv.metadata.playing() - return (yield from _exec_command(playing_resp, cmd, *cmd_args)) + return (yield from _exec_command( + playing_resp, cmd, True, *cmd_args)) elif cmd in airplay: - return (yield from _exec_command(atv.airplay, cmd, *cmd_args)) + return (yield from _exec_command( + atv.airplay, cmd, True, *cmd_args)) logging.error('Unknown command: %s', args.command[0]) return 1 @asyncio.coroutine -def _exec_command(obj, command, print_result=True, *args): +def _exec_command(obj, command, print_result, *args): try: # If the command to execute is a @property, the value returned by that # property will be stored in tmp. Otherwise it's a coroutine and we
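The underlying gotcha is plain Python: when a defaulted positional parameter sits in front of *args, the first extra argument silently binds to that parameter instead of landing in *args. A minimal sketch of the bug shape (names are illustrative, not atvremote's real code):

def exec_command_buggy(obj, command, print_result=True, *args):
    return args   # the first extra argument was swallowed by print_result

def exec_command_fixed(obj, command, print_result, *args):
    return args   # print_result is now always passed explicitly

cmd_args = ("1", "2", "3")                                     # e.g. command=1,2,3
print(exec_command_buggy(None, "command", *cmd_args))          # -> ('2', '3')
print(exec_command_fixed(None, "command", True, *cmd_args))    # -> ('1', '2', '3')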
Fix .and_then/.or_else constructors TN:
@@ -11,17 +11,26 @@ from langkit.expressions.base import ( ) -@attr_call('and_then', 'and') -@attr_call('or_else', 'or', - doc='Like :dsl:`and_then`, but for the OR boolean operator or the' - ' logical disjunction.') -class BinaryBooleanOperator(AbstractExpression): +@attr_call('and_then') +def and_then(lhs, rhs): """ If `lhs` and `rhs` are booleans, this evaluates them in a short-circuit AND boolean operator fashion. Otherwise, both must be equations, and this returns a new equation that describes the logical conjunction. """ + return BinaryBooleanOperator('and', lhs, rhs) + +@attr_call('or_else') +def or_else(lhs, rhs): + """ + Like :dsl:`and_then`, but for the OR boolean operator or the logical + disjunction. + """ + return BinaryBooleanOperator('or', lhs, rhs) + + +class BinaryBooleanOperator(AbstractExpression): AND = 'and' OR = 'or'
Allow scope and colspan Closes
@@ -466,6 +466,9 @@ BLEACH_ALLOWED_ATTRIBUTES = { "height", ], # For continuous registration challenge and google group "img": ["height", "src", "width"], + # For bootstrap tables: https://getbootstrap.com/docs/4.3/content/tables/ + "th": ["scope", "colspan"], + "td": ["colspan"], } BLEACH_ALLOWED_STYLES = ["height", "margin-left", "text-align", "width"] BLEACH_ALLOWED_PROTOCOLS = ["http", "https", "mailto"]
series: support directional limits on the complex plane Closes diofant/diofant#1230
@@ -74,6 +74,16 @@ def test_basic1(): f = Function('f') assert limit(f(x), x, 4) == Limit(f(x), x, 4) + assert limit(exp(x), x, 0, dir=exp(I*pi/3)) == 1 + + assert limit(sqrt(-1 + I*x), x, 0) == +I + assert limit(sqrt(-1 + I*x), x, 0, dir=1) == -I + assert limit(sqrt(-1 + I*x), x, 0, dir=exp(I*pi/3)) == -I + + assert limit(log(x + sqrt(x**2 + 1)), x, I) == I*pi/2 + assert limit(log(x + sqrt(x**2 + 1)), x, I, dir=1) == I*pi/2 + assert limit(log(x + sqrt(x**2 + 1)), x, I, dir=exp(I*pi/3)) == I*pi/2 + def test_basic2(): assert limit(x**x, x, 0) == 1 @@ -992,3 +1002,7 @@ def test_issue_1213(): def test_sympyissue_23319(): assert limit(x*tan(pi/x), x, oo) == pi + + +def test_issue_1230(): + assert limit(log(x + sqrt(x**2 + 1)), x, I*oo) == oo
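The new tests exercise the extended dir argument: besides the default direction, dir=1 and an arbitrary complex direction such as exp(I*pi/3) are accepted. A minimal usage sketch mirroring the calls in the test (assuming a diofant release that includes this change; the diofant.abc import is an assumption):

from diofant import I, exp, limit, pi, sqrt
from diofant.abc import x

print(limit(sqrt(-1 + I*x), x, 0))                   # default direction -> I
print(limit(sqrt(-1 + I*x), x, 0, dir=1))            # -> -I
print(limit(sqrt(-1 + I*x), x, 0, dir=exp(I*pi/3)))  # approach along exp(I*pi/3) -> -I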
Add missing CAMRY_TSS2 engine & fwdCamera f/w `@Koda(Sleepy)#4682` 2021 Camry LE (ICE) DongleID/route 3653e5d0dbd0d7ed|2022-01-16--21-15-20
@@ -360,12 +360,14 @@ FW_VERSIONS = { b'\x018966306Q5000\x00\x00\x00\x00', b'\x018966306T3100\x00\x00\x00\x00', b'\x018966306T3200\x00\x00\x00\x00', + b'\x018966306T4000\x00\x00\x00\x00', b'\x018966306T4100\x00\x00\x00\x00', ], (Ecu.fwdRadar, 0x750, 0xf): [ b'\x018821F6201200\x00\x00\x00\x00', ], (Ecu.fwdCamera, 0x750, 0x6d): [ + b'\x028646F0602100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00', b'\x028646F0602200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00', b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00', b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
runcommands fixed HG-- branch : feature/microservices
@@ -845,7 +845,7 @@ Ext.define("NOC.sa.runcommands.Application", { this.viewModel.set(state, this.viewModel.get(state) + step) }, - sendCommands: function(cfg) { + sendCommands: function(mode, cfg) { var me = this, xhr, params = [], @@ -857,9 +857,28 @@ Ext.define("NOC.sa.runcommands.Application", { me.viewModel.set('progressState.f', 0); me.viewModel.set('progressState.s', 0); me.selectedStore.each(function(record) { - var v = {}; - v.id = record.get("id"); - params.push(cfg.filter(function(e){return e.id == v.id})[0]); + var v = { + id: record.get("id") + }; + + + if('commands' === mode) { + // Copy config + Ext.Object.each(cfg, function(key, value) { + if(key !== "id") { + v[key] = value; + } + }); + params.push(v); + } else { + var param = cfg.filter(function(e) { + return e.id === v.id + }); + + if(param.length) { + params.push(param[0]); + } + } record.set('status', 'w'); }); // @@ -892,7 +911,7 @@ Ext.define("NOC.sa.runcommands.Application", { ft = ft.substr(lh + l); // Process chunk record = me.selectedStore.getById(chunk.id); - if(chunk.error && 'f' != record.get('status')) { + if(chunk.error && 'f' !== record.get('status')) { record.set({ status: 'f', result: chunk.error @@ -900,12 +919,12 @@ Ext.define("NOC.sa.runcommands.Application", { me.stateInc('progressState.r', -1); me.stateInc('progressState.f', 1); } - if(chunk.running && 'r' != record.get('status')) { + if(chunk.running && 'r' !== record.get('status')) { record.set('status', 'r'); me.stateInc('progressState.w', -1); me.stateInc('progressState.r', 1); } - if(chunk.result && 's' != record.get('status')) { + if(chunk.result && 's' !== record.get('status')) { record.set({ status: 's', result: chunk.result @@ -978,7 +997,7 @@ Ext.define("NOC.sa.runcommands.Application", { } } if(commands.length > 0) { - me.sendCommands(commands); + me.sendCommands(mode, commands); } else { NOC.error(__('Empty command')) } @@ -991,7 +1010,7 @@ Ext.define("NOC.sa.runcommands.Application", { }; if('commands' === me.modeField.getValue()) { - me.sendCommands({ + me.sendCommands('commands', { "script": "commands", "args": { "commands": me.commandPanel.getValues().cmd.split("\n")
Tweak JS so Safari can choose admin actions I noticed that Safari was submitting both the empty option and the selected options back to the server. Digging into it, I was able to get Safari to deselect the option by using '[selected]' as the selector. For
// select the action button from the dropdown container.find('select[name=action]') - .find('op:selected').removeAttr('selected').end() + .find('[selected]').removeAttr('selected').end() .find('[value=' + action_type + ']').attr('selected', 'selected').click() // click submit & replace the archivebox logo with a spinner
Remove unnecessary list-routes command Since this was added, Flask now comes with a built-in command to list the routes, `flask routes`, so this is not needed.
@@ -252,13 +252,6 @@ def fix_notification_statuses_not_in_sync(): result = db.session.execute(subq_hist).fetchall() -@notify_command(name='list-routes') -def list_routes(): - """List URLs of all application routes.""" - for rule in sorted(current_app.url_map.iter_rules(), key=lambda r: r.rule): - print("{:10} {}".format(", ".join(rule.methods - set(['OPTIONS', 'HEAD'])), rule.rule)) - - @notify_command(name='insert-inbound-numbers') @click.option('-f', '--file_name', required=True, help="""Full path of the file to upload, file is a contains inbound numbers,
Add is_pingable and device_vendor columns in the scuba table Summary: Add an "is_pingable" column in the scuba table so that we can filter out device/network errors when making alerts/troubleshooting. See T47339256 for more context
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. +from typing import TYPE_CHECKING, Union + from .base_service import PeriodicServiceTask from .options import Option +if TYPE_CHECKING: + from .device_info import DeviceIP # noqa: F401 + + class BaseDeviceDB(PeriodicServiceTask): """ Interface to device database. @@ -89,3 +95,6 @@ class BaseDeviceDB(PeriodicServiceTask): # still not able to find device, raise an exeception raise KeyError("Device not found", device.hostname) + + def is_pingable(self, ip: Union[str, "DeviceIP"]) -> bool: + raise NotImplementedError("Please implement this to check if an ip is pingable")
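The base class only declares the new hook; concrete device databases are expected to override it. A standalone, hypothetical way to answer "is this IP pingable" (Linux-style ping flags; not the project's actual implementation):

import subprocess

def is_pingable(ip: str) -> bool:
    """Hypothetical check: a single ICMP probe with a 1-second timeout."""
    result = subprocess.run(
        ["ping", "-c", "1", "-W", "1", ip],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0

print(is_pingable("127.0.0.1"))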
Add support to generate the inventory for OSP17 This patch specifies the overcloud_stack_name when generating the inventory.
@@ -12,6 +12,7 @@ function usage } user="stack" +overcloud_stack_name=overcloud uncomment_localhost=false tripleo_ip_address= @@ -44,9 +45,9 @@ fi out_file="hosts.yml" if [ $uncomment_localhost ]; then source ~/stackrc - tripleo-ansible-inventory --static-yaml-inventory ${out_file} + tripleo-ansible-inventory --stack ${overcloud_stack_name} --static-yaml-inventory ${out_file} else - file_path=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" ${user}@${tripleo_ip_address} ". ~/stackrc; tripleo-ansible-inventory --static-yaml-inventory ${out_file}; pwd ${out_file}") + file_path=$(ssh -tt -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" ${user}@${tripleo_ip_address} ". ~/stackrc; tripleo-ansible-inventory --stack ${overcloud_stack_name} --static-yaml-inventory ${out_file}; pwd ${out_file}") scp -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" ${user}@${tripleo_ip_address}:${file_path}/${out_file} . fi
settings: Fix code for special case of theme settings subsection. We handle the "Theme settings" subsection separately in get_subsection_property_elements, as it contains a unique radio-button structure for the emojiset setting. This should have been fixed while reorganizing the section to have the color scheme and emoji-related settings under the same subsection in Fixes
@@ -215,12 +215,13 @@ export function extract_property_name(elem, for_realm_default_settings) { function get_subsection_property_elements(element) { const subsection = $(element).closest(".org-subsection-parent"); - if (subsection.hasClass("emoji-settings")) { + if (subsection.hasClass("theme-settings")) { // Because the emojiset widget has a unique radio button // structure, it needs custom code. + const color_scheme_elem = subsection.find(".setting_color_scheme"); const emojiset_elem = subsection.find("input[name='emojiset']:checked"); const translate_emoticons_elem = subsection.find(".translate_emoticons"); - return [emojiset_elem, translate_emoticons_elem]; + return [color_scheme_elem, emojiset_elem, translate_emoticons_elem]; } return Array.from(subsection.find(".prop-element")); }
Fix typo in consensus message signature verifier The signature is over the message header, not the message content.
@@ -123,7 +123,7 @@ def is_valid_consensus_message(message): context = create_context('secp256k1') public_key = Secp256k1PublicKey.from_bytes(header.signer_id) if not context.verify(message.header_signature, - message.content, + message.header, public_key): LOGGER.debug("message signature invalid for message: %s", message.header_signature)
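The bug class is easy to reproduce with any ECDSA library: a signature made over the header bytes verifies against the header, and fails against the content. A generic sketch using the cryptography package and the same secp256k1 curve (an illustration of the principle, not Sawtooth's sawtooth_signing API):

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

key = ec.generate_private_key(ec.SECP256K1())
header, content = b"consensus message header", b"consensus message content"
signature = key.sign(header, ec.ECDSA(hashes.SHA256()))   # signed over the header

public_key = key.public_key()
public_key.verify(signature, header, ec.ECDSA(hashes.SHA256()))       # passes
try:
    public_key.verify(signature, content, ec.ECDSA(hashes.SHA256()))  # wrong bytes
except InvalidSignature:
    print("verifying against the content instead of the header fails")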
[varLib] Minor Part of Part of
@@ -109,7 +109,7 @@ class OnlineVarStoreBuilder(object): # Full array. Start new one. self._set_VarData() return self.storeDeltas(deltas) - VarData_add_item(self._data, deltas) + self._data.add_item(deltas) varIdx = (self._outer << 16) + inner self._cache[deltas] = varIdx @@ -127,10 +127,14 @@ def VarData_add_item(self, deltas): deltas = tuple(deltas) self.Item.append(deltas) +ot.VarData.add_item = VarData_add_item + def VarRegion_get_support(self, fvar_axes): return {fvar_axes[i].axisTag: (reg.StartCoord,reg.PeakCoord,reg.EndCoord) for i,reg in enumerate(self.VarRegionAxis)} +ot.VarRegion.get_support = VarRegion_get_support + class VarStoreInstancer(object): def __init__(self, varstore, fvar_axes, location={}): @@ -150,7 +154,7 @@ class VarStoreInstancer(object): def _getScalar(self, regionIdx): scalar = self._scalars.get(regionIdx) if scalar is None: - support = VarRegion_get_support(self._regions[regionIdx], self.fvar_axes) + support = self._regions[regionIdx].get_support(self.fvar_axes) scalar = supportScalar(self.location, support) self._scalars[regionIdx] = scalar return scalar
GDB helpers: fix a typo from previous commit TN:
@@ -61,7 +61,7 @@ This command may be followed by a "/X" flag, where X is one or several of: print('Invalid flags: {}'.format(repr(", ".join(invalid_args)))) return - StatePrinter(self.context, 'f' in arg, 's' in arg).run() + StatePrinter(self.context, 'f' not in arg, 's' in arg).run() class StatePrinter(object):
Use MappingProxyType to freeze non-private dictionaries. This is intended to make those mappings safer.
@@ -3,6 +3,7 @@ from collections import defaultdict from functools import reduce from operator import and_, or_ from pathlib import Path +from types import MappingProxyType import yaml from django.conf import settings @@ -16,10 +17,12 @@ Resource = dict[str, t.Union[str, list[dict[str, str]], dict[str, list[str]]]] RESOURCES_PATH = Path(settings.BASE_DIR, "pydis_site", "apps", "resources", "resources") -RESOURCES: dict[str, Resource] = {path.stem: yaml.safe_load(path.read_text()) for path - in RESOURCES_PATH.rglob("*.yaml")} +RESOURCES: MappingProxyType[str, Resource] = MappingProxyType({ + path.stem: yaml.safe_load(path.read_text()) + for path in RESOURCES_PATH.rglob("*.yaml") +}) -RESOURCE_TABLE = {category: defaultdict(set) for category in ( +_resource_table = {category: defaultdict(set) for category in ( "topics", "payment_tiers", "complexity", @@ -29,7 +32,13 @@ RESOURCE_TABLE = {category: defaultdict(set) for category in ( for name, resource in RESOURCES.items(): for category, tags in resource['tags'].items(): for tag in tags: - RESOURCE_TABLE[category][_transform_name(tag)].add(name) + _resource_table[category][_transform_name(tag)].add(name) + +# Freeze the resources table +RESOURCE_TABLE = MappingProxyType({ + category: MappingProxyType(d) + for category, d in _resource_table.items() +}) def get_resources_from_search(search_categories: dict[str, set[str]]) -> list[Resource]:
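What "freezing" buys here, in miniature: MappingProxyType is a read-only view, so accidental writes fail loudly while reads behave like a normal dict. A generic illustration (not the site's real resources data):

from types import MappingProxyType

_resources = {"flask": {"topics": ["web"]}, "numpy": {"topics": ["data science"]}}
RESOURCES = MappingProxyType(_resources)

print(RESOURCES["flask"])        # reads work exactly like a dict
try:
    RESOURCES["pandas"] = {}     # writes are rejected
except TypeError as exc:
    print(exc)                   # 'mappingproxy' object does not support item assignment

# Note: the proxy is only a view; mutating the underlying dict still shows through.
_resources["pandas"] = {"topics": ["data"]}
print("pandas" in RESOURCES)     # -> True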
bump protobuf version * bump protobuf version Bump protobuf version so users don't face issues if they have an older version already installed on their systems. Ref: Ref: [stackoverflow](https://stackoverflow.com/questions/61922334/how-to-solve-attributeerror-module-google-protobuf-descriptor-has-no-attribu) `pip install --upgrade protobuf` * forbid the bad version as mentioned in
@@ -41,7 +41,8 @@ numpy = "*" packaging = "*" pandas = ">=0.21.0" pillow = ">=6.2.0" -protobuf = ">=3.6.0" +# protobuf version 3.11 is incompatible, see https://github.com/streamlit/streamlit/issues/2234 +protobuf = ">=3.6.0, !=3.11" pyarrow = "*" pydeck = ">=0.1.dev5" python-dateutil = "*"
doc: update infra playbooks statements We no longer need to copy the infrastructure playbooks into the root ceph-ansible directory.
@@ -4,4 +4,4 @@ Infrastructure playbooks This directory contains a variety of playbooks that can be used independently of the Ceph roles we have. They aim to perform infrastructure related tasks that would help use managing a Ceph cluster or performing certain operational tasks. -To use them, **you must move them to ceph-ansible's root directory**, then run using `ansible-playbook <playbook>`. +To use them, run `ansible-playbook infrastructure-playbooks/<playbook>`.
Build javascript improvements Alter 'no records found' text Reload allocation table on edit or delete
@@ -123,6 +123,7 @@ function fillAllocationTable(table, index, parent_row, parent_table, options) { */ table.bootstrapTable({ + formatNoMatches: function() { return 'No parts allocated for ' + parent_row.sub_part_detail.name; }, columns: [ { field: 'stock_item_detail', @@ -164,6 +165,7 @@ function fillAllocationTable(table, index, parent_row, parent_table, options) { launchModalForm(button.attr('url'), { success: function() { + table.bootstrapTable('refresh'); } }); }); @@ -173,6 +175,7 @@ function fillAllocationTable(table, index, parent_row, parent_table, options) { launchDeleteForm(button.attr('url'), { success: function() { + table.bootstrapTable('refresh'); } }); });
Simplify logic: just delete the diagram and its elements You can always undo.
@@ -330,27 +330,6 @@ class Namespace(UIComponent, ActionProvider): @action(name="tree-view.delete") def tree_view_delete(self): element = self.get_selected_element() - if isinstance(element, Diagram): - m = Gtk.MessageDialog( - None, - Gtk.DialogFlags.MODAL, - Gtk.MessageType.QUESTION, - Gtk.ButtonsType.YES_NO, - gettext( - "Do you really want to delete diagram {name}?\n\n" - "This will possibly delete diagram items\n" - "that are not shown in other diagrams." - ).format(name=element.name or gettext("<None>")), - ) - if m.run() == Gtk.ResponseType.YES: - with Transaction(self.event_manager): - for i in reversed(list(element.get_all_items())): - s = i.subject - if s and len(s.presentation) == 1: - s.unlink() - i.unlink() - element.unlink() - m.destroy() - elif element: + if element: with Transaction(self.event_manager): element.unlink()
workloads/hackbench: fix target_binary Set target_binary as a class, rather than instance, attribute. This happens only once per run, and setting it as an instance attribute the first time makes it unavailable to subsequent instances of the same workload.
@@ -62,7 +62,7 @@ class Hackbench(Workload): @once def initialize(self, context): host_binary = context.resolver.get(Executable(self, self.target.abi, self.binary_name)) - self.target_binary = self.target.install(host_binary) + Hackbench.target_binary = self.target.install(host_binary) def setup(self, context): self.target_output_file = self.target.get_workpath(hackbench_results_txt)
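The distinction the fix relies on, in miniature: an attribute set on self belongs to that one instance only, while an attribute set on the class is visible to every instance. An illustrative sketch (names and path are made up, not the WA workload code):

class Workload:
    def initialize_buggy(self):
        self.target_binary = "/data/local/tmp/hackbench"       # only this instance sees it

    def initialize_fixed(self):
        Workload.target_binary = "/data/local/tmp/hackbench"   # shared by every instance

first, second = Workload(), Workload()

first.initialize_buggy()
print(hasattr(second, "target_binary"))   # -> False: the second instance never got it

first.initialize_fixed()
print(hasattr(second, "target_binary"))   # -> True: class attributes are shared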
Reroot_Foreign_Nodes: populate lexical envs after exiled entries strip TN:
@@ -828,23 +828,19 @@ package body ${ada_lib_name}.Analysis is procedure Reroot_Foreign_Nodes (Self : Lex_Env_Data; Root_Scope : Lexical_Env) is - Els : ${root_node_type_name}_Vectors.Elements_Array := + Els : constant ${root_node_type_name}_Vectors.Elements_Array := Self.Foreign_Nodes.To_Array; - Env : Lexical_Env; begin -- Make the Foreign_Nodes vector empty as the partial -- Populate_Lexical_Env pass below will re-build it. Self.Foreign_Nodes.Clear; for El of Els loop - -- Re-do a partial Populate_Lexical_Env pass for each foreign node - -- that this unit contains so that they are relocated in our new - -- lexical environments. - Env := El.Pre_Env_Actions (El.Self_Env, Root_Scope, True); - El.Post_Env_Actions (Env, Root_Scope); - - -- Also filter the exiled entries in foreign units so that they don't - -- contain references to this unit's lexical environments. + -- First, filter the exiled entries in foreign units so that they + -- don't contain references to this unit's lexical environments. We + -- need to do that before running the partial Populate_Lexical_Env + -- pass so that we don't remove exiled entries that this pass will + -- produce. declare Exiled_Entries : Exiled_Entry_Vectors.Vector renames Get_Lex_Env_Data (El.Unit).Exiled_Entries; @@ -858,6 +854,16 @@ package body ${ada_lib_name}.Analysis is end if; end loop; end; + + -- Re-do a partial Populate_Lexical_Env pass for each foreign node + -- that this unit contains so that they are relocated in our new + -- lexical environments. + declare + Env : constant Lexical_Env := + El.Pre_Env_Actions (El.Self_Env, Root_Scope, True); + begin + El.Post_Env_Actions (Env, Root_Scope); + end; end loop; end Reroot_Foreign_Nodes;
Adding a bit more about testing to contributing.md Let me know what you think of my additions. I think this is good to merge otherwise.
@@ -83,6 +83,12 @@ with this. * :white_check_mark: `:white_check_mark:` when adding tests * :shirt: `:shirt:` when removing linter warnings +### Pull Request Messages + + * Rename the pull request and provide a comment that synthesizes what + the pull request changes or adds. This helps us synthesize what + changes have occured between Landlab releases. + ## Adding new components If you would like to create a new component, we have just a few @@ -106,18 +112,39 @@ conventions that we would like you to follow. * All public functions, classes, methods, etc. must have a docstring that follows the [numpydoc](https://github.com/numpydoc/numpydoc) conventions. -* Every `.py` file must contain a module-level docstring that the top +* Every `.py` file must contain a module-level docstring at the top of the file that describes what the purpose of the file is. +* Additionally, you will need to add a ReStructuredText (.rst) file + so that the documentation of your new part of Landlab can be + autogenerated. See [this section of the creating a component](https://github.com/landlab/landlab/wiki/Develop-your-own-component#getting-your-component-into-the-documentation) documentation + for an example of how to do this. ### Testing -* All contributed code must be well tested. This should be done through - both doctests as well as more standard unit tests through `nose`. +* All contributed code must be well tested. This should be done + through both doctests as well as more standard unit tests through + [`nose`](http://nose.readthedocs.io/en/latest/index.html). * Doctests should be short, easy-to-read tests that are instructive to a user. * Unit tests should be significanly more extensive and give your - new code thorough testing. - + new code thorough testing. Ideally your tests will test what + happens within every `if`, `elif`, or `else `, and every `try` or + `except` block. Additionally, unless there is a specific reason your + component or utility can only work with one landlab grid type, the + tests should verify that it can work with multiple model grid types + (e.g. both `HexModelGrid` and `RasterModelGrid`). +* Your unit tests should verify that the component or utility you are + creating does exactly what it is expected to do. This means you will + probably want to create a very small (e.g. 5x5 model grid and hand + calculate what the correct answer is). Then assert that your code + reproduces that answer. +* Unit tests [must be discoverable by `nose`](http://nose.readthedocs.io/en/latest/finding_tests.html). + This means that the unit tests should be in folders called `test` + within the component or utility folder, in `.py` files that start with + the name `test` and within functions with names that begin with the + word `test`. +* For more information on constructing unit tests, see [this section](https://github.com/landlab/landlab/wiki/Develop-your-own-component#writing-docstring-and-unit-tests-for-your-component-or-utility) + of the User Guide. Thanks! :heart: :heart: :heart:
help_docs: Update image viewer documentation for changes. Updates the list of actions and buttons referenced in the help center documentation for viewing images with lightbox. Also, makes some minor corrections to the keyboard shortcut note. Fixes
@@ -5,10 +5,17 @@ preview. Click on the image preview to open the **image viewer**. In the image viewer, you can: -* View the image at **full size** +* Zoom in and out of the image + +* Click and drag the image + +* **Reset zoom** so that the image is recentered and its original size + +* **Open** the image in a new browser tab + * **Download** the image -* **Pan and zoom** -* **Browse other images** in the current view. For example, if you're in a + +* Browse other images in the current view. For example, if you're in a stream view, the image browser will show all the images from that stream. If you do a [search](/help/search-for-messages), the image browser will show all images in messages that matched that search. @@ -17,8 +24,9 @@ Exit the image viewer by clicking anywhere outside the image. !!! keyboard_tip "" - Use `v` to **open** the image viewer. Use `Z` and `z` - zoom in and out of the image. Use `v` or `Esc` to **close**. + Use `v` to **open** the image viewer. Use `Z` and `z` to + zoom in and out of the image. Use `v` or `Esc` to **close** + the image viewer. ## Troubleshooting
comment out timeline plot Hide the timeline plot.
</div> </div> - <div class="col-md-12"> + <!--<div class="col-md-12"> <h2>Timeline</h2> <div class="chart" id="timeline"> Plotly.plot('timeline',graphs,{}); </script> </div> - </div> + </div>--> <div class="col-md-12"> <h2>Clients</h2>
Remove use of deprecated Renderer. Warning in the log: "Renderer() deprecated, Please use Scene()instead".
@@ -8,22 +8,20 @@ from dipy.core.graph import Graph from dipy.denoise.enhancement_kernel import EnhancementKernel from dipy.tracking.fbcmeasures import FBCMeasures from dipy.core.sphere import Sphere -from dipy.viz import window -from xvfbwrapper import Xvfb +from dipy.viz import window, actor class TestDipy(unittest.TestCase): - def test_renderer(self): - vdisplay = Xvfb() - vdisplay.start() + def test_scene(self): + xyz = 10 * np.random.rand(100, 3) + colors = np.random.rand(100, 4) + radii = np.random.rand(100) + 0.5 - ren = window.Renderer() + sphere_actor = actor.sphere(centers=xyz, colors=colors, radii=radii) - with tempfile.TemporaryDirectory() as dir: - out_file = os.path.join(dir, 'test.png') - window.record(ren, n_frames=1, out_path=out_file, size=(600, 600)) - self.assertTrue(os.path.exists(out_file)) + scene = window.Scene() + scene.add(sphere_actor) - vdisplay.stop() + self.assertEqual((0, 0), scene.GetSize()) def test_graph(self): g = Graph()
Rename incorrectly named change handler on mixin. Fixes
@@ -626,7 +626,7 @@ class RelationMixin(object): changes_to_return.extend(relation_changes) return errors, changes_to_return - def delete_relation_handler(self, changes): + def delete_relation_from_changes(self, changes): errors = [] changes_to_return = [] for relation in changes:
user_info_popover: Fix status emoji showing even if it's not set. This happened because we had not put a condition in our template to handle the above situation.
<li class="user_info_status_text"> <span id="status_message"> {{status_text}} + {{#if status_emoji_info}} {{#if status_emoji_info.emoji_alt_code}} <div class="emoji_alt_code">&nbsp:{{status_emoji_info.emoji_name}}:</div> {{else}} <div class="emoji status_emoji emoji-{{status_emoji_info.emoji_code}}"></div> {{/if}} {{/if}} + {{/if}} {{#if is_me}}(<a tabindex="0" class="clear_status">{{#tr}}clear{{/tr}}</a>){{/if}} </span> </li>
Update CHANGES. [skip ci]
@@ -53,6 +53,10 @@ These are all the changes in Lektor since the first public release. - When running `lektor dev new-theme`: fix check for ability to create symlinks under Windows. ([#996][]) +#### Tests + +- Fix for test failures when git is not installed. ([#998][], [#1000][]) + ### Refactorings - Cleaned up `EditorSession` to split mapping methods (for access to @@ -64,6 +68,11 @@ These are all the changes in Lektor since the first public release. - Cleaned up and moved our `pylint` and `coverage` configuration to `pyproject.toml`. ([#990][], [#991][]) +#### Packaging + +- Omit `example` subdirectory, frontend source code, developer-centric + config files, as well as other assorted cruft from sdist. ([#986][]) + [bsd]: https://opensource.org/licenses/BSD-3-Clause [#610]: https://github.com/lektor/lektor/issues/610 [#962]: https://github.com/lektor/lektor/issues/962 @@ -74,11 +83,14 @@ These are all the changes in Lektor since the first public release. [#975]: https://github.com/lektor/lektor/issues/975 [#976]: https://github.com/lektor/lektor/pull/976 [#984]: https://github.com/lektor/lektor/pull/984 +[#986]: https://github.com/lektor/lektor/pull/986 [#988]: https://github.com/lektor/lektor/pull/988 [#989]: https://github.com/lektor/lektor/pull/989 [#990]: https://github.com/lektor/lektor/pull/990 [#991]: https://github.com/lektor/lektor/pull/991 [#996]: https://github.com/lektor/lektor/pull/996 +[#998]: https://github.com/lektor/lektor/issues/998 +[#1000]: https://github.com/lektor/lektor/pull/1000 [jinja-dbg-ext]: https://jinja.palletsprojects.com/en/latest/extensions/#debug-extension ## 3.3.1 (2022-01-09)
Add LONG_BINPUT to unpickler Summary: Pull Request resolved: ghimport-source-id:
@@ -345,6 +345,18 @@ OpCode Unpickler::readInstruction() { } memo_table_.push_back(stack_.back()); } break; + case OpCode::LONG_BINPUT: { + AT_CHECK( + std::numeric_limits<size_t>::max() >= + std::numeric_limits<uint32_t>::max(), + "Found a LONG_BINPUT opcode, but size_t on this system is " + "not big enough to decode it"); + size_t memo_id = read<uint32_t>(); + if (memo_table_.size() <= memo_id) { + memo_table_.reserve(1 + 2 * memo_id); + } + memo_table_.push_back(stack_.back()); + } break; case OpCode::MARK: { // Mark location of the container ivalue in the stack marks_.push_back(stack_.size());