tv4play: improve detection of geoblocking. For some reason TV4 sends HTTP 500 when the video is geoblocked.
@@ -7,6 +7,7 @@ from datetime import datetime from datetime import timedelta from urllib.parse import urlparse +import requests from svtplay_dl.error import ServiceError from svtplay_dl.fetcher.dash import dashparse from svtplay_dl.fetcher.hls import hlsparse @@ -68,7 +69,12 @@ class Tv4play(Service, OpenGraphThumbMixin): return url = f"https://playback-api.b17g.net/media/{vid}?service=tv4&device=browser&protocol=hls%2Cdash&drm=widevine&browser=GoogleChrome" + try: res = self.http.request("get", url, cookies=self.cookies) + except requests.exceptions.RetryError: + res = requests.get(url, cookies=self.cookies) + yield ServiceError(f"Can't play this because the video is geoblocked: {res.json()['message']}") + return if res.status_code > 200: yield ServiceError("Can't play this because the video is geoblocked or not available.") return
Update elasticsearch.rst. Updated as per the linked references.
@@ -16,7 +16,7 @@ Overview Elasticsearch allows you to search large volumes of data quickly, in near real time, by creating and managing an index of post data. The indexing process can be managed from the System Console after setting up and connecting an Elasticsearch server. The post index is stored on the Elasticsearch server and is updated constantly after new posts are made. In order to index existing posts, a bulk index of the entire post database must be generated. -Elasticsearch v5.x and v6.x are supported. +Elasticsearch v5.x, v6.x, and v7.x are supported. When to Use Elasticsearch ~~~~~~~~~~~~~~~~~~~~~~~~~
Fix UPI AWS deployment with CoreOS Fixes:
@@ -271,7 +271,7 @@ class AWSUPI(AWSBase): self.name = self.__class__.__name__ super(AWSUPI, self).__init__() - if config.ENV_DATA['rhel_workers']: + if config.ENV_DATA.get('rhel_workers'): self.worker_vpc = None self.worker_iam_role = None self.worker_subnet = None @@ -423,7 +423,7 @@ class AWSUPI(AWSBase): if config.DEPLOYMENT.get('host_network'): self.host_network_update() - if config.ENV_DATA['rhel_workers']: + if config.ENV_DATA.get('rhel_workers'): self.add_rhel_workers() def gather_worker_data(self, suffix='no0'): @@ -830,7 +830,7 @@ class AWSUPI(AWSBase): default:DEBUG) """ cluster_name = get_cluster_name(self.cluster_path) - if config.ENV_DATA['rhel_workers']: + if config.ENV_DATA.get('rhel_workers'): self.terminate_rhel_workers(self.get_rhel_worker_instances()) # Destroy extra volumes self.destroy_volumes()
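As a side note on the pattern used in this fix (the config keys below are only illustrative): `dict.get()` returns None when a key is absent, whereas plain indexing raises KeyError, which is why deployments without the 'rhel_workers' key previously failed.
env_data = {"platform": "AWS"}  # hypothetical config without the 'rhel_workers' key

# Plain indexing fails when the key is missing:
try:
    env_data["rhel_workers"]
except KeyError:
    print("KeyError: 'rhel_workers'")

# .get() returns None (falsy), so the optional RHEL-worker branch is simply skipped:
if env_data.get("rhel_workers"):
    print("configure RHEL workers")
else:
    print("no RHEL workers configured")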
Update prometheus2.spec to 2.6.0 Bumping Prometheus version to 2.6.0, released on 2018-12-17
%define debug_package %{nil} Name: prometheus2 -Version: 2.5.0 +Version: 2.6.0 Release: 1%{?dist} -Summary: The Prometheus 2.5.0 monitoring system and time series database. +Summary: The Prometheus 2.6.0 monitoring system and time series database. License: ASL 2.0 URL: https://prometheus.io Conflicts: prometheus
Add the missing decorator to pools. The on_put function was missing the decorator; this patch adds it.
@@ -168,6 +168,7 @@ class Resource(object): response.body = transport_utils.to_json(data) + @decorators.TransportLog("Pools item") @acl.enforce("pools:create") def on_put(self, request, response, project_id, pool): """Registers a new pool. Expects the following input:
Update test_resizing.py Added a call to `text_file.close()` after the call to `np.savetxt()`.
@@ -61,6 +61,7 @@ class ModResizeAsciiBlock(SinkBlock): span = span_generator.next() text_file = open(self.filename, 'a') np.savetxt(text_file, span.data_view(np.float32).reshape((1,-1))) + text_file.close() class TestLateResize(unittest.TestCase): """Test late resizing of a ring in a pipeline"""
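A minimal sketch of an equivalent fix using a context manager, which closes the file even if the write raises; the filename and array here are made up, not taken from the test.
import numpy as np

data = np.arange(6, dtype=np.float32)

# The with-block closes the file automatically, matching the explicit close() added above
with open("output.txt", "a") as text_file:
    np.savetxt(text_file, data.reshape((1, -1)))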
Update data-drift.md. Added a new customization option to the data drift page.
@@ -81,7 +81,9 @@ To change the bins displayed, you can define [custom options](../customization/o ## Report customization -As mentioned above, you can set different [options-for-data-target-drift.md](../customization/options-for-data-target-drift.md "mention") to modify the existing components of the report. Use this to change the statistical tests used, define Dataset Drift conditions, or change histogram Bins. +You can set different [options-for-data-target-drift.md](../customization/options-for-data-target-drift.md "mention") to modify the existing components of the report. Use this to change the statistical tests used, define Dataset Drift conditions, or change histogram Bins. + +You can also set [options-for-quality-metrics.md](../customization/options-for-quality-metrics.md "mention") to define the width of the confidence interval displayed. You can also select which components of the reports to display or choose to show the short version of the report: [select-widgets-to-display.md](../customization/select-widgets-to-display.md "mention").
doc/nxtdevices: remove note about old touch sensor Special treatment is no longer necessary; if no such touch sensor is detected, the port switches to analog mode. This means that it will work for both touch sensor types, as well as custom switches.
@@ -16,21 +16,6 @@ NXT Touch Sensor .. automethod:: pybricks.nxtdevices.TouchSensor.pressed - .. toggle-header:: - :header: **Using older NXT Touch Sensors** - - **Example: Using a first-generation NXT Touch Sensor.** - - Normally, the EV3 brick always verifies that a sensor is attached - before you can use it. This means that your program stops if a sensor - that you selected was not found. The very first generation of NXT Touch - Sensors did not support this functionality. - To use these sensors, set ``verify_type=False`` as follows:: - - from pybricks.nxtdevices import TouchSensor - from pybricks.parameters import Port - my_sensor = TouchSensor(Port.S1, verify_type=False) - NXT Light Sensor ^^^^^^^^^^^^^^^^ .. autoclass:: pybricks.nxtdevices.LightSensor
ENH: added app.result.tabular_result [NEW] container for tabular result classes
@@ -443,3 +443,13 @@ class bootstrap_result(generic_result): """returns the LR values corresponding to the synthetic data""" result = [self[k].LR for k in self if k != "observed"] return result + + +class tabular_result(generic_result): + """stores one or multiple tabular data sets, keyed by a title""" + + _type = "tabular_result" + _stat_attrs = ("header", "rows") + + def __init__(self, source=None): + super(tabular_result, self).__init__(source)
Reproduce bug: nova-compute fails to start if the hypervisor has PCI addresses with a 32-bit domain. Related-Bug:
@@ -22,6 +22,7 @@ from oslo_utils.fixture import uuidsentinel import nova from nova.compute import vm_states from nova import context +from nova import exception from nova import objects from nova.objects import fields from nova.pci import manager @@ -236,6 +237,42 @@ class PciDevTrackerTestCase(test.NoDBTestCase): tracker.update_devices_from_hypervisor_resources(fake_pci_devs_json) self.assertEqual(2, len(tracker.pci_devs)) + def test_update_devices_from_hypervisor_resources_32bit_domain(self): + self.flags( + group='pci', + passthrough_whitelist=[ + '{"product_id":"2032", "vendor_id":"8086"}']) + # There are systems where 32 bit PCI domain is used. See bug 1897528 + # for example. While nova (and qemu) does not support assigning such + # devices but the existence of such device in the system should not + # lead to an error. + fake_pci = { + 'compute_node_id': 1, + 'address': '10000:00:02.0', + 'product_id': '2032', + 'vendor_id': '8086', + 'request_id': None, + 'status': fields.PciDeviceStatus.AVAILABLE, + 'dev_type': fields.PciDeviceType.STANDARD, + 'parent_addr': None, + 'numa_node': 0} + + fake_pci_devs = [fake_pci] + fake_pci_devs_json = jsonutils.dumps(fake_pci_devs) + tracker = manager.PciDevTracker(self.fake_context) + # We expect that the device with 32bit PCI domain is ignored + # tracker.update_devices_from_hypervisor_resources(fake_pci_devs_json) + # self.assertEqual(0, len(tracker.pci_devs)) + # + # This is the bug 1897528 + ex = self.assertRaises( + exception.PciConfigInvalidWhitelist, + tracker.update_devices_from_hypervisor_resources, + fake_pci_devs_json) + self.assertEqual( + 'Invalid PCI devices Whitelist config: property domain (10000) is ' + 'greater than the maximum allowable value (FFFF).', str(ex)) + def test_set_hvdev_new_dev(self): fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2') fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
Fix xvfb for Travis (now it's a service). Starting on January 15, 2019, Travis CI released an update to their Xenial build environment which introduced a new way to start up `Xvfb`, and that caused the Travis build to fail. See also:
@@ -4,6 +4,8 @@ language: python python: - '3.6-dev' - '3.7-dev' +services: + - xvfb before_install: - export PATH=/usr/bin:$PATH - sudo apt-get update -q @@ -19,9 +21,7 @@ before_install: # 'Gtk3 requires X11, and no DISPLAY environment variable is set' # http://docs.travis-ci.com/user/gui-and-headless-browsers/#Starting-a-Web-Server - sudo apt-get install -y xvfb - - "export DISPLAY=:99.0" - - "sh -e /etc/init.d/xvfb start" - - sleep 3 + - "/sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -ac -screen 0 1280x1024x16" install: - pip install --upgrade setuptools - pip install --upgrade cython
./CarlaUE4 to ./CarlaUE4.sh. It seems that in the pre-compiled version 0.9.0 the command line is: ./CarlaUE4.sh -carla-server -windowed -ResX=320 -ResY=240
@@ -73,7 +73,7 @@ Run the following command after replacing [PATH_TO_CARLA] with the actual path t If you use the builded binary (0.8.2): - ./CarlaUE4 -carla-server -windowed -ResX=320 -ResY=240 + ./CarlaUE4.sh -carla-server -windowed -ResX=320 -ResY=240 Wait for the message:
Fix ConfigRegister naming scheme. Use the value and length of the address to generate the ConfigRegister name (rather than the debug value, which is usually anonymous).
@@ -26,7 +26,10 @@ def define_config_register(width, address, has_reset, _type=m.Bits): def get_name(): type_name = str(T).replace("(", "$").replace(")", "$") - return "ConfigRegister_%s_%s_%s" % (type_name, address, has_reset) + addr_value = m.bitutils.seq2int(address.bits()) + addr_N = address.N + return ("ConfigRegister_%s_%s_%s" % + (type_name, addr_N, addr_value, has_reset)) class _ConfigRegister(m.Circuit): name = get_name()
Fix for rectangles of listview subitems when the listview is a table and has cells.
@@ -254,6 +254,9 @@ class _listview_item(object): remote_mem = RemoteMemoryBlock(self.listview_ctrl) rect = win32structures.RECT() + # If listview_ctrl has LVS_REPORT we can get access to subitems rectangles + is_table = self.listview_ctrl.has_style(win32defines.LVS_REPORT) + if area.lower() == "all" or not area: rect.left = win32defines.LVIR_BOUNDS elif area.lower() == "icon": @@ -261,20 +264,27 @@ class _listview_item(object): elif area.lower() == "text": rect.left = win32defines.LVIR_LABEL elif area.lower() == "select": - rect.left = win32defines.LVIR_SELECTBOUNDS + rect.left = win32defines.LVIR_BOUNDS if is_table else win32defines.LVIR_SELECTBOUNDS else: raise ValueError('Incorrect rectangle area of the list view item: "' + str(area) + '"') + if is_table: + # The one-based index of the subitem. + rect.top = self.subitem_index + # Write the local RECT structure to the remote memory block remote_mem.Write(rect) + # Depends on subitems rectangles availability + message = win32defines.LVM_GETSUBITEMRECT if is_table else win32defines.LVM_GETITEMRECT + # Fill in the requested item retval = self.listview_ctrl.send_message( - win32defines.LVM_GETITEMRECT, + message, self.item_index, remote_mem) - # if it succeeded + # If it not succeeded if not retval: del remote_mem raise RuntimeError("Did not succeed in getting rectangle")
Create port with port_vnic_type and port_profile from config. Modify the create_port function to use the tempest.conf parameters 'port_vnic_type' and 'port_profile' in case they are defined.
@@ -94,6 +94,10 @@ class ScenarioTest(tempest.test.BaseTestCase): if not client: client = self.ports_client name = data_utils.rand_name(self.__class__.__name__) + if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs: + kwargs['binding:vnic_type'] = CONF.network.port_vnic_type + if CONF.network.port_profile and 'binding:profile' not in kwargs: + kwargs['binding:profile'] = CONF.network.port_profile result = client.create_port( name=name, network_id=network_id,
[tests] Fix TooManyRedirects failure for test_merriam_webster. Catch the requests.exceptions.TooManyRedirects exception as an expected result if no Site can be created from this URL.
@@ -10,7 +10,7 @@ from contextlib import suppress from http import HTTPStatus from urllib.parse import urlparse -from requests.exceptions import ConnectionError, Timeout +from requests.exceptions import ConnectionError, Timeout, TooManyRedirects import pywikibot @@ -48,7 +48,7 @@ class SiteDetectionTestCase(TestCase): @raises AssertionError: Site under url is MediaWiki powered """ with self.assertRaises((AttributeError, ConnectionError, RuntimeError, - ServerError, Timeout)) as e: + ServerError, Timeout, TooManyRedirects)) as e: MWSite(url) unittest_print('\nassertNoSite expected exception:\n{e!r}' .format(e=e.exception))
DOC: move some docstring parts of exponweib to weibull_min. Also, add an explanation that the Weibull min distribution is often simply called "the Weibull" distribution.
@@ -1549,6 +1549,10 @@ class exponweib_gen(rv_continuous): %(before_notes)s + See Also + -------- + weibull_min, numpy.random.weibull + Notes ----- The probability density function for `exponweib` is: @@ -1569,11 +1573,8 @@ class exponweib_gen(rv_continuous): * :math:`a` is the exponentiation parameter, with the special case :math:`a=1` corresponding to the usual - (non-exponentiated) Weibull distribution. - * :math:`c` is the shape parameter of the usual Weibull law - (often named :math:`k`, but named :math:`a` in `numpy.random.weibull`). - Special values are :math:`c=1` and :math:`c=2` where Weibull distribution - reduces to the `expon` and `rayleigh` distributions respectively. + (non-exponentiated) Weibull distribution `weibull_min`. + * :math:`c` is the shape parameter of the non-exponentiated Weibull law. %(after_notes)s @@ -1920,11 +1921,14 @@ foldnorm = foldnorm_gen(a=0.0, name='foldnorm') class weibull_min_gen(rv_continuous): r"""Weibull minimum continuous random variable. + The Weibull Minimum Extreme Value distribution, from extreme value theory, + is also often simply called the Weibull distribution. + %(before_notes)s See Also -------- - weibull_max + weibull_max, numpy.random.weibull, exponweib Notes ----- @@ -1937,9 +1941,16 @@ class weibull_min_gen(rv_continuous): for :math:`x >= 0`, :math:`c > 0`. `weibull_min` takes ``c`` as a shape parameter for :math:`c`. + (named :math:`k` in Wikipedia article and :math:`a` in `numpy.random.weibull`) + Special shape values are :math:`c=1` and :math:`c=2` where Weibull distribution + reduces to the `expon` and `rayleigh` distributions respectively. %(after_notes)s + References + ---------- + https://en.wikipedia.org/wiki/Weibull_distribution + %(example)s """
Solid Guide. Summary: Wrote up a guide explaining our core abstraction, the solid. Probably need to move around some of this content. Test Plan: Read. Reviewers: natekupp, alangenfeld, max
@@ -8,3 +8,4 @@ Learn principles guides/logging/logging + guides/solid/solid
Fixed typo in ThirdLevel.do_say() print. Added shebang line.
+#!/usr/bin/env python """ Create a CLI with a nested command structure as follows. The commands 'second' and 'third' navigate the CLI to the scope of the submenu. Nesting of the submenus is done with the cmd2.AddSubmenu() decorator. @@ -25,7 +26,7 @@ class ThirdLevel(cmd2.Cmd): def do_say(self, line): print("You called a command in ThirdLevel with '%s'. " - "It has access to second_level_attr: %s " + "It has access to top_level_attr: %s " "and second_level_attr: %s" % (line, self.top_level_attr, self.second_level_attr)) def help_say(self):
Better manage files table refresh and remove gif import. Removed the gif import to better match the REFI standard.
@@ -482,11 +482,6 @@ class DialogManageFiles(QtWidgets.QDialog): self.parent_textEdit.append(entry['name'] + _(" imported.")) self.source.append(entry) - # clear and refill table widget - for r in self.source: - self.ui.tableWidget.removeRow(0) - self.fill_table() - def load_file_text(self, import_file): """ Import from file types of odt, docx pdf, epub, txt, html, htm. """
Add new context keys. This commit adds new context keys that have been introduced in the various ST4 builds.
"details": "Match the scope at the end of the line", "kind": ["variable", "k", "key"], }, + { + "trigger": "overlay_has_focus", + "details": "Overlay has focus", + "kind": ["variable", "k", "key"], + }, + { + "trigger": "overlay_name", + "details": "Name of the overlay open", + "kind": ["variable", "k", "key"], + }, + { + "trigger": "group_has_multiselect", + "details": "Group has multiselected tabs", + "kind": ["variable", "k", "key"], + }, + { + "trigger": "group_has_transient_sheet", + "details": "Group has a transient sheet", + "kind": ["variable", "k", "key"], + }, + { + "trigger": "has_snippet", + "details": "Word before cursor expands to a snippet", + "kind": ["variable", "k", "key"], + }, + { + "trigger": "is_javadoc", + // "details": "TODO", + "kind": ["variable", "k", "key"], + } ] }
Bump timeout for test. This was failing inconsistently on Mac, showing <BLANKLINE> along with exit -9 (killed), suggesting we're killing the process before the command can generate the preview.
@@ -6,7 +6,7 @@ review the setting before continuing. Because the prompt waits for user input, we need to terminate the process using a timeout: - >>> run("guild train mnist-softmax", timeout=1) + >>> run("guild train mnist-softmax", timeout=2) You are about to run mnist/mnist-softmax:train batch-size: 100 epochs: 10
[GeneralChannel] change cooldowns to more closely match Discord rate limits. TL;DR: The new rate limit for channel NAME AND TOPIC updates is 2 updates per 10 minutes, per channel. Reference: [Link to message on Discord Developers server](https://discord.com/channels/613425648685547541/697138785317814292/715995470048264233)
@@ -54,7 +54,7 @@ class GeneralChannel(commands.Cog): await ctx.tick() @gc.command(name="name") - @commands.cooldown(1, 60, commands.BucketType.user) + @commands.cooldown(2, 600, commands.BucketType.user) @commands.check(server_set) async def gcname(self, ctx, *, name: str): """Change name of #general""" @@ -75,7 +75,7 @@ class GeneralChannel(commands.Cog): await ctx.tick() @gc.command(name="topic") - @commands.cooldown(1, 60, commands.BucketType.user) + @commands.cooldown(2, 600, commands.BucketType.user) @commands.check(server_set) async def gctopic(self, ctx, *, topic: str = None): """Change topic of #general
Fix test assertion This test was calling `.load` on model objects, when it should have been calling `.dump`. This was not working as expected before the marshmallow upgrade either - the objects returned were errors and not template versions.
@@ -438,8 +438,9 @@ def test_get_template_versions(sample_template): assert versions[1].updated_at is not None from app.schemas import template_history_schema - v = template_history_schema.load(versions, many=True) + v = template_history_schema.dump(versions, many=True) assert len(v) == 2 + assert {template_history['version'] for template_history in v} == {1, 2} def test_get_template_versions_is_empty_for_hidden_templates(sample_service):
Get time_step_spec from the environment's time_step_spec, not its observation_spec. The latter makes the assumption that the PyEnvironment uses the default time_step_spec structure, which is not necessarily true.
@@ -143,9 +143,8 @@ class TFPyEnvironment(tf_environment.TFEnvironment): 'wrapped environment are no longer guaranteed to happen in a common ' 'thread. Environment: %s', (self._env,)) - observation_spec = tensor_spec.from_spec(self._env.observation_spec()) action_spec = tensor_spec.from_spec(self._env.action_spec()) - time_step_spec = ts.time_step_spec(observation_spec) + time_step_spec = tensor_spec.from_spec(self._env.time_step_spec()) batch_size = self._env.batch_size if self._env.batch_size else 1 super(TFPyEnvironment, self).__init__(time_step_spec,
Fix pattern matching Patch incorporating Closes
import pytube from pytube import request -from pytube.extract import get_ytplayer_config, apply_signature, js_url +from pytube.extract import apply_signature, js_url +from typing import Any def apply_patches(): """ @@ -46,6 +47,8 @@ def apply_patches(): pytube.__main__.YouTube.descramble = descramble # Patch 3: https://github.com/nficano/pytube/pull/701 pytube.cipher.get_initial_function_name = get_initial_function_name + # Patch 4: https://github.com/nficano/pytube/pull/726 + pytube.__main__.get_ytplayer_config = get_ytplayer_config # The below imports are required by the patches @@ -214,3 +217,32 @@ def get_initial_function_name(js: str) -> str: raise RegexMatchError( caller="get_initial_function_name", pattern="multiple" ) + +def get_ytplayer_config(html: str) -> Any: + """Get the YouTube player configuration data from the watch html. + + Extract the ``ytplayer_config``, which is json data embedded within the + watch html and serves as the primary source of obtaining the stream + manifest data. + + :param str html: + The html contents of the watch page. + :rtype: str + :returns: + Substring of the html containing the encoded manifest data. + """ + config_patterns = [ + r";ytplayer\.config\s*=\s*({.+?});ytplayer", + r";yt\.setConfig\(\{'PLAYER_CONFIG':\s*({.*})}\);", + r";yt\.setConfig\(\{'PLAYER_CONFIG':\s*({.*})(,'EXPERIMENT_FLAGS'|;)", # noqa: E501 + ] + for pattern in config_patterns: + regex = re.compile(pattern) + function_match = regex.search(html) + if function_match: + yt_player_config = function_match.group(1) + return json.loads(yt_player_config) + + raise RegexMatchError( + caller="get_ytplayer_config", pattern="config_patterns" + )
Fix for Python 2 env. Since the code uses `from __future__ import unicode_literals`, string literals will be of `unicode` type in Python 2. Use `six.string_types` in `isinstance()` instead of the `str` type.
@@ -9,6 +9,7 @@ import argparse import functools import gdb +import six import pwndbg.chain import pwndbg.color @@ -225,7 +226,7 @@ class ArgparsedCommand(object): """ :param parser_or_desc: `argparse.ArgumentParser` instance or `str` """ - if isinstance(parser_or_desc, str): + if isinstance(parser_or_desc, six.string_types): self.parser = argparse.ArgumentParser(description=parser_or_desc) else: self.parser = parser_or_desc
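A standalone sketch of the behavior this fix accounts for; with `unicode_literals` in effect, a Python 2 string literal is `unicode` and fails an `isinstance(..., str)` check, while `six.string_types` matches on both major versions (the variable name is illustrative).
from __future__ import unicode_literals

import six

description = "some parser description"  # unicode on Python 2, str on Python 3

print(isinstance(description, str))               # False on Python 2, True on Python 3
print(isinstance(description, six.string_types))  # True on both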
[unit test] skip some tests if OpenGL is not installed. close
@@ -41,6 +41,13 @@ from silx.gui.colors import rgba from silx.gui.colors import Colormap from silx import sx +try: + import OpenGL +except ImportError: + has_opengl = False +else: + has_opengl = True + _logger = logging.getLogger(__name__) @@ -193,6 +200,7 @@ class SXTest(TestCaseQt, ParametricTestCase): plt.setAttribute(qt.Qt.WA_DeleteOnClose) plt.close() + @unittest.skipUnless(has_opengl, 'OpenGL not installed') @unittest.skipUnless(test_options.WITH_GL_TEST, test_options.WITH_GL_TEST_REASON) def test_contour3d(self): @@ -244,6 +252,7 @@ class SXTest(TestCaseQt, ParametricTestCase): self.assertEqual(rgba(isosurfaces[0].getColor()), (0., 0., 0., 0.4)) + @unittest.skipUnless(has_opengl, 'OpenGL not installed') @unittest.skipUnless(test_options.WITH_GL_TEST, test_options.WITH_GL_TEST_REASON) def test_points3d(self):
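The general try-import/skipUnless pattern used here, shown as a self-contained sketch (class and test names are invented):
import unittest

try:
    import OpenGL  # noqa: F401
except ImportError:
    has_opengl = False
else:
    has_opengl = True


class Test3D(unittest.TestCase):

    @unittest.skipUnless(has_opengl, "OpenGL not installed")
    def test_needs_opengl(self):
        self.assertTrue(has_opengl)


if __name__ == "__main__":
    unittest.main()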
Updates based on feedback Removed sentence previously added. Added information regarding the Group Export Dashboard
@@ -197,11 +197,11 @@ For more information about letter case in MySQL table names and the ``--lower-ca Migrating from HipChat Server and HipChat Data Center to Mattermost ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -HipChat.com, Stride, HipChat Server and HipChat Data Center are all being discontinued by Atlassian. Please refer to our `Migration Guidelines for Success <https://docs.mattermost.com>`_ to assist you through the migration process, as well addtitional information below. +HipChat.com, Stride, HipChat Server and HipChat Data Center are all being discontinued by Atlassian. For HipChat Data Center customers with limited amounts of data stored, follow the `Export data from HipChat Data Center Guide <https://confluence.atlassian.com/hipchatdc3/export-data-from-hipchat-data-center-913476832.html>`_ and use the `Mattermost ETL framework <https://github.com/Brightscout/mattermost-etl>`_ to import the solution. If you have questions or encounter issues, `please open a ticket <https://github.com/Brightscout/mattermost-etl/issues>`_. -For teams with large amounts of data, the export function has been reported to fail and it may be difficult to reclaim your team's data. Consider contacting HipChat support to see if a new solution is available. +For teams with large amounts of data, the export function has been reported to fail and it may be difficult to reclaim your team's data. Atlassian recommends upgrading to the latest version of HipChat Server or HipChat Data Center for access to the Group Export Dashboard. You can view their instructions on exporting your data `here <https://www.atlassian.com/partnerships/slack/migration>`_. Consider contacting HipChat support if this solution does not allow you to extract all your data. For teams unable to extract their data from HipChat, the most standard procedure is to run Mattermost and HipChat in parallel until all the users have moved to Mattermost, then deprecate the HipChat instance.
Include the host name in the encoded message key. This ensures that two messages with similar content from different devices won't overlap; e.g., when two devices complain about the same NTP server being unreachable, we should have two separate messages.
@@ -289,7 +289,9 @@ class NapalmLogsServerProc(NapalmLogsProc): if six.PY3: dev_os = bytes(dev_os, 'utf-8') if self._buffer: - message = '{dev_os}/{msg}'.format(dev_os=dev_os, msg=msg_dict['message']) + message = '{dev_os}/{host}/{msg}'.format(dev_os=dev_os, + host=msg_dict['host'], + msg=msg_dict['message']) message_key = base64.b64encode(message) if self._buffer[message_key]: log.info('"%s" seems to be already buffered, skipping', msg_dict['message'])
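A simplified sketch of the keying scheme after this change (host names and messages are made up): identical messages from different hosts now map to different buffer keys, so neither suppresses the other.
import base64

def buffer_key(dev_os, host, message):
    # The host is part of the key, so equal messages from different devices don't collide
    return base64.b64encode("{}/{}/{}".format(dev_os, host, message).encode("utf-8"))

key1 = buffer_key("junos", "router-1", "NTP server unreachable")
key2 = buffer_key("junos", "router-2", "NTP server unreachable")
print(key1 != key2)  # True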
Update windows_indirect_command_execution_via_forfiles.yml Updated description info on forfiles detection yaml
name: Windows Indirect Command Execution Via forfiles id: 59e54602-9680-11ec-a8a6-aaaaaaaaaaaa version: 1 -date: '2022-03-09' +date: '2022-04-05' author: Eric McGinnis, Splunk type: TTP datamodel: - Endpoint -description: The following analytic detection programs that have been started by pcalua.exe, forfiles, forfiles.exe, or conhost.exe. - While these tools can be used to start legitimate programs, they have been observed being evade protections on command line execution. +description: The following analytic detects programs that have been started by forfiles.exe. + According to Microsoft, the 'The forfiles command lets you run a command on or pass + arguments to multiple files'. While this tool can be used to start legitimate programs, + usually within the context of a batch script, it has been observed being used to evade + protections on command line execution. search: '| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where Processes.parent_process="*forfiles* /c *" by Processes.dest Processes.user Processes.parent_process Processes.parent_process_name Processes.process_name Processes.process Processes.process_id Processes.parent_process_id Processes.process_path @@ -17,12 +20,12 @@ search: '| tstats `security_content_summariesonly` count min(_time) as firstTime how_to_implement: To successfully implement this search, you need to be ingesting logs with the full process path in the process field of CIM's Process data model. If you are using Sysmon, you must have at least version 6.0.4 of the Sysmon TA. - Tune and filter known instances where renamed forfiles.exe and pcalua.exe may be used. + Tune and filter known instances where forfiles.exe may be used. known_false_positives: Some legacy applications may be run using pcalua.exe. Similarly, forfiles.exe may be used in legitimate batch scripts. Filter these results as needed. references: - https://twitter.com/KyleHanslovan/status/912659279806640128 - - REF2 - FIND ANOTHER REFERENCE + - https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/forfiles tags: analytic_story: - Living Off The Land
Fix docstrings in conductor manager. Remove the NodeCleaningFailure exception from the docstrings of two methods; they do not raise it.
@@ -706,8 +706,6 @@ class ConductorManager(base_manager.BaseConductorManager): :param task: A TaskManager object :param skip_current_step: True to skip the current clean step; False to include it. - :raises: NodeCleaningFailure if an internal error occurred when - getting the next clean steps :returns: index of the next clean step; None if there are no clean steps to execute. @@ -811,8 +809,6 @@ class ConductorManager(base_manager.BaseConductorManager): async task :raises: NodeLocked if node is locked by another conductor. :raises: NodeNotFound if the node no longer appears in the database - :raises: NodeCleaningFailure if an internal error occurred when - getting the next clean steps """ LOG.debug("RPC continue_node_clean called for node %s.", node_id)
mypy: add "Optional" to attributes that allow None. These attributes are also used with a None value.
@@ -564,10 +564,10 @@ class ImageBuildWorkflowData(ISerializer): plugin_failed: bool = False # info about pre-declared build, build-id and token - reserved_build_id: int = None - reserved_token: str = None + reserved_build_id: Optional[int] = None + reserved_token: Optional[str] = None koji_source_nvr: Dict[str, str] = field(default_factory=dict) - koji_source_source_url: str = None + koji_source_source_url: Optional[str] = None koji_source_manifest: Dict[str, Any] = field(default_factory=dict) buildargs: Dict[str, str] = field(default_factory=dict) # --buildargs for container build @@ -588,7 +588,7 @@ class ImageBuildWorkflowData(ISerializer): # List of RPMs that go into the final result, as per utils.rpm.parse_rpm_output # Each RPM inside is a mapping containing the name, version, release and other attributes. - image_components: List[Dict[str, Union[int, str]]] = None + image_components: Optional[List[Dict[str, Union[int, str]]]] = None # List of all yum repos. The provided repourls might be changed (by resolve_composes) when # inheritance is enabled. This property holds the updated list of repos, allowing @@ -601,7 +601,7 @@ class ImageBuildWorkflowData(ISerializer): labels: Dict[str, Any] = field(default_factory=dict) # OSBS2 TBD - image_id: str = None + image_id: Optional[str] = None parent_images_digests: Dict[str, Dict[str, str]] = field(default_factory=dict) build_result: BuildResult = field(init=False)
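A minimal dataclass sketch of the convention the commit enforces: a field that defaults to None should be annotated `Optional[...]` so mypy accepts both the default and a later concrete value (the field names below are illustrative, not the real workflow data).
from dataclasses import dataclass
from typing import Optional


@dataclass
class BuildData:
    # Optional[int] tells mypy that None is a legal value for this field
    reserved_build_id: Optional[int] = None
    reserved_token: Optional[str] = None


data = BuildData()
data.reserved_build_id = 42  # assigning a concrete int later is also valid
print(data)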
Add support for new trace callTypes and actions. Add new callTypes: callcode and staticcall. Add new action: reward.
@@ -32,17 +32,26 @@ class ConfirmationType(Enum): class EthereumTxCallType(Enum): + # https://ethereum.stackexchange.com/questions/63743/whats-the-difference-between-type-and-calltype-in-parity-trace CALL = 0 DELEGATE_CALL = 1 + CALL_CODE = 2 + STATIC_CALL = 3 @staticmethod - def parse_call_type(call_type: str): + def parse_call_type(call_type: Optional[str]): if not call_type: return None - elif call_type.lower() == 'call': + + call_type = call_type.lower() + if call_type == 'call': return EthereumTxCallType.CALL - elif call_type.lower() == 'delegatecall': + elif call_type == 'delegatecall': return EthereumTxCallType.DELEGATE_CALL + elif call_type == 'callcode': + return EthereumTxCallType.CALL_CODE + elif call_type == 'staticcall': + return EthereumTxCallType.STATIC_CALL else: return None @@ -51,6 +60,7 @@ class EthereumTxType(Enum): CALL = 0 CREATE = 1 SELF_DESTRUCT = 2 + REWARD = 3 @staticmethod def parse(tx_type: str): @@ -61,6 +71,8 @@ class EthereumTxType(Enum): return EthereumTxType.CREATE elif tx_type == 'SUICIDE': return EthereumTxType.SELF_DESTRUCT + elif tx_type == 'REWARD': + return EthereumTxType.REWARD else: raise ValueError(f'{tx_type} is not a valid EthereumTxType')
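A condensed, runnable sketch of the parsing behavior after the change; the enum members mirror the diff, but the dict lookup replaces the if/elif chain purely for brevity.
from enum import Enum
from typing import Optional


class CallType(Enum):
    CALL = 0
    DELEGATE_CALL = 1
    CALL_CODE = 2
    STATIC_CALL = 3

    @staticmethod
    def parse(call_type: Optional[str]) -> Optional["CallType"]:
        if not call_type:
            return None
        return {
            "call": CallType.CALL,
            "delegatecall": CallType.DELEGATE_CALL,
            "callcode": CallType.CALL_CODE,
            "staticcall": CallType.STATIC_CALL,
        }.get(call_type.lower())


print(CallType.parse("staticcall"))  # CallType.STATIC_CALL
print(CallType.parse("unknown"))     # None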
Review of 'Overview' documentation section * Reducing text amount; Fixing CoZ channel URL; Removing sister projects (using links instead); * Made changes as requested. * title update
Overview ======== -What does it currently do -^^^^^^^^^^^^^^^^^^^^^^^^^ - -- This project aims to be a full port of the original C# `NEO project <https://github.com/neo-project>`_ -- Run a Python based P2P node -- Interactive CLI for configuring node and inspecting blockchain -- Compile, test, deploy and run Smart Contracts written in python or any Smart Contract in the ``.avm`` format -- `NEP2 <https://github.com/neo-project/proposals/blob/master/nep-2.mediawiki>`_ and `NEP5 <https://github.com/neo-project/proposals/blob/master/nep-5.mediawiki>`_ compliant wallet functionality -- RPC Client -- RPC server -- Notification Server ( for viewing transfers of NEP5 tokens ) -- ``Runtime.Log`` and ``Runtime.Notify`` event monitoring - -What will it do -^^^^^^^^^^^^^^^ +Neo-python is an implementation of the Neo protocol using Python. + + +What it currently does +^^^^^^^^^^^^^^^^^^^^^^ -- Consensus nodes -- Full smart contract debugging and inspection +- A Command Line Interface (CLI) for node configuration and blockchain inspection. +- Smart Contracts development tool using `neo-boa <https://github.com/CityOfZion/neo-boa>`_, debugging and deployment using the command line. +- `RPC Client <https://github.com/CityOfZion/neo-python-rpc>`_ and RPC Server. +- Notification server with optional REST endpoints (for viewing transfers of NEP5 tokens). +- ``Runtime.Log`` and ``Runtime.Notify`` event monitoring. +- `NEP2 <https://github.com/neo-project/proposals/blob/master/nep-2.mediawiki>`_ and `NEP5 <https://github.com/neo-project/proposals/blob/master/nep-5.mediawiki>`_ compliant wallet. +- This project aimed to be an alternative implementation for the original C# `NEO + project <https://github.com/neo-project>`_. + + +Neo 3 +^^^^^ +Our current focus is on Neo 3, with neo-python (Neo 2) only receiving the necessary bug fixes. Getting started ^^^^^^^^^^^^^^^ -Please follow directions in `the install section <install.html>`_ +Please follow directions in `the install section <install.html>`_. -The main functionality for this project is contained within the cli application ``np-prompt``. You can `view usage details here <prompt.html>`_ +The main functionality for this project is contained within the cli application ``np-prompt``. You can `view usage details here <prompt.html>`_. -We have published a Youtube `video <https://youtu.be/oy6Z_zd42-4>`_ to help get you started with this library. There are other videos under the `CityOfZion <(https://www.youtube.com/channel/UCzlQUNLrRa8qJkz40G91iJg>`_ Youtube channel. +Watch this `video <https://youtu.be/oy6Z_zd42-4>`_ to help get you started. -Sister projects -^^^^^^^^^^^^^^^ +Visit `CityOfZion <https://www.youtube.com/channel/UCzlQUNLrRa8qJkz40G91iJg>`_ for more video tutorials. -- `neo-python-rpc <https://github.com/CityOfZion/neo-python-rpc>`_: NEO RPC client in Python -- `neo-boa <https://github.com/CityOfZion/neo-boa>`_: Write smart contracts with Python
refactor: tests: popups: Assign mocked keypress to a variable. This commit assigns the mocked keypress method of the emoji_picker_view within `test_mouse_event` of the TestEmojiPickerView class to a variable, so that the corresponding assert method calls are type consistent.
@@ -1372,10 +1372,10 @@ class TestEmojiPickerView: ) def test_mouse_event(self, mocker, widget_size, event, button, keypress): emoji_picker = self.emoji_picker_view - mocker.patch.object(emoji_picker, "keypress") + mocked_emoji_picker_keypress = mocker.patch.object(emoji_picker, "keypress") size = widget_size(emoji_picker) emoji_picker.mouse_event(size, event, button, 0, 0, mocker.Mock()) - emoji_picker.keypress.assert_called_once_with(size, keypress) + mocked_emoji_picker_keypress.assert_called_once_with(size, keypress) @pytest.mark.parametrize("key", keys_for_command("SEARCH_EMOJIS")) def test_keypress_search_emoji(self, key, widget_size):
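The same assign-the-mock idea in a dependency-free sketch using unittest.mock instead of pytest-mock's `mocker` (the widget class is invented): patching returns the mock object, so assertions can be made on that variable rather than on the patched attribute.
from unittest import mock


class Widget:
    def keypress(self, size, key):
        return key


widget = Widget()

# patch.object returns the mock, so we keep a handle to it for the assertion
with mock.patch.object(widget, "keypress") as mocked_keypress:
    widget.keypress((80, 24), "enter")
    mocked_keypress.assert_called_once_with((80, 24), "enter")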
Build : Fix bug with builds using LOCATE_DEPENDENCY_RESOURCESPATH We also install other files (doc examples) into the resources folder, so SCons needs an explicit list of files from the dependency resources to copy over.
@@ -1214,7 +1214,14 @@ else : resources = None if commandEnv.subst( "$LOCATE_DEPENDENCY_RESOURCESPATH" ) : - resources = commandEnv.Install( "$BUILD_DIR", "$LOCATE_DEPENDENCY_RESOURCESPATH" ) + + resources = [] + resourceRoot = commandEnv.subst( "$LOCATE_DEPENDENCY_RESOURCESPATH" ) + for root, dirs, files in os.walk( resourceRoot ) : + for f in files : + fullPath = os.path.join( root, f ) + resources.append( commandEnv.Command( fullPath.replace( resourceRoot, "$BUILD_DIR/resources/", 1 ), fullPath, Copy( "$TARGET", "$SOURCE" ) ) ) + commandEnv.NoCache( resources ) commandEnv.Alias( "build", resources )
modules/nilrt_ip.py: Fix disable function When an interface is disabled, the adaptor mode should be Disabled.
@@ -36,6 +36,7 @@ except ImportError: try: import pyiface + from pyiface.ifreqioctls import IFF_LOOPBACK, IFF_RUNNING except ImportError: pyiface = None @@ -54,8 +55,6 @@ INTERFACES_CONFIG = "/var/lib/connman/interfaces.config" NIRTCFG_PATH = "/usr/local/natinst/bin/nirtcfg" INI_FILE = "/etc/natinst/share/ni-rt.ini" _CONFIG_TRUE = ["yes", "on", "true", "1", True] -IFF_LOOPBACK = 0x8 -IFF_RUNNING = 0x40 NIRTCFG_ETHERCAT = "EtherCAT" @@ -559,7 +558,7 @@ def _change_state(interface, new_state): """ if __grains__["lsb_distrib_id"] == "nilrt": initial_mode = _get_adapter_mode_info(interface) - _save_config(interface, "Mode", "TCPIP") + _save_config(interface, "Mode", "TCPIP" if new_state == "up" else "Disabled") if initial_mode == "ethercat": __salt__["system.set_reboot_required_witnessed"]() else:
Documentation: Add a bash header template The bash files should have sanitized headers as well.
@@ -184,6 +184,73 @@ class HeaderTemplate(ABC): return self.get_header() in "".join(file_content) +class BashHeaderTemplate(HeaderTemplate): + @staticmethod + def _get_file_shebag(file_path: str) -> Optional[str]: + """ + Returns the bash file shebag. + + :param file_path: The path to the file. + :returns: The python file shebag if present. This includes a new line character. + :throws UnicodeDecodeError: If the file could not be read. + """ + with open(file_path, "r") as file_obj: + file_content = file_obj.readlines() + + if len(file_content) == 0: + return None + if "#!/bin/bash" in file_content[0]: + return file_content[0] + return None + + @staticmethod + def is_file_suitable(file_path: str) -> bool: + if PythonHeaderTemplate._should_file_be_ignored(file_path): + return False + + try: + is_file_using_shebag = BashHeaderTemplate._get_file_shebag(file_path) is not None + except UnicodeDecodeError: + # File could not be opened, this makes it automatically invalid + return False + + return os.path.splitext(file_path)[1] == ".sh" or is_file_using_shebag + + def __init__(self, file_path: str): + super().__init__(file_path) + + def get_header(self) -> str: + shebag_string = BashHeaderTemplate._get_file_shebag(self.file_path) or "" + + return shebag_string \ + + """# -*- coding: utf-8 -*- +# Copyright European Organization for Nuclear Research (CERN) since 2012 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + + def current_header_number_of_lines(self) -> int: + with open(self.file_path, "r") as file_obj: + file_content = file_obj.readlines() + + ret = 0 + for line in file_content: + if not line.startswith("#"): + break + ret += 1 + return ret + + class PythonHeaderTemplate(HeaderTemplate): @staticmethod def _should_file_be_ignored(file_path: str) -> bool:
FIX: Return a copy of the empty array at the early exit. [ci skip] Will run the tests depending on the result of the atleast_2d() discussion.
@@ -30,7 +30,7 @@ def _cholesky(a, lower=False, overwrite_a=False, clean=True, # Quick return for square empty array if a1.size == 0: - return a1, lower + return a1.copy(), lower overwrite_a = overwrite_a or _datacopied(a1, a) potrf, = get_lapack_funcs(('potrf',), (a1,))
Fix empty key for global options in run tracker. The global scope "key" is an empty string, so fix that in the recorded options dict. Follow up for: (screenshot: https://user-images.githubusercontent.com/1268088/93523412-da310180-f8e7-11ea-8f93-f4540ecf4c30.png)
@@ -25,6 +25,7 @@ from pants.goal.aggregated_timings import AggregatedTimings from pants.goal.pantsd_stats import PantsDaemonStats from pants.option.config import Config from pants.option.options_fingerprinter import CoercingOptionEncoder +from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION from pants.option.subsystem import Subsystem from pants.reporting.json_reporter import JsonReporter from pants.reporting.report import Report @@ -613,6 +614,8 @@ class RunTracker(Subsystem): scopes = self._all_options.known_scope_to_info.keys() for scope in scopes: scope_and_maybe_option = scope.split("^") + if scope == GLOBAL_SCOPE: + scope = GLOBAL_SCOPE_CONFIG_SECTION recorded_options[scope] = self._get_option_to_record(*scope_and_maybe_option) return recorded_options @@ -622,7 +625,7 @@ class RunTracker(Subsystem): Returns a dict of of all options in the scope, if option is None. Returns the specific option if option is not None. Raises ValueError if scope or option could not be found. """ - scope_to_look_up = scope if scope != "GLOBAL" else "" + scope_to_look_up = scope if scope != GLOBAL_SCOPE_CONFIG_SECTION else "" try: value = self._all_options.for_scope( scope_to_look_up, inherit_from_enclosing_scope=False
Update install guide for Linux Mint. On Mint I had to install the python3-setuptools package too.
@@ -105,10 +105,10 @@ On OS X, you can install *The Fuck* via [Homebrew][homebrew]: brew install thefuck ``` -On Ubuntu, install *The Fuck* with the following commands: +On Ubuntu / Mint, install *The Fuck* with the following commands: ```bash sudo apt update -sudo apt install python3-dev python3-pip +sudo apt install python3-dev python3-pip python3-setuptools sudo pip3 install thefuck ```
[DOC] ImportError troubleshooting, virtual environment tips. Additions to the installation guide: ImportError troubleshooting, virtual environment tips.
@@ -59,18 +59,6 @@ Note: currently this does not include the dependency ``catch-22``. As this package is not available on ``conda-forge``, it must be installed via ``pip`` if desired. Contributions to remedy this situation are appreciated. - -Release versions - troubleshooting -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Module not found -"""""""""""""""" - -The most frequent reason for *module not found* errors is installing ``sktime`` with -minimum dependencies and using an estimator which interfaces a package that has not -been installed in the environment. To resolve this, install the missing package, or -install ``sktime`` with maximum dependencies (see above). - Development versions -------------------- To install the latest development version of ``sktime``, or earlier versions, the sequence of steps is as follows: @@ -171,8 +159,9 @@ This section outlines the ``sktime`` build requirements. These are required for: Setting up a development environment -"""""""""""""""""""""""""""""""""""" -You now need to set up a new python virtual environment. Our instructions will go through the commands to set up a ``conda`` environment which is recommended for sktime development. +------------------------------------ + +First set up a new virtual environment. Our instructions will go through the commands to set up a ``conda`` environment which is recommended for sktime development. This relies on an `anaconda installation <https://www.anaconda.com/products/individual#windows>`_. The process will be similar for ``venv`` or other virtual environment managers. In the ``anaconda prompt`` terminal: @@ -207,6 +196,36 @@ Some users have experienced issues when installing NumPy, particularly version 1 Another option under Windows is to follow the instructions for `Unix-like OS`_, using the Windows Subsystem for Linux (WSL). For installing WSL, follow the instructions `here <https://docs.microsoft.com/en-us/windows/wsl/install-win10#step-2---check-requirements-for-running-wsl-2>`_. +Troubleshooting +--------------- + +Module not found +~~~~~~~~~~~~~~~~ + +The most frequent reason for *module not found* errors is installing ``sktime`` with +minimum dependencies and using an estimator which interfaces a package that has not +been installed in the environment. To resolve this, install the missing package, or +install ``sktime`` with maximum dependencies (see above). + +ImportError +~~~~~~~~~~~ +Import errors are often caused by an improperly linked virtual environment. Make sure that +your environment is activated and linked to whatever IDE you are using. If you are using Jupyter +Notebooks, follow `these instructions <https://janakiev.com/blog/jupyter-virtual-envs/>`_ for +adding your virtual environment as a new kernel for your notebook. + +Other Startup Resources +----------------------- + +Virtual environments +~~~~~~~~~~~~~~~~~~~~ + +Two good options for virtual environment managers are: +* `conda <https://uoa-eresearch.github.io/eresearch-cookbook/recipe/2014/11/20/conda/>`_ (many sktime community members us this) +* `venv <https://realpython.com/python-virtual-environments-a-primer/>`_ (also quite good!). + +Be sure to link your new virtual environment as the python kernel in whatever IDE you are using. You can find the instructions for doing so +in VScode `here <https://code.visualstudio.com/docs/python/environments>`_. References ----------
Itertools update. * Updated the type hints for itertools. * Removed the overload because it caused problems and cleaned up the imports. * Update itertools.pyi: Added back optionality of the second argument for itertools.permutations. * Update itertools.pyi: Moved the Optional which I accidentally put on the wrong function -.-
# Based on http://docs.python.org/3.2/library/itertools.html from typing import (Iterator, TypeVar, Iterable, overload, Any, Callable, Tuple, - Union, Sequence, Generic, Optional) + Generic, Optional) _T = TypeVar('_T') _S = TypeVar('_S') @@ -44,20 +44,18 @@ def islice(iterable: Iterable[_T], stop: int) -> Iterator[_T]: ... def islice(iterable: Iterable[_T], start: int, stop: Optional[int], step: int = ...) -> Iterator[_T]: ... -def starmap(func: Any, iterable: Iterable[Any]) -> Iterator[Any]: ... +def starmap(func: Callable[..., _S], iterable: Iterable[Iterable[Any]]) -> Iterator[_S]: ... def takewhile(predicate: Callable[[_T], Any], iterable: Iterable[_T]) -> Iterator[_T]: ... -def tee(iterable: Iterable[Any], n: int = ...) -> Iterator[Any]: ... +def tee(iterable: Iterable[_T], n: int = ...) -> Tuple[Iterator[_T], ...]: ... def zip_longest(*p: Iterable[Any], fillvalue: Any = ...) -> Iterator[Any]: ... -# TODO: Return type should be Iterator[Tuple[..]], but unknown tuple shape. -# Iterator[Sequence[_T]] loses this type information. -def product(*p: Iterable[_T], repeat: int = ...) -> Iterator[Sequence[_T]]: ... +def product(*p: Iterable[_T], repeat: int = ...) -> Iterator[Tuple[_T, ...]]: ... def permutations(iterable: Iterable[_T], - r: Union[int, None] = ...) -> Iterator[Sequence[_T]]: ... + r: Optional[int] = ...) -> Iterator[Tuple[_T, ...]]: ... def combinations(iterable: Iterable[_T], - r: int) -> Iterable[Sequence[_T]]: ... + r: int) -> Iterable[Tuple[_T, ...]]: ... def combinations_with_replacement(iterable: Iterable[_T], - r: int) -> Iterable[Sequence[_T]]: ... + r: int) -> Iterable[Tuple[_T, ...]]: ...
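A quick runtime check of what the tightened stubs describe; these itertools calls really do yield tuples (and `tee` returns a tuple of independent iterators):
import itertools

print(list(itertools.product([0, 1], repeat=2)))
# [(0, 0), (0, 1), (1, 0), (1, 1)]

print(list(itertools.permutations("ab")))
# [('a', 'b'), ('b', 'a')]

first, second = itertools.tee(range(3), 2)
print(list(first), list(second))
# [0, 1, 2] [0, 1, 2]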
packages/dcos-image-deps: bump msrest and msrestazure msrest 0.4.0 has a hardcoded dependency on enum34.
}, "msrest": { "kind": "url", - "url": "https://pypi.python.org/packages/f5/b6/176a2109be5354bbcb31bf52e32ed91b1fc398f1a30ed12db0b429c64928/msrest-0.4.0-py3-none-any.whl", - "sha1": "fdd0ae8546202f817f1bbddf2828bc0af1720514" + "url": "https://pypi.python.org/packages/a4/79/956d2475af557ccc7de76ef67087fc8e7b591162748ab7783d88e9b685d7/msrest-0.4.17-py2.py3-none-any.whl", + "sha1": "f979991d34fdb8389abfddb4a73f6eef753abc47" }, "msrestazure": { "kind": "url", - "url": "https://pypi.python.org/packages/d3/37/6bf15f4e322bd2aa84ed98630a6e265f04784e40124aac5dae2e000553b0/msrestazure-0.4.1-py3-none-any.whl", - "sha1": "dfa52c208f423a1614a1a5dd2910e0a2295f1cb7" + "url": "https://pypi.python.org/packages/34/3d/4ed39efc7e833a84fa07561903750e64dcbd0cbea59b5be19ebd608f15fe/msrestazure-0.4.15-py2.py3-none-any.whl", + "sha1": "eb9bbdadbc1897950b95bb2e653dcc8cce2e50f6" }, "beautifulsoup4": { "kind": "url",
Update beta_binom_post_pred_plot.py. In the latest stable version of scipy, comb is in scipy.special instead of scipy.misc.
@@ -8,8 +8,7 @@ import os figdir = os.path.join(os.environ["PYPROBML"], "figures") def save_fig(fname): plt.savefig(os.path.join(figdir, fname)) -from scipy.misc import comb -from scipy.special import beta +from scipy.special import comb, beta from scipy.stats import binom
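A minimal check of the updated import path (the arguments are arbitrary example values):
from scipy.special import beta, comb

print(comb(10, 3))     # 120.0
print(beta(2.0, 3.0))  # 0.08333...  (i.e. 1/12)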
Consider ResourceWarnings as errors for shutdown test. Trinity shutdowns are improving, which means we occasionally get shutdowns that are considered clean, which then causes our xfail to fail. Let's consider ResourceWarnings as errors, too.
@@ -15,6 +15,7 @@ async def scan_for_errors(async_iterable): error_trigger = ( "exception was never retrieved", + "ResourceWarning: unclosed resource", "Task was destroyed but it is pending", "Traceback (most recent call last)", )
Fixed an issue where you couldn't get the gcd of strings Something to do with the gosh darn Pickling not liking generators (Note: that's the actual built-in type, not my Generators)
@@ -139,8 +139,7 @@ class Generator: self.generated.append(f) return f def __iter__(self): - import copy - return iter(copy.deepcopy(self.gen)) + return self def _map(self, function): return Generator(map(lambda x: function([x])[-1], self.gen)) def _filter(self, function): @@ -510,15 +509,15 @@ def gcd(lhs, rhs=None): if rhs: return { (Number, Number): lambda: math.gcd(lhs, rhs), - (Number, str): lambda: max(set(divisors_of(str(lhs))) & set(divisors_of(rhs))), - (str, Number): lambda: max(set(divisors_of(lhs)) & set(divisors_of(str(rhs)))), - (str, str): lambda: max(set(divisors_of(lhs)) & set(divisors_of(rhs))), + (Number, str): lambda: max(set(divisors_of(str(lhs))) & set(divisors_of(rhs)), key=lambda x: len(x)), + (str, Number): lambda: max(set(divisors_of(lhs)) & set(divisors_of(str(rhs))), key=lambda x: len(x)), + (str, str): lambda: max(set(divisors_of(lhs)) & set(divisors_of(rhs)), key=lambda x: len(x)), }.get((VY_type(lhs), VY_type(rhs)), lambda: vectorise(gcd, lhs, rhs))() else: # I can't use VY_reduce because ugh reasons lhs = deref(lhs) - return functools.reduce(gcd, lhs) + return int(numpy.gcd.reduce(lhs)) def get_input(): global input_level
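A small sketch of the numeric branch after the change; `numpy.gcd.reduce` folds gcd over a plain list, sidestepping the pickling problem with generators (the values are arbitrary).
import math
from functools import reduce

import numpy

values = [12, 18, 24]

print(int(numpy.gcd.reduce(values)))  # 6
print(reduce(math.gcd, values))       # 6, the pure-Python equivalent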
Add try_load argument to targetdata.py. Allows users to check if a TPF has already been created and stored in the .eleanor directory. try_load defaults to True.
@@ -53,6 +53,9 @@ class TargetData(object): If true, will return a light curve made with a simple PSF model. cal_cadences : tuple, optional Start and end cadence numbers to use for optimal aperture selection. + try_load: bool, optional + If true, will search hidden ~/.eleanor directory to see if TPF has already + been created. Attributes ---------- @@ -127,19 +130,30 @@ class TargetData(object): Extension[2] = (3, N_time) time, raw flux, systematics corrected flux """ - def __init__(self, source, height=13, width=13, save_postcard=True, do_pca=False, do_psf=False, bkg_size=None, crowded_field=False, cal_cadences=None): + def __init__(self, source, height=13, width=13, save_postcard=True, do_pca=False, do_psf=False, bkg_size=None, crowded_field=False, cal_cadences=None, + try_load=True): self.source_info = source if self.source_info.premade is True: self.load(directory=self.source_info.fn_dir) else: + fnf = True + # Checks to see if file exists already + if try_load==True: + try: + default_fn = 'hlsp_eleanor_tess_ffi_tic{0}_s{1:02d}_tess_v{2}_lc.fits'.format(self.source_info.tic, + self.source_info.sector, + eleanor.__version__) + self.load(fn=default_fn) + fnf = False + except: + pass + + if fnf is True: self.aperture = None if source.tc == False: - if save_postcard == True: - self.post_obj = Postcard(source.postcard, source.ELEANORURL) - else: self.post_obj = Postcard(source.postcard, source.ELEANORURL) else: self.post_obj = Postcard_tesscut(source.cutout) @@ -172,15 +186,18 @@ class TargetData(object): self.create_apertures(height, width) self.get_lightcurve() + if do_pca == True: self.corrected_flux(pca=True) else: self.modes = None self.pca_flux = None + if do_psf == True: self.psf_lightcurve() else: self.psf_flux = None + self.center_of_mass() @@ -1072,7 +1089,7 @@ class TargetData(object): - def load(self, directory=None): + def load(self, directory=None, fn=None): """ Loads in and sets all the attributes for a pre-created TPF file. @@ -1084,8 +1101,10 @@ class TargetData(object): if directory is None: directory = self.fetch_dir() + if fn is None: + fn = self.source_info.fn - hdu = fits.open(os.path.join(directory, self.source_info.fn)) + hdu = fits.open(os.path.join(directory, fn)) hdr = hdu[0].header self.header = hdr # Loads in everything from the first extension
Adds gh link to mnist dataset Will revert when deeplearning.net comes back online.
@@ -47,7 +47,7 @@ PATH = DATA_PATH / "mnist" PATH.mkdir(parents=True, exist_ok=True) -URL = "http://deeplearning.net/data/mnist/" +URL = "https://github.com/pytorch/tutorials/raw/master/_static/" FILENAME = "mnist.pkl.gz" if not (PATH / FILENAME).exists():
Set umask 022 before starting prod install. Fixes
@@ -4,6 +4,7 @@ if [ "$EUID" -ne 0 ]; then echo "Error: The installation script must be run as root" >&2 exit 1 fi +umask 022 mkdir -p /var/log/zulip "$(dirname "$(dirname "$0")")/lib/install" "$@" 2>&1 | tee -a /var/log/zulip/install.log
Normalize imported dependency header- and lib-paths. This is mainly intended as a workaround for but generally a good idea.
@@ -261,6 +261,7 @@ def gather_imports(fips_dir, proj_dir) : # add header search paths for imp_hdr in deps[imp_proj_name]['exports']['header-dirs'] : hdr_path = '{}/{}/{}'.format(ws_dir, imp_proj_name, imp_hdr) + hdr_path = os.path.normpath(hdr_path) if not os.path.isdir(hdr_path) : log.warn("header search path '{}' not found in project '{}'".format(hdr_path, imp_proj_name)) imported[imp_proj_name]['hdrdirs'].append(hdr_path) @@ -268,6 +269,7 @@ def gather_imports(fips_dir, proj_dir) : # add lib search paths for imp_lib in deps[imp_proj_name]['exports']['lib-dirs'] : lib_path = '{}/{}/{}'.format(ws_dir, imp_proj_name, imp_lib) + lib_path = os.path.normpath(lib_path) if not os.path.isdir(lib_path) : log.warn("lib search path '{}' not found in project '{}'".format(lib_path, imp_proj_name)) imported[imp_proj_name]['libdirs'].append(lib_path)
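An illustration of what `os.path.normpath` does to the joined search paths (the path below is made up):
import os

raw = "ws_dir/imported_proj/./src/../include"
print(os.path.normpath(raw))  # 'ws_dir/imported_proj/include' on POSIX systems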
GDB "next": when about to return, run the "finish" command TN:
@@ -282,9 +282,7 @@ class NextCommand(BaseCommand): # reach it. gdb.execute('until {}'.format(root_expr.line_no)) else: - print('Cannot resume execution: {} is about to return'.format( - prop_repr(state.property) - )) + gdb.execute('finish') else: # Depending on the control flow behavior of the currently running
[doc] Fix documentation target for getglobaluserinfo This seems like a typo
@@ -2201,7 +2201,8 @@ class APISite(BaseSite): self._globaluserinfo['registration'] = iso_ts return self._globaluserinfo - globaluserinfo = property(fget=getglobaluserinfo, doc=getuserinfo.__doc__) + globaluserinfo = property(fget=getglobaluserinfo, + doc=getglobaluserinfo.__doc__) @remove_last_args(['sysop']) def is_blocked(self):
Patched `numpy/core/setup_common.py` to check for the `-ipo` flag when running the Intel compiler (and not on Windows) before checking the long double representation, as this option causes the compiler to generate intermediary object files and interferes with checking the representation. This had already been done for MSVC-style compilers.
@@ -216,6 +216,24 @@ def check_long_double_representation(cmd): except (AttributeError, ValueError): pass + # Disable multi-file interprocedural optimization in the Intel compiler on Linux + # which generates intermediary object files and prevents checking the + # float representation. + elif sys.platform != "win32" and cmd.compiler.compiler_type.startswith('intel') \ + and '-ipo' in cmd.compiler.cc_exe: + try: + newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') + cmd.compiler.set_executables( + compiler=newcompiler, + compiler_so=newcompiler, + compiler_cxx=newcompiler, + linker_exe=newcompiler, + linker_so=newcompiler + ' -shared' + ) + except (AttributeError, ValueError): + pass + + # We need to use _compile because we need the object filename src, obj = cmd._compile(body, None, None, 'c') try:
Remove comment Summary: Remove pointer to nonexistent Note. It is already removed in "Remove support for CUDNN 6 (#15851)" Pull Request resolved:
@@ -194,7 +194,6 @@ struct AT_CUDA_API DropoutDescriptor } // Restore a dropout descriptor given a dropout probability and existing RNG state. - // See Note [cuDNN dropout descriptor initialization] void set(cudnnHandle_t handle, float dropout, at::Tensor state_) { AT_ASSERTM(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); state = state_; @@ -205,7 +204,6 @@ struct AT_CUDA_API DropoutDescriptor } // Restore a dropout descriptor corresponding to no dropout - // See Note [cuDNN dropout descriptor initialization] void set_no_dropout(cudnnHandle_t handle) { // NB: seed doesn't matter when dropout = 0, because no random number // initialization actually takes place when there is no dropout.
Version .9.2.4 Removed regex constraint which prevented tables with leading numeric characters from being analysed
@@ -48,7 +48,7 @@ import datetime from _curses import OK import math -__version__ = ".9.2.3" +__version__ = ".9.2.4" OK = 0 ERROR = 1 @@ -879,7 +879,7 @@ join pg_namespace as pgn on pgn.oid = pgc.relnamespace join (select tbl, count(*) as mbytes from stv_blocklist group by tbl) b on a.id=b.tbl where pgn.nspname = '%s' - and a.name::text SIMILAR TO '[A-Za-z_][A-Za-z0-9_]*' + and a.name::text SIMILAR TO '[A-Za-z0-9_]*' order by 2; ''' % (analyze_schema,)
Stopping initial animation for carousel Adding a local state interaction tracker so that the carousel knows when it's been interacted with and when to animate
return { contentSetStart: 0, leftToRight: false, + // tracks whether the carousel has been interacted with + interacted: false, }; }, watch: { const newIndexTooLarge = this.contentSetEnd >= this.contents.length; const newIndexTooSmall = newStartIndex < 0; const enoughContentForASet = this.contents.length >= this.contentSetSize; + + // turns animation on in case this is the first time it's been updated + if (!this.interacted) { + this.interacted = true; + } + if (nextSet && newIndexTooLarge && enoughContentForASet) { this.contentSetStart = this.contents.length - this.contentSetSize; } else if (previousSet && newIndexTooSmall) { const gutters = (this.contentSetSize - 1) * gutterWidth; const carouselContainerOffset = cards + gutters; const sign = this.leftToRight ? -1 : 1; + + if (this.interacted) { el.style.left = `${sign * carouselContainerOffset + originalPosition}px`; + } }, slide(el) { const originalPosition = parseInt(el.style.left, 10); const gutters = (this.contentSetSize - 1) * gutterWidth; const carouselContainerOffset = cards + gutters; const sign = this.leftToRight ? 1 : -1; + + if (this.interacted) { el.style.left = `${sign * carouselContainerOffset + originalPosition}px`; + } }, isInThisSet(index) { return this.contentSetStart <= index && index <= this.contentSetEnd;
Fix for broken /jobs/<jid> in 2016.11.4 Fixes
@@ -74,6 +74,7 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): reserved_kwargs = dict([(i, low.pop(i)) for i in [ 'username', 'password', 'eauth', 'token', 'client', 'user', 'key', + '__current_eauth_groups','__current_eauth_user', ] if i in low]) # Run name=value args through parse_input. We don't need to run kwargs
NCF iter issue fix: compute iter_num per user instead of from pred_per_user[0]
@@ -56,13 +56,12 @@ class BaseRecommenderMetric(FullDatasetEvaluationMetric): self.gt_items[annotation.user] = annotation.item def evaluate(self, annotations, predictions): - iter_num = len(self.pred_per_user[0]) - measure = [] for user in range(self.users_num): if not self.pred_per_user[user]: continue map_item_score = {} + iter_num = len(self.pred_per_user[user]) for j in range(iter_num): item = self.pred_per_user[user][j][0] score = self.pred_per_user[user][j][1]
Fix package build dependencies Apparently, they were lost during some rebase:(
@@ -47,8 +47,9 @@ fedora_epoch = 1 pwd = os.getcwd() home = os.environ["HOME"] + def gen_control_file(pkg: Package, out): - str_build_deps = ", ".join(build_deps) + str_build_deps = ", ".join(common_deps) file_contents = f''' Source: {pkg.name.lower()} Section: utils @@ -66,8 +67,9 @@ Description: {pkg.desc} with open(out, 'w') as f: f.write(file_contents) + def gen_spec_file(pkg: Package, out): - build_requires = " ".join(build_deps + run_deps) + build_requires = " ".join(common_deps) config_files = list(filter(lambda x: x is not None, map(lambda x: x.config_file, package.systemd_units))) requires = " ".join(run_deps) if len(pkg.systemd_units) > 0:
test_external: Refactor mock.patch to assertLogs. Replaced mock.patch with assertLogs for testing log outputs in file zerver/tests/test_external.py
@@ -118,16 +118,17 @@ class RateLimitTests(ZulipTestCase): self.assert_json_success(result) - @mock.patch('zerver.lib.rate_limiter.logger.warning') - def test_hit_ratelimiterlockingexception(self, mock_warn: mock.MagicMock) -> None: + def test_hit_ratelimiterlockingexception(self) -> None: user = self.example_user('cordelia') RateLimitedUser(user).clear_history() with mock.patch('zerver.lib.rate_limiter.RedisRateLimiterBackend.incr_ratelimit', side_effect=RateLimiterLockingException): + with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m: result = self.send_api_message(user, "some stuff") self.assertEqual(result.status_code, 429) - mock_warn.assert_called_with( - "Deadlock trying to incr_ratelimit for %s", - f"RateLimitedUser:{user.id}:api_by_user", + self.assertEqual( + m.output, + ["WARNING:zerver.lib.rate_limiter:Deadlock trying to incr_ratelimit for {}".format( + f"RateLimitedUser:{user.id}:api_by_user")] )
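For readers unfamiliar with the idiom adopted above, `unittest.TestCase.assertLogs` captures records emitted through the standard `logging` module and exposes their formatted form on the context manager, which avoids patching the logger directly. A standalone sketch, independent of Zulip's test suite:

```python
import logging
import unittest

logger = logging.getLogger("example.rate_limiter")


def incr_ratelimit(key):
    # Stand-in for the code under test: it logs a warning on a deadlock.
    logger.warning("Deadlock trying to incr_ratelimit for %s", key)


class AssertLogsExample(unittest.TestCase):
    def test_warning_is_logged(self):
        with self.assertLogs("example.rate_limiter", level="WARNING") as m:
            incr_ratelimit("RateLimitedUser:42:api_by_user")
        self.assertEqual(
            m.output,
            ["WARNING:example.rate_limiter:Deadlock trying to "
             "incr_ratelimit for RateLimitedUser:42:api_by_user"],
        )


if __name__ == "__main__":
    unittest.main()
```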
docs: Update Docker development instructions to include chown. This adds a command to change ownership of /srv/zulip to the zulip user.
@@ -395,7 +395,8 @@ docker build -t user/zulipdev .
 Commit and tag the provisioned images. The below will install Zulip's dependencies:
 ```
 docker run -itv $(pwd):/srv/zulip -p 9991:9991 user/zulipdev /bin/bash
-# /bin/bash /srv/zulip/tools/provision --docker
+$ /bin/bash sudo chown -R zulip:zulip /srv/zulip
+$ /bin/bash /srv/zulip/tools/provision --docker
 docker ps -af ancestor=user/zulipdev
 docker commit -m "Zulip installed" <container id> user/zulipdev:v2
 ```
Bumping the version To get the temp fix for pyqtgraph due to regressions plotting spectrograms
@@ -7,7 +7,7 @@ from codecs import open setup( name='pyspedas', - version='1.0', + version='1.0.1', description='Python Space Physics Environment Data Analysis\ Software (SPEDAS)', long_description=open('README.md').read(),
memory: Map lowmem using 16K pages only Turns out CTRR does not like working with huge pages, and just throws up its hands in the air with an L2 address size fault if a huge page overlaps the CTRR region.
@@ -260,7 +260,13 @@ int mmu_map(u64 from, u64 to, u64 size) return -1; // L3 mappings to boundary - chunk = min(size, ALIGN_UP(from, MASK(VADDR_L2_OFFSET_BITS)) - from); + u64 boundary = ALIGN_UP(from, MASK(VADDR_L2_OFFSET_BITS)); + // CPU CTRR doesn't like L2 mappings crossing CTRR boundaries! + // Map everything below the m1n1 base as L3 + if (boundary >= ram_base && boundary < (u64)_base) + boundary = ALIGN_UP((u64)_base, MASK(VADDR_L2_OFFSET_BITS)); + + chunk = min(size, boundary - from); if (chunk) { mmu_pt_map_l3(from, to, chunk); from += chunk; @@ -391,8 +397,6 @@ void mmu_map_framebuffer(u64 addr, size_t size) static void mmu_add_default_mappings(void) { - mmu_map_mmio(); - ram_base = ALIGN_DOWN(cur_boot_args.phys_base, BIT(32)); uint64_t ram_size = cur_boot_args.mem_size + cur_boot_args.phys_base - ram_base; ram_size = ALIGN_DOWN(ram_size, 0x4000); @@ -400,6 +404,8 @@ static void mmu_add_default_mappings(void) printf("MMU: RAM base: 0x%lx\n", ram_base); printf("MMU: Top of normal RAM: 0x%lx\n", ram_base + ram_size); + mmu_map_mmio(); + /* * Create identity mapping for RAM from 0x08_0000_0000 * With SPRR enabled, this becomes RW.
[realms] Replace forgotten TODO. [email protected]
@@ -31,7 +31,8 @@ def expand_realms(db, project_id, realms_cfg): All such realms_pb2.Realms messages across all projects (plus a list of all defined permissions with all their metadata) are later merged together into - a final universal realms_pb2.Realms by TODO. + a final universal realms_pb2.Realms by realms.merge(...) in + components/auth/replication.py. Args: db: a permissions.DB instance with current permissions and roles.
2.5.9 Automatically generated by python-semantic-release
@@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers """ from datetime import timedelta -__version__ = "2.5.8" +__version__ = "2.5.9" PROJECT_URL = "https://github.com/custom-components/alexa_media_player/" ISSUE_URL = "{}issues".format(PROJECT_URL)
Fix onConnect docstring regarding return value The docstring mentioned a ConnectionAccept to be returned; while this model exists, it doesn't seem to be used, and the actual expected return value is str.
@@ -411,9 +411,9 @@ class IWebSocketChannel(object): :returns: When this callback is fired on a WebSocket server, you may return either ``None`` (in which case the connection is accepted with no specific WebSocket subprotocol) or - an instance of :class:`autobahn.websocket.types.ConnectionAccept`. + an str instance with the name of the WebSocket subprotocol accepted. When the callback is fired on a WebSocket client, this method must return ``None``. - Do deny a connection, raise an Exception. + To deny a connection, raise an Exception. You can also return a Deferred/Future that resolves/rejects to the above. """
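A short server-side sketch of the corrected contract: return the accepted subprotocol name as a str (or None), and raise to deny. The class and subprotocol names are illustrative, and the example assumes the Twisted flavour of Autobahn is installed:

```python
from autobahn.twisted.websocket import WebSocketServerProtocol


class ExampleServerProtocol(WebSocketServerProtocol):
    def onConnect(self, request):
        # Accept the connection with a specific subprotocol by returning
        # its name as a str; returning None accepts with no subprotocol.
        if "chat.v1" in request.protocols:
            return "chat.v1"
        # To deny the connection, raise an exception (per the docstring above).
        raise Exception("no supported subprotocol offered")
```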
Refactor RandomSampler to use IdentitySampler This also gives us the 'seed' parameter for free.
""" A sampler that gives random samples. """ -from random import choice from dimod.core.sampler import Sampler -from dimod.sampleset import SampleSet +from dimod.reference.samplers.identity_sampler import IdentitySampler + __all__ = ['RandomSampler'] @@ -48,7 +48,7 @@ class RandomSampler(Sampler): self.parameters = {'num_reads': []} self.properties = {} - def sample(self, bqm, num_reads=10): + def sample(self, bqm, num_reads=10, seed=None): """Give random samples for a binary quadratic model. Variable assignments are chosen by coin flip. @@ -60,19 +60,17 @@ class RandomSampler(Sampler): num_reads (int, optional, default=10): Number of reads. + seed (int (32-bit unsigned integer), optional): + Seed to use for the PRNG. Specifying a particular seed with a + constant set of parameters produces identical results. If not + provided, a random seed is chosen. + Returns: :obj:`.SampleSet` """ - values = tuple(bqm.vartype.value) - - def _itersample(): - for __ in range(num_reads): - sample = {v: choice(values) for v in bqm.linear} - energy = bqm.energy(sample) - - yield sample, energy - - samples, energies = zip(*_itersample()) - - return SampleSet.from_samples(samples, bqm.vartype, energies) + # as an implementation detail, we can use IdentitySampler here, but + # in order to save on future changes that decouple them, we won't + # subclass + return IdentitySampler().sample(bqm, num_reads=num_reads, seed=seed, + initial_states_generator='random')
Update params.yaml Doing 25 epochs instead of 16
@@ -14,7 +14,7 @@ constants: csv_test: !ref <constants.data_folder>/test.csv # Neural Parameters - number_of_epochs: 16 + number_of_epochs: 25 batch_size: 8 lr: 1.0 dropout_rate: 0.15
Updates "precomp" matrix for wildcard budget to include SPAM term directly. Previously we added the SPAM component of a circuit's budget separately, but this is nicer for the alternate methods we're testing to solve the wildcard optimization.
@@ -168,6 +168,10 @@ class WildcardBudget(object): circuit_budget_matrix[i, self.primOpLookup[layer]] += 1.0 for component in layer.components: circuit_budget_matrix[i, self.primOpLookup[component]] += 1.0 + + if self.spam_index is not None: + circuit_budget_matrix[:, self.spam_index] = 1.0 + return circuit_budget_matrix def slow_update_probs(self, probs_in, probs_out, freqs, layout, precomp=None): @@ -655,8 +659,7 @@ class PrimitiveOpsWildcardBudget(WildcardBudget): circuit_budgets = _np.array([self.circuit_budget(circ) for circ in circuits]) else: Wvec = _np.abs(self.wildcard_vector) - off = 0 if (self.spam_index is None) else Wvec[self.spam_index] - circuit_budgets = _np.dot(precomp, Wvec) + off + circuit_budgets = _np.dot(precomp, Wvec) return circuit_budgets @property
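The change above folds the SPAM contribution into the precomputed matrix so that each circuit's budget becomes a single matrix-vector product. A toy numpy sketch (shapes and values invented for illustration) of why a column of ones is equivalent to the old per-circuit offset:

```python
import numpy as np

num_circuits, num_ops = 3, 4
spam_index = num_ops - 1                      # last column plays the SPAM role

precomp = np.zeros((num_circuits, num_ops))
precomp[:, :spam_index] = [[2, 0, 1], [1, 1, 0], [0, 3, 2]]  # per-circuit op counts

wvec = np.abs(np.array([0.01, 0.02, 0.005, 0.03]))           # per-op wildcard rates

# Old behaviour: dot product over the gate columns plus a flat SPAM offset.
old = precomp[:, :spam_index] @ wvec[:spam_index] + wvec[spam_index]

# New behaviour: bake the SPAM term in as a column of ones, then one dot product.
precomp[:, spam_index] = 1.0
new = precomp @ wvec

assert np.allclose(old, new)
print(new)
```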
Update loa.py Replaced one of the placeholder function args descriptions with the correct one. Replaced use of the np.max() function with the built-in max() function.
@@ -154,13 +154,13 @@ class LionOptimizationAlgorithm(Algorithm): Args: population_size (Optional[int]): Population size :math:`\in [1, \infty)`. - burden_factor (Optional[float]): Burden factor :math:`\in [0, 1]`. - death_rate (Optional[float]): Dying rate :math:`\in [0, 1]`. - visibility (Optional[float]): View range of camel. - supply_init (Optional[float]): Initial supply :math:`\in (0, \infty)`. - endurance_init (Optional[float]): Initial endurance :math:`\in (0, \infty)`. - min_temperature (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`. - max_temperature (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`. + nomad_ratio (Optional[float]): Ratio of nomad lions :math:`\in [0, 1]`. + num_of_prides = Number of prides :math:`\in [1, \infty)`. + female_ratio = Ratio of female lions in prides :math:`\in [0, 1]`. + roaming_factor = Roaming factor :math:`\in [0, 1]`. + mating_factor = Mating factor :math:`\in [0, 1]`. + mutation_factor = Mutation factor :math:`\in [0, 1]`. + immigration_factor = Immigration factor :math:`\in [0, 1]`. See Also: * :func:`niapy.algorithms.Algorithm.set_parameters` @@ -374,7 +374,7 @@ class LionOptimizationAlgorithm(Algorithm): num_of_improvements += 1 # Tournament selection to select places in teritory if there's more than 2 places if len(pride_teritory) > 1: - tournament_size = np.max([2, int(np.ceil(num_of_improvements / 2))]) + tournament_size = max([2, int(np.ceil(num_of_improvements / 2))]) tournament_selections = self.rng.choice(pride_teritory, tournament_size, replace=False) tournament_winner = tournament_selections[0].x.copy() tournament_min_f = tournament_selections[0].f
Handle change in domain check behaviour in statsmodels 0.12. statsmodels 0.12 fixed a bug where evaluating points outside of the domain in density estimation now returns np.nan instead of raising a ValueError. Detect both cases and fallback to estimating the density point-wise.
@@ -185,6 +185,8 @@ def compute_density(x, weight, range, **params): try: y = kde.evaluate(x2) + if np.isscalar(y) and np.isnan(y): + raise ValueError('kde.evaluate returned nan') except ValueError: y = [] for _x in x2:
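The fix above has to cope with two failure modes across statsmodels versions: older releases raise ValueError for out-of-domain points, newer ones return NaN. A generic sketch of normalising both into the same point-wise fallback (the evaluator here is a stand-in, not statsmodels' KDE):

```python
import numpy as np


def evaluate_with_fallback(evaluate, points, pointwise):
    """Try a vectorised evaluation; fall back to point-wise on error or NaN."""
    try:
        y = evaluate(points)
        # Newer libraries may signal an out-of-domain input by returning NaN
        # instead of raising, so treat a NaN scalar the same as an error.
        if np.isscalar(y) and np.isnan(y):
            raise ValueError("evaluate returned nan")
        return np.asarray(y)
    except ValueError:
        return np.array([pointwise(p) for p in points])


# Toy evaluator that rejects any batch containing a negative input.
def toy(xs):
    xs = np.asarray(xs)
    return np.nan if np.any(xs < 0) else np.sqrt(xs)


print(evaluate_with_fallback(toy, np.array([-1.0, 4.0]),
                             pointwise=lambda p: 0.0 if p < 0 else float(np.sqrt(p))))
```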
fix(recorder): Use EXPLAIN instead of EXPLAIN EXTENDED EXPLAIN EXTENDED is not a valid Postgres query; use EXPLAIN instead
@@ -33,7 +33,7 @@ def sql(*args, **kwargs): # Collect EXPLAIN for executed query if query.lower().strip().split()[0] in ("select", "update", "delete"): # Only SELECT/UPDATE/DELETE queries can be "EXPLAIN"ed - explain_result = frappe.db._sql("EXPLAIN EXTENDED {}".format(query), as_dict=True) + explain_result = frappe.db._sql("EXPLAIN {}".format(query), as_dict=True) else: explain_result = []
Update nwm-archive.yaml Additional edits to formatting.
Name: NOAA National Water Model CONUS Retrospective Dataset Description: | - The NOAA National Water Model Retrospective dataset contains input and output from multi-decade CONUS retrospective simulations. These simulations used meteorological input fields from meteorological retrospective datasets. The output frequency and fields available in this historical NWM dataset differ from those contained in the real-time operational NWM forecast model. <br /> - + The NOAA National Water Model Retrospective dataset contains input and output from multi-decade CONUS retrospective simulations. These simulations used meteorological input fields from meteorological retrospective datasets. The output frequency and fields available in this historical NWM dataset differ from those contained in the real-time operational NWM forecast model. + <br/> + <br/> One application of this dataset is to provide historical context to current near real-time streamflow, soil moisture and snowpack conditions. The retrospective data can be used to infer flow frequencies and perform temporal analyses with hourly streamflow output and 3-hourly land surface output. This dataset can also be used in the development of end user applications which require a long baseline of data for system training or verification purposes. <br /> - - Currently there are three versions of the NWM retrospective dataset <br /> - - A 42-year (February 1979 through December 2020) retrospective simulation using version 2.1 of the National Water Model. <br /> - A 26-year (January 1993 through December 2018) retrospective simulation using version 2.0 of the National Water Model. <br /> - A 25-year (January 1993 through December 2017) retrospective simulation using version 1.2 of the National Water Model. <br /> - + <br/> + <br/> + Currently there are three versions of the NWM retrospective dataset + <br/> + <br/> + A 42-year (February 1979 through December 2020) retrospective simulation using version 2.1 of the National Water Model. + A 26-year (January 1993 through December 2018) retrospective simulation using version 2.0 of the National Water Model. + A 25-year (January 1993 through December 2017) retrospective simulation using version 1.2 of the National Water Model. + <br/> + <br/> Version 2.1 uses forcings from the Office of Water Prediction Analysis of Record for Calibration (AORC) dataset while Version 2.0 and version 1.2 use input meteorological forcing from the North American Land Data Assimilation (NLDAS) data set. Note that no streamflow or other data assimilation is performed within any of the NWM retrospective simulations. - + <br/> + <br/> NWM Retrospective data is available in two formats, NetCDF and Zarr. The NetCDF files contain the full set of NWM output data, while the Zarr files contain a subset of NWM output fields that vary with model version. - + <br/> + <br/> NWM V2.1: All model output and forcing input fields are available in the NetCDF format. All model output fields along with the precipitation forcing field are available in the Zarr format. NWM V2.0: All model output fields are available in NetCDF format. Model channel output including streamflow and related fields are available in Zarr format. NWM V1.2: All model output fields are available in NetCDF format. - + <br/> + <br/> A table listing the data available within each NetCDF and Zarr file is located in the documentation page. This data includes meteorological NWM forcing inputs along with NWM hydrologic and land surface outputs, and varies by version number. 
Documentation: https://github.com/awslabs/open-data-docs/tree/main/docs/noaa/nwm-archive Contact: |
Update tutorial.rst Fixed typo
@@ -488,7 +488,7 @@ The `JSON Lines`_ format is useful because it's stream-like, you can easily append new records to it. It doesn't have the same problem of JSON when you run twice. Also, as each record is a separate line, you can process big files without having to fit everything in memory, there are tools like `JQ`_ to help -doing that at the command-line. +do that at the command-line. In small projects (like the one in this tutorial), that should be enough. However, if you want to perform more complex things with the scraped items, you
PR + Governance updates to CONTRIBUTING.md * Add note about PRs and governance Closes * Add note about CoC. Related to
@@ -93,6 +93,23 @@ Contribution Guidelines .. _formatting: https://molecule.readthedocs.io/en/latest/testing.html#formatting .. _linting: https://molecule.readthedocs.io/en/latest/testing.html#linting +Code Of Conduct +=============== + +Please see our `Code of Conduct`_ document. + +.. _Code of Conduct: https://github.com/ansible/molecule/blob/master/.github/CODE_OF_CONDUCT.md + +Pull Request Life Cycle and Governance +====================================== + +* If your PRs get stuck `join us on IRC`_ or add to the `working group agenda`_. +* The code style is what is enforced by CI, everything else is off topic. +* All PRs must be reviewed by one other person. This is enforced by GitHub. Larger changes require +2. + +.. _working group agenda: https://github.com/ansible/community/wiki/Molecule#meetings +.. _join us on IRC: https://github.com/ansible/community/wiki/Molecule#join-the-discussion + Installing ==========
Remove use of the term "subdirectories". While gsutil mimics a directory structure in some ways, it's more appropriate to think of it as common prefixes to object names.
@@ -52,27 +52,30 @@ _DETAILED_HELP_TEXT = (""" gsutil mv ./dir gs://my_bucket -<B>RENAMING BUCKET SUBDIRECTORIES</B> - You can use the gsutil mv command to rename subdirectories. For example, - the command: +<B>RENAMING GROUPS OF OBJECTS</B> + You can use the gsutil mv command to rename all objects with a given prefix + to have a new prefix. This is accomplished by copying each object to a new + object with the desired name and deleting the old one. For example, the + command: - gsutil mv gs://my_bucket/olddir gs://my_bucket/newdir + gsutil mv gs://my_bucket/oldprefix gs://my_bucket/newprefix - would rename all objects and subdirectories under gs://my_bucket/olddir to be - under gs://my_bucket/newdir, otherwise preserving the subdirectory structure. + would rename all objects under gs://my_bucket/oldprefix to be under + gs://my_bucket/newprefix, otherwise preserving the naming structure. If you do a rename as specified above and you want to preserve ACLs, you should use the -p option (see OPTIONS). - Note that when using mv to rename bucket subdirectories you cannot specify - the source URL using wildcards. You need to spell out the complete name: + Note that when using mv to rename groups of objects with a common prefix + you cannot specify the source URL using wildcards. You need to spell out + the complete name: - gsutil mv gs://my_bucket/olddir gs://my_bucket/newdir + gsutil mv gs://my_bucket/oldprefix gs://my_bucket/newprefix If you have a large number of files to move you might want to use the gsutil -m option, to perform a multi-threaded/multi-processing move: - gsutil -m mv gs://my_bucket/olddir gs://my_bucket/newdir + gsutil -m mv gs://my_bucket/oldprefix gs://my_bucket/newprefix <B>NON-ATOMIC OPERATION</B> @@ -125,7 +128,7 @@ class MvCommand(Command): help_name='mv', help_name_aliases=['move', 'rename'], help_type='command_help', - help_one_line_summary='Move/rename objects and/or subdirectories', + help_one_line_summary='Move/rename objects', help_text=_DETAILED_HELP_TEXT, subcommand_help_text={}, )
Fix filename for download_saved_models This fix reflects changes in the examples.
@@ -63,7 +63,7 @@ For now, we'll just download pre-trained models with the script provided by the .. code-block:: bash - ./download_saved_models.sh + python download_saved_models.py This script downloads the pre-trained PyTorch models and puts them into the ``saved_models`` folder.
docs: Fix Grammar in Settings Transaction Family Change "remain pieces" to "remaining pieces".
@@ -143,7 +143,7 @@ following algorithm: Setting keys are broken into four parts, based on the dots in the string. For example, the address for the key `a.b.c` is computed based on `a`, `b`, `c` and the empty string. A longer key, for example `a.b.c.d.e`, is still broken into -four parts, but the remain pieces are in the last part: `a`, `b`, `c` and `d.e`. +four parts, but the remaining pieces are in the last part: `a`, `b`, `c` and `d.e`. Each of these pieces has a short hash computed (the first 16 characters of its SHA256 hash in hex) and is joined into a single address, with the settings
Fix behavior of delete_documents() with filters for Milvus * Fix behavior of delete_documents() Delete filtered set of vectors rather than the whole collection * Update milvus.py * Update milvus.py
@@ -403,6 +403,10 @@ class MilvusDocumentStore(SQLDocumentStore): if status.code != Status.SUCCESS: raise RuntimeError(f'Milvus has collection check failed: {status}') if ok: + if filters: + existing_docs = super().get_all_documents(filters=filters, index=index) + self._delete_vector_ids_from_milvus(documents=existing_docs, index=index) + else: status = self.milvus_server.drop_collection(collection_name=index) if status.code != Status.SUCCESS: raise RuntimeError(f'Milvus drop collection failed: {status}')
max_shown_downlinks add default value HG-- branch : feature/microservices
@@ -70,7 +70,8 @@ Ext.define("NOC.inv.networksegment.Model", { }, { name: "max_shown_downlinks", - type: "integer" + type: "integer", + defaultValue: 1000 } ] });
Remove the lock from the history recorder Thread safety should be the responsibility of the handler as not all handlers may require a lock
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. - -import threading import logging @@ -28,7 +26,6 @@ class HistoryRecorder(object): def __init__(self): self._enabled = False self._handlers = [] - self._lock = threading.Lock() def enable(self): self._enabled = True @@ -41,7 +38,6 @@ class HistoryRecorder(object): def record(self, event_type, payload, source='BOTOCORE'): if self._enabled and self._handlers: - with self._lock: for handler in self._handlers: try: handler.emit(event_type, payload, source)
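With the recorder no longer serialising calls, any handler that can be invoked from multiple threads has to bring its own synchronisation. A minimal sketch of such a handler (a generic illustration; only the emit signature is taken from the code above):

```python
import threading


class ThreadSafeHandler:
    """Collects history events, serialising its own writes now that the recorder does not."""

    def __init__(self):
        self._lock = threading.Lock()
        self.events = []

    def emit(self, event_type, payload, source):
        with self._lock:
            self.events.append((event_type, payload, source))


handler = ThreadSafeHandler()
handler.emit("API_CALL", {"operation": "ListBuckets"}, "BOTOCORE")
print(handler.events)
```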
repository/legacy: log auth failures Resolves:
@@ -382,4 +382,10 @@ class LegacyRepository(PyPiRepository): except requests.HTTPError as e: raise RepositoryError(e) + if response.status_code in (401, 403): + self._log( + "Authorization error accessing {url}".format(url=url), level="warn" + ) + return + return Page(url, response.content, response.headers)
Fixed issue where RemoteCopy was returning an empty string. This was causing dependent functions to return PSObject values instead of strings, resulting in multiple failures.
@@ -1170,7 +1170,6 @@ Function RemoteCopy($uploadTo, $downloadFrom, $downloadTo, $port, $files, $usern Start-Sleep -Seconds 1 $uploadJobStatus = Get-Job -Id $uploadJob.Id } - Write-Output "" $returnCode = Get-Content -Path $uploadStatusRandomFile Remove-Item -Force $uploadStatusRandomFile | Out-Null Remove-Job -Id $uploadJob.Id -Force | Out-Null @@ -1253,7 +1252,6 @@ Function RemoteCopy($uploadTo, $downloadFrom, $downloadTo, $port, $files, $usern Start-Sleep -Seconds 1 $uploadJobStatus = Get-Job -Id $uploadJob.Id } - Write-Output "" $returnCode = Get-Content -Path $uploadStatusRandomFile Remove-Item -Force $uploadStatusRandomFile | Out-Null Remove-Job -Id $uploadJob.Id -Force | Out-Null @@ -1320,7 +1318,6 @@ Function RemoteCopy($uploadTo, $downloadFrom, $downloadTo, $port, $files, $usern Start-Sleep -Seconds 1 $downloadJobStatus = Get-Job -Id $downloadJob.Id } - Write-Output "" $returnCode = Get-Content -Path $downloadStatusRandomFile Remove-Item -Force $downloadStatusRandomFile | Out-Null Remove-Job -Id $downloadJob.Id -Force | Out-Null @@ -1360,7 +1357,6 @@ Function RemoteCopy($uploadTo, $downloadFrom, $downloadTo, $port, $files, $usern Start-Sleep -Seconds 1 $downloadJobStatus = Get-Job -Id $downloadJob.Id } - Write-Output "" $downloadExitCode = (Select-String -Path $downloadStatusRandomFile -Pattern "DownloadExtiCode_").Line if ( $downloadExitCode ) {
LogicVarType.c_type: turn the Exception into a language check TN:
@@ -561,7 +561,9 @@ class LogicVarType(BasicType): @classmethod def c_type(cls, c_api_settings): - raise Exception("Cannot expose logic variables to C at the moment") + check_source_language( + False, "Cannot expose logic variables to C at the moment" + ) class EquationType(BasicType):
group_by doesn't seem to apply for form reports Getting a validation error here: Introduced in
@@ -593,13 +593,16 @@ class ConfigureChartReport(ReportBuilderView): }) return self._handle_exception(error_response, e) field_names = report_form.fields.keys() + is_group_by_required = (report_form.source_type != 'form' + and ('group_by' in field_names + or 'location' in field_names)) return { 'report': { "title": self.page_name }, 'report_type': self.report_type, 'form': report_form, - 'is_group_by_required': 'group_by' in field_names or 'location' in field_names, + 'is_group_by_required': is_group_by_required, 'editing_existing_report': bool(self.existing_report), 'report_column_options': [p.to_dict() for p in report_form.report_column_options.values()], 'data_source_indicators': [p._asdict() for p in report_form.data_source_properties.values()],
Environment var for UPLOAD_FOLDER It would be helpful to have the option to set the UPLOAD_FOLDER via an environment variable. This change allows that.
@@ -77,7 +77,7 @@ class Config(object): The default destination is the CTFd/uploads folder. If you need Amazon S3 files you can use the CTFd S3 plugin: https://github.com/ColdHeat/CTFd-S3-plugin ''' - UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), 'uploads') + UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') or os.path.join(os.path.dirname(__file__), 'uploads') ''' TEMPLATES_AUTO_RELOAD specifies whether Flask should check for modifications to templates and
docs: required reviews changed from 3 to 2 Tidying up stale info while looking at the docs
@@ -138,7 +138,7 @@ PR is selected for the merge only if: - PR is not a Draft. - PR has a green status (successful build). - PR doesn't have merge conflicts with `master` branch. -- PR has 3 approved reviews (as described above). +- PR has approved reviews (as described above). - PR does not have any [official reviewers](#official-reviewers) requesting changes - Master build is not running already (see below)
landing-page: Change "Help Center" => "Why Zulip". We have a good "Help Center" section in the footer, where a user may already expect to look for help/support related requests, so we can replace the navbar spot there with the "Why Zulip" page link.
<li on-page="integrations"> <a href="/integrations/">Integrations</a> </li> - <li on-page="help"> - <a href="/help/">Help Center</a> + <li> + <a href="/why-zulip/">Why Zulip</a> </li> {% if user_is_authenticated %} {% include 'zerver/portico-header-dropdown.html' %}
Make a copy of source array This prevents reference-counting issues
"""Definitions for different agents that can be controlled from Holodeck""" from functools import reduce +from typing import Any import numpy as np @@ -249,7 +250,7 @@ class HolodeckAgent: # Allow for smaller arrays to be provided as input if len(self._action_buffer) > len(action): - action = np.asarray(action) + action = np.copy(action) action.resize(self._action_buffer.shape) # The default act function is to copy the data,
llvm, functions/LinearMatrix: Drop custom _result_length method The shared one works OK, and it's on its way out anyway.
@@ -4666,10 +4666,6 @@ class LinearMatrix(TransferFunction): # --------------------------------------- else: return np.array(specification) - @property - def _result_length(self): - return len(self.instance_defaults.value) - def get_output_struct_type(self): default_val = self.instance_defaults.value
No need to call raise_for_status in get_config_and_id_from_registry query_registry already calls requests.Response.raise_for_status. So, no need to call it again in get_config_and_id_from_registry.
@@ -945,13 +945,11 @@ class RegistryClient(object): """ response = query_registry( self._session, image, digest=digest, version=version) - response.raise_for_status() manifest_config = response.json() config_digest = manifest_config['config']['digest'] config_response = query_registry( self._session, image, digest=config_digest, version=version, is_blob=True) - config_response.raise_for_status() blob_config = config_response.json()
Drop unused We don't need to collate data by db_alias after loading it, because each worker is dedicated to its own db_alias, so stats are already grouped when they are collected.
@@ -164,37 +164,6 @@ def _reset_sequences(load_stats): cursor.execute(line) -def load_objects(objects): - """Load the given list of object dictionaries into the database - :return: List of LoadStat objects - """ - load_stats_by_db = {} - - objects_by_db = _group_objects_by_db(objects) - executor = ProcessPoolExecutor(max_workers=len(objects_by_db)) - results = executor.map(load_data_task, objects_by_db) - for load_stat in results: - _update_stats(load_stats_by_db, [load_stat]) - return list(load_stats_by_db.values()) - - -def _update_stats(current_stats_by_db, new_stats): - """Helper to update stats dictionary""" - for new_stat in new_stats: - current_stat = current_stats_by_db.get(new_stat.db_alias) - if current_stat is not None: - current_stat.update(new_stat) - else: - current_stats_by_db[new_stat.db_alias] = new_stat - - -def load_data_task(dbalias_objects: tuple) -> LoadStat: - db_alias, objects_for_db = dbalias_objects - with transaction.atomic(using=db_alias): - load_stat = load_data_for_db(db_alias, objects_for_db) - return load_stat - - def load_data_for_db(db_alias, objects): """ :param db_alias: Django alias for database to load objects into @@ -231,18 +200,6 @@ def load_data_for_db(db_alias, objects): return LoadStat(db_alias, model_counter) -def _group_objects_by_db(objects): - """ - :param objects: Deserialized object dictionaries - :return: List of tuples of (db_alias, [object,...]) - """ - objects_by_db = defaultdict(list) - for obj in objects: - db_alias = get_db_alias(obj) - objects_by_db[db_alias].append(obj) - return list(objects_by_db.items()) - - def get_db_alias(obj: dict) -> str: app_label = obj['model'] model = apps.get_model(app_label)
Prometheus version bump - 2.7.1 Bugfix and security release:
%define debug_package %{nil} Name: prometheus2 -Version: 2.7.0 +Version: 2.7.1 Release: 1%{?dist} -Summary: The Prometheus 2.7.0 monitoring system and time series database. +Summary: The Prometheus 2.7.1 monitoring system and time series database. License: ASL 2.0 URL: https://prometheus.io Conflicts: prometheus
fix - enum for color coding in PS Some keys are really weird in PS
"type": "list", "key": "color_code", "label": "Color codes for layers", - "object_type": "text" + "type": "enum", + "multiselection": true, + "enum_items": [ + { "red": "red" }, + { "orange": "orange" }, + { "yellowColor": "yellow" }, + { "grain": "green" }, + { "blue": "blue" }, + { "violet": "violet" }, + { "gray": "gray" } + ] }, { "type": "list",
rbd-mirror: fix systemd unit in purge-docker rbd-mirror containers are not stopped in the purge-docker-cluster playbook because the wrong unit name is used.
- name: disable ceph rbd-mirror service service: - name: "ceph-rbd-mirror@{{ ansible_hostname }}" + name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}" state: stopped enabled: no ignore_errors: true
Update generic.txt > ```stop_ransomware```
@@ -4516,10 +4516,6 @@ climapro-africa.com wwkkss.com -# Reference: https://twitter.com/petrovic082/status/1152952807600939008 - -bruze2.ug - # Reference: https://twitter.com/bad_packets/status/1153089384884736000 silynigr.xyz @@ -12576,10 +12572,6 @@ f0468736.xsph.ru 91.208.245.201:443 oooooooooo.ga -# Reference: https://www.virustotal.com/gui/file/5d7a8a1278237d3044e9079031352f845e226ea7d16f9223ff6f9fac896e1a82/detection - -vjsi.top - # Reference: https://www.virustotal.com/gui/domain/uufjffff.com/detection uufjffff.com
born_in_month Compute more born_in_month indicators (born_in_month, bf_at_birth_born_in_month, low_birth_weight_born_in_month) directly from child_health
@@ -960,6 +960,8 @@ class ChildHealthMonthlyAggregationHelper(BaseICDSAggregationHelper): age_in_months = "(({} - child_health.dob) / 30.4 )".format(start_month_string) open_in_month = ("(({} - child_health.opened_on::date)::integer >= 0) AND (child_health.closed = 0 OR (child_health.closed_on::date - {})::integer > 0)").format(end_month_string, start_month_string) alive_in_month = "(child_health.date_death IS NULL OR child_health.date_death - {} >= 0)".format(start_month_string) + seeking_services = "(child_health.is_availing = 1 AND child_health.is_migrated = 0)" + born_in_month = "({} AND child_health.dob BETWEEN {} AND {})".format(seeking_services, start_month_string, end_month_string) columns = ( ("awc_id", "child_health.awc_id"), @@ -984,8 +986,9 @@ class ChildHealthMonthlyAggregationHelper(BaseICDSAggregationHelper): ("age_in_months", 'trunc({})'.format(age_in_months_end)), ("open_in_month", "CASE WHEN {} THEN 1 ELSE 0 END".format(open_in_month)), ("alive_in_month", "CASE WHEN {} THEN 1 ELSE 0 END".format(alive_in_month)), - ("born_in_month", "ucr.born_in_month"), - ("bf_at_birth_born_in_month", "ucr.bf_at_birth_born_in_month"), + ("born_in_month", "CASE WHEN {} THEN 1 ELSE 0 END".format(born_in_month)), + ("bf_at_birth_born_in_month", "CASE WHEN {} AND child_health.bf_at_birth = 'yes' THEN 1 ELSE 0 END".format(born_in_month)), + ("low_birth_weight_born_in_month", "CASE WHEN {} AND child_health.lbw_open_count = 1 THEN 1 ELSE 0 END".format(born_in_month)), ("fully_immunized_eligible", "ucr.fully_immunized_eligible"), ("fully_immunized_on_time", "ucr.fully_immunized_on_time"), ("fully_immunized_late", "ucr.fully_immunized_late"), @@ -1027,7 +1030,6 @@ class ChildHealthMonthlyAggregationHelper(BaseICDSAggregationHelper): "CASE WHEN ucr.pnc_eligible = 1 THEN COALESCE(pnc.skin_to_skin, 0) ELSE 0 END"), # GM Indicators ("wer_eligible", "ucr.wer_eligible"), - ("low_birth_weight_born_in_month", "ucr.low_birth_weight_born_in_month"), ("nutrition_status_last_recorded", "CASE " "WHEN ucr.wer_eligible = 0 THEN NULL "
Update POST for view * Update POST for view The way the POST view works is altered so that the same functionality is available as in the GET view. This commit updates the POST view documentation to reflect this new behavior.
transfer size for attachments. .. http:post:: /{db}/_design/{ddoc}/_view/{view} - :synopsis: Returns certain rows for the specified stored view + :synopsis: Returns results for the specified view Executes the specified view function from the specified design document. - Unlike :get:`/{db}/_design/{ddoc}/_view/{view}` for accessing views, the - :method:`POST` method supports the specification - of explicit keys to be retrieved from the view results. The remainder of - the :method:`POST` view functionality is identical to the - :get:`/{db}/_design/{ddoc}/_view/{view}` API. + :method:`POST` view functionality supports identical parameters and behavior + as specified in the :get:`/{db}/_design/{ddoc}/_view/{view}` API but allows for the + query string parameters to be supplied as keys in a JSON object in the body + of the `POST` request. **Request**:
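A quick sketch of what the updated documentation describes, using Python's `requests`: the same query parameters can go in the query string of a GET or as keys of a JSON object in the body of a POST. The host, database, and view names below are made up, and a running CouchDB (2.2+ for arbitrary body parameters) is assumed:

```python
import requests

view = "http://localhost:5984/mydb/_design/app/_view/by_tag"

# Query-string form.
get_resp = requests.get(view, params={"limit": 10, "descending": "true"})

# Same parameters supplied as keys in a JSON body.
post_resp = requests.post(view, json={"limit": 10, "descending": True})

print(get_resp.json() == post_resp.json())
```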