patch (string, lengths 17-31.2k) | y (int64, 1-1) | oldf (string, lengths 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, lengths 8-843) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---|
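Each row below pairs a reviewer message (`msg`) with the diff hunk it was left on (`patch`), the pre-change file contents (`oldf`), and project/language labels (`proj`, `lang`). A minimal sketch of consuming a table like this with the Hugging Face `datasets` library; the dataset path is a placeholder, not a real identifier:

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the real one for this dataset.
ds = load_dataset("someuser/code-review-comments", split="train")

for row in ds.select(range(3)):
    # Each record carries the columns described in the header above:
    # patch, y, oldf, idx, id, msg, proj, lang.
    print(row["proj"], row["lang"], "->", row["msg"][:80])
```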
@@ -185,11 +185,7 @@
<% if policy(@proposal).can_approve_or_reject? %>
<div class="centered">
<p>
- <%= link_to "Approve",
- approval_response_path(approver_action: 'approve',
- # @todo: get rid of cart
- cart_id: @proposal.cart.id, version: @proposal.version
- ), class: 'form-button' %>
+ <%= link_to "Approve", approve_proposal_url(@proposal, version: @proposal.version), class: 'form-button' %>
</p>
</div>
<% end %> | 1 | <div class="inset">
<div class="row">
<div class="col-md-12 col-xs-12">
<h1 class="communicart_header">
<%= @proposal.name %>
</h1>
<div class="communicart_description">
<p>
Purchase Request: <strong><%= @proposal.public_identifier %></strong>
</p>
<p>
Requested by:
<strong><%= @proposal.requester.full_name %></strong>
</p>
<%= client_partial(@proposal.client, 'external_id',
locals: {proposal: @proposal}) %>
</div>
</div>
<%- if @proposal.linear? %>
<div class="col-md-12">
<table class="col-md-12 data_container" border='0'>
<%= render partial: "shared/email_status" %>
</table>
</div>
<%- else %>
<div class="col-md-4 col-xs-12 communicart_description">
<div class="purchase-status">
<h5 class="<%= @proposal.status %>"><%= @proposal.display_status %></h5>
</div>
</div>
<%- end %>
</div>
<div class="row">
<%= client_partial(@proposal.client, 'proposal_properties',
locals: {proposal: @proposal }) %>
</div>
</div>
<% if policy(@proposal).can_edit? %>
<%= client_partial(@proposal.client, 'restart_link',
locals: {proposal: @proposal})
%>
<% end %>
<%- if @proposal.flow == 'parallel' %>
<%- if @proposal.approvals.approved.any? %>
<div class="approval-status-container">
<div id="approval-status">
<h3>Request approved by</h3>
<ul>
<%- @proposal.approvals.approved.each do |approval| %>
<li class='icon-approved'>
<%= approval.user_email_address %>
<span class='timestamp'>on <%= l approval.updated_at %></span>
</li>
<%- end %>
</ul>
</div>
</div>
<%- end %>
<%- if @proposal.approvals.pending.any? %>
<div class="approval-status-container">
<div id="approval-status">
<h3>Waiting for approval from</h3>
<ul class="left">
<%- @proposal.approvals.pending.each do |approval| %>
<li class='icon-pending'>
<%= approval.user_email_address %>
</li>
<%- end %>
</ul>
<ul class="right">
<%- @proposal.approvals.approved.each do |approval| %>
<li class='icon-approved'>
<%= approval.user_email_address %>
</li>
<%- end %>
</ul>
</div>
</div>
<%- end %>
<%- end %>
<%- if @include_comments_files %>
<div class="comments-container proposal-submodel-container">
<div id="comments">
<h3>Comments on this purchase request</h3>
<%= form_for [@proposal, Comment.new] do |f| %>
<%= f.text_area :comment_text, rows: 5 %>
<div class='row text-area-info-container'>
<div class='col-xs-7 col-sm-6 text-area-info-web'>
<p>
These comments will be sent to your requester through email
</p>
</div>
<p class='col-xs-5 col-sm-6 text-area-button'>
<%= submit_tag "Send a Comment", id: :add_a_comment %>
</p>
</div>
<%- end %>
<% if @proposal.comments.any? %>
<% @proposal.comments.each do |c| %>
<div class='line-item'>
<div class='row'>
<% unless c.user.nil? %>
<p class='comment-sender col-sm-6 col-xs-12'>
<strong><%= c.user_full_name %></strong>
</p>
<% end %>
<p class='date col-sm-6 col-xs-12'>
<%= date_with_tooltip(c.created_at) %>
</p>
</div>
<div class='row'>
<p class='comment-text col-sm-6 col-xs-12'>
<%= c.comment_text %>
</p>
</div>
</div>
<% end %>
<% else %>
<p class='empty-list-label'>
No comments have been added yet
</p>
<% end %>
</div>
<div id="files">
<h3>Attachments to this proposal</h3>
<%= form_for [@proposal, Attachment.new] do |f| %>
<div class="line-item">
<div class="row">
<%= f.file_field :file %>
</div>
</div>
<div class='row text-area-info-container'>
<div class='col-xs-7 col-sm-6 text-area-info-web'>
<p>Attach a file (e.g. receipts, contract documents, etc.)</p>
</div>
<p class='col-xs-5 col-sm-6 text-area-button'>
<%= submit_tag "Attach a File", id: :add_a_file %>
</p>
</div>
<%- end %>
<% @proposal.attachments.each do |attachment| %>
<div class="line-item">
<div class="row">
<p class="col-sm-5 col-xs-12">
<a href="<%= attachment.url %>"><%= attachment.file_file_name %></a>
</p>
<p class="col-sm-3 col-xs-6">
<strong><%= attachment.user.full_name %></strong>
</p>
<p class="col-sm-3 col-xs-5 date righted">
<%= date_with_tooltip(attachment.created_at) %>
</p>
<p class="col-sm-1 col-xs-1 righted">
<%- if policy(attachment).can_destroy? %>
<%= link_to "Delete",
proposal_attachment_path(@proposal, attachment),
method: :delete, data: {confirm: "Are you sure?"} %>
<%- end %>
</p>
</div>
</div>
<% end %>
<% if @proposal.attachments.empty? %>
<p class="empty-list-label">
No attachments have been added yet
</p>
<% end %>
</div>
</div>
<%- end %>
<% if policy(@proposal).can_approve_or_reject? %>
<div class="centered">
<p>
<%= link_to "Approve",
approval_response_path(approver_action: 'approve',
# @todo: get rid of cart
cart_id: @proposal.cart.id, version: @proposal.version
), class: 'form-button' %>
</p>
</div>
<% end %>
| 1 | 12,986 | `version` isn't needed anymore because it's built into `approve_proposal_url`, yes? | 18F-C2 | rb
@@ -11,9 +11,10 @@ import struct
from scapy.compat import orb, chb
from scapy.config import conf
-from scapy.data import MTU, DLT_BLUETOOTH_LE_LL
+from scapy.data import MTU, DLT_BLUETOOTH_LE_LL, DLT_BLUETOOTH_LE_LL_WITH_PHDR
from scapy.packet import *
from scapy.fields import *
+from scapy.layers.dot11 import _dbmField
from scapy.layers.ppi import PPI, addPPIType, PPIGenericFldHdr
from scapy.contrib.ppi_geotag import XLEIntField, XLEShortField | 1 | # This file is for use with Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Airbus DS CyberSecurity
# Authors: Jean-Michel Picod, Arnaud Lebrun, Jonathan Christofer Demay
# This program is published under a GPLv2 license
"""Bluetooth 4LE layer"""
import socket
import struct
from scapy.compat import orb, chb
from scapy.config import conf
from scapy.data import MTU, DLT_BLUETOOTH_LE_LL
from scapy.packet import *
from scapy.fields import *
from scapy.layers.ppi import PPI, addPPIType, PPIGenericFldHdr
from scapy.contrib.ppi_geotag import XLEIntField, XLEShortField
from scapy.layers.bluetooth import EIR_Hdr, L2CAP_Hdr
from scapy.modules.six.moves import range
BTLE_Versions = {
7: '4.1'
}
BTLE_Corp_IDs = {
0xf: 'Broadcom Corporation'
}
class CtrlPDU(Packet):
name = "CtrlPDU"
fields_desc = [
XByteField("optcode", 0),
ByteEnumField("version", 0, BTLE_Versions),
LEShortEnumField("Company", 0, BTLE_Corp_IDs),
XShortField("subversion", 0)
]
class BTLE_PPI(Packet):
name = "BTLE PPI header"
fields_desc = [
LEShortField("pfh_type", 30006),
LEShortField("pfh_datalen", 24),
ByteField("btle_version", 0),
LEShortField("btle_channel", None),
ByteField("btle_clkn_high", None),
LEIntField("btle_clk_100ns", None),
Field("rssi_max", None, fmt="b"),
Field("rssi_min", None, fmt="b"),
Field("rssi_avg", None, fmt="b"),
ByteField("rssi_count", None)
]
class BDAddrField(MACField):
def __init__(self, name, default, resolve=False):
MACField.__init__(self, name, default)
if resolve:
conf.resolve.add(self)
def i2m(self, pkt, x):
if x is None:
return b"\0\0\0\0\0\0"
return mac2str(':'.join(x.split(':')[::-1]))
def m2i(self, pkt, x):
return str2mac(x[::-1])
class BTLEChanMapField(XByteField):
def __init__(self, name, default):
Field.__init__(self, name, default, "<Q")
def addfield(self, pkt, s, val):
return s + struct.pack(self.fmt, self.i2m(pkt, val))[:5]
def getfield(self, pkt, s):
return s[5:], self.m2i(pkt, struct.unpack(self.fmt, s[:5] + b"\x00\x00\x00")[0]) # noqa: E501
class BTLE(Packet):
name = "BT4LE"
fields_desc = [
XLEIntField("access_addr", 0x8E89BED6),
X3BytesField("crc", None)
]
@staticmethod
def compute_crc(pdu, init=0x555555):
def swapbits(a):
v = 0
if a & 0x80 != 0:
v |= 0x01
if a & 0x40 != 0:
v |= 0x02
if a & 0x20 != 0:
v |= 0x04
if a & 0x10 != 0:
v |= 0x08
if a & 0x08 != 0:
v |= 0x10
if a & 0x04 != 0:
v |= 0x20
if a & 0x02 != 0:
v |= 0x40
if a & 0x01 != 0:
v |= 0x80
return v
state = swapbits(init & 0xff) + (swapbits((init >> 8) & 0xff) << 8) + (swapbits((init >> 16) & 0xff) << 16) # noqa: E501
lfsr_mask = 0x5a6000
for i in (orb(x) for x in pdu):
for j in range(8):
next_bit = (state ^ i) & 1
i >>= 1
state >>= 1
if next_bit:
state |= 1 << 23
state ^= lfsr_mask
return struct.pack("<L", state)[:-1]
def post_build(self, p, pay):
# Switch payload and CRC
crc = p[-3:]
p = p[:-3] + pay
p += crc if self.crc is not None else self.compute_crc(p[4:])
return p
def post_dissect(self, s):
self.raw_packet_cache = None # Reset packet to allow post_build
return s
def pre_dissect(self, s):
# move crc
return s[:4] + s[-3:] + s[4:-3]
def post_dissection(self, pkt):
if isinstance(pkt, PPI):
pkt.notdecoded = PPIGenericFldHdr(pkt.notdecoded)
def hashret(self):
return struct.pack("!L", self.access_addr)
class BTLE_ADV(Packet):
name = "BTLE advertising header"
fields_desc = [
BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
BitField("RFU", 0, 2), # Unused
BitEnumField("PDU_type", 0, 4, {0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ", # noqa: E501
4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}), # noqa: E501
BitField("unused", 0, 2), # Unused
XBitField("Length", None, 6),
]
def post_build(self, p, pay):
p += pay
if self.Length is None:
if len(pay) > 2:
l = len(pay)
else:
l = 0
p = p[:1] + chb(l & 0x3f) + p[2:]
if not isinstance(self.underlayer, BTLE):
self.add_underlayer(BTLE)
return p
class BTLE_DATA(Packet):
name = "BTLE data header"
fields_desc = [
BitField("RFU", 0, 3), # Unused
BitField("MD", 0, 1),
BitField("SN", 0, 1),
BitField("NESN", 0, 1),
BitEnumField("LLID", 0, 2, {1: "continue", 2: "start", 3: "control"}),
ByteField("len", None),
]
def post_build(self, p, pay):
if self.len is None:
p = p[:-1] + chb(len(pay))
return p + pay
class BTLE_ADV_IND(Packet):
name = "BTLE ADV_IND"
fields_desc = [
BDAddrField("AdvA", None),
PacketListField("data", None, EIR_Hdr)
]
class BTLE_ADV_DIRECT_IND(Packet):
name = "BTLE ADV_DIRECT_IND"
fields_desc = [
BDAddrField("AdvA", ""),
BDAddrField("InitA", "")
]
class BTLE_ADV_NONCONN_IND(BTLE_ADV_IND):
name = "BTLE ADV_NONCONN_IND"
class BTLE_ADV_SCAN_IND(BTLE_ADV_IND):
name = "BTLE ADV_SCAN_IND"
class BTLE_SCAN_REQ(Packet):
name = "BTLE scan request"
fields_desc = [
BDAddrField("ScanA", ""),
BDAddrField("AdvA", "")
]
def answers(self, other):
return BTLE_SCAN_RSP in other and self.AdvA == other.AdvA
class BTLE_SCAN_RSP(Packet):
name = "BTLE scan response"
fields_desc = [
BDAddrField("AdvA", ""),
PacketListField("data", None, EIR_Hdr)
]
def answers(self, other):
return BTLE_SCAN_REQ in other and self.AdvA == other.AdvA
class BTLE_CONNECT_REQ(Packet):
name = "BTLE connect request"
fields_desc = [
BDAddrField("InitA", ""),
BDAddrField("AdvA", ""),
# LLDATA
XIntField("AA", 0x00),
X3BytesField("crc_init", 0x0),
XByteField("win_size", 0x0),
XLEShortField("win_offset", 0x0),
XLEShortField("interval", 0x0),
XLEShortField("latency", 0x0),
XLEShortField("timeout", 0x0),
BTLEChanMapField("chM", 0),
BitField("SCA", 0, 3),
BitField("hop", 0, 5),
]
bind_layers(BTLE, BTLE_ADV, access_addr=0x8E89BED6)
bind_layers(BTLE, BTLE_DATA)
bind_layers(BTLE_ADV, BTLE_ADV_IND, PDU_type=0)
bind_layers(BTLE_ADV, BTLE_ADV_DIRECT_IND, PDU_type=1)
bind_layers(BTLE_ADV, BTLE_ADV_NONCONN_IND, PDU_type=2)
bind_layers(BTLE_ADV, BTLE_SCAN_REQ, PDU_type=3)
bind_layers(BTLE_ADV, BTLE_SCAN_RSP, PDU_type=4)
bind_layers(BTLE_ADV, BTLE_CONNECT_REQ, PDU_type=5)
bind_layers(BTLE_ADV, BTLE_ADV_SCAN_IND, PDU_type=6)
bind_layers(BTLE_DATA, L2CAP_Hdr, LLID=2) # BTLE_DATA / L2CAP_Hdr / ATT_Hdr
# LLID=1 -> Continue
bind_layers(BTLE_DATA, CtrlPDU, LLID=3)
conf.l2types.register(DLT_BLUETOOTH_LE_LL, BTLE)
bind_layers(PPI, BTLE, dlt=147)
addPPIType(30006, BTLE_PPI)
| 1 | 13,743 | Moved to the bottom of the file. `CtrlPDU` sits on `BTLE_DATA` so let's put it after | secdev-scapy | py |
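The note above is about definition order: `bind_layers(BTLE_DATA, CtrlPDU, LLID=3)` can only run once both classes exist, which is why the binding block sits at the bottom of the file. A minimal sketch of the resulting dissection chain, assuming this layer is importable as `scapy.layers.bluetooth4LE`:

```python
from scapy.layers.bluetooth4LE import BTLE, BTLE_DATA, CtrlPDU

# A non-advertising access address keeps BTLE from guessing BTLE_ADV,
# so LLID=3 routes the payload through the CtrlPDU binding.
pkt = BTLE(access_addr=0x12345678) / BTLE_DATA(LLID=3) / CtrlPDU(optcode=0x0C, version=7)
pkt.show2()  # build, then re-dissect: BTLE / BTLE_DATA / CtrlPDU
```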
@@ -48,6 +48,7 @@ module Blacklight::SolrHelper
extend ActiveSupport::Concern
include Blacklight::SearchFields
include Blacklight::Facet
+ require 'json'
included do
if self.respond_to?(:helper_method) | 1 | # -*- encoding : utf-8 -*-
# SolrHelper is a controller layer mixin. It is in the controller scope: request params, session etc.
#
# NOTE: Be careful when creating variables here as they may be overriding something that already exists.
# The ActionController docs: http://api.rubyonrails.org/classes/ActionController/Base.html
#
# Override these methods in your own controller for customizations:
#
# class CatalogController < ActionController::Base
#
# include Blacklight::Catalog
#
# def solr_search_params
# super.merge :per_page=>10
# end
# end
#
# Or by including in local extensions:
# module LocalSolrHelperExtension
# [ local overrides ]
# end
#
# class CatalogController < ActionController::Base
#
# include Blacklight::Catalog
# include LocalSolrHelperExtension
#
# def solr_search_params
# super.merge :per_page=>10
# end
# end
#
# Or by using ActiveSupport::Concern:
#
# module LocalSolrHelperExtension
# extend ActiveSupport::Concern
# include Blacklight::SolrHelper
#
# [ local overrides ]
# end
#
# class CatalogController < ApplicationController
# include LocalSolrHelperExtension
# include Blacklight::Catalog
# end
module Blacklight::SolrHelper
extend ActiveSupport::Concern
include Blacklight::SearchFields
include Blacklight::Facet
included do
if self.respond_to?(:helper_method)
helper_method(:facet_limit_for)
end
# We want to install a class-level place to keep
# solr_search_params_logic method names. Compare to before_filter,
# similar design. Since we're a module, we have to add it in here.
# There are too many different semantic choices in ruby 'class variables',
# we choose this one for now, supplied by Rails.
class_attribute :solr_search_params_logic
# Set defaults. Each symbol identifies a _method_ that must be in
# this class, taking two parameters (solr_parameters, user_parameters)
# Can be changed in local apps or by plugins, eg:
# CatalogController.include ModuleDefiningNewMethod
# CatalogController.solr_search_params_logic += [:new_method]
# CatalogController.solr_search_params_logic.delete(:we_dont_want)
self.solr_search_params_logic = [:default_solr_parameters , :add_query_to_solr, :add_facet_fq_to_solr, :add_facetting_to_solr, :add_solr_fields_to_query, :add_paging_to_solr, :add_sorting_to_solr, :add_group_config_to_solr ]
end
def force_to_utf8(value)
case value
when Hash
value.each { |k, v| value[k] = force_to_utf8(v) }
when Array
value.each { |v| force_to_utf8(v) }
when String
value.force_encoding("utf-8") if value.respond_to?(:force_encoding)
end
value
end
def find(*args)
path = blacklight_config.solr_path
response = blacklight_solr.get(path, :params=> args[1])
Blacklight::SolrResponse.new(force_to_utf8(response), args[1])
rescue Errno::ECONNREFUSED => e
raise Blacklight::Exceptions::ECONNREFUSED.new("Unable to connect to Solr instance using #{blacklight_solr.inspect}")
end
# A helper method used for generating solr LocalParams, put quotes
# around the term unless it's a bare-word. Escape internal quotes
# if needed.
def solr_param_quote(val, options = {})
options[:quote] ||= '"'
unless val =~ /^[a-zA-Z0-9$_\-\^]+$/
val = options[:quote] +
# Yes, we need crazy escaping here, to deal with regexp esc too!
val.gsub("'", "\\\\\'").gsub('"', "\\\\\"") +
options[:quote]
end
return val
end
# returns a params hash for searching solr.
# The CatalogController #index action uses this.
# Solr parameters can come from a number of places. From lowest
# precedence to highest:
# 1. General defaults in blacklight config (are trumped by)
# 2. defaults for the particular search field identified by params[:search_field] (are trumped by)
# 3. certain parameters directly on input HTTP query params
# * not just any parameter is grabbed willy nilly, only certain ones are allowed by HTTP input)
# * for legacy reasons, qt in http query does not over-ride qt in search field definition default.
# 4. extra parameters passed in as argument.
#
# spellcheck.q will be supplied with the [:q] value unless specifically
# specified otherwise.
#
# Incoming parameter :f is mapped to :fq solr parameter.
def solr_search_params(user_params = params || {})
solr_parameters = {}
solr_search_params_logic.each do |method_name|
send(method_name, solr_parameters, user_params)
end
return solr_parameters
end
####
# Start with general defaults from BL config. Need to use custom
# merge to dup values, to avoid later mutating the original by mistake.
def default_solr_parameters(solr_parameters, user_params)
blacklight_config.default_solr_params.each do |key, value|
solr_parameters[key] = value.dup rescue value
end
end
###
# copy paging params from BL app over to solr, changing
# app level per_page and page to Solr rows and start.
def add_paging_to_solr(solr_params, user_params)
# Now any over-rides from current URL?
solr_params[:rows] = user_params[:rows].to_i unless user_params[:rows].blank?
solr_params[:rows] = user_params[:per_page].to_i unless user_params[:per_page].blank?
# Do we need to translate :page to Solr :start?
unless user_params[:page].blank?
# already set solr_params["rows"] might not be the one we just set,
# could have been from app defaults too. But we need one.
# raising is consistent with prior RSolr magic keys behavior.
# We could change this to default to 10, or to raise on startup
# from config instead of at runtime.
if solr_params[:rows].blank?
raise Exception.new("To use pagination when no :per_page is supplied in the URL, :rows must be configured in blacklight_config default_solr_params")
end
solr_params[:start] = solr_params[:rows].to_i * (user_params[:page].to_i - 1)
solr_params[:start] = 0 if solr_params[:start].to_i < 0
end
solr_params[:rows] ||= blacklight_config.per_page.first unless blacklight_config.per_page.blank?
solr_params[:rows] = blacklight_config.max_per_page if solr_params[:rows].to_i > blacklight_config.max_per_page
end
###
# copy sorting params from BL app over to solr
def add_sorting_to_solr(solr_parameters, user_params)
if user_params[:sort].blank? and sort_field = blacklight_config.default_sort_field
# no sort param provided, use default
solr_parameters[:sort] = sort_field.sort unless sort_field.sort.blank?
elsif sort_field = blacklight_config.sort_fields[user_params[:sort]]
# check for sort field key
solr_parameters[:sort] = sort_field.sort unless sort_field.sort.blank?
else
# just pass the key through
solr_parameters[:sort] = user_params[:sort]
end
end
##
# Take the user-entered query, and put it in the solr params,
# including config's "search field" params for current search field.
# also include setting spellcheck.q.
def add_query_to_solr(solr_parameters, user_parameters)
###
# Merge in search field configured values, if present, over-writing general
# defaults
###
# legacy behavior of user param :qt is passed through, but over-ridden
# by actual search field config if present. We might want to remove
# this legacy behavior at some point. It does not seem to be currently
# rspec'd.
solr_parameters[:qt] = user_parameters[:qt] if user_parameters[:qt]
search_field_def = search_field_def_for_key(user_parameters[:search_field])
if (search_field_def)
solr_parameters[:qt] = search_field_def.qt
solr_parameters.merge!( search_field_def.solr_parameters) if search_field_def.solr_parameters
end
##
# Create Solr 'q' including the user-entered q, prefixed by any
# solr LocalParams in config, using solr LocalParams syntax.
# http://wiki.apache.org/solr/LocalParams
##
if (search_field_def && hash = search_field_def.solr_local_parameters)
local_params = hash.collect do |key, val|
key.to_s + "=" + solr_param_quote(val, :quote => "'")
end.join(" ")
solr_parameters[:q] = "{!#{local_params}}#{user_parameters[:q]}"
else
solr_parameters[:q] = user_parameters[:q] if user_parameters[:q]
end
##
# Set Solr spellcheck.q to be original user-entered query, without
# our local params, otherwise it'll try and spellcheck the local
# params! Unless spellcheck.q has already been set by someone,
# respect that.
#
# TODO: Change calling code to expect this as a symbol instead of
# a string, for consistency? :'spellcheck.q' is a symbol. Right now
# rspec tests for a string, and can't tell if other code may
# insist on a string.
solr_parameters["spellcheck.q"] = user_parameters[:q] unless solr_parameters["spellcheck.q"]
end
##
# Add any existing facet limits, stored in app-level HTTP query
# as :f, to solr as appropriate :fq query.
def add_facet_fq_to_solr(solr_parameters, user_params)
# convert a String value into an Array
if solr_parameters[:fq].is_a? String
solr_parameters[:fq] = [solr_parameters[:fq]]
end
# :fq, map from :f.
if ( user_params[:f])
f_request_params = user_params[:f]
solr_parameters[:fq] ||= []
f_request_params.each_pair do |facet_field, value_list|
Array(value_list).each do |value|
solr_parameters[:fq] << facet_value_to_fq_string(facet_field, value)
end
end
end
end
##
# Convert a facet/value pair into a solr fq parameter
def facet_value_to_fq_string(facet_field, value)
facet_config = blacklight_config.facet_fields[facet_field]
local_params = []
local_params << "tag=#{facet_config.tag}" if facet_config and facet_config.tag
prefix = ""
prefix = "{!#{local_params.join(" ")}}" unless local_params.empty?
fq = case
when (facet_config and facet_config.query)
facet_config.query[value][:fq]
when (facet_config and facet_config.date),
(value.is_a?(TrueClass) or value.is_a?(FalseClass) or value == 'true' or value == 'false'),
(value.is_a?(Integer) or (value.to_i.to_s == value if value.respond_to? :to_i)),
(value.is_a?(Float) or (value.to_f.to_s == value if value.respond_to? :to_f))
(value.is_a?(DateTime) or value.is_a?(Date) or value.is_a?(Time))
"#{prefix}#{facet_field}:#{value}"
when value.is_a?(Range)
"#{prefix}#{facet_field}:[#{value.first} TO #{value.last}]"
else
"{!raw f=#{facet_field}#{(" " + local_params.join(" ")) unless local_params.empty?}}#{value}"
end
end
##
# Add appropriate Solr facetting directives in, including
# taking account of our facet paging/'more'. This is not
# about solr 'fq', this is about solr facet.* params.
def add_facetting_to_solr(solr_parameters, user_params)
# While not used by BL core behavior, legacy behavior seemed to be
# to accept incoming params as "facet.field" or "facets", and add them
# on to any existing facet.field sent to Solr. Legacy behavior seemed
# to be accepting these incoming params as arrays (in Rails URL with []
# on end), or single values. At least one of these is used by
# Stanford for "faux hieararchial facets".
if user_params.has_key?("facet.field") || user_params.has_key?("facets")
solr_parameters[:"facet.field"] ||= []
solr_parameters[:"facet.field"].concat( [user_params["facet.field"], user_params["facets"]].flatten.compact ).uniq!
end
if blacklight_config.add_facet_fields_to_solr_request
solr_parameters[:facet] = true
solr_parameters[:'facet.field'] ||= []
solr_parameters[:'facet.field'] += blacklight_config.facet_fields_to_add_to_solr
if blacklight_config.facet_fields.any? { |k,v| v[:query] }
solr_parameters[:'facet.query'] ||= []
end
if blacklight_config.facet_fields.any? { |k,v| v[:pivot] }
solr_parameters[:'facet.pivot'] ||= []
end
end
blacklight_config.facet_fields.each do |field_name, facet|
if blacklight_config.add_facet_fields_to_solr_request
case
when facet.pivot
solr_parameters[:'facet.pivot'] << with_ex_local_param(facet.ex, facet.pivot.join(","))
when facet.query
solr_parameters[:'facet.query'] += facet.query.map { |k, x| with_ex_local_param(facet.ex, x[:fq]) }
when facet.ex
if idx = solr_parameters[:'facet.field'].index(facet.field)
solr_parameters[:'facet.field'][idx] = with_ex_local_param(facet.ex, solr_parameters[:'facet.field'][idx])
end
end
if facet.sort
solr_parameters[:"f.#{facet.field}.facet.sort"] = facet.sort
end
end
# Support facet paging and 'more'
# links, by sending a facet.limit one more than what we
# want to page at, according to configured facet limits.
solr_parameters[:"f.#{facet.field}.facet.limit"] = (facet_limit_for(field_name) + 1) if facet_limit_for(field_name)
end
end
def with_ex_local_param(ex, value)
if ex
"{!ex=#{ex}}#{value}"
else
value
end
end
def add_solr_fields_to_query solr_parameters, user_parameters
return unless blacklight_config.add_field_configuration_to_solr_request
blacklight_config.index_fields.each do |field_name, field|
if field.highlight
solr_parameters[:hl] ||= true
solr_parameters[:'hl.fl'] ||= []
solr_parameters[:'hl.fl'] << field.field
end
end
end
# Remove the group parameter if we've faceted on the group field (e.g. for the full results for a group)
def add_group_config_to_solr solr_parameters, user_parameters
if user_parameters[:f] and user_parameters[:f][grouped_key_for_results]
solr_parameters[:group] = false
end
end
# a solr query method
# given a user query, return a solr response containing both result docs and facets
# - mixes in the Blacklight::Solr::SpellingSuggestions module
# - the response will have a spelling_suggestions method
# Returns a two-element array (aka duple) with first the solr response object,
# and second an array of SolrDocuments representing the response.docs
def get_search_results(user_params = params || {}, extra_controller_params = {})
solr_response = query_solr(user_params, extra_controller_params)
case
when (solr_response.grouped? && grouped_key_for_results)
[solr_response.group(grouped_key_for_results), []]
when (solr_response.grouped? && solr_response.grouped.length == 1)
[solr_response.grouped.first, []]
else
document_list = solr_response.docs.collect {|doc| SolrDocument.new(doc, solr_response)}
[solr_response, document_list]
end
end
# a solr query method
# given a user query,
# Returns a solr response object
def query_solr(user_params = params || {}, extra_controller_params = {})
# In later versions of Rails, the #benchmark method can do timing
# better for us.
bench_start = Time.now
solr_params = self.solr_search_params(user_params).merge(extra_controller_params)
solr_params[:qt] ||= blacklight_config.qt
path = blacklight_config.solr_path
# delete these parameters, otherwise rsolr will pass them through.
res = blacklight_solr.send_and_receive(path, :params=>solr_params)
solr_response = Blacklight::SolrResponse.new(force_to_utf8(res), solr_params)
Rails.logger.debug("Solr query: #{solr_params.inspect}")
Rails.logger.debug("Solr response: #{solr_response.inspect}") if defined?(::BLACKLIGHT_VERBOSE_LOGGING) and ::BLACKLIGHT_VERBOSE_LOGGING
Rails.logger.debug("Solr fetch: #{self.class}#query_solr (#{'%.1f' % ((Time.now.to_f - bench_start.to_f)*1000)}ms)")
solr_response
end
# returns a params hash for finding a single solr document (CatalogController #show action)
# If the id arg is nil, then the value is fetched from params[:id]
# This method is primary called by the get_solr_response_for_doc_id method.
def solr_doc_params(id=nil)
id ||= params[:id]
p = blacklight_config.default_document_solr_params.merge({
:id => id # this assumes the document request handler will map the 'id' param to the unique key field
})
p[:qt] ||= 'document'
p
end
# a solr query method
# retrieve a solr document, given the doc id
def get_solr_response_for_doc_id(id=nil, extra_controller_params={})
solr_params = solr_doc_params(id).merge(extra_controller_params)
solr_response = find((blacklight_config.document_solr_request_handler || blacklight_config.qt), solr_params)
raise Blacklight::Exceptions::InvalidSolrID.new if solr_response.docs.empty?
document = SolrDocument.new(solr_response.docs.first, solr_response)
[solr_response, document]
end
# given a field name and array of values, get the matching SOLR documents
def get_solr_response_for_field_values(field, values, extra_solr_params = {})
values ||= []
values = [values] unless values.respond_to? :each
q = nil
if values.empty?
q = "NOT *:*"
else
q = "#{field}:(#{ values.to_a.map { |x| solr_param_quote(x)}.join(" OR ")})"
end
solr_params = {
:defType => "lucene", # need boolean for OR
:q => q,
# not sure why fl * is neccesary, why isn't default solr_search_params
# sufficient, like it is for any other search results solr request?
# But tests fail without this. I think because some functionality requires
# this to actually get solr_doc_params, not solr_search_params. Confused
# semantics again.
:fl => "*",
:facet => 'false',
:spellcheck => 'false'
}.merge(extra_solr_params)
solr_response = find(blacklight_config.qt, self.solr_search_params().merge(solr_params) )
document_list = solr_response.docs.collect{|doc| SolrDocument.new(doc, solr_response) }
[solr_response,document_list]
end
# returns a params hash for a single facet field solr query.
# used primary by the get_facet_pagination method.
# Looks up Facet Paginator request params from current request
# params to figure out sort and offset.
# Default limit for facet list can be specified by defining a controller
# method facet_list_limit, otherwise 20.
def solr_facet_params(facet_field, user_params=params || {}, extra_controller_params={})
input = user_params.deep_merge(extra_controller_params)
# First start with a standard solr search params calculations,
# for any search context in our request params.
solr_params = solr_search_params(user_params).merge(extra_controller_params)
# Now override with our specific things for fetching facet values
solr_params[:"facet.field"] = facet_field
limit =
if respond_to?(:facet_list_limit)
facet_list_limit.to_s.to_i
elsif solr_params["facet.limit"]
solr_params["facet.limit"].to_i
else
20
end
# Need to set as f.facet_field.facet.* to make sure we
# override any field-specific default in the solr request handler.
solr_params[:"f.#{facet_field}.facet.limit"] = limit + 1
solr_params[:"f.#{facet_field}.facet.offset"] = ( input.fetch(Blacklight::Solr::FacetPaginator.request_keys[:page] , 1).to_i - 1 ) * ( limit )
solr_params[:"f.#{facet_field}.facet.sort"] = input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ] if input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ]
solr_params[:rows] = 0
return solr_params
end
# a solr query method
# used to paginate through a single facet field's values
# /catalog/facet/language_facet
def get_facet_pagination(facet_field, user_params=params || {}, extra_controller_params={})
solr_params = solr_facet_params(facet_field, user_params, extra_controller_params)
# Make the solr call
response =find(blacklight_config.qt, solr_params)
limit = solr_params[:"f.#{facet_field}.facet.limit"] -1
# Actually create the paginator!
# NOTE: The sniffing of the proper sort from the solr response is not
# currently tested for, tricky to figure out how to test, since the
# default setup we test against doesn't use this feature.
return Blacklight::Solr::FacetPaginator.new(response.facets.first.items,
:offset => solr_params[:"f.#{facet_field}.facet.offset"],
:limit => limit,
:sort => response["responseHeader"]["params"][:"f.#{facet_field}.facet.sort"] || response["responseHeader"]["params"]["facet.sort"]
)
end
# a solr query method
# this is used when selecting a search result: we have a query and a
# position in the search results and possibly some facets
# Pass in an index where 1 is the first document in the list, and
# the Blacklight app-level request params that define the search.
def get_single_doc_via_search(index, request_params)
solr_params = solr_search_params(request_params)
solr_params[:start] = (index - 1) # start at 0 to get 1st doc, 1 to get 2nd.
solr_params[:rows] = 1
solr_params[:fl] = '*'
solr_response = find(blacklight_config.qt, solr_params)
SolrDocument.new(solr_response.docs.first, solr_response) unless solr_response.docs.empty?
end
# Get the previous and next document from a search result
def get_previous_and_next_documents_for_search(index, request_params, extra_controller_params={})
solr_params = solr_search_params(request_params).merge(extra_controller_params)
if index > 0
solr_params[:start] = index - 1 # get one before
solr_params[:rows] = 3 # and one after
else
solr_params[:start] = 0 # there is no previous doc
solr_params[:rows] = 2 # but there should be one after
end
solr_params[:fl] = '*'
solr_params[:facet] = false
solr_response = find(blacklight_config.qt, solr_params)
document_list = solr_response.docs.collect{|doc| SolrDocument.new(doc, solr_response) }
# only get the previous doc if there is one
prev_doc = document_list.first if index > 0
next_doc = document_list.last if (index + 1) < solr_response.total
[solr_response, [prev_doc, next_doc]]
end
# returns a solr params hash
# if field is nil, the value is fetched from blacklight_config[:index][:show_link]
# the :fl (solr param) is set to the "field" value.
# per_page is set to 10
def solr_opensearch_params(field=nil)
solr_params = solr_search_params
solr_params[:per_page] = 10
solr_params[:fl] = blacklight_config.index.show_link
solr_params
end
# a solr query method
# does a standard search but returns a simplified object.
# an array is returned, the first item is the query string,
# the second item is an other array. This second array contains
# all of the field values for each of the documents...
# where the field is the "field" argument passed in.
def get_opensearch_response(field=nil, extra_controller_params={})
solr_params = solr_opensearch_params().merge(extra_controller_params)
response = find(blacklight_config.qt, solr_params)
a = [solr_params[:q]]
a << response.docs.map {|doc| doc[solr_params[:fl]].to_s }
end
# Look up facet limit for given facet_field. Will look at config, and
# if config is 'true' will look up from Solr @response if available. If
# no limit is avaialble, returns nil. Used from #solr_search_params
# to supply f.fieldname.facet.limit values in solr request (no @response
# available), and used in display (with @response available) to create
# a facet paginator with the right limit.
def facet_limit_for(facet_field)
facet = blacklight_config.facet_fields[facet_field]
return nil if facet.blank?
limit = facet.limit
if ( limit == true && @response &&
@response["responseHeader"] &&
@response["responseHeader"]["params"])
limit =
@response["responseHeader"]["params"]["f.#{facet_field}.facet.limit"] ||
@response["responseHeader"]["params"]["facet.limit"]
limit = (limit.to_i() -1) if limit
limit = nil if limit == -2 # -1-1==-2, unlimited.
elsif limit == true
limit = nil
end
return limit
end
##
# The key to use to retrieve the grouped field to display
def grouped_key_for_results
blacklight_config.index.group
end
end
| 1 | 4,841 | This ends up requiring the `json` gem, right? Is there any way we can do this to be implementation agnostic (e.g. to let me use `yajl-ruby` for performance reasons, or because my app already brings in a JSON library)? | projectblacklight-blacklight | rb
@@ -56,8 +56,13 @@ type byteReadCloser struct{ io.ReadCloser }
// ReadByte implements the io.ByteReader interface.
func (b byteReadCloser) ReadByte() (byte, error) {
var buf [1]byte
- _, err := b.Read(buf[:])
- return buf[0], err
+ for {
+ n, err := b.Read(buf[:])
+ if n == 0 && err == nil {
+ continue
+ }
+ return buf[0], err
+ }
}
// A compressor builds a Riegeli compressed block. | 1 | /*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package riegeli
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/DataDog/zstd"
"github.com/google/brotli/go/cbrotli"
)
// A decompressor decodes a compressed Riegeli block.
type decompressor interface {
byteReader
io.Closer
}
func newDecompressor(r byteReader, c compressionType) (decompressor, error) {
if c == noCompression {
return &nopDecompressorClose{r}, nil
}
if _, err := binary.ReadUvarint(r); err != nil {
return nil, fmt.Errorf("bad varint prefix for compressed block: %v", err)
}
switch c {
case brotliCompression:
return &byteReadCloser{cbrotli.NewReader(r)}, nil
case zstdCompression:
return &byteReadCloser{zstd.NewReader(r)}, nil
default:
return nil, fmt.Errorf("unsupported compression_type: '%s'", []byte{byte(c)})
}
}
// A byteReadCloser trivially implements io.ByteReader for a io.ReadCloser.
type byteReadCloser struct{ io.ReadCloser }
// ReadByte implements the io.ByteReader interface.
func (b byteReadCloser) ReadByte() (byte, error) {
var buf [1]byte
_, err := b.Read(buf[:])
return buf[0], err
}
// A compressor builds a Riegeli compressed block.
type compressor interface {
writerTo
io.Closer
}
func newCompressor(opts *WriterOptions) (compressor, error) {
buf := bytes.NewBuffer(nil)
switch opts.compressionType() {
case noCompression:
return &nopCompressorClose{buf}, nil
case brotliCompression:
brotliOpts := cbrotli.WriterOptions{Quality: opts.compressionLevel()}
w := cbrotli.NewWriter(buf, brotliOpts)
return &sizePrefixedWriterTo{buf: buf, WriteCloser: w}, nil
case zstdCompression:
w := zstd.NewWriterLevel(buf, opts.compressionLevel())
return &sizePrefixedWriterTo{buf: buf, WriteCloser: w}, nil
default:
return nil, fmt.Errorf("unsupported compression_type: '%s'", []byte{byte(opts.compressionType())})
}
}
type sizePrefixedWriterTo struct {
buf *bytes.Buffer
io.WriteCloser
prefix []byte
}
// Close implements part of the compressor interface.
func (w *sizePrefixedWriterTo) Close() error {
if err := w.WriteCloser.Close(); err != nil {
return err
}
w.prefix = make([]byte, binary.MaxVarintLen64)
n := int64(binary.PutUvarint(w.prefix[:], uint64(w.buf.Len())))
w.prefix = w.prefix[:n]
return nil
}
// WriteTo implements part of the compressor interface.
func (w *sizePrefixedWriterTo) WriteTo(out io.Writer) (int64, error) {
if n, err := out.Write(w.prefix); err != nil {
return int64(n), err
}
n, err := w.buf.WriteTo(out)
n += int64(len(w.prefix))
return n, err
}
// Len implements part of the compressor interface.
func (w *sizePrefixedWriterTo) Len() int { return len(w.prefix) + w.buf.Len() }
type byteReader interface {
io.Reader
io.ByteReader
}
type writerTo interface {
io.WriterTo
io.Writer
// Len returns the total data that will be written by WriteTo.
Len() int
}
// A nopDecompressorClose trivially implements io.Closer for a byteReader.
type nopDecompressorClose struct{ byteReader }
// Close implements the io.Closer interface.
func (nopDecompressorClose) Close() error { return nil }
// A nopCompressorClose trivially implements io.Closer for a writerTo.
type nopCompressorClose struct{ writerTo }
// Close implements the io.Closer interface.
func (nopCompressorClose) Close() error { return nil }
| 1 | 8,657 | This seems dangerous. What circumstances lead to the need for a retry? Can that happen more than once? I'm concerned that if we hit the pathological state, this could turn into a CPU spinner. I wonder if we could get the same benefit from just "trying again" and failing if it fails the second time. | kythe-kythe | go |
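The comment floats an alternative: retry a zero-byte read a fixed number of times and then fail, instead of looping indefinitely. A minimal Python sketch of that bounded-retry idea (an illustration only, not the project's actual Go code):

```python
def read_byte_bounded(reader, max_retries=1):
    """Read one byte, tolerating a limited number of empty reads.

    A reader that keeps returning zero bytes without an error exhausts the
    retry budget and raises, so the pathological case cannot busy-spin.
    """
    for _ in range(max_retries + 1):
        chunk = reader.read(1)
        if chunk:
            return chunk[0]
    raise IOError("reader returned no data after {} retries".format(max_retries))
```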
@@ -20,11 +20,18 @@
package org.apache.iceberg;
import java.util.List;
+import java.util.Optional;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.util.PartitionSet;
public class BaseReplacePartitions
extends MergingSnapshotProducer<ReplacePartitions> implements ReplacePartitions {
+
+ private final PartitionSet deletedPartitions = PartitionSet.create(super.getSpecsById());
+ private Long startingSnapshotId = null;
+ private boolean validateNoConflictingAppends = false;
+
BaseReplacePartitions(String tableName, TableOperations ops) {
super(tableName, ops);
set(SnapshotSummary.REPLACE_PARTITIONS_PROP, "true"); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.expressions.Expressions;
public class BaseReplacePartitions
extends MergingSnapshotProducer<ReplacePartitions> implements ReplacePartitions {
BaseReplacePartitions(String tableName, TableOperations ops) {
super(tableName, ops);
set(SnapshotSummary.REPLACE_PARTITIONS_PROP, "true");
}
@Override
protected ReplacePartitions self() {
return this;
}
@Override
protected String operation() {
return DataOperations.OVERWRITE;
}
@Override
public ReplacePartitions addFile(DataFile file) {
dropPartition(file.specId(), file.partition());
add(file);
return this;
}
@Override
public ReplacePartitions validateAppendOnly() {
failAnyDelete();
return this;
}
@Override
public List<ManifestFile> apply(TableMetadata base) {
if (dataSpec().fields().size() <= 0) {
// replace all data in an unpartitioned table
deleteByRowFilter(Expressions.alwaysTrue());
}
try {
return super.apply(base);
} catch (ManifestFilterManager.DeleteException e) {
throw new ValidationException(
"Cannot commit file that conflicts with existing partition: %s", e.partition());
}
}
}
| 1 | 40,213 | If there is a reference to other fields or methods, please do the initialization in the constructor. | apache-iceberg | java |
@@ -0,0 +1,13 @@
+// This object is imported into the documentation site. An example for the documentation site should be part of the pull request for the component. The object key is the kabob case of the "URL folder". In the case of `http://localhost:8080/components/app-launcher/`, `app-launcher` is the `key`. The folder name is created by `components.component` value in `package.json`. The following uses webpack's raw-loader plugin to get "text files" that will be eval()'d by CodeMirror within the documentation site on page load.
+
+/* eslint-env node */
+/* eslint-disable global-require */
+
+const siteStories = [
+ require('raw-loader!@salesforce/design-system-react/components/pill/__examples__/base.jsx'),
+ require('raw-loader!@salesforce/design-system-react/components/pill/__examples__/icons.jsx'),
+ require('raw-loader!@salesforce/design-system-react/components/pill/__examples__/avatars.jsx'),
+ require('raw-loader!@salesforce/design-system-react/components/pill/__examples__/bare.jsx'),
+];
+
+module.exports = siteStories; | 1 | 1 | 11,983 | These should be importing from `pill-container` | salesforce-design-system-react | js |
|
@@ -2593,12 +2593,12 @@ void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t st
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
- player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
+ player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
- player->sendTextMessage(MESSAGE_INFO_DESCR, fmt::format("{:s} tells you to move closer.", tradePartner->getName()));
+ player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
| 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "pugicast.h"
#include "actions.h"
#include "bed.h"
#include "configmanager.h"
#include "creature.h"
#include "creatureevent.h"
#include "databasetasks.h"
#include "events.h"
#include "game.h"
#include "globalevent.h"
#include "iologindata.h"
#include "iomarket.h"
#include "items.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "server.h"
#include "spells.h"
#include "talkaction.h"
#include "weapons.h"
#include "script.h"
#include <fmt/format.h>
extern ConfigManager g_config;
extern Actions* g_actions;
extern Chat* g_chat;
extern TalkActions* g_talkActions;
extern Spells* g_spells;
extern Vocations g_vocations;
extern GlobalEvents* g_globalEvents;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
extern Monsters g_monsters;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern Scripts* g_scripts;
Game::Game()
{
offlineTrainingWindow.defaultEnterButton = 1;
offlineTrainingWindow.defaultEscapeButton = 0;
offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD);
offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE);
offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB);
offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE);
offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL);
offlineTrainingWindow.buttons.emplace_back("Okay", offlineTrainingWindow.defaultEnterButton);
offlineTrainingWindow.buttons.emplace_back("Cancel", offlineTrainingWindow.defaultEscapeButton);
offlineTrainingWindow.priority = true;
}
Game::~Game()
{
for (const auto& it : guilds) {
delete it.second;
}
}
void Game::start(ServiceManager* manager)
{
serviceManager = manager;
updateWorldTime();
if (g_config.getBoolean(ConfigManager::DEFAULT_WORLD_LIGHT)) {
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
}
g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0)));
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
}
GameState_t Game::getGameState() const
{
return gameState;
}
void Game::setWorldType(WorldType_t type)
{
worldType = type;
}
void Game::setGameState(GameState_t newState)
{
if (gameState == GAME_STATE_SHUTDOWN) {
return; //this cannot be stopped
}
if (gameState == newState) {
return;
}
gameState = newState;
switch (newState) {
case GAME_STATE_INIT: {
groups.load();
g_chat->load();
map.spawns.startup();
raids.loadFromXml();
raids.startup();
quests.loadFromXml();
mounts.loadFromXml();
loadMotdNum();
loadPlayersRecord();
loadAccountStorageValues();
g_globalEvents->startup();
break;
}
case GAME_STATE_SHUTDOWN: {
g_globalEvents->execute(GLOBALEVENT_SHUTDOWN);
//kick all players that are still online
auto it = players.begin();
while (it != players.end()) {
it->second->kickPlayer(true);
it = players.begin();
}
saveMotdNum();
saveGameState();
g_dispatcher.addTask(
createTask(std::bind(&Game::shutdown, this)));
g_scheduler.stop();
g_databaseTasks.stop();
g_dispatcher.stop();
break;
}
case GAME_STATE_CLOSED: {
/* kick all players without the CanAlwaysLogin flag */
auto it = players.begin();
while (it != players.end()) {
if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) {
it->second->kickPlayer(true);
it = players.begin();
} else {
++it;
}
}
saveGameState();
break;
}
default:
break;
}
}
void Game::saveGameState()
{
if (gameState == GAME_STATE_NORMAL) {
setGameState(GAME_STATE_MAINTAIN);
}
std::cout << "Saving server..." << std::endl;
if (!saveAccountStorageValues()) {
std::cout << "[Error - Game::saveGameState] Failed to save account-level storage values." << std::endl;
}
for (const auto& it : players) {
it.second->loginPosition = it.second->getPosition();
IOLoginData::savePlayer(it.second);
}
Map::save();
g_databaseTasks.flush();
if (gameState == GAME_STATE_MAINTAIN) {
setGameState(GAME_STATE_NORMAL);
}
}
bool Game::loadMainMap(const std::string& filename)
{
Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE);
Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS);
return map.loadMap("data/world/" + filename + ".otbm", true);
}
void Game::loadMap(const std::string& path)
{
map.loadMap(path, false);
}
Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const
{
if (pos.x != 0xFFFF) {
return map.getTile(pos);
}
//container
if (pos.y & 0x40) {
uint8_t from_cid = pos.y & 0x0F;
return player->getContainerByID(from_cid);
}
//inventory
return player;
}
Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const
{
if (pos.x != 0xFFFF) {
Tile* tile = map.getTile(pos);
if (!tile) {
return nullptr;
}
Thing* thing;
switch (type) {
case STACKPOS_LOOK: {
return tile->getTopVisibleThing(player);
}
case STACKPOS_MOVE: {
Item* item = tile->getTopDownItem();
if (item && item->isMoveable()) {
thing = item;
} else {
thing = tile->getTopVisibleCreature(player);
}
break;
}
case STACKPOS_USEITEM: {
thing = tile->getUseItem(index);
break;
}
case STACKPOS_TOPDOWN_ITEM: {
thing = tile->getTopDownItem();
break;
}
case STACKPOS_USETARGET: {
thing = tile->getTopVisibleCreature(player);
if (!thing) {
thing = tile->getUseItem(index);
}
break;
}
default: {
thing = nullptr;
break;
}
}
if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//do extra checks here if the thing is accessible
if (thing && thing->getItem()) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
thing = nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
thing = nullptr;
}
}
}
}
return thing;
}
//container
if (pos.y & 0x40) {
uint8_t fromCid = pos.y & 0x0F;
Container* parentContainer = player->getContainerByID(fromCid);
if (!parentContainer) {
return nullptr;
}
if (parentContainer->getID() == ITEM_BROWSEFIELD) {
Tile* tile = parentContainer->getTile();
if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
if (tile->hasProperty(CONST_PROP_ISVERTICAL)) {
if (player->getPosition().x + 1 == tile->getPosition().x) {
return nullptr;
}
} else { // horizontal
if (player->getPosition().y + 1 == tile->getPosition().y) {
return nullptr;
}
}
}
}
uint8_t slot = pos.z;
return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot);
} else if (pos.y == 0 && pos.z == 0) {
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return nullptr;
}
int32_t subType;
if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) {
subType = reverseFluidMap[index];
} else {
subType = -1;
}
return findItemOfType(player, it.id, true, subType);
}
//inventory
slots_t slot = static_cast<slots_t>(pos.y);
if (slot == CONST_SLOT_STORE_INBOX) {
return player->getStoreInbox();
}
return player->getInventoryItem(slot);
}
void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos)
{
pos.x = 0;
pos.y = 0;
pos.z = 0;
stackpos = 0;
Cylinder* topParent = item->getTopParent();
if (topParent) {
if (Player* player = dynamic_cast<Player*>(topParent)) {
pos.x = 0xFFFF;
Container* container = dynamic_cast<Container*>(item->getParent());
if (container) {
pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container));
pos.z = container->getThingIndex(item);
stackpos = pos.z;
} else {
pos.y = player->getThingIndex(item);
stackpos = pos.y;
}
} else if (Tile* tile = topParent->getTile()) {
pos = tile->getPosition();
stackpos = tile->getThingIndex(item);
}
}
}
Creature* Game::getCreatureByID(uint32_t id)
{
if (id <= Player::playerAutoID) {
return getPlayerByID(id);
} else if (id <= Monster::monsterAutoID) {
return getMonsterByID(id);
} else if (id <= Npc::npcAutoID) {
return getNpcByID(id);
}
return nullptr;
}
Monster* Game::getMonsterByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = monsters.find(id);
if (it == monsters.end()) {
return nullptr;
}
return it->second;
}
Npc* Game::getNpcByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = npcs.find(id);
if (it == npcs.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByID(uint32_t id)
{
if (id == 0) {
return nullptr;
}
auto it = players.find(id);
if (it == players.end()) {
return nullptr;
}
return it->second;
}
Creature* Game::getCreatureByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const std::string& lowerCaseName = asLowerCaseString(s);
{
auto it = mappedPlayerNames.find(lowerCaseName);
if (it != mappedPlayerNames.end()) {
return it->second;
}
}
auto equalCreatureName = [&](const std::pair<uint32_t, Creature*>& it) {
auto name = it.second->getName();
return lowerCaseName.size() == name.size() && std::equal(lowerCaseName.begin(), lowerCaseName.end(), name.begin(), [](char a, char b) {
return a == std::tolower(b);
});
};
{
auto it = std::find_if(npcs.begin(), npcs.end(), equalCreatureName);
if (it != npcs.end()) {
return it->second;
}
}
{
auto it = std::find_if(monsters.begin(), monsters.end(), equalCreatureName);
if (it != monsters.end()) {
return it->second;
}
}
return nullptr;
}
Npc* Game::getNpcByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
const char* npcName = s.c_str();
for (const auto& it : npcs) {
if (strcasecmp(npcName, it.second->getName().c_str()) == 0) {
return it.second;
}
}
return nullptr;
}
Player* Game::getPlayerByName(const std::string& s)
{
if (s.empty()) {
return nullptr;
}
auto it = mappedPlayerNames.find(asLowerCaseString(s));
if (it == mappedPlayerNames.end()) {
return nullptr;
}
return it->second;
}
Player* Game::getPlayerByGUID(const uint32_t& guid)
{
if (guid == 0) {
return nullptr;
}
auto it = mappedPlayerGuids.find(guid);
if (it == mappedPlayerGuids.end()) {
return nullptr;
}
return it->second;
}
ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
{
size_t strlen = s.length();
if (strlen == 0 || strlen > 20) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
if (s.back() == '~') {
const std::string& query = asLowerCaseString(s.substr(0, strlen - 1));
std::string result;
ReturnValue ret = wildcardTree.findOne(query, result);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
player = getPlayerByName(result);
} else {
player = getPlayerByName(s);
}
if (!player) {
return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE;
}
return RETURNVALUE_NOERROR;
}
Player* Game::getPlayerByAccount(uint32_t acc)
{
for (const auto& it : players) {
if (it.second->getAccount() == acc) {
return it.second;
}
}
return nullptr;
}
bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (creature->getParent() != nullptr) {
return false;
}
if (!map.placeCreature(pos, creature, extendedPos, forced)) {
return false;
}
creature->incrementReferenceCounter();
creature->setID();
creature->addList();
return true;
}
bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/)
{
if (!internalPlaceCreature(creature, pos, extendedPos, forced)) {
return false;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true);
}
}
for (Creature* spectator : spectators) {
spectator->onCreatureAppear(creature, true);
}
creature->getParent()->postAddNotification(creature, nullptr, 0);
addCreatureCheck(creature);
creature->onPlacedCreature();
return true;
}
bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/)
{
if (creature->isRemoved()) {
return false;
}
Tile* tile = creature->getTile();
std::vector<int32_t> oldStackPosVector;
SpectatorVec spectators;
map.getSpectators(spectators, tile->getPosition(), true);
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getClientIndexOfCreature(player, creature) : -1);
}
}
tile->removeCreature(creature);
const Position& tilePosition = tile->getPosition();
//send to client
size_t i = 0;
for (Creature* spectator : spectators) {
if (Player* player = spectator->getPlayer()) {
player->sendRemoveTileCreature(creature, tilePosition, oldStackPosVector[i++]);
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onRemoveCreature(creature, isLogout);
}
creature->getParent()->postRemoveNotification(creature, nullptr, 0);
creature->removeList();
creature->setRemoved();
ReleaseCreature(creature);
removeCreatureCheck(creature);
for (Creature* summon : creature->summons) {
summon->setSkillLoss(false);
removeCreature(summon);
}
return true;
}
void Game::executeDeath(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && !creature->isRemoved()) {
creature->onDeath();
}
}
void Game::playerMoveThing(uint32_t playerId, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Creature* movingCreature = thing->getCreature()) {
Tile* tile = map.getTile(toPos);
if (!tile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) {
SchedulerTask* task = createSchedulerTask(1000,
std::bind(&Game::playerMoveCreatureByID, this, player->getID(),
movingCreature->getID(), movingCreature->getPosition(), tile->getPosition()));
player->setNextActionTask(task);
} else {
playerMoveCreature(player, movingCreature, movingCreature->getPosition(), tile);
}
} else if (thing->getItem()) {
Cylinder* toCylinder = internalGetCylinder(player, toPos);
if (!toCylinder) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder);
}
}
void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* movingCreature = getCreatureByID(movingCreatureId);
if (!movingCreature) {
return;
}
Tile* toTile = map.getTile(toPos);
if (!toTile) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile);
}
void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID,
this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextActionTask(task);
return;
}
if (movingCreature->isMovementBlocked()) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
player->setNextActionTask(nullptr);
if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) {
//need to walk to the creature first before moving it
std::vector<Direction> listDir;
if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this,
player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition()));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) ||
(movingCreature->isInGhostMode() && !player->isAccessPlayer())) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
//check throw distance
const Position& movingCreaturePos = movingCreature->getPosition();
const Position& toPos = toTile->getPosition();
if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (player != movingCreature) {
if (toTile->hasFlag(TILESTATE_BLOCKPATH)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
} else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
} else {
if (CreatureVector* tileCreatures = toTile->getCreatures()) {
for (Creature* tileCreature : *tileCreatures) {
if (!tileCreature->isInGhostMode()) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
Npc* movingNpc = movingCreature->getNpc();
if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) {
player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM);
return;
}
}
}
if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) {
return;
}
ReturnValue ret = internalMoveCreature(*movingCreature, *toTile);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
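//Computes the destination tile for a directional step; non-diagonal player movement may be shifted one floor up or down to follow stairs and height changes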
ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/)
{
creature->setLastPosition(creature->getPosition());
const Position& currentPos = creature->getPosition();
Position destPos = getNextPosition(direction, currentPos);
Player* player = creature->getPlayer();
bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0;
if (player && !diagonalMovement) {
//try to go up
if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) {
Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1);
if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) {
player->setDirection(direction);
destPos.z--;
}
}
}
}
//try to go down
if (currentPos.z != 7 && currentPos.z == destPos.z) {
Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z);
if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) {
tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1);
if (tmpTile && tmpTile->hasHeight(3)) {
flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE;
player->setDirection(direction);
destPos.z++;
}
}
}
}
Tile* toTile = map.getTile(destPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
return internalMoveCreature(*creature, *toTile, flags);
}
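//Moves the creature onto the destination tile and follows any forwarded destinations (e.g. floor-change tiles), limited to MAP_MAX_LAYERS hops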
ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/)
{
//check if we can move the creature to the destination
ReturnValue ret = toTile.queryAdd(0, creature, 1, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(creature, toTile);
if (creature.getParent() != &toTile) {
return RETURNVALUE_NOERROR;
}
int32_t index = 0;
Item* toItem = nullptr;
Tile* subCylinder = nullptr;
Tile* toCylinder = &toTile;
Tile* fromCylinder = nullptr;
uint32_t n = 0;
while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) {
map.moveCreature(creature, *subCylinder);
if (creature.getParent() != subCylinder) {
//could happen if a script moves the creature
fromCylinder = nullptr;
break;
}
fromCylinder = toCylinder;
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++n >= MAP_MAX_LAYERS) {
break;
}
}
if (fromCylinder) {
const Position& fromPosition = fromCylinder->getPosition();
const Position& toPosition = toCylinder->getPosition();
if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) {
Direction dir = getDirectionTo(fromPosition, toPosition);
if ((dir & DIRECTION_DIAGONAL_MASK) == 0) {
internalCreatureTurn(&creature, dir);
}
}
}
return RETURNVALUE_NOERROR;
}
void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr);
}
void Game::playerMoveItem(Player* player, const Position& fromPos,
uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder)
{
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextActionTask(task);
return;
}
player->setNextActionTask(nullptr);
if (item == nullptr) {
uint8_t fromIndex = 0;
if (fromPos.x == 0xFFFF) {
if (fromPos.y & 0x40) {
fromIndex = fromPos.z;
} else {
fromIndex = static_cast<uint8_t>(fromPos.y);
}
} else {
fromIndex = fromStackPos;
}
Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE);
if (!thing || !thing->getItem()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
item = thing->getItem();
}
if (item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* fromCylinder = internalGetCylinder(player, fromPos);
if (fromCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (toCylinder == nullptr) {
toCylinder = internalGetCylinder(player, toPos);
if (toCylinder == nullptr) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE);
return;
}
const Position& playerPos = player->getPosition();
const Position& mapFromPos = fromCylinder->getTile()->getPosition();
if (playerPos.z != mapFromPos.z) {
player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) {
//need to walk to the item first before moving it
std::vector<Direction> listDir;
if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), fromPos, spriteId, fromStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
const Tile* toCylinderTile = toCylinder->getTile();
const Position& mapToPos = toCylinderTile->getPosition();
//hangable item specific code
if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) {
//destination supports hangable objects, so the player needs to stand next to it first
bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL);
if (vertical) {
if (playerPos.x + 1 == mapToPos.x) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
} else { // horizontal
if (playerPos.y + 1 == mapToPos.y) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) {
Position walkPos = mapToPos;
if (vertical) {
walkPos.x++;
} else {
walkPos.y++;
}
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos)
&& !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) {
//need to pick up the item first
Item* moveItem = nullptr;
ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkPos, listDir, 0, 0, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this,
player->getID(), itemPos, spriteId, itemStackPos, toPos, count));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
}
if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) ||
(Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) {
player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH);
return;
}
if (!canThrowObjectTo(mapFromPos, mapToPos)) {
player->sendCancelMessage(RETURNVALUE_CANNOTTHROW);
return;
}
uint8_t toIndex = 0;
if (toPos.x == 0xFFFF) {
if (toPos.y & 0x40) {
toIndex = toPos.z;
} else {
toIndex = static_cast<uint8_t>(toPos.y);
}
}
ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
}
}
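//Core item-move routine: resolves the real destination cylinder, handles item exchange, stack merging, partial moves, trade safety checks and decay bookkeeping, and fires the move events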
ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index,
Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/, const Position* fromPos /*= nullptr*/, const Position* toPos/*= nullptr*/)
{
Player* actorPlayer = actor ? actor->getPlayer() : nullptr;
if (actorPlayer && fromPos && toPos) {
if (!g_events->eventPlayerOnMoveItem(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder)) {
return RETURNVALUE_NOTPOSSIBLE;
}
}
Tile* fromTile = fromCylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == fromCylinder) {
fromCylinder = fromTile;
}
}
Item* toItem = nullptr;
Cylinder* subCylinder;
int floorN = 0;
while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) {
toCylinder = subCylinder;
flags = 0;
//to prevent infinite loop
if (++floorN >= MAP_MAX_LAYERS) {
break;
}
}
//destination is the same as the source?
if (item == toItem) {
return RETURNVALUE_NOERROR; //silently ignore move
}
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor);
if (ret == RETURNVALUE_NEEDEXCHANGE) {
//check if we can add it to source cylinder
ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0);
if (ret == RETURNVALUE_NOERROR) {
//check how much we can move
uint32_t maxExchangeQueryCount = 0;
ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0);
if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) {
return retExchangeMaxCount;
}
if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags, actor) == RETURNVALUE_NOERROR) {
int32_t oldToItemIndex = toCylinder->getThingIndex(toItem);
toCylinder->removeThing(toItem, toItem->getItemCount());
fromCylinder->addThing(toItem);
if (oldToItemIndex != -1) {
toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex);
}
int32_t newToItemIndex = fromCylinder->getThingIndex(toItem);
if (newToItemIndex != -1) {
fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex);
}
ret = toCylinder->queryAdd(index, *item, count, flags);
toItem = nullptr;
}
}
}
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
//check how much we can move
uint32_t maxQueryCount = 0;
ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags);
if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) {
return retMaxCount;
}
uint32_t m;
if (item->isStackable()) {
m = std::min<uint32_t>(count, maxQueryCount);
} else {
m = maxQueryCount;
}
Item* moveItem = item;
//check if we can remove this item
ret = fromCylinder->queryRemove(*item, m, flags, actor);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (tradeItem) {
if (toCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
Cylinder* tmpCylinder = toCylinder->getParent();
while (tmpCylinder) {
if (tmpCylinder->getItem() == tradeItem) {
return RETURNVALUE_NOTENOUGHROOM;
}
tmpCylinder = tmpCylinder->getParent();
}
}
//remove the item
int32_t itemIndex = fromCylinder->getThingIndex(item);
Item* updateItem = nullptr;
fromCylinder->removeThing(item, m);
//update item(s)
if (item->isStackable()) {
uint32_t n;
if (item->equals(toItem)) {
n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
updateItem = toItem;
} else {
n = 0;
}
int32_t newCount = m - n;
if (newCount > 0) {
moveItem = item->clone();
moveItem->setItemCount(newCount);
} else {
moveItem = nullptr;
}
if (item->isRemoved()) {
ReleaseItem(item);
}
}
//add item
if (moveItem /*m - n > 0*/) {
toCylinder->addThing(index, moveItem);
}
if (itemIndex != -1) {
fromCylinder->postRemoveNotification(item, toCylinder, itemIndex);
}
if (moveItem) {
int32_t moveItemIndex = toCylinder->getThingIndex(moveItem);
if (moveItemIndex != -1) {
toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex);
}
}
if (updateItem) {
int32_t updateItemIndex = toCylinder->getThingIndex(updateItem);
if (updateItemIndex != -1) {
toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex);
}
}
if (_moveItem) {
if (moveItem) {
*_moveItem = moveItem;
} else {
*_moveItem = item;
}
}
//we could not move all, inform the player
if (item->isStackable() && maxQueryCount < count) {
return retMaxCount;
}
if (moveItem && moveItem->getDuration() > 0) {
if (moveItem->getDecaying() != DECAYING_TRUE) {
moveItem->incrementReferenceCounter();
moveItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(moveItem);
}
}
if (actorPlayer && fromPos && toPos) {
g_events->eventPlayerOnItemMoved(actorPlayer, item, count, *fromPos, *toPos, fromCylinder, toCylinder);
}
return ret;
}
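//Convenience overload that discards the remainder count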
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index /*= INDEX_WHEREEVER*/,
uint32_t flags/* = 0*/, bool test/* = false*/)
{
uint32_t remainderCount = 0;
return internalAddItem(toCylinder, item, index, flags, test, remainderCount);
}
ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index,
uint32_t flags, bool test, uint32_t& remainderCount)
{
if (toCylinder == nullptr || item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Cylinder* destCylinder = toCylinder;
Item* toItem = nullptr;
toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags);
//check if we can add this item
ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
/*
Check if we can add the whole amount; we do this by checking against the original cylinder,
since queryDestination can return a cylinder that might only hold part of the full amount.
*/
uint32_t maxQueryCount = 0;
ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (test) {
return RETURNVALUE_NOERROR;
}
if (item->isStackable() && item->equals(toItem)) {
uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount);
uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m);
toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n);
int32_t count = m - n;
if (count > 0) {
if (item->getItemCount() != count) {
Item* remainderItem = item->clone();
remainderItem->setItemCount(count);
if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
remainderCount = count;
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
} else {
//fully merged with toItem, item will be destroyed
item->onRemoved();
ReleaseItem(item);
int32_t itemIndex = toCylinder->getThingIndex(toItem);
if (itemIndex != -1) {
toCylinder->postAddNotification(toItem, nullptr, itemIndex);
}
}
} else {
toCylinder->addThing(index, item);
int32_t itemIndex = toCylinder->getThingIndex(item);
if (itemIndex != -1) {
toCylinder->postAddNotification(item, nullptr, itemIndex);
}
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/)
{
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
if (count == -1) {
count = item->getItemCount();
}
//check if we can remove this item
ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
if (!item->canRemove()) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!test) {
int32_t index = cylinder->getThingIndex(item);
//remove the item
cylinder->removeThing(item, count);
if (item->isRemoved()) {
item->onRemoved();
if (item->canDecay()) {
decayItems->remove(item);
}
ReleaseItem(item);
}
cylinder->postRemoveNotification(item, nullptr, index);
}
return RETURNVALUE_NOERROR;
}
ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/)
{
uint32_t remainderCount = 0;
ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0, false, remainderCount);
if (remainderCount != 0) {
Item* remainderItem = Item::CreateItem(item->getID(), remainderCount);
ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
if (remaindRet != RETURNVALUE_NOERROR) {
ReleaseItem(remainderItem);
}
}
if (ret != RETURNVALUE_NOERROR && dropOnMap) {
ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
return ret;
}
Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId,
bool depthSearch /*= true*/, int32_t subType /*= -1*/) const
{
if (cylinder == nullptr) {
return nullptr;
}
std::vector<Container*> containers;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
if (depthSearch) {
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) {
return item;
}
Container* subContainer = item->getContainer();
if (subContainer) {
containers.push_back(subContainer);
}
}
}
return nullptr;
}
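//Gathers every coin in the cylinder (recursing into containers); if the total is sufficient, removes the cheapest coin stacks first and returns any change through addMoney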
bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (cylinder == nullptr) {
return false;
}
if (money == 0) {
return true;
}
std::vector<Container*> containers;
std::multimap<uint32_t, Item*> moneyMap;
uint64_t moneyCount = 0;
for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) {
Thing* thing = cylinder->getThing(i);
if (!thing) {
continue;
}
Item* item = thing->getItem();
if (!item) {
continue;
}
Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
size_t i = 0;
while (i < containers.size()) {
Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
const uint32_t worth = item->getWorth();
if (worth != 0) {
moneyCount += worth;
moneyMap.emplace(worth, item);
}
}
}
}
if (moneyCount < money) {
return false;
}
for (const auto& moneyEntry : moneyMap) {
Item* item = moneyEntry.second;
if (moneyEntry.first < money) {
internalRemoveItem(item);
money -= moneyEntry.first;
} else if (moneyEntry.first > money) {
const uint32_t worth = moneyEntry.first / item->getItemCount();
const uint32_t removeCount = std::ceil(money / static_cast<double>(worth));
addMoney(cylinder, (worth * removeCount) - money, flags);
internalRemoveItem(item, removeCount);
break;
} else {
internalRemoveItem(item);
break;
}
}
return true;
}
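//Splits the amount into crystal, platinum and gold coins and adds them to the cylinder, dropping them on the tile if it cannot hold them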
void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/)
{
if (money == 0) {
return;
}
uint32_t crystalCoins = money / 10000;
money -= crystalCoins * 10000;
while (crystalCoins > 0) {
const uint16_t count = std::min<uint32_t>(100, crystalCoins);
Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
crystalCoins -= count;
}
uint16_t platinumCoins = money / 100;
if (platinumCoins != 0) {
Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
money -= platinumCoins * 100;
}
if (money != 0) {
Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money);
ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags);
if (ret != RETURNVALUE_NOERROR) {
internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT);
}
}
}
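//Changes an item's id/subtype in place when the item types are compatible; otherwise replaces the item while keeping its position, handling decay and removal of spent items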
Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/)
{
if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite
return item;
}
Cylinder* cylinder = item->getParent();
if (cylinder == nullptr) {
return nullptr;
}
Tile* fromTile = cylinder->getTile();
if (fromTile) {
auto it = browseFields.find(fromTile);
if (it != browseFields.end() && it->second == cylinder) {
cylinder = fromTile;
}
}
int32_t itemIndex = cylinder->getThingIndex(item);
if (itemIndex == -1) {
return item;
}
if (!item->canTransform()) {
return item;
}
const ItemType& newType = Item::items[newId];
if (newType.id == 0) {
return item;
}
const ItemType& curType = Item::items[item->getID()];
if (curType.alwaysOnTop != newType.alwaysOnTop) {
//This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa)
//Remove the old, and add the new
cylinder->removeThing(item, item->getItemCount());
cylinder->postRemoveNotification(item, cylinder, itemIndex);
item->setID(newId);
if (newCount != -1) {
item->setSubType(newCount);
}
cylinder->addThing(item);
Cylinder* newParent = item->getParent();
if (newParent == nullptr) {
ReleaseItem(item);
return nullptr;
}
newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item));
return item;
}
if (curType.type == newType.type) {
//Both items have the same type so we can safely change id/subtype
if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) {
if (item->isStackable()) {
internalRemoveItem(item);
return nullptr;
} else {
int32_t newItemId = newId;
if (curType.id == newType.id) {
newItemId = item->getDecayTo();
}
if (newItemId < 0) {
internalRemoveItem(item);
return nullptr;
} else if (newItemId != newId) {
//Replacing the old item with the new while maintaining the old position
Item* newItem = Item::CreateItem(newItemId, 1);
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
return newItem;
} else {
return transformItem(item, newItemId);
}
}
} else {
cylinder->postRemoveNotification(item, cylinder, itemIndex);
uint16_t itemId = item->getID();
int32_t count = item->getSubType();
if (curType.id != newType.id) {
if (newType.group != curType.group) {
item->setDefaultSubtype();
}
itemId = newId;
}
if (newCount != -1 && newType.hasSubType()) {
count = newCount;
}
cylinder->updateThing(item, itemId, count);
cylinder->postAddNotification(item, cylinder, itemIndex);
return item;
}
}
//Replacing the old item with the new while maintaining the old position
Item* newItem;
if (newCount == -1) {
newItem = Item::CreateItem(newId);
} else {
newItem = Item::CreateItem(newId, newCount);
}
if (newItem == nullptr) {
return nullptr;
}
cylinder->replaceThing(itemIndex, newItem);
cylinder->postAddNotification(newItem, cylinder, itemIndex);
item->setParent(nullptr);
cylinder->postRemoveNotification(item, cylinder, itemIndex);
ReleaseItem(item);
if (newItem->getDuration() > 0) {
if (newItem->getDecaying() != DECAYING_TRUE) {
newItem->incrementReferenceCounter();
newItem->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(newItem);
}
}
return newItem;
}
ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/)
{
if (newPos == thing->getPosition()) {
return RETURNVALUE_NOERROR;
} else if (thing->isRemoved()) {
return RETURNVALUE_NOTPOSSIBLE;
}
Tile* toTile = map.getTile(newPos);
if (!toTile) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (Creature* creature = thing->getCreature()) {
ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT);
if (ret != RETURNVALUE_NOERROR) {
return ret;
}
map.moveCreature(*creature, *toTile, !pushMove);
return RETURNVALUE_NOERROR;
} else if (Item* item = thing->getItem()) {
return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags);
}
return RETURNVALUE_NOTPOSSIBLE;
}
Item* searchForItem(Container* container, uint16_t itemId)
{
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
return *it;
}
}
return nullptr;
}
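//Maps an item's slot flags to an inventory slot; shields default to the right hand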
slots_t getSlotType(const ItemType& it)
{
slots_t slot = CONST_SLOT_RIGHT;
if (it.weaponType != WeaponType_t::WEAPON_SHIELD) {
int32_t slotPosition = it.slotPosition;
if (slotPosition & SLOTP_HEAD) {
slot = CONST_SLOT_HEAD;
} else if (slotPosition & SLOTP_NECKLACE) {
slot = CONST_SLOT_NECKLACE;
} else if (slotPosition & SLOTP_ARMOR) {
slot = CONST_SLOT_ARMOR;
} else if (slotPosition & SLOTP_LEGS) {
slot = CONST_SLOT_LEGS;
} else if (slotPosition & SLOTP_FEET) {
slot = CONST_SLOT_FEET;
} else if (slotPosition & SLOTP_RING) {
slot = CONST_SLOT_RING;
} else if (slotPosition & SLOTP_AMMO) {
slot = CONST_SLOT_AMMO;
} else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) {
slot = CONST_SLOT_LEFT;
}
}
return slot;
}
//Implementation of player-invoked events
void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK);
if (!item) {
return;
}
Container* backpack = item->getContainer();
if (!backpack) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
slots_t slot = getSlotType(it);
Item* slotItem = player->getInventoryItem(slot);
Item* equipItem = searchForItem(backpack, it.id);
if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) {
internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr);
} else if (equipItem) {
internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr);
}
}
void Game::playerMove(uint32_t playerId, Direction direction)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->isMovementBlocked()) {
player->sendCancelWalk();
return;
}
player->resetIdleTime();
player->setNextWalkActionTask(nullptr);
player->startAutoWalk(direction);
}
bool Game::playerBroadcastMessage(Player* player, const std::string& text) const
{
if (!player->hasFlag(PlayerFlag_CanBroadcast)) {
return false;
}
std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text);
}
return true;
}
void Game::playerCreatePrivateChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player || !player->isPremium()) {
return;
}
ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE);
if (!channel || !channel->addUser(*player)) {
return;
}
player->sendCreatePrivateChannel(channel->getId(), channel->getName());
}
void Game::playerChannelInvite(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* invitePlayer = getPlayerByName(name);
if (!invitePlayer) {
return;
}
if (player == invitePlayer) {
return;
}
channel->invitePlayer(*player, *invitePlayer);
}
void Game::playerChannelExclude(uint32_t playerId, const std::string& name)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
PrivateChatChannel* channel = g_chat->getPrivateChannel(*player);
if (!channel) {
return;
}
Player* excludePlayer = getPlayerByName(name);
if (!excludePlayer) {
return;
}
if (player == excludePlayer) {
return;
}
channel->excludePlayer(*player, *excludePlayer);
}
void Game::playerRequestChannels(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendChannelsDialog();
}
void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
ChatChannel* channel = g_chat->addUserToChannel(*player, channelId);
if (!channel) {
return;
}
const InvitedMap* invitedUsers = channel->getInvitedUsers();
const UsersMap* users;
if (!channel->isPublicChannel()) {
users = &channel->getUsers();
} else {
users = nullptr;
}
player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers);
}
void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_chat->removeUserFromChannel(*player, channelId);
}
void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!IOLoginData::formatPlayerName(receiver)) {
player->sendCancelMessage("A player with this name does not exist.");
return;
}
if (player->getName() == receiver) {
player->sendCancelMessage("You cannot set up a private message channel with yourself.");
return;
}
player->sendOpenPrivateChannel(receiver);
}
void Game::playerCloseNpcChannel(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (Npc* npc = spectator->getNpc()) {
npc->onPlayerCloseChannel(player);
}
}
}
void Game::playerReceivePing(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->receivePing();
}
void Game::playerReceivePingBack(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendPingBack();
}
void Game::playerAutoWalk(uint32_t playerId, const std::vector<Direction>& listDir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
player->setNextWalkTask(nullptr);
player->startAutoWalk(listDir);
}
void Game::playerStopAutoWalk(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->stopWalk();
}
void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId,
const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) &&
!Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this,
playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this,
playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey);
}
void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint8_t index, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0);
if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
ReturnValue ret = g_actions->canUse(player, pos);
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextWalkActionTask(task);
return;
}
ret = RETURNVALUE_THEREISNOWAY;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this,
playerId, pos, stackPos, index, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItem(player, pos, index, item, isHotkey);
}
void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) {
return;
}
bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0);
if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) {
if (creature->getPlayer() || isHotkey) {
player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT);
return;
}
}
Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* item = thing->getItem();
if (!item || !item->isUseable() || item->getClientID() != spriteId) {
player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT);
return;
}
Position toPos = creature->getPosition();
Position walkToPos = fromPos;
ReturnValue ret = g_actions->canUse(player, fromPos);
if (ret == RETURNVALUE_NOERROR) {
ret = g_actions->canUse(player, toPos, item);
if (ret == RETURNVALUE_TOOFARAWAY) {
walkToPos = toPos;
}
}
if (ret != RETURNVALUE_NOERROR) {
if (ret == RETURNVALUE_TOOFARAWAY) {
Position itemPos = fromPos;
uint8_t itemStackPos = fromStackPos;
if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) {
Item* moveItem = nullptr;
ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem, 0, player, nullptr, &fromPos, &toPos);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
return;
}
//changing the position since it's now in the inventory of the player
internalGetPosition(moveItem, itemPos, itemStackPos);
}
std::vector<Direction> listDir;
if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this,
playerId, itemPos, itemStackPos, creatureId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
player->sendCancelMessage(ret);
return;
}
if (!player->canDoAction()) {
uint32_t delay = player->getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this,
playerId, fromPos, fromStackPos, creatureId, spriteId));
player->setNextActionTask(task);
return;
}
player->resetIdleTime();
player->setNextActionTask(nullptr);
g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature);
}
void Game::playerCloseContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeContainer(cid);
player->sendCloseContainer(cid);
}
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
if (!parentContainer) {
Tile* tile = container->getTile();
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, tile->getPosition())) {
return;
}
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
parentContainer = new Container(tile);
parentContainer->incrementReferenceCounter();
browseFields[tile] = parentContainer;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
parentContainer = it->second;
}
}
player->addContainer(cid, parentContainer);
player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}
void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(cid);
if (!container) {
return;
}
player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}
void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
playerId, pos, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
uint16_t newId = Item::items[item->getID()].rotateTo;
if (newId != 0) {
transformItem(item, newId);
}
}
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint16_t maxTextLength = 0;
uint32_t internalWindowTextId = 0;
Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
return;
}
if (!writeItem || writeItem->isRemoved()) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Cylinder* topParent = writeItem->getTopParent();
Player* owner = dynamic_cast<Player*>(topParent);
if (owner && owner != player) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
player->setWriteItem(nullptr);
return;
}
}
if (!text.empty()) {
if (writeItem->getText() != text) {
writeItem->setText(text);
writeItem->setWriter(player->getName());
writeItem->setDate(time(nullptr));
}
} else {
writeItem->resetText();
writeItem->resetWriter();
writeItem->resetDate();
}
uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
if (newId != 0) {
transformItem(writeItem, newId);
}
player->setWriteItem(nullptr);
}
void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Position& playerPos = player->getPosition();
if (playerPos.z != pos.z) {
player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(playerPos, pos)) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(
&Game::playerBrowseField, this, playerId, pos
));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Tile* tile = map.getTile(pos);
if (!tile) {
return;
}
if (!g_events->eventPlayerOnBrowseField(player, pos)) {
return;
}
Container* container;
auto it = browseFields.find(tile);
if (it == browseFields.end()) {
container = new Container(tile);
container->incrementReferenceCounter();
browseFields[tile] = container;
g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
} else {
container = it->second;
}
uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
Container* openContainer = player->getContainerByID(dummyContainerId);
if (openContainer) {
player->onCloseContainer(openContainer);
player->closeContainer(dummyContainerId);
} else {
player->addContainer(dummyContainerId, container);
player->sendContainer(dummyContainerId, container, false, 0);
}
}
void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Container* container = player->getContainerByID(containerId);
if (!container || !container->hasPagination()) {
return;
}
if ((index % container->capacity()) != 0 || index >= container->size()) {
return;
}
player->setContainerIndex(containerId, index);
player->sendContainer(containerId, container, container->hasParent(), index);
}
void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
uint32_t internalWindowTextId;
uint32_t internalListId;
House* house = player->getEditHouse(internalWindowTextId, internalListId);
if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) {
house->setAccessList(internalListId, text);
}
player->setEditHouse(nullptr);
}
void Game::playerWrapItem(uint32_t playerId, const Position& position, uint8_t stackPos, const uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, position, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!thing) {
return;
}
Item* item = thing->getItem();
if (!item || item->getClientID() != spriteId || !item->hasAttribute(ITEM_ATTRIBUTE_WRAPID) || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (position.x != 0xFFFF && !Position::areInRange<1, 1, 0>(position, player->getPosition())) {
std::vector<Direction> listDir;
if (player->getPathTo(position, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerWrapItem, this,
playerId, position, stackPos, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
g_events->eventPlayerOnWrapItem(player, item);
}
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos,
uint32_t tradePlayerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = getPlayerByID(tradePlayerId);
if (!tradePartner || tradePartner == player) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
return;
}
if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
player->sendTextMessage(MESSAGE_INFO_DESCR, fmt::format("{:s} tells you to move closer.", tradePartner->getName()));
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
if (!tradeThing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Item* tradeItem = tradeThing->getItem();
if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
if (g_config.getBoolean(ConfigManager::ONLY_INVITED_CAN_MOVE_HOUSE_ITEMS)) {
if (HouseTile* houseTile = dynamic_cast<HouseTile*>(tradeItem->getTile())) {
House* house = houseTile->getHouse();
if (house && !house->isInvited(player)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
}
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
if (playerPosition.z != tradeItemPosition.z) {
player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
return;
}
if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
std::vector<Direction> listDir;
if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk,
this, player->getID(), std::move(listDir))));
SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
playerId, pos, stackPos, tradePlayerId, spriteId));
player->setNextWalkActionTask(task);
} else {
player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
}
return;
}
Container* tradeItemContainer = tradeItem->getContainer();
if (tradeItemContainer) {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
if (tradeItemContainer->isHoldingItem(item)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
} else {
for (const auto& it : tradeItems) {
Item* item = it.first;
if (tradeItem == item) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
return;
}
}
}
Container* tradeContainer = tradeItem->getContainer();
if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items.");
return;
}
if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
return;
}
internalStartTrade(player, tradePartner, tradeItem);
}
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
return false;
} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
return false;
}
player->tradePartner = tradePartner;
player->tradeItem = tradeItem;
player->tradeState = TRADE_INITIATED;
tradeItem->incrementReferenceCounter();
tradeItems[tradeItem] = player->getID();
player->sendTradeItemRequest(player->getName(), tradeItem, true);
if (tradePartner->tradeState == TRADE_NONE) {
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, fmt::format("{:s} wants to trade with you.", player->getName()));
tradePartner->tradeState = TRADE_ACKNOWLEDGE;
tradePartner->tradePartner = player;
} else {
Item* counterOfferItem = tradePartner->tradeItem;
player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
}
return true;
}
void Game::playerAcceptTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
return;
}
player->setTradeState(TRADE_ACCEPT);
if (tradePartner->getTradeState() == TRADE_ACCEPT) {
Item* playerTradeItem = player->tradeItem;
Item* partnerTradeItem = tradePartner->tradeItem;
if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, playerTradeItem, partnerTradeItem)) {
internalCloseTrade(player);
return;
}
player->setTradeState(TRADE_TRANSFER);
tradePartner->setTradeState(TRADE_TRANSFER);
auto it = tradeItems.find(playerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
it = tradeItems.find(partnerTradeItem);
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
bool isSuccess = false;
ReturnValue tradePartnerRet = RETURNVALUE_NOERROR;
ReturnValue playerRet = RETURNVALUE_NOERROR;
// if a player is trading away their equipped backpack, the counter-offer cannot
// go into it, so check that the equipment slot the incoming item needs is free
if (tradePartner->getInventoryItem(CONST_SLOT_BACKPACK) == partnerTradeItem) {
tradePartnerRet = (tradePartner->getInventoryItem(getSlotType(Item::items[playerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
if (player->getInventoryItem(CONST_SLOT_BACKPACK) == playerTradeItem) {
playerRet = (player->getInventoryItem(getSlotType(Item::items[partnerTradeItem->getID()])) ? RETURNVALUE_NOTENOUGHROOM : RETURNVALUE_NOERROR);
}
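// Dry-run both transfers first (internalAddItem/internalRemoveItem with the test
// flag set); only if every check passes are the items actually moved.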
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalAddItem(tradePartner, playerTradeItem, INDEX_WHEREEVER, 0, true);
playerRet = internalAddItem(player, partnerTradeItem, INDEX_WHEREEVER, 0, true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
playerRet = internalRemoveItem(playerTradeItem, playerTradeItem->getItemCount(), true);
tradePartnerRet = internalRemoveItem(partnerTradeItem, partnerTradeItem->getItemCount(), true);
if (tradePartnerRet == RETURNVALUE_NOERROR && playerRet == RETURNVALUE_NOERROR) {
tradePartnerRet = internalMoveItem(playerTradeItem->getParent(), tradePartner, INDEX_WHEREEVER, playerTradeItem, playerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK, nullptr, partnerTradeItem);
if (tradePartnerRet == RETURNVALUE_NOERROR) {
internalMoveItem(partnerTradeItem->getParent(), player, INDEX_WHEREEVER, partnerTradeItem, partnerTradeItem->getItemCount(), nullptr, FLAG_IGNOREAUTOSTACK);
playerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
partnerTradeItem->onTradeEvent(ON_TRADE_TRANSFER, player);
isSuccess = true;
}
}
}
}
if (!isSuccess) {
std::string errorDescription;
if (tradePartner->tradeItem) {
errorDescription = getTradeErrorDescription(tradePartnerRet, playerTradeItem);
tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
}
if (player->tradeItem) {
errorDescription = getTradeErrorDescription(playerRet, partnerTradeItem);
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
}
}
g_events->eventPlayerOnTradeCompleted(player, tradePartner, playerTradeItem, partnerTradeItem, isSuccess);
player->setTradeState(TRADE_NONE);
player->tradeItem = nullptr;
player->tradePartner = nullptr;
player->sendTradeClose();
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradeItem = nullptr;
tradePartner->tradePartner = nullptr;
tradePartner->sendTradeClose();
}
}
std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
if (item) {
if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
return fmt::format("You do not have enough capacity to carry {:s}.\n {:s}", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object", item->getWeightDescription());
} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
return fmt::format("You do not have enough room to carry {:s}.", item->isStackable() && item->getItemCount() > 1 ? "these objects" : "this object");
}
}
return "Trade could not be completed.";
}
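// index 0 refers to the traded item itself; higher indices address items inside
// nested containers, which are walked container by container below.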
void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* tradePartner = player->tradePartner;
if (!tradePartner) {
return;
}
Item* tradeItem;
if (lookAtCounterOffer) {
tradeItem = tradePartner->getTradeItem();
} else {
tradeItem = player->getTradeItem();
}
if (!tradeItem) {
return;
}
const Position& playerPosition = player->getPosition();
const Position& tradeItemPosition = tradeItem->getPosition();
int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
Position::getDistanceY(playerPosition, tradeItemPosition));
if (index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
return;
}
Container* tradeContainer = tradeItem->getContainer();
if (!tradeContainer) {
return;
}
std::vector<const Container*> containers {tradeContainer};
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (Item* item : container->getItemList()) {
Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
}
if (--index == 0) {
g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
return;
}
}
}
}
void Game::playerCloseTrade(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
internalCloseTrade(player);
}
void Game::internalCloseTrade(Player* player)
{
Player* tradePartner = player->tradePartner;
if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
return;
}
if (player->getTradeItem()) {
auto it = tradeItems.find(player->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
player->tradeItem = nullptr;
}
player->setTradeState(TRADE_NONE);
player->tradePartner = nullptr;
player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
player->sendTradeClose();
if (tradePartner) {
if (tradePartner->getTradeItem()) {
auto it = tradeItems.find(tradePartner->getTradeItem());
if (it != tradeItems.end()) {
ReleaseItem(it->first);
tradeItems.erase(it);
}
tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
tradePartner->tradeItem = nullptr;
}
tradePartner->setTradeState(TRADE_NONE);
tradePartner->tradePartner = nullptr;
tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
tradePartner->sendTradeClose();
}
}
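// Shop purchase: validates the amount, resolves the client sprite id to a server
// item id (mapping fluid subtypes where needed) and hands the order to the NPC
// that owns the shop window.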
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}
void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
if (amount == 0 || amount > 100) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
uint8_t subType;
if (it.isSplash() || it.isFluidContainer()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}
void Game::playerCloseShop(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->closeShopWindow();
}
void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
int32_t onBuy, onSell;
Npc* merchant = player->getShopOwner(onBuy, onSell);
if (!merchant) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
int32_t subType;
if (it.isFluidContainer() || it.isSplash()) {
subType = clientFluidToServer(count);
} else {
subType = count;
}
if (!player->hasShopItemForSale(it.id, subType)) {
return;
}
const std::string& description = Item::getDescription(it, 1, nullptr, subType);
g_events->eventPlayerOnLookInShop(player, &it, subType, description);
}
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
if (!thing) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position thingPos = thing->getPosition();
if (!player->canSee(thingPos)) {
player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return;
}
Position playerPos = player->getPosition();
int32_t lookDistance;
if (thing != player) {
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
if (playerPos.z != thingPos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}
void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
if (!player->canSeeCreature(creature)) {
return;
}
const Position& creaturePos = creature->getPosition();
if (!player->canSee(creaturePos)) {
return;
}
int32_t lookDistance;
if (creature != player) {
const Position& playerPos = player->getPosition();
lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
if (playerPos.z != creaturePos.z) {
lookDistance += 15;
}
} else {
lookDistance = -1;
}
g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}
void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
playerSetAttackedCreature(playerId, 0);
playerFollowCreature(playerId, 0);
player->stopWalk();
}
void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (player->getAttackedCreature() && creatureId == 0) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
Creature* attackCreature = getCreatureByID(creatureId);
if (!attackCreature) {
player->setAttackedCreature(nullptr);
player->sendCancelTarget();
return;
}
ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
if (ret != RETURNVALUE_NOERROR) {
player->sendCancelMessage(ret);
player->sendCancelTarget();
player->setAttackedCreature(nullptr);
return;
}
player->setAttackedCreature(attackCreature);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}
void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setAttackedCreature(nullptr);
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
player->setFollowCreature(getCreatureByID(creatureId));
}
void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setFightMode(fightMode);
player->setChaseMode(chaseMode);
player->setSecureMode(secureMode);
}
void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
if (name.length() > 20) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* vipPlayer = getPlayerByName(name);
if (!vipPlayer) {
uint32_t guid;
bool specialVip;
std::string formattedName = name;
if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
return;
}
if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
} else {
if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
return;
}
if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
} else {
player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
}
}
}
void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->removeVIP(guid);
}
void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->editVIP(guid, description, icon, notify);
}
void Game::playerTurn(uint32_t playerId, Direction dir)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!g_events->eventPlayerOnTurn(player, dir)) {
return;
}
player->resetIdleTime();
internalCreatureTurn(player, dir);
}
void Game::playerRequestOutfit(uint32_t playerId)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendOutfitWindow();
}
void Game::playerToggleMount(uint32_t playerId, bool mount)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->toggleMount(mount);
}
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType);
if (!playerOutfit) {
outfit.lookMount = 0;
}
if (outfit.lookMount != 0) {
Mount* mount = mounts.getMountByClientID(outfit.lookMount);
if (!mount) {
return;
}
if (!player->hasMount(mount)) {
return;
}
if (player->isMounted()) {
Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
if (prevMount) {
changeSpeed(player, mount->speed - prevMount->speed);
}
player->setCurrentMount(mount->id);
} else {
player->setCurrentMount(mount->id);
outfit.lookMount = 0;
}
} else if (player->isMounted()) {
player->dismount();
}
if (player->canWear(outfit.lookType, outfit.lookAddons)) {
player->defaultOutfit = outfit;
if (player->hasCondition(CONDITION_OUTFIT)) {
return;
}
internalCreatureChangeOutfit(player, outfit);
}
}
void Game::playerShowQuestLog(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->sendQuestLog();
}
void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Quest* quest = quests.getQuestByID(questId);
if (!quest) {
return;
}
player->sendQuestLine(quest);
}
void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
const std::string& receiver, const std::string& text)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->resetIdleTime();
if (playerSaySpell(player, type, text)) {
return;
}
if (type == TALKTYPE_PRIVATE_PN) {
playerSpeakToNpc(player, text);
return;
}
uint32_t muteTime = player->isMuted();
if (muteTime > 0) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You are still muted for {:d} seconds.", muteTime));
return;
}
if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
return;
}
player->removeMessageBuffer();
switch (type) {
case TALKTYPE_SAY:
internalCreatureSay(player, TALKTYPE_SAY, text, false);
break;
case TALKTYPE_WHISPER:
playerWhisper(player, text);
break;
case TALKTYPE_YELL:
playerYell(player, text);
break;
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
playerSpeakTo(player, type, receiver, text);
break;
case TALKTYPE_CHANNEL_O:
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
g_chat->talkToChannel(*player, type, text, channelId);
break;
case TALKTYPE_BROADCAST:
playerBroadcastMessage(player, text);
break;
default:
break;
}
}
bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
std::string words = text;
TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
if (result == TALKACTION_BREAK) {
return true;
}
result = g_spells->playerSaySpell(player, words);
if (result == TALKACTION_BREAK) {
if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
return internalCreatureSay(player, TALKTYPE_SAY, words, false);
} else {
return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
}
} else if (result == TALKACTION_FAILED) {
return true;
}
return false;
}
void Game::playerWhisper(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
//send to client
for (Creature* spectator : spectators) {
if (Player* spectatorPlayer = spectator->getPlayer()) {
if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
} else {
spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
}
}
bool Game::playerYell(Player* player, const std::string& text)
{
if (player->hasCondition(CONDITION_YELLTICKS)) {
player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
uint32_t minimumLevel = g_config.getNumber(ConfigManager::YELL_MINIMUM_LEVEL);
if (player->getLevel() < minimumLevel) {
if (g_config.getBoolean(ConfigManager::YELL_ALLOW_PREMIUM)) {
if (player->isPremium()) {
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d} or have a premium account.", minimumLevel));
}
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("You may not yell unless you have reached level {:d}.", minimumLevel));
}
return false;
}
if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
player->addCondition(condition);
}
internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
return true;
}
bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver,
const std::string& text)
{
Player* toPlayer = getPlayerByName(receiver);
if (!toPlayer) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
return false;
}
if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
type = TALKTYPE_PRIVATE_RED_FROM;
} else {
type = TALKTYPE_PRIVATE_FROM;
}
toPlayer->sendPrivateMessage(player, type, text);
toPlayer->onCreatureSay(player, type, text);
if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
} else {
player->sendTextMessage(MESSAGE_STATUS_SMALL, fmt::format("Message sent to {:s}.", toPlayer->getName()));
}
return true;
}
void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition());
for (Creature* spectator : spectators) {
if (spectator->getNpc()) {
spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
}
}
}
//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}
bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
return map.isSightClear(fromPos, toPos, floorCheck);
}
bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
if (creature->getDirection() == dir) {
return false;
}
creature->setDirection(dir);
//send to client
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureTurn(creature);
}
return true;
}
bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
bool ghostMode, SpectatorVec* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
if (text.empty()) {
return false;
}
if (!pos) {
pos = &creature->getPosition();
}
SpectatorVec spectators;
if (!spectatorsPtr || spectatorsPtr->empty()) {
// Use the caller-supplied spectator list when one is provided and non-empty;
// otherwise gather spectators here (yells and monster yells use a wider range).
if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
map.getSpectators(spectators, *pos, false, false,
Map::maxClientViewportX, Map::maxClientViewportX,
Map::maxClientViewportY, Map::maxClientViewportY);
} else {
map.getSpectators(spectators, *pos, true, false, 18, 18, 14, 14);
}
} else {
spectators = (*spectatorsPtr);
}
//send to client
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
tmpPlayer->sendCreatureSay(creature, type, text, pos);
}
}
}
//event method
for (Creature* spectator : spectators) {
spectator->onCreatureSay(creature, type, text);
if (creature != spectator) {
g_events->eventCreatureOnHear(spectator, creature, text, type);
}
}
return true;
}
void Game::checkCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onWalk();
cleanup();
}
}
void Game::updateCreatureWalk(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->goToFollowCreature();
}
}
void Game::checkCreatureAttack(uint32_t creatureId)
{
Creature* creature = getCreatureByID(creatureId);
if (creature && creature->getHealth() > 0) {
creature->onAttacking(0);
}
}
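// Creatures are spread across EVENT_CREATURECOUNT check lists at random so that
// onThink work is distributed over consecutive scheduler ticks.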
void Game::addCreatureCheck(Creature* creature)
{
creature->creatureCheck = true;
if (creature->inCheckCreaturesVector) {
// already in a vector
return;
}
creature->inCheckCreaturesVector = true;
checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
creature->incrementReferenceCounter();
}
void Game::removeCreatureCheck(Creature* creature)
{
if (creature->inCheckCreaturesVector) {
creature->creatureCheck = false;
}
}
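// Processes one check list per call and reschedules itself; entries whose
// creatureCheck flag has been cleared (see removeCreatureCheck) are erased and
// released here rather than at removal time.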
void Game::checkCreatures(size_t index)
{
g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));
auto& checkCreatureList = checkCreatureLists[index];
auto it = checkCreatureList.begin(), end = checkCreatureList.end();
while (it != end) {
Creature* creature = *it;
if (creature->creatureCheck) {
if (creature->getHealth() > 0) {
creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
}
++it;
} else {
creature->inCheckCreaturesVector = false;
it = checkCreatureList.erase(it);
ReleaseCreature(creature);
}
}
cleanup();
}
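// Speed adjustments are stored as a delta on top of the creature's base speed.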
void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
varSpeed += varSpeedDelta;
creature->setSpeed(varSpeed);
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), false, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
}
}
void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
return;
}
creature->setCurrentOutfit(outfit);
if (creature->isInvisible()) {
return;
}
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
}
}
void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
}
}
void Game::changeLight(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureLight(creature);
}
}
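// blockHit() expects positive damage values, so the (negative) combat values are
// negated around the call; returns true only when both the primary and the
// secondary damage component were blocked.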
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field, bool ignoreResistances /*= false */)
{
if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
return true;
}
if (target->getPlayer() && target->isInGhostMode()) {
return true;
}
if (damage.primary.value > 0) {
return false;
}
static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
if (blockType == BLOCK_DEFENSE) {
addMagicEffect(targetPos, CONST_ME_POFF);
} else if (blockType == BLOCK_ARMOR) {
addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
} else if (blockType == BLOCK_IMMUNITY) {
uint8_t hitEffect = 0;
switch (combatType) {
case COMBAT_UNDEFINEDDAMAGE: {
return;
}
case COMBAT_ENERGYDAMAGE:
case COMBAT_FIREDAMAGE:
case COMBAT_PHYSICALDAMAGE:
case COMBAT_ICEDAMAGE:
case COMBAT_DEATHDAMAGE: {
hitEffect = CONST_ME_BLOCKHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
hitEffect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_HOLYDAMAGE: {
hitEffect = CONST_ME_HOLYDAMAGE;
break;
}
default: {
hitEffect = CONST_ME_POFF;
break;
}
}
addMagicEffect(targetPos, hitEffect);
}
};
BlockType_t primaryBlockType, secondaryBlockType;
if (damage.primary.type != COMBAT_NONE) {
damage.primary.value = -damage.primary.value;
primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field, ignoreResistances);
damage.primary.value = -damage.primary.value;
sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
} else {
primaryBlockType = BLOCK_NONE;
}
if (damage.secondary.type != COMBAT_NONE) {
damage.secondary.value = -damage.secondary.value;
secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field, ignoreResistances);
damage.secondary.value = -damage.secondary.value;
sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
} else {
secondaryBlockType = BLOCK_NONE;
}
damage.blockType = primaryBlockType;
return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
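// Maps a combat type (and, for physical damage, the target's race) to the text
// colour and hit effect, optionally creating a blood or slime splash on the tile.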
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
switch (combatType) {
case COMBAT_PHYSICALDAMAGE: {
Item* splash = nullptr;
switch (target->getRace()) {
case RACE_VENOM:
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_HITBYPOISON;
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME);
break;
case RACE_BLOOD:
color = TEXTCOLOR_RED;
effect = CONST_ME_DRAWBLOOD;
if (const Tile* tile = target->getTile()) {
if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
}
}
break;
case RACE_UNDEAD:
color = TEXTCOLOR_LIGHTGREY;
effect = CONST_ME_HITAREA;
break;
case RACE_FIRE:
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_DRAWBLOOD;
break;
case RACE_ENERGY:
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
default:
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
if (splash) {
internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
startDecay(splash);
}
break;
}
case COMBAT_ENERGYDAMAGE: {
color = TEXTCOLOR_ELECTRICPURPLE;
effect = CONST_ME_ENERGYHIT;
break;
}
case COMBAT_EARTHDAMAGE: {
color = TEXTCOLOR_LIGHTGREEN;
effect = CONST_ME_GREEN_RINGS;
break;
}
case COMBAT_DROWNDAMAGE: {
color = TEXTCOLOR_LIGHTBLUE;
effect = CONST_ME_LOSEENERGY;
break;
}
case COMBAT_FIREDAMAGE: {
color = TEXTCOLOR_ORANGE;
effect = CONST_ME_HITBYFIRE;
break;
}
case COMBAT_ICEDAMAGE: {
color = TEXTCOLOR_SKYBLUE;
effect = CONST_ME_ICEATTACK;
break;
}
case COMBAT_HOLYDAMAGE: {
color = TEXTCOLOR_YELLOW;
effect = CONST_ME_HOLYDAMAGE;
break;
}
case COMBAT_DEATHDAMAGE: {
color = TEXTCOLOR_DARKRED;
effect = CONST_ME_SMALLCLOUDS;
break;
}
case COMBAT_LIFEDRAIN: {
color = TEXTCOLOR_RED;
effect = CONST_ME_MAGIC_RED;
break;
}
default: {
color = TEXTCOLOR_NONE;
effect = CONST_ME_NONE;
break;
}
}
}
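// A positive primary value means healing; otherwise the damage path runs, which
// handles mana shield absorption, CREATURE_EVENT_HEALTHCHANGE hooks and the
// messages shown to the attacker, the target and other spectators.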
bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
const Position& targetPos = target->getPosition();
if (damage.primary.value > 0) {
if (target->getHealth() <= 0) {
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t realHealthChange = target->getHealth();
target->gainHealth(attacker, damage.primary.value);
realHealthChange = target->getHealth() - realHealthChange;
if (realHealthChange > 0 && !target->isInGhostMode()) {
auto damageString = fmt::format("{:d} hitpoint{:s}", realHealthChange, realHealthChange != 1 ? "s" : "");
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = realHealthChange;
message.primary.color = TEXTCOLOR_PASTELRED;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_HEALED;
message.text = fmt::format("You heal {:s} for {:s}.", target->getNameDescription(), damageString);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_HEALED;
if (!attacker) {
message.text = fmt::format("You were healed for {:s}.", damageString);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You healed yourself for {:s}.", damageString);
} else {
message.text = fmt::format("You were healed by {:s} for {:s}.", attacker->getNameDescription(), damageString);
}
} else {
message.type = MESSAGE_HEALED_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} was healed for {:s}.", target->getNameDescription(), damageString);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} healed {:s}self for {:s}.", attacker->getNameDescription(), targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "him") : "it", damageString);
} else {
spectatorMessage = fmt::format("{:s} healed {:s} for {:s}.", attacker->getNameDescription(), target->getNameDescription(), damageString);
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
} else {
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return true;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
Player* targetPlayer = target->getPlayer();
if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
damage.primary.value = std::abs(damage.primary.value);
damage.secondary.value = std::abs(damage.secondary.value);
int32_t healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
TextMessage message;
message.position = targetPos;
SpectatorVec spectators;
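// With an active mana shield, damage is drained from the target's mana first and
// only the remainder is applied to health.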
if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
if (manaDamage != 0) {
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
healthChange = damage.primary.value + damage.secondary.value;
if (healthChange == 0) {
return true;
}
manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
}
}
targetPlayer->drainMana(attacker, manaDamage);
map.getSpectators(spectators, targetPos, true, true);
addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY);
std::string spectatorMessage;
message.primary.value = manaDamage;
message.primary.color = TEXTCOLOR_BLUE;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaDamage);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:d} mana.", manaDamage);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:d} mana due to your own attack.", manaDamage);
} else {
message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaDamage, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaDamage);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaDamage, targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his");
} else {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaDamage, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
damage.primary.value -= manaDamage;
if (damage.primary.value < 0) {
damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
damage.primary.value = 0;
}
}
}
int32_t realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeHealthChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeHealth(attacker, target, damage);
}
}
int32_t targetHealth = target->getHealth();
if (damage.primary.value >= targetHealth) {
damage.primary.value = targetHealth;
damage.secondary.value = 0;
} else if (damage.secondary.value) {
damage.secondary.value = std::min<int32_t>(damage.secondary.value, targetHealth - damage.primary.value);
}
realDamage = damage.primary.value + damage.secondary.value;
if (realDamage == 0) {
return true;
}
if (spectators.empty()) {
map.getSpectators(spectators, targetPos, true, true);
}
message.primary.value = damage.primary.value;
message.secondary.value = damage.secondary.value;
uint8_t hitEffect;
if (message.primary.value) {
combatGetTypeInfo(damage.primary.type, target, message.primary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.secondary.value) {
combatGetTypeInfo(damage.secondary.type, target, message.secondary.color, hitEffect);
if (hitEffect != CONST_ME_NONE) {
addMagicEffect(spectators, targetPos, hitEffect);
}
}
if (message.primary.color != TEXTCOLOR_NONE || message.secondary.color != TEXTCOLOR_NONE) {
auto damageString = fmt::format("{:d} hitpoint{:s}", realDamage, realDamage != 1 ? "s" : "");
std::string spectatorMessage;
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer->getPosition().z != targetPos.z) {
continue;
}
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:s} due to your attack.", target->getNameDescription(), damageString);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:s}.", damageString);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:s} due to your own attack.", damageString);
} else {
message.text = fmt::format("You lose {:s} due to an attack by {:s}.", damageString, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:s}.", target->getNameDescription(), damageString);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:s} due to {:s} own attack.", target->getNameDescription(), damageString, targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his") : "its");
} else {
spectatorMessage = fmt::format("{:s} loses {:s} due to an attack by {:s}.", target->getNameDescription(), damageString, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
if (realDamage >= targetHealth) {
for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) {
if (!creatureEvent->executeOnPrepareDeath(target, attacker)) {
return false;
}
}
}
target->drainHealth(attacker, realDamage);
addCreatureHealth(spectators, target);
}
return true;
}
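// Mana gain/drain counterpart of combatChangeHealth; only players have mana, so
// non-player targets are ignored.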
bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage)
{
Player* targetPlayer = target->getPlayer();
if (!targetPlayer) {
return true;
}
int32_t manaChange = damage.primary.value + damage.secondary.value;
if (manaChange > 0) {
if (attacker) {
const Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
return false;
}
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
int32_t realManaChange = targetPlayer->getMana();
targetPlayer->changeMana(manaChange);
realManaChange = targetPlayer->getMana() - realManaChange;
if (realManaChange > 0 && !targetPlayer->isInGhostMode()) {
TextMessage message(MESSAGE_HEALED, fmt::format("You gained {:d} mana.", realManaChange));
message.position = target->getPosition();
message.primary.value = realManaChange;
message.primary.color = TEXTCOLOR_MAYABLUE;
targetPlayer->sendTextMessage(message);
}
} else {
const Position& targetPos = target->getPosition();
if (!target->isAttackable()) {
if (!target->isInGhostMode()) {
addMagicEffect(targetPos, CONST_ME_POFF);
}
return false;
}
Player* attackerPlayer;
if (attacker) {
attackerPlayer = attacker->getPlayer();
} else {
attackerPlayer = nullptr;
}
if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
return false;
}
int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange);
BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
if (blockType != BLOCK_NONE) {
addMagicEffect(targetPos, CONST_ME_POFF);
return false;
}
if (manaLoss <= 0) {
return true;
}
if (damage.origin != ORIGIN_NONE) {
const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
if (!events.empty()) {
for (CreatureEvent* creatureEvent : events) {
creatureEvent->executeManaChange(target, attacker, damage);
}
damage.origin = ORIGIN_NONE;
return combatChangeMana(attacker, target, damage);
}
}
targetPlayer->drainMana(attacker, manaLoss);
std::string spectatorMessage;
TextMessage message;
message.position = targetPos;
message.primary.value = manaLoss;
message.primary.color = TEXTCOLOR_BLUE;
SpectatorVec spectators;
map.getSpectators(spectators, targetPos, false, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
message.type = MESSAGE_DAMAGE_DEALT;
message.text = fmt::format("{:s} loses {:d} mana due to your attack.", target->getNameDescription(), manaLoss);
message.text[0] = std::toupper(message.text[0]);
} else if (tmpPlayer == targetPlayer) {
message.type = MESSAGE_DAMAGE_RECEIVED;
if (!attacker) {
message.text = fmt::format("You lose {:d} mana.", manaLoss);
} else if (targetPlayer == attackerPlayer) {
message.text = fmt::format("You lose {:d} mana due to your own attack.", manaLoss);
} else {
message.text = fmt::format("You lose {:d} mana due to an attack by {:s}.", manaLoss, attacker->getNameDescription());
}
} else {
message.type = MESSAGE_DAMAGE_OTHERS;
if (spectatorMessage.empty()) {
if (!attacker) {
spectatorMessage = fmt::format("{:s} loses {:d} mana.", target->getNameDescription(), manaLoss);
} else if (attacker == target) {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to {:s} own attack.", target->getNameDescription(), manaLoss, targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her" : "his");
} else {
spectatorMessage = fmt::format("{:s} loses {:d} mana due to an attack by {:s}.", target->getNameDescription(), manaLoss, attacker->getNameDescription());
}
spectatorMessage[0] = std::toupper(spectatorMessage[0]);
}
message.text = spectatorMessage;
}
tmpPlayer->sendTextMessage(message);
}
}
return true;
}
void Game::addCreatureHealth(const Creature* target)
{
SpectatorVec spectators;
map.getSpectators(spectators, target->getPosition(), true, true);
addCreatureHealth(spectators, target);
}
void Game::addCreatureHealth(const SpectatorVec& spectators, const Creature* target)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendCreatureHealth(target);
}
}
}
void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
SpectatorVec spectators;
map.getSpectators(spectators, pos, true, true);
addMagicEffect(spectators, pos, effect);
}
void Game::addMagicEffect(const SpectatorVec& spectators, const Position& pos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendMagicEffect(pos, effect);
}
}
}
void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
SpectatorVec spectators, toPosSpectators;
map.getSpectators(spectators, fromPos, false, true);
map.getSpectators(toPosSpectators, toPos, false, true);
spectators.addSpectators(toPosSpectators);
addDistanceEffect(spectators, fromPos, toPos, effect);
}
void Game::addDistanceEffect(const SpectatorVec& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
for (Creature* spectator : spectators) {
if (Player* tmpPlayer = spectator->getPlayer()) {
tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
}
}
}
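// Account-wide storage values; passing -1 erases the key instead of storing it.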
void Game::setAccountStorageValue(const uint32_t accountId, const uint32_t key, const int32_t value)
{
if (value == -1) {
accountStorageMap[accountId].erase(key);
return;
}
accountStorageMap[accountId][key] = value;
}
int32_t Game::getAccountStorageValue(const uint32_t accountId, const uint32_t key) const
{
const auto& accountMapIt = accountStorageMap.find(accountId);
if (accountMapIt != accountStorageMap.end()) {
const auto& storageMapIt = accountMapIt->second.find(key);
if (storageMapIt != accountMapIt->second.end()) {
return storageMapIt->second;
}
}
return -1;
}
void Game::loadAccountStorageValues()
{
Database& db = Database::getInstance();
DBResult_ptr result;
if ((result = db.storeQuery("SELECT `account_id`, `key`, `value` FROM `account_storage`"))) {
do {
g_game.setAccountStorageValue(result->getNumber<uint32_t>("account_id"), result->getNumber<uint32_t>("key"), result->getNumber<int32_t>("value"));
} while (result->next());
}
}
bool Game::saveAccountStorageValues() const
{
DBTransaction transaction;
Database& db = Database::getInstance();
if (!transaction.begin()) {
return false;
}
if (!db.executeQuery("DELETE FROM `account_storage`")) {
return false;
}
for (const auto& accountIt : g_game.accountStorageMap) {
if (accountIt.second.empty()) {
continue;
}
DBInsert accountStorageQuery("INSERT INTO `account_storage` (`account_id`, `key`, `value`) VALUES");
for (const auto& storageIt : accountIt.second) {
if (!accountStorageQuery.addRow(fmt::format("{:d}, {:d}, {:d}", accountIt.first, storageIt.first, storageIt.second))) {
return false;
}
}
if (!accountStorageQuery.execute()) {
return false;
}
}
return transaction.commit();
}
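// Items with a remaining duration are queued in toDecayItems (cleanup() later
// sorts them into timed buckets); items without a duration decay immediately.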
void Game::startDecay(Item* item)
{
if (!item || !item->canDecay()) {
return;
}
ItemDecayState_t decayState = item->getDecaying();
if (decayState == DECAYING_TRUE) {
return;
}
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
toDecayItems.push_front(item);
} else {
internalDecayItem(item);
}
}
void Game::internalDecayItem(Item* item)
{
const ItemType& it = Item::items[item->getID()];
if (it.decayTo != 0) {
Item* newItem = transformItem(item, item->getDecayTo());
startDecay(newItem);
} else {
ReturnValue ret = internalRemoveItem(item);
if (ret != RETURNVALUE_NOERROR) {
std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
}
}
}
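// One decay bucket is processed per EVENT_DECAYINTERVAL tick; items whose
// remaining duration drops below a full bucket cycle are moved to a closer bucket
// or decayed right away.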
void Game::checkDecay()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));
size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;
auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
while (it != end) {
Item* item = *it;
if (!item->canDecay()) {
item->setDecaying(DECAYING_FALSE);
ReleaseItem(item);
it = decayItems[bucket].erase(it);
continue;
}
int32_t duration = item->getDuration();
int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);
duration -= decreaseTime;
item->decreaseDuration(decreaseTime);
if (duration <= 0) {
it = decayItems[bucket].erase(it);
internalDecayItem(item);
ReleaseItem(item);
} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
it = decayItems[bucket].erase(it);
size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
if (newBucket == bucket) {
internalDecayItem(item);
ReleaseItem(item);
} else {
decayItems[newBucket].push_back(item);
}
} else {
++it;
}
}
lastBucket = bucket;
cleanup();
}
void Game::checkLight()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this)));
updateWorldLightLevel();
LightInfo lightInfo = getWorldLightInfo();
for (const auto& it : players) {
it.second->sendWorldLight(lightInfo);
}
}
void Game::updateWorldLightLevel()
{
if (getWorldTime() >= GAME_SUNRISE && getWorldTime() <= GAME_DAYTIME) {
lightLevel = ((GAME_DAYTIME - GAME_SUNRISE) - (GAME_DAYTIME - getWorldTime())) * float(LIGHT_CHANGE_SUNRISE) + LIGHT_NIGHT;
} else if (getWorldTime() >= GAME_SUNSET && getWorldTime() <= GAME_NIGHTTIME) {
lightLevel = LIGHT_DAY - ((getWorldTime() - GAME_SUNSET) * float(LIGHT_CHANGE_SUNSET));
} else if (getWorldTime() >= GAME_NIGHTTIME || getWorldTime() < GAME_SUNRISE) {
lightLevel = LIGHT_NIGHT;
} else {
lightLevel = LIGHT_DAY;
}
}
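// One real-world hour maps onto a full in-game day: 3600 seconds / 2.5 = 1440
// world-time minutes.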
void Game::updateWorldTime()
{
g_scheduler.addEvent(createSchedulerTask(EVENT_WORLDTIMEINTERVAL, std::bind(&Game::updateWorldTime, this)));
time_t osTime = time(nullptr);
tm* timeInfo = localtime(&osTime);
worldTime = (timeInfo->tm_sec + (timeInfo->tm_min * 60)) / 2.5f;
}
void Game::shutdown()
{
std::cout << "Shutting down..." << std::flush;
g_scheduler.shutdown();
g_databaseTasks.shutdown();
g_dispatcher.shutdown();
map.spawns.clear();
raids.clear();
cleanup();
if (serviceManager) {
serviceManager->stop();
}
ConnectionManager::getInstance().closeAll();
std::cout << " done!" << std::endl;
}
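// Deferred housekeeping run from the dispatcher thread: releases creature/item
// references queued by ReleaseCreature/ReleaseItem and files freshly started
// decay items into their buckets.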
void Game::cleanup()
{
//free memory
for (auto creature : ToReleaseCreatures) {
creature->decrementReferenceCounter();
}
ToReleaseCreatures.clear();
for (auto item : ToReleaseItems) {
item->decrementReferenceCounter();
}
ToReleaseItems.clear();
for (Item* item : toDecayItems) {
const uint32_t dur = item->getDuration();
if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
decayItems[lastBucket].push_back(item);
} else {
decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item);
}
}
toDecayItems.clear();
}
void Game::ReleaseCreature(Creature* creature)
{
ToReleaseCreatures.push_back(creature);
}
void Game::ReleaseItem(Item* item)
{
ToReleaseItems.push_back(item);
}
void Game::broadcastMessage(const std::string& text, MessageClasses type) const
{
std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl;
for (const auto& it : players) {
it.second->sendTextMessage(type, text);
}
}
void Game::updateCreatureWalkthrough(const Creature* creature)
{
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
Player* tmpPlayer = spectator->getPlayer();
tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature));
}
}
void Game::updateCreatureSkull(const Creature* creature)
{
if (getWorldType() != WORLD_TYPE_PVP) {
return;
}
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureSkull(creature);
}
}
void Game::updatePlayerShield(Player* player)
{
SpectatorVec spectators;
map.getSpectators(spectators, player->getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureShield(player);
}
}
void Game::updatePlayerHelpers(const Player& player)
{
uint32_t creatureId = player.getID();
uint16_t helpers = player.getHelpers();
SpectatorVec spectators;
map.getSpectators(spectators, player.getPosition(), true, true);
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers);
}
}
void Game::updateCreatureType(Creature* creature)
{
const Player* masterPlayer = nullptr;
uint32_t creatureId = creature->getID();
CreatureType_t creatureType = creature->getType();
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
masterPlayer = master->getPlayer();
if (masterPlayer) {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
//send to clients
SpectatorVec spectators;
map.getSpectators(spectators, creature->getPosition(), true, true);
if (creatureType == CREATURETYPE_SUMMON_OTHERS) {
for (Creature* spectator : spectators) {
Player* player = spectator->getPlayer();
if (masterPlayer == player) {
player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN);
} else {
player->sendCreatureType(creatureId, creatureType);
}
}
} else {
for (Creature* spectator : spectators) {
spectator->getPlayer()->sendCreatureType(creatureId, creatureType);
}
}
}
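// The MOTD counter is incremented whenever the hash of the configured MOTD
// changes, presumably so returning clients are shown the updated message again.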
void Game::loadMotdNum()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'");
if (result) {
motdNum = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')");
}
result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'");
if (result) {
motdHash = result->getString("value");
if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) {
++motdNum;
}
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')");
}
}
void Game::saveMotdNum() const
{
Database& db = Database::getInstance();
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'motd_num'", motdNum));
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:s}' WHERE `config` = 'motd_hash'", transformToSHA1(g_config.getString(ConfigManager::MOTD))));
}
void Game::checkPlayersRecord()
{
const size_t playersOnline = getPlayersOnline();
if (playersOnline > playersRecord) {
uint32_t previousRecord = playersRecord;
playersRecord = playersOnline;
for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) {
it.second.executeRecord(playersRecord, previousRecord);
}
updatePlayersRecord();
}
}
void Game::updatePlayersRecord() const
{
Database& db = Database::getInstance();
db.executeQuery(fmt::format("UPDATE `server_config` SET `value` = '{:d}' WHERE `config` = 'players_record'", playersRecord));
}
void Game::loadPlayersRecord()
{
Database& db = Database::getInstance();
DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'");
if (result) {
playersRecord = result->getNumber<uint32_t>("value");
} else {
db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')");
}
}
void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId)
{
if (playerId == invitedId) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || invitedPlayer->isInviting(player)) {
return;
}
if (invitedPlayer->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, fmt::format("{:s} is already in a party.", invitedPlayer->getName()));
return;
}
Party* party = player->getParty();
if (!party) {
party = new Party(player);
} else if (party->getLeader() != player) {
return;
}
party->invitePlayer(*invitedPlayer);
}
void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Player* leader = getPlayerByID(leaderId);
if (!leader || !leader->isInviting(player)) {
return;
}
Party* party = leader->getParty();
if (!party || party->getLeader() != leader) {
return;
}
if (player->getParty()) {
player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party.");
return;
}
party->joinParty(*player);
}
void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* invitedPlayer = getPlayerByID(invitedId);
if (!invitedPlayer || !player->isInviting(invitedPlayer)) {
return;
}
party->revokeInvitation(*invitedPlayer);
}
void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || party->getLeader() != player) {
return;
}
Player* newLeader = getPlayerByID(newLeaderId);
if (!newLeader || !player->isPartner(newLeader)) {
return;
}
party->passPartyLeadership(newLeader);
}
void Game::playerLeaveParty(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || player->hasCondition(CONDITION_INFIGHT)) {
return;
}
party->leaveParty(player);
}
void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Party* party = player->getParty();
if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) {
return;
}
party->setSharedExperience(player, sharedExpActive);
}
void Game::sendGuildMotd(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
Guild* guild = player->getGuild();
if (guild) {
player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD);
}
}
void Game::kickPlayer(uint32_t playerId, bool displayEffect)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->kickPlayer(displayEffect);
}
void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation);
}
void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
g_events->eventPlayerOnReportBug(player, message, position, category);
}
void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
// TODO: move debug assertions to database
FILE* file = fopen("client_assertions.txt", "a");
if (file) {
fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str());
fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str());
fclose(file);
}
}
void Game::playerLeaveMarket(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
player->setInMarket(false);
}
void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(spriteId);
if (it.id == 0) {
return;
}
if (it.wareId == 0) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
player->sendMarketDetail(it.id);
}
void Game::playerBrowseMarketOwnOffers(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID());
const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnOffers(buyOffers, sellOffers);
}
void Game::playerBrowseMarketOwnHistory(uint32_t playerId)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID());
const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID());
player->sendMarketBrowseOwnHistory(buyOffers, sellOffers);
}
void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous)
{
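	// Basic sanity checks: the amount must be 1..64000, the price 1..999999999,
	// and the requested action must be either a buy or a sell.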
if (amount == 0 || amount > 64000) {
return;
}
if (price == 0 || price > 999999999) {
return;
}
if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) {
player->sendMarketLeave();
return;
}
const ItemType& itt = Item::items.getItemIdByClientId(spriteId);
if (itt.id == 0 || itt.wareId == 0) {
return;
}
const ItemType& it = Item::items.getItemIdByClientId(itt.wareId);
if (it.id == 0 || it.wareId == 0) {
return;
}
if (!it.stackable && amount > 2000) {
return;
}
const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER);
if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= maxOfferCount) {
return;
}
uint64_t fee = (price / 100.) * amount;
if (fee < 20) {
fee = 20;
} else if (fee > 1000) {
fee = 1000;
}
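	// Illustrative example (comment added for clarity, not in the original source):
	// an offer of 50 items priced at 1000 each gives fee = (1000 / 100) * 50 = 500;
	// the computed fee is always clamped to the range [20, 1000].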
if (type == MARKETACTION_SELL) {
if (fee > (player->getMoney() + player->bankBalance)) {
return;
}
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
const auto debitCash = std::min(player->getMoney(), fee);
const auto debitBank = fee - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
} else {
uint64_t totalPrice = static_cast<uint64_t>(price) * amount;
totalPrice += fee;
if (totalPrice > (player->getMoney() + player->bankBalance)) {
return;
}
const auto debitCash = std::min(player->getMoney(), totalPrice);
const auto debitBank = totalPrice - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
}
IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous);
player->sendMarketEnter(player->getLastDepotId());
const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id);
const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id);
player->sendMarketBrowseItem(it.id, buyOffers, sellOffers);
}
void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0 || offer.playerId != player->getGUID()) {
return;
}
if (offer.type == MARKETACTION_BUY) {
player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount;
player->sendMarketEnter(player->getLastDepotId());
} else {
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
if (it.stackable) {
uint16_t tmpAmount = offer.amount;
while (tmpAmount > 0) {
int32_t stackCount = std::min<int32_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < offer.amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
}
IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED);
offer.amount = 0;
offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
player->sendMarketCancelOffer(offer);
player->sendMarketEnter(player->getLastDepotId());
}
void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount)
{
if (amount == 0 || amount > 64000) {
return;
}
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->isInMarket()) {
return;
}
MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter);
if (offer.id == 0) {
return;
}
uint32_t offerAccountId = IOLoginData::getAccountIdByPlayerId(offer.playerId);
if (offerAccountId == player->getAccount()) {
return;
}
if (amount > offer.amount) {
return;
}
const ItemType& it = Item::items[offer.itemId];
if (it.id == 0) {
return;
}
uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount;
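	// Accepting a buy offer: the acceptor hands over items from the depot and is paid.
	// Accepting a sell offer: the acceptor pays and receives the items in the inbox.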
if (offer.type == MARKETACTION_BUY) {
DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false);
if (!depotChest) {
return;
}
std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox());
if (itemList.empty()) {
return;
}
Player* buyerPlayer = getPlayerByGUID(offer.playerId);
if (!buyerPlayer) {
buyerPlayer = new Player(nullptr);
if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) {
delete buyerPlayer;
return;
}
}
if (it.stackable) {
uint16_t tmpAmount = amount;
for (Item* item : itemList) {
uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount());
tmpAmount -= removeCount;
internalRemoveItem(item, removeCount);
if (tmpAmount == 0) {
break;
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
player->bankBalance += totalPrice;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
if (buyerPlayer->isOffline()) {
IOLoginData::savePlayer(buyerPlayer);
delete buyerPlayer;
} else {
buyerPlayer->onReceiveMail();
}
} else {
if (totalPrice > (player->getMoney() + player->bankBalance)) {
return;
}
const auto debitCash = std::min(player->getMoney(), totalPrice);
const auto debitBank = totalPrice - debitCash;
removeMoney(player, debitCash);
player->bankBalance -= debitBank;
if (it.stackable) {
uint16_t tmpAmount = amount;
while (tmpAmount > 0) {
uint16_t stackCount = std::min<uint16_t>(100, tmpAmount);
Item* item = Item::CreateItem(it.id, stackCount);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
tmpAmount -= stackCount;
}
} else {
int32_t subType;
if (it.charges != 0) {
subType = it.charges;
} else {
subType = -1;
}
for (uint16_t i = 0; i < amount; ++i) {
Item* item = Item::CreateItem(it.id, subType);
if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) {
delete item;
break;
}
}
}
Player* sellerPlayer = getPlayerByGUID(offer.playerId);
if (sellerPlayer) {
sellerPlayer->bankBalance += totalPrice;
} else {
IOLoginData::increaseBankBalance(offer.playerId, totalPrice);
}
player->onReceiveMail();
}
const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION);
IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX);
IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED);
offer.amount -= amount;
if (offer.amount == 0) {
IOMarket::deleteOffer(offer.id);
} else {
IOMarket::acceptOffer(offer.id, amount);
}
player->sendMarketEnter(player->getLastDepotId());
offer.timestamp += marketOfferDuration;
player->sendMarketAcceptOffer(offer);
}
void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) {
creatureEvent->executeExtendedOpcode(player, opcode, buffer);
}
}
std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox)
{
std::forward_list<Item*> itemList;
uint16_t count = 0;
std::list<Container*> containers { depotChest, inbox };
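	// Walk the depot chest and inbox (plus any nested containers) in FIFO order,
	// collecting items with the requested wareId until sufficientCount is reached.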
do {
Container* container = containers.front();
containers.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containers.push_back(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId != wareId) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
itemList.push_front(item);
count += Item::countByType(item, -1);
if (count >= sufficientCount) {
return itemList;
}
}
} while (!containers.empty());
return std::forward_list<Item*>();
}
void Game::forceAddCondition(uint32_t creatureId, Condition* condition)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
delete condition;
return;
}
creature->addCondition(condition, true);
}
void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type)
{
Creature* creature = getCreatureByID(creatureId);
if (!creature) {
return;
}
creature->removeCondition(type, true);
}
void Game::sendOfflineTrainingDialog(Player* player)
{
if (!player) {
return;
}
if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) {
player->sendModalWindow(offlineTrainingWindow);
}
}
void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice)
{
Player* player = getPlayerByID(playerId);
if (!player) {
return;
}
if (!player->hasModalWindowOpen(modalWindowId)) {
return;
}
player->onModalWindowHandled(modalWindowId);
// offline training, hard-coded
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
if (button == offlineTrainingWindow.defaultEnterButton) {
if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) {
BedItem* bedItem = player->getBedItem();
if (bedItem && bedItem->sleep(player)) {
player->setOfflineTrainingSkill(choice);
return;
}
}
} else {
player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
}
player->setBedItem(nullptr);
} else {
for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) {
creatureEvent->executeModalWindow(player, modalWindowId, button, choice);
}
}
}
void Game::addPlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames[lowercase_name] = player;
mappedPlayerGuids[player->getGUID()] = player;
wildcardTree.insert(lowercase_name);
players[player->getID()] = player;
}
void Game::removePlayer(Player* player)
{
const std::string& lowercase_name = asLowerCaseString(player->getName());
mappedPlayerNames.erase(lowercase_name);
mappedPlayerGuids.erase(player->getGUID());
wildcardTree.remove(lowercase_name);
players.erase(player->getID());
}
void Game::addNpc(Npc* npc)
{
npcs[npc->getID()] = npc;
}
void Game::removeNpc(Npc* npc)
{
npcs.erase(npc->getID());
}
void Game::addMonster(Monster* monster)
{
monsters[monster->getID()] = monster;
}
void Game::removeMonster(Monster* monster)
{
monsters.erase(monster->getID());
}
Guild* Game::getGuild(uint32_t id) const
{
auto it = guilds.find(id);
if (it == guilds.end()) {
return nullptr;
}
return it->second;
}
void Game::addGuild(Guild* guild)
{
guilds[guild->getId()] = guild;
}
void Game::removeGuild(uint32_t guildId)
{
guilds.erase(guildId);
}
void Game::decreaseBrowseFieldRef(const Position& pos)
{
Tile* tile = map.getTile(pos.x, pos.y, pos.z);
if (!tile) {
return;
}
auto it = browseFields.find(tile);
if (it != browseFields.end()) {
it->second->decrementReferenceCounter();
}
}
void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable)
{
if (stackable) {
for (Item* item : itemList) {
if (item->getItemCount() > amount) {
internalRemoveItem(item, amount);
break;
} else {
amount -= item->getItemCount();
internalRemoveItem(item);
}
}
} else {
for (Item* item : itemList) {
internalRemoveItem(item);
}
}
}
BedItem* Game::getBedBySleeper(uint32_t guid) const
{
auto it = bedSleepersMap.find(guid);
if (it == bedSleepersMap.end()) {
return nullptr;
}
return it->second;
}
void Game::setBedSleeper(BedItem* bed, uint32_t guid)
{
bedSleepersMap[guid] = bed;
}
void Game::removeBedSleeper(uint32_t guid)
{
auto it = bedSleepersMap.find(guid);
if (it != bedSleepersMap.end()) {
bedSleepersMap.erase(it);
}
}
Item* Game::getUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it == uniqueItems.end()) {
return nullptr;
}
return it->second;
}
bool Game::addUniqueItem(uint16_t uniqueId, Item* item)
{
auto result = uniqueItems.emplace(uniqueId, item);
if (!result.second) {
std::cout << "Duplicate unique id: " << uniqueId << std::endl;
}
return result.second;
}
void Game::removeUniqueItem(uint16_t uniqueId)
{
auto it = uniqueItems.find(uniqueId);
if (it != uniqueItems.end()) {
uniqueItems.erase(it);
}
}
bool Game::reload(ReloadTypes_t reloadType)
{
switch (reloadType) {
case RELOAD_TYPE_ACTIONS: return g_actions->reload();
case RELOAD_TYPE_CHAT: return g_chat->load();
case RELOAD_TYPE_CONFIG: return g_config.reload();
case RELOAD_TYPE_CREATURESCRIPTS: {
g_creatureEvents->reload();
g_creatureEvents->removeInvalidEvents();
return true;
}
case RELOAD_TYPE_EVENTS: return g_events->load();
case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload();
case RELOAD_TYPE_ITEMS: return Item::items.reload();
case RELOAD_TYPE_MONSTERS: return g_monsters.reload();
case RELOAD_TYPE_MOUNTS: return mounts.reload();
case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload();
case RELOAD_TYPE_NPCS: {
Npcs::reload();
return true;
}
case RELOAD_TYPE_QUESTS: return quests.reload();
case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup();
case RELOAD_TYPE_SPELLS: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
return true;
}
case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload();
case RELOAD_TYPE_WEAPONS: {
bool results = g_weapons->reload();
g_weapons->loadDefaults();
return results;
}
case RELOAD_TYPE_SCRIPTS: {
			// the commented-out reloads below are TODO, to be enabled as revscriptsys progresses further
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_weapons->clear(true);
g_weapons->loadDefaults();
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
/*
Npcs::reload();
raids.reload() && raids.startup();
Item::items.reload();
quests.reload();
mounts.reload();
g_config.reload();
g_events->load();
g_chat->load();
*/
return true;
}
default: {
if (!g_spells->reload()) {
std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl;
std::terminate();
} else if (!g_monsters.reload()) {
std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl;
std::terminate();
}
g_actions->reload();
g_config.reload();
g_creatureEvents->reload();
g_monsters.reload();
g_moveEvents->reload();
Npcs::reload();
raids.reload() && raids.startup();
g_talkActions->reload();
Item::items.reload();
g_weapons->reload();
g_weapons->clear(true);
g_weapons->loadDefaults();
quests.reload();
mounts.reload();
g_globalEvents->reload();
g_events->load();
g_chat->load();
g_actions->clear(true);
g_creatureEvents->clear(true);
g_moveEvents->clear(true);
g_talkActions->clear(true);
g_globalEvents->clear(true);
g_spells->clear(true);
g_scripts->loadScripts("scripts", false, true);
g_creatureEvents->removeInvalidEvents();
return true;
}
}
return true;
}
| 1 | 19,068 | Why this change? It will now only show on the bottom of the screen as white text, is it correct behaviour? | otland-forgottenserver | cpp |
@@ -46,7 +46,7 @@ module Blacklight
fields = Array.wrap(view_config.title_field) + [configuration.document_model.unique_key]
f = fields.lazy.map { |field| field_config(field) }.detect { |field_config| field_presenter(field_config).any? }
- field_value(f, except_operations: [Rendering::HelperMethod])
+ f ? field_value(f, except_operations: [Rendering::HelperMethod]) : ""
end
def display_type(base_name = nil, default: nil) | 1 | # frozen_string_literal: true
module Blacklight
# An abstract class that the view presenters for SolrDocuments descend from
class DocumentPresenter
attr_reader :document, :configuration, :view_context
class_attribute :thumbnail_presenter
self.thumbnail_presenter = ThumbnailPresenter
# @param [SolrDocument] document
# @param [ActionView::Base] view_context scope for linking and generating urls
# @param [Blacklight::Configuration] configuration
def initialize(document, view_context, configuration = view_context.blacklight_config)
@document = document
@view_context = view_context
@configuration = configuration
end
# @return [Hash<String,Configuration::Field>] all the fields for this index view that should be rendered
def fields_to_render
return to_enum(:fields_to_render) unless block_given?
fields.each do |name, field_config|
field_presenter = field_presenter(field_config)
next unless field_presenter.render_field? && field_presenter.any?
yield name, field_config, field_presenter
end
end
def field_presenters
return to_enum(:field_presenters) unless block_given?
fields_to_render.each { |_, _, config| yield config }
end
##
# Get the value of the document's "title" field, or a placeholder
# value (if empty)
#
# @return [String]
def heading
return field_value(view_config.title_field) if view_config.title_field.is_a? Blacklight::Configuration::Field
fields = Array.wrap(view_config.title_field) + [configuration.document_model.unique_key]
f = fields.lazy.map { |field| field_config(field) }.detect { |field_config| field_presenter(field_config).any? }
field_value(f, except_operations: [Rendering::HelperMethod])
end
def display_type(base_name = nil, default: nil)
fields = []
fields += Array.wrap(view_config[:"#{base_name}_display_type_field"]) if base_name && view_config.key?(:"#{base_name}_display_type_field")
fields += Array.wrap(view_config.display_type_field)
display_type = fields.lazy.map { |field| field_presenter(field_config(field)) }.detect(&:any?)&.values
display_type ||= Array(default) if default
display_type || []
end
##
# Render the field label for a document
#
    # Allow an extension point where information in the document
# may drive the value of the field
# @param [Configuration::Field] field_config
# @param [Hash] options
# @option options [String] :value
def field_value field_config, options = {}
field_presenter(field_config, options).render
end
def thumbnail
@thumbnail ||= thumbnail_presenter.new(document, view_context, view_config)
end
private
def render_field?(field_config)
field_presenter(field_config).render_field?
end
deprecation_deprecate render_field?: 'Use FieldPresenter#render_field?'
def has_value?(field_config)
field_presenter(field_config).any?
end
deprecation_deprecate has_value?: 'Use FieldPresenter#any?'
def field_values(field_config, options = {})
field_value(field_config, options)
end
deprecation_deprecate field_values: 'Use #field_value'
def retrieve_values(field_config)
field_presenter(field_config).values
end
deprecation_deprecate retrieve_values: 'Use FieldPresenter#values'
def field_presenter(field_config, options = {})
presenter_class = field_config.presenter || Blacklight::FieldPresenter
presenter_class.new(view_context, document, field_config, options)
end
end
end
| 1 | 8,782 | Does this need to allocate a string, or would a nil value (indicating no header) be better? | projectblacklight-blacklight | rb
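One way to read this suggestion (an illustrative sketch, not part of the submitted patch): return nil rather than an empty string when no title field yields a value, so callers can tell a missing header apart from a blank one:

      f ? field_value(f, except_operations: [Rendering::HelperMethod]) : nil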
@@ -13,6 +13,7 @@ import { Fragment } from './create-element';
export function Component(props, context) {
this.props = props;
this.context = context;
+ // this.constructor // When component is functional component, this is reseted to functional component
// if (this.state==null) this.state = {};
// this.state = {};
// this._dirty = true; | 1 | import { assign } from './util';
import { diff, commitRoot } from './diff/index';
import options from './options';
import { Fragment } from './create-element';
/**
* Base Component class. Provides `setState()` and `forceUpdate()`, which
* trigger rendering
* @param {object} props The initial component props
* @param {object} context The initial context from parent components'
* getChildContext
*/
export function Component(props, context) {
this.props = props;
this.context = context;
// if (this.state==null) this.state = {};
// this.state = {};
// this._dirty = true;
// this._renderCallbacks = []; // Only class components
// Other properties that Component will have set later,
// shown here as commented out for quick reference
// this.base = null;
// this._context = null;
// this._ancestorComponent = null; // Always set right after instantiation
// this._vnode = null;
// this._nextState = null; // Only class components
// this._prevVNode = null;
// this._processingException = null; // Always read, set only when handling error
// this._constructor = null; // Only functional components, always set right after instantiation
}
/**
* Update component state and schedule a re-render.
* @param {object | ((s: object, p: object) => object)} update A hash of state
* properties to update with new values or a function that given the current
* state and props returns a new partial state
* @param {() => void} [callback] A function to be called once component state is
* updated
*/
Component.prototype.setState = function(update, callback) {
// only clone state when copying to nextState the first time.
let s = (this._nextState!==this.state && this._nextState) || (this._nextState = assign({}, this.state));
// if update() mutates state in-place, skip the copy:
if (typeof update!=='function' || (update = update(s, this.props))) {
assign(s, update);
}
// Skip update if updater function returned null
if (update==null) return;
if (callback) this._renderCallbacks.push(callback);
enqueueRender(this);
};
/**
* Immediately perform a synchronous re-render of the component
* @param {() => void} [callback] A function to be called after component is
 * re-rendered
*/
Component.prototype.forceUpdate = function(callback) {
let vnode = this._vnode, dom = this._vnode._dom, parentDom = this._parentDom;
if (parentDom) {
		// Set render mode so that we can differentiate where the render request
// is coming from. We need this because forceUpdate should never call
// shouldComponentUpdate
const force = callback!==false;
let mounts = [];
dom = diff(dom, parentDom, vnode, vnode, this._context, parentDom.ownerSVGElement!==undefined, null, mounts, this._ancestorComponent, force);
if (dom!=null && dom.parentNode!==parentDom) {
parentDom.appendChild(dom);
}
commitRoot(mounts, vnode);
}
if (callback) callback();
};
/**
* Accepts `props` and `state`, and returns a new Virtual DOM tree to build.
* Virtual DOM is generally constructed via [JSX](http://jasonformat.com/wtf-is-jsx).
* @param {object} props Props (eg: JSX attributes) received from parent
* element/component
* @param {object} state The component's current state
* @param {object} context Context object, as returned by the nearest
* ancestor's `getChildContext()`
* @returns {import('./index').ComponentChildren | void}
*/
Component.prototype.render = Fragment;
/**
* The render queue
* @type {Array<import('./internal').Component>}
*/
let q = [];
/**
* Asynchronously schedule a callback
* @type {(cb) => void}
*/
const defer = typeof Promise=='function' ? Promise.prototype.then.bind(Promise.resolve()) : setTimeout;
/*
* The value of `Component.debounce` must asynchronously invoke the passed in callback. It is
 * important that contributors to Preact can consistently reason about what calls to `setState`, etc.
* do, and when their effects will be applied. See the links below for some further reading on designing
* asynchronous APIs.
* * [Designing APIs for Asynchrony](https://blog.izs.me/2013/08/designing-apis-for-asynchrony)
* * [Callbacks synchronous and asynchronous](https://blog.ometer.com/2011/07/24/callbacks-synchronous-and-asynchronous/)
*/
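// Illustration (added comment, not part of the original file): the scheduler can be
// replaced by assigning options.debounceRendering, e.g.
// options.debounceRendering = cb => requestAnimationFrame(cb);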
/**
* Enqueue a rerender of a component
* @param {import('./internal').Component} c The component to rerender
*/
export function enqueueRender(c) {
if (!c._dirty && (c._dirty = true) && q.push(c) === 1) {
(options.debounceRendering || defer)(process);
}
}
/** Flush the render queue by rerendering all queued components */
function process() {
let p;
while ((p=q.pop())) {
// forceUpdate's callback argument is reused here to indicate a non-forced update.
if (p._dirty) p.forceUpdate(false);
}
}
| 1 | 12,768 | Nit: Past tense of `reset` is also `reset`. | preactjs-preact | js |
@@ -672,6 +672,12 @@ class Commands:
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
+ @command('')
+ def getfee(self):
+ """Return current optimal fee per kilobyte, according to
+ config settings (static/dynamic)"""
+ return self.config.fee_per_kb()
+
@command('')
def help(self):
# for the python console | 1 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import datetime
import copy
import argparse
import json
import ast
import base64
from functools import wraps
from decimal import Decimal
from .import util
from .util import bfh, bh2u, format_satoshis, json_decode
from .import bitcoin
from .bitcoin import is_address, hash_160, COIN, TYPE_ADDRESS
from .i18n import _
from .transaction import Transaction, multisig_script
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .plugins import run_hook
known_commands = {}
def satoshis(amount):
# satoshi conversion must not be performed by the parser
return int(COIN*Decimal(amount)) if amount not in ['!', None] else amount
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
self.defaults = func.__defaults__
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
c = known_commands[func.__name__]
wallet = args[0].wallet
password = kwargs.get('password')
if c.requires_wallet and wallet is None:
raise BaseException("wallet not loaded. Use 'electrum daemon load_wallet'")
if c.requires_password and password is None and wallet.storage.get('use_encryption'):
return {'error': 'Password required' }
return func(*args, **kwargs)
return func_wrapper
return decorator
class Commands:
def __init__(self, config, wallet, network, callback = None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
def _run(self, method, args, password_getter):
# this wrapper is called from the python console
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
password = password_getter()
if password is None:
return
else:
password = None
f = getattr(self, method)
if cmd.requires_password:
result = f(*args, **{'password':password})
else:
result = f(*args)
if self._callback:
self._callback()
return result
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self, segwit=False):
"""Create a new wallet"""
raise BaseException('Not a JSON-RPC command')
@command('wn')
def restore(self, text):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys. If you want to be prompted for your
seed, type '?' or ':' (concealed) """
raise BaseException('Not a JSON-RPC command')
@command('wp')
def password(self, password=None, new_password=None):
"""Change wallet password. """
b = self.wallet.storage.is_encrypted()
self.wallet.update_password(password, new_password, b)
self.wallet.storage.write()
return {'password':self.wallet.has_password()}
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
if key not in ('rpcuser', 'rpcpassword'):
value = json_decode(value)
self.config.set_key(key, value)
return True
@command('')
def make_seed(self, nbits=132, entropy=1, language=None, segwit=False):
"""Create a seed"""
from .mnemonic import Mnemonic
t = 'segwit' if segwit else 'standard'
s = Mnemonic(language).make_seed(t, nbits, custom_entropy=entropy)
return s
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""Check that a seed was generated with given entropy"""
from .mnemonic import Mnemonic
return Mnemonic(language).check_seed(seed, entropy)
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.get_history', [address]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = copy.deepcopy(self.wallet.get_utxos(exclude_frozen=False))
for i in l:
v = i["value"]
i["value"] = str(Decimal(v)/COIN) if v is not None else None
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
return self.network.synchronous_get(('blockchain.address.listunspent', [address]))
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':satoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
sec = txin.get('privkey')
if sec:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
keypairs[pubkey] = privkey, compressed
txin['type'] = txin_type
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
outputs = [(TYPE_ADDRESS, x['address'], int(x['value'])) for x in outputs]
tx = Transaction.from_io(inputs, outputs, locktime=locktime)
tx.sign(keypairs)
return tx.as_dict()
@command('wp')
def signtransaction(self, tx, privkey=None, password=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx)
if privkey:
txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
pubkey = bitcoin.public_key_from_private_key(privkey2, compressed)
h160 = bitcoin.hash_160(bfh(pubkey))
x_pubkey = 'fd' + bh2u(b'\x00' + h160)
tx.sign({x_pubkey:(privkey2, compressed)})
else:
self.wallet.sign_transaction(tx, password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return tx.deserialize()
@command('n')
def broadcast(self, tx, timeout=30):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast(tx, timeout)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
return self.wallet.set_frozen_state([address], False)
@command('wp')
def getprivatekeys(self, address, password=None):
"""Get private keys of addresses. You may pass a single wallet address, or a list of wallet addresses."""
if isinstance(address, str):
address = address.strip()
if is_address(address):
return self.wallet.export_private_key(address, password)[0]
domain = address
return [self.wallet.export_private_key(address, password)[0] for address in domain]
@command('w')
def ismine(self, address):
"""Check if address is in wallet. Return true if and only address is in wallet"""
return self.wallet.is_mine(address)
@command('')
def dumpprivkeys(self):
"""Deprecated."""
return "This command is deprecated. Use a pipe instead: 'electrum listaddresses | electrum getprivatekeys - '"
@command('')
def validateaddress(self, address):
"""Check that an address is valid. """
return is_address(address)
@command('w')
def getpubkeys(self, address):
"""Return the public keys for a wallet address. """
return self.wallet.get_public_keys(address)
@command('w')
def getbalance(self):
"""Return the balance of your wallet. """
c, u, x = self.wallet.get_balance()
out = {"confirmed": str(Decimal(c)/COIN)}
if u:
out["unconfirmed"] = str(Decimal(u)/COIN)
if x:
out["unmatured"] = str(Decimal(x)/COIN)
return out
@command('n')
def getaddressbalance(self, address):
"""Return the balance of any address. Note: This is a walletless
server query, results are not checked by SPV.
"""
out = self.network.synchronous_get(('blockchain.address.get_balance', [address]))
out["confirmed"] = str(Decimal(out["confirmed"])/COIN)
out["unconfirmed"] = str(Decimal(out["unconfirmed"])/COIN)
return out
@command('n')
def getproof(self, address):
"""Get Merkle branch of an address in the UTXO set"""
p = self.network.synchronous_get(('blockchain.address.get_proof', [address]))
out = []
for i,s in p:
out.append(i)
return out
@command('n')
def getmerkle(self, txid, height):
"""Get Merkle branch of a transaction included in a block. Electrum
uses this to verify transactions (Simple Payment Verification)."""
return self.network.synchronous_get(('blockchain.transaction.get_merkle', [txid, int(height)]))
@command('n')
def getservers(self):
"""Return the list of available servers"""
return self.network.get_servers()
@command('')
def version(self):
"""Return the version of electrum."""
from .version import ELECTRUM_VERSION
return ELECTRUM_VERSION
@command('w')
def getmpk(self):
"""Get master public key. Return your wallet\'s master public key"""
return self.wallet.get_master_public_key()
@command('wp')
def getmasterprivate(self, password=None):
"""Get master private key. Return your wallet\'s master private key"""
return str(self.wallet.keystore.get_master_private_key(password))
@command('wp')
def getseed(self, password=None):
"""Get seed phrase. Print the generation seed of your wallet."""
s = self.wallet.get_seed(password)
return s
@command('wp')
def importprivkey(self, privkey, password=None):
"""Import a private key."""
if not self.wallet.can_import_privkey():
return "Error: This type of wallet cannot import private keys. Try to create a new wallet with that key."
try:
addr = self.wallet.import_private_key(privkey, password)
out = "Keypair imported: " + addr
except BaseException as e:
out = "Error: " + str(e)
return out
def _resolver(self, x):
if x is None:
return None
out = self.wallet.contacts.resolve(x)
if out.get('type') == 'openalias' and self.nocheck is False and out.get('validated') is False:
raise BaseException('cannot verify alias', x)
return out['address']
@command('n')
def sweep(self, privkey, destination, fee=None, nocheck=False, imax=100):
"""Sweep private keys. Returns a transaction that spends UTXOs from
privkey to a destination address. The transaction is not
broadcasted."""
from .wallet import sweep
tx_fee = satoshis(fee)
privkeys = privkey.split()
self.nocheck = nocheck
#dest = self._resolver(destination)
tx = sweep(privkeys, self.network, self.config, destination, tx_fee, imax)
return tx.as_dict() if tx else None
@command('wp')
def signmessage(self, address, message, password=None):
"""Sign a message with a key. Use quotes if your message contains
whitespaces"""
sig = self.wallet.sign_message(address, message, password)
return base64.b64encode(sig).decode('ascii')
@command('')
def verifymessage(self, address, signature, message):
"""Verify a signature."""
sig = base64.b64decode(signature)
message = util.to_bytes(message)
return bitcoin.verify_message(address, sig, message)
def _mktx(self, outputs, fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime=None):
self.nocheck = nocheck
change_addr = self._resolver(change_addr)
domain = None if domain is None else map(self._resolver, domain)
final_outputs = []
for address, amount in outputs:
address = self._resolver(address)
amount = satoshis(amount)
final_outputs.append((TYPE_ADDRESS, address, amount))
coins = self.wallet.get_spendable_coins(domain, self.config)
tx = self.wallet.make_unsigned_transaction(coins, final_outputs, self.config, fee, change_addr)
if locktime != None:
tx.locktime = locktime
if rbf:
tx.set_rbf(True)
if not unsigned:
run_hook('sign_tx', self.wallet, tx)
self.wallet.sign_transaction(tx, password)
return tx
@command('wp')
def payto(self, destination, amount, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx([(destination, amount)], tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('wp')
def paytomany(self, outputs, fee=None, from_addr=None, change_addr=None, nocheck=False, unsigned=False, rbf=False, password=None, locktime=None):
"""Create a multi-output transaction. """
tx_fee = satoshis(fee)
domain = from_addr.split(',') if from_addr else None
tx = self._mktx(outputs, tx_fee, change_addr, domain, nocheck, unsigned, rbf, password, locktime)
return tx.as_dict()
@command('w')
def history(self):
"""Wallet history. Returns the transaction history of your wallet."""
balance = 0
out = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if timestamp:
date = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
date = "----"
label = self.wallet.get_label(tx_hash)
tx = self.wallet.transactions.get(tx_hash)
tx.deserialize()
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr == None: continue
if addr == "(pubkey)":
prevout_hash = x.get('prevout_hash')
prevout_n = x.get('prevout_n')
_addr = self.wallet.find_pay_to_pubkey_address(prevout_hash, prevout_n)
if _addr:
addr = _addr
input_addresses.append(addr)
for addr, v in tx.get_outputs():
output_addresses.append(addr)
out.append({
'txid': tx_hash,
'timestamp': timestamp,
'date': date,
'input_addresses': input_addresses,
'output_addresses': output_addresses,
'label': label,
'value': str(Decimal(value)/COIN) if value is not None else None,
'height': height,
'confirmations': conf
})
return out
@command('w')
def setlabel(self, key, label):
"""Assign a label to an item. Item may be a bitcoin address or a
transaction ID"""
self.wallet.set_label(key, label)
@command('w')
def listcontacts(self):
"""Show your list of contacts"""
return self.wallet.contacts
@command('w')
def getalias(self, key):
"""Retrieve alias. Lookup in your list of contacts, and for an OpenAlias DNS record."""
return self.wallet.contacts.resolve(key)
@command('w')
def searchcontacts(self, query):
"""Search through contacts, return matching entries. """
results = {}
for key, value in self.wallet.contacts.items():
if query.lower() in key.lower():
results[key] = value
return results
@command('w')
def listaddresses(self, receiving=False, change=False, labels=False, frozen=False, unused=False, funded=False, balance=False):
"""List wallet addresses. Returns the list of all addresses in your wallet. Use optional arguments to filter the results."""
out = []
for addr in self.wallet.get_addresses():
if frozen and not self.wallet.is_frozen(addr):
continue
if receiving and self.wallet.is_change(addr):
continue
if change and not self.wallet.is_change(addr):
continue
if unused and self.wallet.is_used(addr):
continue
if funded and self.wallet.is_empty(addr):
continue
item = addr
if labels or balance:
item = (item,)
if balance:
item += (format_satoshis(sum(self.wallet.get_addr_balance(addr))),)
if labels:
item += (repr(self.wallet.labels.get(addr, '')),)
out.append(item)
return out
@command('n')
def gettransaction(self, txid):
"""Retrieve a transaction. """
if self.wallet and txid in self.wallet.transactions:
tx = self.wallet.transactions[txid]
else:
raw = self.network.synchronous_get(('blockchain.transaction.get', [txid]))
if raw:
tx = Transaction(raw)
else:
raise BaseException("Unknown transaction")
return tx.as_dict()
@command('')
def encrypt(self, pubkey, message):
"""Encrypt a message with a public key. Use quotes if the message contains whitespaces."""
return bitcoin.encrypt_message(message, pubkey)
@command('wp')
def decrypt(self, pubkey, encrypted, password=None):
"""Decrypt a message encrypted with a public key."""
return self.wallet.decrypt_message(pubkey, encrypted, password)
def _format_request(self, out):
pr_str = {
PR_UNKNOWN: 'Unknown',
PR_UNPAID: 'Pending',
PR_PAID: 'Paid',
PR_EXPIRED: 'Expired',
}
out['amount (BTC)'] = format_satoshis(out.get('amount'))
out['status'] = pr_str[out.get('status', PR_UNKNOWN)]
return out
@command('w')
def getrequest(self, key):
"""Return a payment request"""
r = self.wallet.get_payment_request(key, self.config)
if not r:
raise BaseException("Request not found")
return self._format_request(r)
#@command('w')
#def ackrequest(self, serialized):
# """<Not implemented>"""
# pass
@command('w')
def listrequests(self, pending=False, expired=False, paid=False):
"""List the payment requests you made."""
out = self.wallet.get_sorted_requests(self.config)
if pending:
f = PR_UNPAID
elif expired:
f = PR_EXPIRED
elif paid:
f = PR_PAID
else:
f = None
if f is not None:
out = list(filter(lambda x: x.get('status')==f, out))
return list(map(self._format_request, out))
@command('w')
def createnewaddress(self):
"""Create a new receiving address, beyond the gap limit of the wallet"""
return self.wallet.create_new_address(False)
@command('w')
def getunusedaddress(self):
"""Returns the first unused address of the wallet, or None if all addresses are used.
An address is considered as used if it has received a transaction, or if it is used in a payment request."""
return self.wallet.get_unused_address()
@command('w')
def addrequest(self, amount, memo='', expiration=None, force=False):
"""Create a payment request, using the first unused address of the wallet.
        The address will be considered as used after this operation.
If no payment is received, the address will be considered as unused if the payment request is deleted from the wallet."""
addr = self.wallet.get_unused_address()
if addr is None:
if force:
addr = self.wallet.create_new_address(False)
else:
return False
amount = satoshis(amount)
expiration = int(expiration) if expiration else None
req = self.wallet.make_payment_request(addr, amount, memo, expiration)
self.wallet.add_payment_request(req, self.config)
out = self.wallet.get_payment_request(addr, self.config)
return self._format_request(out)
@command('wp')
def signrequest(self, address, password=None):
"Sign payment request with an OpenAlias"
alias = self.config.get('alias')
if not alias:
raise BaseException('No alias in your configuration')
alias_addr = self.wallet.contacts.resolve(alias)['address']
self.wallet.sign_payment_request(address, alias, alias_addr, password)
@command('w')
def rmrequest(self, address):
"""Remove a payment request"""
return self.wallet.remove_payment_request(address, self.config)
@command('w')
def clearrequests(self):
"""Remove all payment requests"""
for k in list(self.wallet.receive_requests.keys()):
self.wallet.remove_payment_request(k, self.config)
@command('n')
def notify(self, address, URL):
"""Watch an address. Everytime the address changes, a http POST is sent to the URL."""
def callback(x):
import urllib.request
headers = {'content-type':'application/json'}
data = {'address':address, 'status':x.get('result')}
serialized_data = util.to_bytes(json.dumps(data))
try:
req = urllib.request.Request(URL, serialized_data, headers)
response_stream = urllib.request.urlopen(req, timeout=5)
util.print_error('Got Response for %s' % address)
except BaseException as e:
util.print_error(str(e))
h = self.network.addr_to_scripthash(address)
self.network.send([('blockchain.scripthash.subscribe', [h])], callback)
return True
@command('wn')
def is_synchronized(self):
""" return wallet synchronization status """
return self.wallet.is_up_to_date()
@command('')
def help(self):
# for the python console
return sorted(known_commands.keys())
param_descriptions = {
'privkey': 'Private key. Type \'?\' to get a prompt.',
'destination': 'Bitcoin address, contact or alias',
'address': 'Bitcoin address',
'seed': 'Seed phrase',
'txid': 'Transaction ID',
'pos': 'Position',
'height': 'Block height',
'tx': 'Serialized transaction (hexadecimal)',
'key': 'Variable name',
'pubkey': 'Public key',
'message': 'Clear text message. Use quotes if it contains spaces.',
'encrypted': 'Encrypted message',
'amount': 'Amount to be sent (in BTC). Type \'!\' to send the maximum available.',
'requested_amount': 'Requested amount (in BTC).',
'outputs': 'list of ["address", amount]',
'redeem_script': 'redeem script (hexadecimal)',
}
command_options = {
'password': ("-W", "Password"),
'new_password':(None, "New Password"),
'receiving': (None, "Show only receiving addresses"),
'change': (None, "Show only change addresses"),
'frozen': (None, "Show only frozen addresses"),
'unused': (None, "Show only unused addresses"),
'funded': (None, "Show only funded addresses"),
'balance': ("-b", "Show the balances of listed addresses"),
'labels': ("-l", "Show the labels of listed addresses"),
'nocheck': (None, "Do not verify aliases"),
'imax': (None, "Maximum number of inputs"),
'fee': ("-f", "Transaction fee (in BTC)"),
'from_addr': ("-F", "Source address (must be a wallet address; use sweep to spend from non-wallet address)."),
'change_addr': ("-c", "Change address. Default is a spare address, or the source address if it's not in the wallet"),
'nbits': (None, "Number of bits of entropy"),
'entropy': (None, "Custom entropy"),
'segwit': (None, "Create segwit seed"),
'language': ("-L", "Default language for wordlist"),
'privkey': (None, "Private key. Set to '?' to get a prompt."),
'unsigned': ("-u", "Do not sign transaction"),
'rbf': (None, "Replace-by-fee transaction"),
'locktime': (None, "Set locktime block number"),
'domain': ("-D", "List of addresses"),
'memo': ("-m", "Description of the request"),
'expiration': (None, "Time in seconds"),
'timeout': (None, "Timeout in seconds"),
'force': (None, "Create new address beyond gap limit, if no more addresses are available."),
'pending': (None, "Show only pending requests."),
'expired': (None, "Show only expired requests."),
'paid': (None, "Show only paid requests."),
}
# don't use floats because of rounding errors
from .transaction import tx_from_str
json_loads = lambda x: json.loads(x, parse_float=lambda x: str(Decimal(x)))
arg_types = {
'num': int,
'nbits': int,
'imax': int,
'entropy': int,
'tx': tx_from_str,
'pubkeys': json_loads,
'jsontx': json_loads,
'inputs': json_loads,
'outputs': json_loads,
'fee': lambda x: str(Decimal(x)) if x is not None else None,
'amount': lambda x: str(Decimal(x)) if x != '!' else '!',
'locktime': int,
}
config_variables = {
'addrequest': {
'requests_dir': 'directory where a bip70 file will be written.',
'ssl_privkey': 'Path to your SSL private key, needed to sign the request.',
'ssl_chain': 'Chain of SSL certificates, needed for signed requests. Put your certificate at the top and the root CA at the end',
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
},
'listrequests':{
'url_rewrite': 'Parameters passed to str.replace(), in order to create the r= part of bitcoin: URIs. Example: \"(\'file:///var/www/\',\'https://electrum.org/\')\"',
}
}
def set_default_subparser(self, name, args=None):
"""see http://stackoverflow.com/questions/5176691/argparse-how-to-specify-a-default-subcommand"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name)
argparse.ArgumentParser.set_default_subparser = set_default_subparser
# workaround https://bugs.python.org/issue23058
# see https://github.com/nickstenning/honcho/pull/121
def subparser_call(self, parser, namespace, values, option_string=None):
from argparse import ArgumentError, SUPPRESS, _UNRECOGNIZED_ARGS_ATTR
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
argparse._SubParsersAction.__call__ = subparser_call
def add_network_options(parser):
parser.add_argument("-1", "--oneserver", action="store_true", dest="oneserver", default=False, help="connect to one server only")
parser.add_argument("-s", "--server", dest="server", default=None, help="set server host:port:protocol, where protocol is either t (tcp) or s (ssl)")
parser.add_argument("-p", "--proxy", dest="proxy", default=None, help="set proxy [type:]host[:port], where type is socks4,socks5 or http")
def add_global_options(parser):
group = parser.add_argument_group('global options')
group.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show debugging information")
group.add_argument("-D", "--dir", dest="electrum_path", help="electrum directory")
group.add_argument("-P", "--portable", action="store_true", dest="portable", default=False, help="Use local 'electrum_data' directory")
group.add_argument("-w", "--wallet", dest="wallet_path", help="wallet path")
group.add_argument("--testnet", action="store_true", dest="testnet", default=False, help="Use Testnet")
def get_parser():
# create main parser
parser = argparse.ArgumentParser(
epilog="Run 'electrum help <command>' to see the help for a command")
add_global_options(parser)
subparsers = parser.add_subparsers(dest='cmd', metavar='<command>')
# gui
parser_gui = subparsers.add_parser('gui', description="Run Electrum's Graphical User Interface.", help="Run GUI (default)")
parser_gui.add_argument("url", nargs='?', default=None, help="bitcoin URI (or bip70 file)")
parser_gui.add_argument("-g", "--gui", dest="gui", help="select graphical user interface", choices=['qt', 'kivy', 'text', 'stdio'])
parser_gui.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
parser_gui.add_argument("-m", action="store_true", dest="hide_gui", default=False, help="hide GUI on startup")
parser_gui.add_argument("-L", "--lang", dest="language", default=None, help="default language used in GUI")
add_network_options(parser_gui)
add_global_options(parser_gui)
# daemon
parser_daemon = subparsers.add_parser('daemon', help="Run Daemon")
parser_daemon.add_argument("subcommand", choices=['start', 'status', 'stop', 'load_wallet', 'close_wallet'], nargs='?')
#parser_daemon.set_defaults(func=run_daemon)
add_network_options(parser_daemon)
add_global_options(parser_daemon)
# commands
for cmdname in sorted(known_commands.keys()):
cmd = known_commands[cmdname]
p = subparsers.add_parser(cmdname, help=cmd.help, description=cmd.description)
add_global_options(p)
if cmdname == 'restore':
p.add_argument("-o", "--offline", action="store_true", dest="offline", default=False, help="Run offline")
for optname, default in zip(cmd.options, cmd.defaults):
a, help = command_options[optname]
b = '--' + optname
action = "store_true" if type(default) is bool else 'store'
args = (a, b) if a else (b,)
if action == 'store':
_type = arg_types.get(optname, str)
p.add_argument(*args, dest=optname, action=action, default=default, help=help, type=_type)
else:
p.add_argument(*args, dest=optname, action=action, default=default, help=help)
for param in cmd.params:
h = param_descriptions.get(param, '')
_type = arg_types.get(param, str)
p.add_argument(param, help=h, type=_type)
cvh = config_variables.get(cmdname)
if cvh:
group = p.add_argument_group('configuration variables', '(set with setconfig/getconfig)')
for k, v in cvh.items():
group.add_argument(k, nargs='?', help=v)
# 'gui' is the default command
parser.set_default_subparser('gui')
return parser
| 1 | 12,188 | Shouldn't this use `'n'` instead? | spesmilo-electrum | py |
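A standalone illustration (not part of Electrum; the amounts are made up) of why the command parser above routes JSON floats through Decimal via parse_float instead of letting them become binary floats:
```python
import json
from decimal import Decimal

raw = '{"amount": 0.1, "fee": 0.2}'

# Default parsing: the literals become IEEE-754 floats and pick up rounding error.
as_floats = json.loads(raw)
print(as_floats["amount"] + as_floats["fee"])  # 0.30000000000000004

# The pattern used above: keep each literal as an exact Decimal-derived string.
json_loads = lambda s: json.loads(s, parse_float=lambda x: str(Decimal(x)))
as_strings = json_loads(raw)
print(Decimal(as_strings["amount"]) + Decimal(as_strings["fee"]))  # 0.3
```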
@@ -451,10 +451,10 @@ func (t *timerQueueProcessorBase) getTimerTaskType(
switch taskType {
case enumsspb.TASK_TYPE_USER_TIMER:
return "UserTimer"
- case enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT:
- return "ActivityTimeout"
- case enumsspb.TASK_TYPE_DECISION_TIMEOUT:
- return "DecisionTimeout"
+ case enumsspb.TASK_TYPE_ACTIVITY_TASK_TIMEOUT:
+ return "ActivityTaskTimeout"
+ case enumsspb.TASK_TYPE_WORKFLOW_TASK_TIMEOUT:
+ return "WorkflowTaskTimeout"
case enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT:
return "WorkflowRunTimeout"
case enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT: | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/gogo/protobuf/types"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/persistenceblobs/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/collection"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/quotas"
"go.temporal.io/server/common/service/dynamicconfig"
)
var (
emptyTime = time.Time{}
loadNamespaceEntryForTimerTaskRetryDelay = 100 * time.Millisecond
loadTimerTaskThrottleRetryDelay = 5 * time.Second
)
type (
timerQueueProcessorBase struct {
scope int
shard ShardContext
historyService *historyEngineImpl
cache *historyCache
executionManager persistence.ExecutionManager
status int32
shutdownWG sync.WaitGroup
shutdownCh chan struct{}
config *Config
logger log.Logger
metricsClient metrics.Client
metricsScope metrics.Scope
timerFiredCount uint64
timerProcessor timerProcessor
timerQueueAckMgr timerQueueAckMgr
timerGate TimerGate
timeSource clock.TimeSource
rateLimiter quotas.Limiter
retryPolicy backoff.RetryPolicy
lastPollTime time.Time
taskProcessor *taskProcessor // TODO: deprecate task processor, in favor of queueTaskProcessor
queueTaskProcessor queueTaskProcessor
redispatchQueue collection.Queue
queueTaskInitializer queueTaskInitializer
// timer notification
newTimerCh chan struct{}
newTimeLock sync.Mutex
newTime time.Time
}
)
func newTimerQueueProcessorBase(
scope int,
shard ShardContext,
historyService *historyEngineImpl,
timerProcessor timerProcessor,
queueTaskProcessor queueTaskProcessor,
timerQueueAckMgr timerQueueAckMgr,
redispatchQueue collection.Queue,
queueTaskInitializer queueTaskInitializer,
timerGate TimerGate,
maxPollRPS dynamicconfig.IntPropertyFn,
logger log.Logger,
metricsScope metrics.Scope,
) *timerQueueProcessorBase {
logger = logger.WithTags(tag.ComponentTimerQueue)
config := shard.GetConfig()
var taskProcessor *taskProcessor
if !config.TimerProcessorEnablePriorityTaskProcessor() {
options := taskProcessorOptions{
workerCount: config.TimerTaskWorkerCount(),
queueSize: config.TimerTaskWorkerCount() * config.TimerTaskBatchSize(),
}
taskProcessor = newTaskProcessor(options, shard, historyService.historyCache, logger)
}
base := &timerQueueProcessorBase{
scope: scope,
shard: shard,
historyService: historyService,
timerProcessor: timerProcessor,
cache: historyService.historyCache,
executionManager: shard.GetExecutionManager(),
status: common.DaemonStatusInitialized,
shutdownCh: make(chan struct{}),
config: config,
logger: logger,
metricsClient: historyService.metricsClient,
metricsScope: metricsScope,
timerQueueAckMgr: timerQueueAckMgr,
timerGate: timerGate,
timeSource: shard.GetTimeSource(),
newTimerCh: make(chan struct{}, 1),
lastPollTime: time.Time{},
taskProcessor: taskProcessor,
queueTaskProcessor: queueTaskProcessor,
redispatchQueue: redispatchQueue,
queueTaskInitializer: queueTaskInitializer,
rateLimiter: quotas.NewDynamicRateLimiter(
func() float64 {
return float64(maxPollRPS())
},
),
retryPolicy: common.CreatePersistanceRetryPolicy(),
}
return base
}
func (t *timerQueueProcessorBase) Start() {
if !atomic.CompareAndSwapInt32(&t.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) {
return
}
if t.taskProcessor != nil {
t.taskProcessor.start()
}
t.shutdownWG.Add(1)
// notify a initial scan
t.notifyNewTimer(time.Time{})
go t.processorPump()
t.logger.Info("Timer queue processor started.")
}
func (t *timerQueueProcessorBase) Stop() {
if !atomic.CompareAndSwapInt32(&t.status, common.DaemonStatusStarted, common.DaemonStatusStopped) {
return
}
t.timerGate.Close()
close(t.shutdownCh)
t.retryTasks()
if success := common.AwaitWaitGroup(&t.shutdownWG, time.Minute); !success {
t.logger.Warn("Timer queue processor timedout on shutdown.")
}
if t.taskProcessor != nil {
t.taskProcessor.stop()
}
t.logger.Info("Timer queue processor stopped.")
}
func (t *timerQueueProcessorBase) processorPump() {
defer t.shutdownWG.Done()
RetryProcessor:
for {
select {
case <-t.shutdownCh:
break RetryProcessor
default:
err := t.internalProcessor()
if err != nil {
t.logger.Error("processor pump failed with error", tag.Error(err))
}
}
}
t.logger.Info("Timer queue processor pump shutting down.")
t.logger.Info("Timer processor exiting.")
}
// notifyNewTimers - Notify the processor about the arrival of new timer events.
// This should be called each time new timer events arrive, otherwise timers may fire unexpectedly.
func (t *timerQueueProcessorBase) notifyNewTimers(
timerTasks []persistence.Task,
) {
if len(timerTasks) == 0 {
return
}
isActive := t.scope == metrics.TimerActiveQueueProcessorScope
newTime := timerTasks[0].GetVisibilityTimestamp()
for _, task := range timerTasks {
ts := task.GetVisibilityTimestamp()
if ts.Before(newTime) {
newTime = ts
}
scopeIdx := getTimerTaskMetricScope(task.GetType(), isActive)
t.metricsClient.IncCounter(scopeIdx, metrics.NewTimerCounter)
}
t.notifyNewTimer(newTime)
}
func (t *timerQueueProcessorBase) notifyNewTimer(
newTime time.Time,
) {
t.newTimeLock.Lock()
defer t.newTimeLock.Unlock()
if t.newTime.IsZero() || newTime.Before(t.newTime) {
t.newTime = newTime
select {
case t.newTimerCh <- struct{}{}:
// Notified about new time.
default:
// Channel "full" -> drop and move on, this will happen only if service is in high load.
}
}
}
func (t *timerQueueProcessorBase) internalProcessor() error {
pollTimer := time.NewTimer(backoff.JitDuration(
t.config.TimerProcessorMaxPollInterval(),
t.config.TimerProcessorMaxPollIntervalJitterCoefficient(),
))
defer pollTimer.Stop()
updateAckTimer := time.NewTimer(backoff.JitDuration(
t.config.TimerProcessorUpdateAckInterval(),
t.config.TimerProcessorUpdateAckIntervalJitterCoefficient(),
))
defer updateAckTimer.Stop()
redispatchTimer := time.NewTimer(backoff.JitDuration(
t.config.TimerProcessorRedispatchInterval(),
t.config.TimerProcessorRedispatchIntervalJitterCoefficient(),
))
defer redispatchTimer.Stop()
for {
// Wait until one of four things occurs:
// 1. we get notified of a new message
// 2. the timer gate fires (message scheduled to be delivered)
// 3. shutdown was triggered.
// 4. updating ack level
//
select {
case <-t.shutdownCh:
t.logger.Debug("Timer queue processor pump shutting down.")
return nil
case <-t.timerQueueAckMgr.getFinishedChan():
// timer queue ack manager indicate that all task scanned
// are finished and no more tasks
// use a separate goroutine since the caller hold the shutdownWG
go t.Stop()
return nil
case <-t.timerGate.FireChan():
if !t.isPriorityTaskProcessorEnabled() || t.redispatchQueue.Len() <= t.config.TimerProcessorMaxRedispatchQueueSize() {
lookAheadTimer, err := t.readAndFanoutTimerTasks()
if err != nil {
return err
}
if lookAheadTimer != nil {
visTs, err := types.TimestampFromProto(lookAheadTimer.VisibilityTime)
if err != nil {
return err
}
t.timerGate.Update(visTs)
}
continue
}
// has too many pending tasks in re-dispatch queue, block loading tasks from persistence
t.redispatchTasks()
// re-enqueue the event to see if we need keep re-dispatching or load new tasks from persistence
t.notifyNewTimer(time.Time{})
case <-pollTimer.C:
pollTimer.Reset(backoff.JitDuration(
t.config.TimerProcessorMaxPollInterval(),
t.config.TimerProcessorMaxPollIntervalJitterCoefficient(),
))
if t.lastPollTime.Add(t.config.TimerProcessorMaxPollInterval()).Before(t.timeSource.Now()) {
lookAheadTimer, err := t.readAndFanoutTimerTasks()
if err != nil {
return err
}
if lookAheadTimer != nil {
visTs, err := types.TimestampFromProto(lookAheadTimer.VisibilityTime)
if err != nil {
return err
}
t.timerGate.Update(visTs)
}
}
case <-updateAckTimer.C:
updateAckTimer.Reset(backoff.JitDuration(
t.config.TimerProcessorUpdateAckInterval(),
t.config.TimerProcessorUpdateAckIntervalJitterCoefficient(),
))
if err := t.timerQueueAckMgr.updateAckLevel(); err == ErrShardClosed {
// shard is closed, shutdown timerQProcessor and bail out
go t.Stop()
return err
}
case <-t.newTimerCh:
t.newTimeLock.Lock()
newTime := t.newTime
t.newTime = emptyTime
t.newTimeLock.Unlock()
// New Timer has arrived.
t.metricsClient.IncCounter(t.scope, metrics.NewTimerNotifyCounter)
t.timerGate.Update(newTime)
case <-redispatchTimer.C:
redispatchTimer.Reset(backoff.JitDuration(
t.config.TimerProcessorRedispatchInterval(),
t.config.TimerProcessorRedispatchIntervalJitterCoefficient(),
))
t.redispatchTasks()
}
}
}
func (t *timerQueueProcessorBase) readAndFanoutTimerTasks() (*persistenceblobs.TimerTaskInfo, error) {
ctx, cancel := context.WithTimeout(context.Background(), loadTimerTaskThrottleRetryDelay)
if err := t.rateLimiter.Wait(ctx); err != nil {
cancel()
t.notifyNewTimer(time.Time{}) // re-enqueue the event
return nil, nil
}
cancel()
t.lastPollTime = t.timeSource.Now()
timerTasks, lookAheadTask, moreTasks, err := t.timerQueueAckMgr.readTimerTasks()
if err != nil {
t.notifyNewTimer(time.Time{}) // re-enqueue the event
return nil, err
}
for _, task := range timerTasks {
if submitted := t.submitTask(task); !submitted {
return nil, nil
}
select {
case <-t.shutdownCh:
return nil, nil
default:
}
}
if !moreTasks {
return lookAheadTask, nil
}
t.notifyNewTimer(time.Time{}) // re-enqueue the event
return nil, nil
}
func (t *timerQueueProcessorBase) submitTask(
taskInfo queueTaskInfo,
) bool {
if !t.isPriorityTaskProcessorEnabled() {
return t.taskProcessor.addTask(
newTaskInfo(
t.timerProcessor,
taskInfo,
initializeLoggerForTask(t.shard.GetShardID(), taskInfo, t.logger),
),
)
}
timeQueueTask := t.queueTaskInitializer(taskInfo)
submitted, err := t.queueTaskProcessor.TrySubmit(timeQueueTask)
if err != nil {
return false
}
if !submitted {
t.redispatchQueue.Add(timeQueueTask)
}
return true
}
func (t *timerQueueProcessorBase) redispatchTasks() {
if !t.isPriorityTaskProcessorEnabled() {
return
}
redispatchQueueTasks(
t.redispatchQueue,
t.queueTaskProcessor,
t.logger,
t.metricsScope,
t.shutdownCh,
)
}
func (t *timerQueueProcessorBase) retryTasks() {
if t.taskProcessor != nil {
t.taskProcessor.retryTasks()
}
}
func (t *timerQueueProcessorBase) complete(
timerTask *persistenceblobs.TimerTaskInfo,
) {
t.timerQueueAckMgr.completeTimerTask(timerTask)
atomic.AddUint64(&t.timerFiredCount, 1)
}
func (t *timerQueueProcessorBase) isPriorityTaskProcessorEnabled() bool {
return t.taskProcessor == nil
}
func (t *timerQueueProcessorBase) getTimerFiredCount() uint64 {
return atomic.LoadUint64(&t.timerFiredCount)
}
//nolint:unused
func (t *timerQueueProcessorBase) getTimerTaskType(
taskType enumsspb.TaskType,
) string {
switch taskType {
case enumsspb.TASK_TYPE_USER_TIMER:
return "UserTimer"
case enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT:
return "ActivityTimeout"
case enumsspb.TASK_TYPE_DECISION_TIMEOUT:
return "DecisionTimeout"
case enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT:
return "WorkflowRunTimeout"
case enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT:
return "DeleteHistoryEvent"
case enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER:
return "ActivityRetryTimerTask"
case enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER:
return "WorkflowBackoffTimerTask"
}
return "UnKnown"
}
func getTimerTaskMetricScope(
taskType enumsspb.TaskType,
isActive bool,
) int {
switch taskType {
case enumsspb.TASK_TYPE_DECISION_TIMEOUT:
if isActive {
return metrics.TimerActiveTaskDecisionTimeoutScope
}
return metrics.TimerStandbyTaskDecisionTimeoutScope
case enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT:
if isActive {
return metrics.TimerActiveTaskActivityTimeoutScope
}
return metrics.TimerStandbyTaskActivityTimeoutScope
case enumsspb.TASK_TYPE_USER_TIMER:
if isActive {
return metrics.TimerActiveTaskUserTimerScope
}
return metrics.TimerStandbyTaskUserTimerScope
case enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT:
if isActive {
return metrics.TimerActiveTaskWorkflowTimeoutScope
}
return metrics.TimerStandbyTaskWorkflowTimeoutScope
case enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT:
if isActive {
return metrics.TimerActiveTaskDeleteHistoryEventScope
}
return metrics.TimerStandbyTaskDeleteHistoryEventScope
case enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER:
if isActive {
return metrics.TimerActiveTaskActivityRetryTimerScope
}
return metrics.TimerStandbyTaskActivityRetryTimerScope
case enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER:
if isActive {
return metrics.TimerActiveTaskWorkflowBackoffTimerScope
}
return metrics.TimerStandbyTaskWorkflowBackoffTimerScope
default:
if isActive {
return metrics.TimerActiveQueueProcessorScope
}
return metrics.TimerStandbyQueueProcessorScope
}
}
| 1 | 9,852 | Revert to 'TASK_TYPE_ACTIVITY_TIMEOUT'. | temporalio-temporal | go |
@@ -105,6 +105,9 @@ Engine::Engine(core::Engine *engine) : m_Engine(engine) {}
template void Engine::Get<T>(const std::string &, std::vector<T> &, \
const Mode); \
\
+ template void Engine::GetBlock<T>(Variable<T>, T **, const Mode); \
+ template void Engine::GetBlock<T>(const std::string &, T **, const Mode); \
+ \
template std::map<size_t, std::vector<typename Variable<T>::Info>> \
Engine::AllStepsBlocksInfo(const Variable<T> variable) const; \
\ | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* Engine.h :
*
* Created on: Jun 4, 2018
* Author: William F Godoy [email protected]
*/
#include "Engine.h"
#include "Engine.tcc"
#include "adios2/core/Engine.h"
#include "adios2/helper/adiosFunctions.h"
namespace adios2
{
Engine::operator bool() const noexcept
{
if (m_Engine == nullptr)
{
return false;
}
return *m_Engine ? true : false;
}
std::string Engine::Name() const
{
helper::CheckForNullptr(m_Engine, "in call to Engine::Name");
return m_Engine->m_Name;
}
std::string Engine::Type() const
{
helper::CheckForNullptr(m_Engine, "in call to Engine::Type");
return m_Engine->m_EngineType;
}
StepStatus Engine::BeginStep()
{
helper::CheckForNullptr(m_Engine, "in call to Engine::BeginStep");
return m_Engine->BeginStep();
}
StepStatus Engine::BeginStep(const StepMode mode, const float timeoutSeconds)
{
helper::CheckForNullptr(
m_Engine, "in call to Engine::BeginStep(const StepMode, const float)");
return m_Engine->BeginStep(mode, timeoutSeconds);
}
size_t Engine::CurrentStep() const
{
helper::CheckForNullptr(m_Engine, "in call to Engine::CurrentStep");
return m_Engine->CurrentStep();
}
void Engine::PerformPuts()
{
helper::CheckForNullptr(m_Engine, "in call to Engine::PerformPuts");
m_Engine->PerformPuts();
}
void Engine::PerformGets()
{
helper::CheckForNullptr(m_Engine, "in call to Engine::PerformGets");
m_Engine->PerformGets();
}
void Engine::EndStep()
{
helper::CheckForNullptr(m_Engine, "in call to Engine::EndStep");
m_Engine->EndStep();
}
void Engine::Flush(const int transportIndex)
{
helper::CheckForNullptr(m_Engine, "in call to Engine::Flush");
m_Engine->Flush(transportIndex);
}
void Engine::Close(const int transportIndex)
{
helper::CheckForNullptr(m_Engine, "in call to Engine::Close");
m_Engine->Close(transportIndex);
}
Engine::Engine(core::Engine *engine) : m_Engine(engine) {}
#define declare_template_instantiation(T) \
template void Engine::Put<T>(Variable<T>, const T *, const Mode); \
template void Engine::Put<T>(const std::string &, const T *, const Mode); \
template void Engine::Put<T>(Variable<T>, const T &, const Mode); \
template void Engine::Put<T>(const std::string &, const T &, const Mode); \
\
template void Engine::Get<T>(Variable<T>, T *, const Mode); \
template void Engine::Get<T>(const std::string &, T *, const Mode); \
template void Engine::Get<T>(Variable<T>, T &, const Mode); \
template void Engine::Get<T>(const std::string &, T &, const Mode); \
\
template void Engine::Get<T>(Variable<T>, std::vector<T> &, const Mode); \
template void Engine::Get<T>(const std::string &, std::vector<T> &, \
const Mode); \
\
template std::map<size_t, std::vector<typename Variable<T>::Info>> \
Engine::AllStepsBlocksInfo(const Variable<T> variable) const; \
\
template std::vector<typename Variable<T>::Info> Engine::BlocksInfo( \
const Variable<T> variable, const size_t step) const;
ADIOS2_FOREACH_TYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
} // end namespace adios2
| 1 | 12,371 | Shouldn't we prefer passing a pointer by reference T*&, since these are C++ bindings? | ornladios-ADIOS2 | cpp |
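To make the question concrete, here is a minimal self-contained sketch (not the ADIOS2 API; the function and variable names are invented) contrasting a `T **` out-parameter with the `T *&` form the comment suggests. The behavior is the same; the difference is that the caller no longer has to take the address of its pointer:
```cpp
#include <iostream>

static double storage[3] = {1.0, 2.0, 3.0};

// Double-pointer style, as in the instantiations above.
void GetBlockPtrPtr(double **data) { *data = storage; }

// Pointer-by-reference style, arguably more idiomatic for a C++ binding.
void GetBlockPtrRef(double *&data) { data = storage; }

int main()
{
    double *a = nullptr;
    double *b = nullptr;
    GetBlockPtrPtr(&a); // caller must pass the address of its pointer
    GetBlockPtrRef(b);  // caller passes the pointer variable directly
    std::cout << a[0] << " " << b[1] << "\n"; // prints: 1 2
    return 0;
}
```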
@@ -146,9 +146,15 @@ func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *secv1alpha1.C
appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup)
appliedToGroupNamesSet.Insert(atGroup)
}
+ cgExists := len(ingressRule.SourceGroups) > 0
+ iFromPeers := n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists, cgExists)
+ // If ClusterGroups are set in Rule, then create AddressGroups corresponding to the CG and append to From Peers.
+ ag, ipb := n.processRefCGs(ingressRule.SourceGroups)
+ iFromPeers.IPBlocks = append(iFromPeers.IPBlocks, ipb...)
+ iFromPeers.AddressGroups = append(iFromPeers.AddressGroups, ag...)
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionIn,
- From: *n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists),
+ From: iFromPeers,
Services: services,
Action: ingressRule.Action,
Priority: int32(idx), | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
antreatypes "github.com/vmware-tanzu/antrea/pkg/controller/types"
)
// addCNP receives ClusterNetworkPolicy ADD events and creates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) addCNP(obj interface{}) {
defer n.heartbeat("addCNP")
cnp := obj.(*secv1alpha1.ClusterNetworkPolicy)
klog.Infof("Processing ClusterNetworkPolicy %s ADD event", cnp.Name)
// Create an internal NetworkPolicy object corresponding to this
// ClusterNetworkPolicy and enqueue task to internal NetworkPolicy Workqueue.
internalNP := n.processClusterNetworkPolicy(cnp)
klog.V(2).Infof("Creating new internal NetworkPolicy %s for %s", internalNP.Name, internalNP.SourceRef.ToString())
n.internalNetworkPolicyStore.Create(internalNP)
key := internalNetworkPolicyKeyFunc(cnp)
n.enqueueInternalNetworkPolicy(key)
}
// updateCNP receives ClusterNetworkPolicy UPDATE events and updates resources
// which can be consumed by agents to configure corresponding rules on the Nodes.
func (n *NetworkPolicyController) updateCNP(old, cur interface{}) {
defer n.heartbeat("updateCNP")
curCNP := cur.(*secv1alpha1.ClusterNetworkPolicy)
klog.Infof("Processing ClusterNetworkPolicy %s UPDATE event", curCNP.Name)
// Update an internal NetworkPolicy, corresponding to this NetworkPolicy and
// enqueue task to internal NetworkPolicy Workqueue.
curInternalNP := n.processClusterNetworkPolicy(curCNP)
klog.V(2).Infof("Updating existing internal NetworkPolicy %s for %s", curInternalNP.Name, curInternalNP.SourceRef.ToString())
// Retrieve old secv1alpha1.NetworkPolicy object.
oldCNP := old.(*secv1alpha1.ClusterNetworkPolicy)
// Old and current NetworkPolicy share the same key.
key := internalNetworkPolicyKeyFunc(oldCNP)
// Lock access to internal NetworkPolicy store such that concurrent access
// to an internal NetworkPolicy is not allowed. This will avoid the
// case in which an Update to an internal NetworkPolicy object may
// cause the SpanMeta member to be overridden with stale SpanMeta members
// from an older internal NetworkPolicy.
n.internalNetworkPolicyMutex.Lock()
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
// Must preserve old internal NetworkPolicy Span.
curInternalNP.SpanMeta = oldInternalNP.SpanMeta
n.internalNetworkPolicyStore.Update(curInternalNP)
// Unlock the internal NetworkPolicy store.
n.internalNetworkPolicyMutex.Unlock()
// Enqueue addressGroup keys to update their Node span.
for _, rule := range curInternalNP.Rules {
for _, addrGroupName := range rule.From.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
for _, addrGroupName := range rule.To.AddressGroups {
n.enqueueAddressGroup(addrGroupName)
}
}
n.enqueueInternalNetworkPolicy(key)
for _, atg := range oldInternalNP.AppliedToGroups {
// Delete the old AppliedToGroup object if it is not referenced
// by any internal NetworkPolicy.
n.deleteDereferencedAppliedToGroup(atg)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// deleteCNP receives ClusterNetworkPolicy DELETED events and deletes resources
// which can be consumed by agents to delete corresponding rules on the Nodes.
func (n *NetworkPolicyController) deleteCNP(old interface{}) {
cnp, ok := old.(*secv1alpha1.ClusterNetworkPolicy)
if !ok {
tombstone, ok := old.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Error decoding object when deleting ClusterNetworkPolicy, invalid type: %v", old)
return
}
cnp, ok = tombstone.Obj.(*secv1alpha1.ClusterNetworkPolicy)
if !ok {
klog.Errorf("Error decoding object tombstone when deleting ClusterNetworkPolicy, invalid type: %v", tombstone.Obj)
return
}
}
defer n.heartbeat("deleteCNP")
klog.Infof("Processing ClusterNetworkPolicy %s DELETE event", cnp.Name)
key := internalNetworkPolicyKeyFunc(cnp)
oldInternalNPObj, _, _ := n.internalNetworkPolicyStore.Get(key)
oldInternalNP := oldInternalNPObj.(*antreatypes.NetworkPolicy)
klog.V(2).Infof("Deleting internal NetworkPolicy %s for %s", oldInternalNP.Name, oldInternalNP.SourceRef.ToString())
err := n.internalNetworkPolicyStore.Delete(key)
if err != nil {
klog.Errorf("Error deleting internal NetworkPolicy during NetworkPolicy %s delete: %v", cnp.Name, err)
return
}
for _, atg := range oldInternalNP.AppliedToGroups {
n.deleteDereferencedAppliedToGroup(atg)
}
n.deleteDereferencedAddressGroups(oldInternalNP)
}
// processClusterNetworkPolicy creates an internal NetworkPolicy instance
// corresponding to the secv1alpha1.ClusterNetworkPolicy object. This method
// does not commit the internal NetworkPolicy to the store; instead it returns
// an instance to the caller, which either stores it as a new object (for an
// ADD event) or modifies and stores the updated instance (for an UPDATE event).
func (n *NetworkPolicyController) processClusterNetworkPolicy(cnp *secv1alpha1.ClusterNetworkPolicy) *antreatypes.NetworkPolicy {
appliedToPerRule := len(cnp.Spec.AppliedTo) == 0
// appliedToGroupNames tracks all distinct appliedToGroups referred to by the ClusterNetworkPolicy,
// either in the spec section or in ingress/egress rules.
// The span calculation and stale appliedToGroup cleanup logic would work seamlessly for both cases.
appliedToGroupNamesSet := sets.String{}
// Create AppliedToGroup for each AppliedTo present in ClusterNetworkPolicy spec.
for _, at := range cnp.Spec.AppliedTo {
appliedToGroupNamesSet.Insert(n.createAppliedToGroup(
"", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector))
}
rules := make([]controlplane.NetworkPolicyRule, 0, len(cnp.Spec.Ingress)+len(cnp.Spec.Egress))
// Compute NetworkPolicyRule for Ingress Rule.
for idx, ingressRule := range cnp.Spec.Ingress {
// Set default action to ALLOW to allow traffic.
services, namedPortExists := toAntreaServicesForCRD(ingressRule.Ports)
var appliedToGroupNamesForRule []string
// Create AppliedToGroup for each AppliedTo present in the ingress rule.
for _, at := range ingressRule.AppliedTo {
atGroup := n.createAppliedToGroup("", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector)
appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup)
appliedToGroupNamesSet.Insert(atGroup)
}
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionIn,
From: *n.toAntreaPeerForCRD(ingressRule.From, cnp, controlplane.DirectionIn, namedPortExists),
Services: services,
Action: ingressRule.Action,
Priority: int32(idx),
EnableLogging: ingressRule.EnableLogging,
AppliedToGroups: appliedToGroupNamesForRule,
})
}
// Compute NetworkPolicyRule for Egress Rule.
for idx, egressRule := range cnp.Spec.Egress {
// Set default action to ALLOW to allow traffic.
services, namedPortExists := toAntreaServicesForCRD(egressRule.Ports)
var appliedToGroupNamesForRule []string
// Create AppliedToGroup for each AppliedTo present in the ingress rule.
for _, at := range egressRule.AppliedTo {
atGroup := n.createAppliedToGroup("", at.PodSelector, at.NamespaceSelector, at.ExternalEntitySelector)
appliedToGroupNamesForRule = append(appliedToGroupNamesForRule, atGroup)
appliedToGroupNamesSet.Insert(atGroup)
}
rules = append(rules, controlplane.NetworkPolicyRule{
Direction: controlplane.DirectionOut,
To: *n.toAntreaPeerForCRD(egressRule.To, cnp, controlplane.DirectionOut, namedPortExists),
Services: services,
Action: egressRule.Action,
Priority: int32(idx),
EnableLogging: egressRule.EnableLogging,
AppliedToGroups: appliedToGroupNamesForRule,
})
}
tierPriority := n.getTierPriority(cnp.Spec.Tier)
internalNetworkPolicy := &antreatypes.NetworkPolicy{
Name: internalNetworkPolicyKeyFunc(cnp),
Generation: cnp.Generation,
SourceRef: &controlplane.NetworkPolicyReference{
Type: controlplane.AntreaClusterNetworkPolicy,
Name: cnp.Name,
UID: cnp.UID,
},
UID: cnp.UID,
AppliedToGroups: appliedToGroupNamesSet.List(),
Rules: rules,
Priority: &cnp.Spec.Priority,
TierPriority: &tierPriority,
AppliedToPerRule: appliedToPerRule,
}
return internalNetworkPolicy
}
| 1 | 31,042 | nit: Personally I feel that it's cleaner to let `toAntreaPeerForCRD` take `ingressRule` as a param instead of `ingressRule.From`. It can call `processRefCGs` inside the function and do the IPBlock/AddressGroup union within the function itself. It might not be worth the refactor though. | antrea-io-antrea | go |
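A self-contained sketch of the refactor the comment describes: the peer-building helper takes the whole rule and performs the ClusterGroup resolution and union itself, so the caller makes one call per rule. Every type and helper below is a simplified stand-in, not the real Antrea code:
```go
package main

import "fmt"

type rule struct {
	From         []string // stand-in for the rule's From peers
	SourceGroups []string // stand-in for referenced ClusterGroups
}

type peer struct {
	AddressGroups []string
	IPBlocks      []string
}

// resolveGroups stands in for processRefCGs: it turns ClusterGroup references
// into AddressGroup names and IPBlocks.
func resolveGroups(groups []string) (addressGroups, ipBlocks []string) {
	for _, g := range groups {
		addressGroups = append(addressGroups, "addressGroup-for-"+g)
	}
	return addressGroups, ipBlocks
}

// peerForRule stands in for a toAntreaPeerForCRD variant that accepts the
// whole rule and does the union internally.
func peerForRule(r rule) peer {
	p := peer{AddressGroups: append([]string{}, r.From...)}
	ag, ipb := resolveGroups(r.SourceGroups)
	p.AddressGroups = append(p.AddressGroups, ag...)
	p.IPBlocks = append(p.IPBlocks, ipb...)
	return p
}

func main() {
	r := rule{From: []string{"from-selector"}, SourceGroups: []string{"cg-1"}}
	// The caller now needs only one call per rule.
	fmt.Printf("%+v\n", peerForRule(r))
}
```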
@@ -98,6 +98,7 @@ func updateSPCVersion(name string) error {
return err
}
spcObj.VersionDetails.Desired = upgradeVersion
+ spcObj.VersionDetails.Status.State = apis.ReconcilePending
_, err = client.Update(spcObj)
if err != nil {
return err | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
utask "github.com/openebs/maya/pkg/apis/openebs.io/upgrade/v1alpha1"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
errors "github.com/openebs/maya/pkg/errors/v1alpha1"
spc "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
// to verify that no two csp are on the same node
func verifyCSPNodeName(cspList *apis.CStorPoolList) error {
nodeMap := map[string]bool{}
for _, cspObj := range cspList.Items {
nodeName := cspObj.Labels[string(apis.HostNameCPK)]
if nodeMap[nodeName] {
return errors.Errorf("more than one csp on %s node."+
" please make sure all csp are on different nodes", nodeName)
}
nodeMap[nodeName] = true
}
return nil
}
func spcUpgrade(spcName, openebsNamespace string) (*utask.UpgradeTask, error) {
spcLabel := "openebs.io/storage-pool-claim=" + spcName
cspList, err := cspClient.List(metav1.ListOptions{
LabelSelector: spcLabel,
})
if err != nil {
return nil, errors.Wrapf(err, "failed to list csp for spc %s", spcName)
}
if len(cspList.Items) == 0 {
return nil, errors.Errorf("no csp found for spc %s: no csp found", spcName)
}
err = waitForSPCCurrentVersion(spcName)
if err != nil {
return nil, err
}
err = verifyCSPNodeName(cspList)
if err != nil {
return nil, err
}
for _, cspObj := range cspList.Items {
if cspObj.Name == "" {
return nil, errors.Errorf("missing csp name")
}
utaskObj, err := cspUpgrade(cspObj.Name, openebsNamespace)
if err != nil {
return utaskObj, err
}
if utaskObj != nil {
utaskObj.Status.Phase = utask.UpgradeSuccess
utaskObj.Status.CompletedTime = metav1.Now()
_, uerr := utaskClient.WithNamespace(openebsNamespace).
Update(utaskObj)
if uerr != nil && isENVPresent {
return nil, uerr
}
}
}
err = updateSPCVersion(spcName)
if err != nil {
return nil, err
}
err = verifySPCVersionReconcile(spcName)
if err != nil {
return nil, err
}
klog.Infof("Upgrade Successful for spc %s", spcName)
return nil, nil
}
func updateSPCVersion(name string) error {
client := spc.NewKubeClient()
spcObj, err := client.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
spcObj.VersionDetails.Desired = upgradeVersion
_, err = client.Update(spcObj)
if err != nil {
return err
}
return nil
}
func waitForSPCCurrentVersion(name string) error {
klog.Infof("Waiting for spc current version to get populated.")
client := spc.NewKubeClient()
spcObj, err := client.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
// waiting for old objects to get populated with new fields
for spcObj.VersionDetails.Current == "" {
// Sleep equal to the default sync time
time.Sleep(30 * time.Second)
spcObj, err = client.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
}
return nil
}
func verifySPCVersionReconcile(name string) error {
klog.Infof("Verifying the reconciliation of version.")
client := spc.NewKubeClient()
spcObj, err := client.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
// waiting for the current version to be equal to desired version
for spcObj.VersionDetails.Current != spcObj.VersionDetails.Desired {
// Sleep equal to the default sync time
time.Sleep(30 * time.Second)
spcObj, err = client.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
}
return nil
}
| 1 | 17,493 | Pending has been set only for the SPC. Why? This needs to be done for the other CRs as well, right? | openebs-maya | go |
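A minimal stand-in (not the actual Maya types, clients, or constants) for the pattern the comment asks to apply consistently: whenever a resource's desired version is bumped, its version status is reset to pending so the upgrade is treated as outstanding, for every CR rather than only the SPC:
```go
package main

import "fmt"

// versionDetails is a simplified stand-in for the VersionDetails carried by
// each OpenEBS CR (SPC, CSP, and so on).
type versionDetails struct {
	Desired string
	Current string
	Status  struct{ State string }
}

const reconcilePending = "ReconcilePending" // stand-in for apis.ReconcilePending

// markForUpgrade applies the same two-field update to any CR's version details.
func markForUpgrade(vd *versionDetails, upgradeVersion string) {
	vd.Desired = upgradeVersion
	vd.Status.State = reconcilePending
}

func main() {
	spc := versionDetails{Current: "1.1.0"}
	markForUpgrade(&spc, "1.2.0")
	fmt.Printf("%+v\n", spc)
}
```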
@@ -61,11 +61,13 @@ class VimeoProvider extends BaseVideoProvider
$box = $this->getBoxHelperProperties($media, $format, $options);
$params = array(
- 'src' => http_build_query($player_parameters),
- 'id' => $player_parameters['js_swf_id'],
- 'frameborder' => isset($options['frameborder']) ? $options['frameborder'] : 0,
- 'width' => $box->getWidth(),
- 'height' => $box->getHeight(),
+ 'src' => http_build_query($player_parameters),
+ 'id' => $player_parameters['js_swf_id'],
+ 'frameborder' => isset($options['frameborder']) ? $options['frameborder'] : 0,
+ 'width' => $box->getWidth(),
+ 'height' => $box->getHeight(),
+ 'class' => isset($options['class']) ? $options['class'] : '',
+ 'allow_fullscreen' => isset($options['allowfullscreen']) ? true : false,
);
return $params; | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Provider;
use Sonata\CoreBundle\Model\Metadata;
use Sonata\MediaBundle\Model\MediaInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
class VimeoProvider extends BaseVideoProvider
{
/**
* {@inheritdoc}
*/
public function getHelperProperties(MediaInterface $media, $format, $options = array())
{
// documentation : http://vimeo.com/api/docs/moogaloop
$defaults = array(
// (optional) Flash Player version of app. Defaults to 9 .NEW!
// 10 - New Moogaloop. 9 - Old Moogaloop without newest features.
'fp_version' => 10,
// (optional) Enable fullscreen capability. Defaults to true.
'fullscreen' => true,
            // (optional) Show the title on the video. Defaults to true.
'title' => true,
            // (optional) Show the byline on the video. Defaults to true.
'byline' => 0,
// (optional) Show the user's portrait on the video. Defaults to true.
'portrait' => true,
// (optional) Specify the color of the video controls.
'color' => null,
// (optional) Set to 1 to disable HD.
'hd_off' => 0,
// Set to 1 to enable the Javascript API.
'js_api' => null,
// (optional) JS function called when the player loads. Defaults to vimeo_player_loaded.
'js_onLoad' => 0,
// Unique id that is passed into all player events as the ending parameter.
'js_swf_id' => uniqid('vimeo_player_'),
);
$player_parameters = array_merge($defaults, isset($options['player_parameters']) ? $options['player_parameters'] : array());
$box = $this->getBoxHelperProperties($media, $format, $options);
$params = array(
'src' => http_build_query($player_parameters),
'id' => $player_parameters['js_swf_id'],
'frameborder' => isset($options['frameborder']) ? $options['frameborder'] : 0,
'width' => $box->getWidth(),
'height' => $box->getHeight(),
);
return $params;
}
/**
* {@inheritdoc}
*/
public function getProviderMetadata()
{
return new Metadata($this->getName(), $this->getName().'.description', false, 'SonataMediaBundle', array('class' => 'fa fa-vimeo-square'));
}
/**
* @param MediaInterface $media
*/
protected function fixBinaryContent(MediaInterface $media)
{
if (!$media->getBinaryContent()) {
return;
}
if (preg_match("/vimeo\.com\/(video\/|)(\d+)/", $media->getBinaryContent(), $matches)) {
$media->setBinaryContent($matches[2]);
}
}
/**
* {@inheritdoc}
*/
protected function doTransform(MediaInterface $media)
{
$this->fixBinaryContent($media);
if (!$media->getBinaryContent()) {
return;
}
// store provider information
$media->setProviderName($this->name);
$media->setProviderReference($media->getBinaryContent());
$media->setProviderStatus(MediaInterface::STATUS_OK);
$this->updateMetadata($media, true);
}
/**
* {@inheritdoc}
*/
public function updateMetadata(MediaInterface $media, $force = false)
{
$url = sprintf('http://vimeo.com/api/oembed.json?url=http://vimeo.com/%s', $media->getProviderReference());
try {
$metadata = $this->getMetadata($media, $url);
} catch (\RuntimeException $e) {
$media->setEnabled(false);
$media->setProviderStatus(MediaInterface::STATUS_ERROR);
return;
}
// store provider information
$media->setProviderMetadata($metadata);
// update Media common fields from metadata
if ($force) {
$media->setName($metadata['title']);
$media->setDescription($metadata['description']);
$media->setAuthorName($metadata['author_name']);
}
$media->setHeight($metadata['height']);
$media->setWidth($metadata['width']);
$media->setLength($metadata['duration']);
$media->setContentType('video/x-flv');
}
/**
* {@inheritdoc}
*/
public function getDownloadResponse(MediaInterface $media, $format, $mode, array $headers = array())
{
return new RedirectResponse(sprintf('http://vimeo.com/%s', $media->getProviderReference()), 302, $headers);
}
}
| 1 | 7,412 | I would use an empty string here as the default and then check for emptiness in the Twig template. I don't like mixing types. @core23 what do you think? | sonata-project-SonataMediaBundle | php |
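One way to read the suggestion, sketched as a standalone snippet rather than the bundle's real helper: keep every value handed to the template a string (defaulting to ''), and let the Twig template check for emptiness before emitting the attribute. The option names follow the diff; the Twig line is only indicative:
```php
<?php
$options = ['allowfullscreen' => true]; // pretend these are the helper options

$params = [
    // Strings only, so no bool is mixed in with the other attribute values.
    'class'            => isset($options['class']) ? $options['class'] : '',
    'allow_fullscreen' => isset($options['allowfullscreen']) ? 'allowfullscreen' : '',
];

// Twig side (indicative):
// {% if params.allow_fullscreen is not empty %}allowfullscreen{% endif %}
var_dump($params);
```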
@@ -169,6 +169,8 @@ public class PhpSampleMethodToViewTransformer implements SampleMethodToViewTrans
builder.isResourceMap(fieldInfo.type().isMap());
builder.pageVarName(
symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel(fieldInfo.name()))));
+ builder.pageTokenName(methodInfo.requestPageTokenName());
+ builder.nextPageTokenName(Name.lowerCamel(methodInfo.responsePageTokenName()).toUpperCamel());
return builder.build();
}
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.discovery.transformer.php;
import com.google.api.codegen.discovery.config.FieldInfo;
import com.google.api.codegen.discovery.config.MethodInfo;
import com.google.api.codegen.discovery.config.SampleConfig;
import com.google.api.codegen.discovery.transformer.SampleMethodToViewTransformer;
import com.google.api.codegen.discovery.transformer.SampleNamer;
import com.google.api.codegen.discovery.transformer.SampleTransformerContext;
import com.google.api.codegen.discovery.transformer.SampleTypeTable;
import com.google.api.codegen.discovery.viewmodel.SampleAuthView;
import com.google.api.codegen.discovery.viewmodel.SampleFieldView;
import com.google.api.codegen.discovery.viewmodel.SamplePageStreamingView;
import com.google.api.codegen.discovery.viewmodel.SampleView;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.SymbolTable;
import com.google.api.codegen.util.php.PhpTypeTable;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.protobuf.Field.Cardinality;
import com.google.protobuf.Method;
import java.util.ArrayList;
import java.util.List;
public class PhpSampleMethodToViewTransformer implements SampleMethodToViewTransformer {
private static final String TEMPLATE_FILENAME = "php/sample.snip";
public PhpSampleMethodToViewTransformer() {}
@Override
public ViewModel transform(Method method, SampleConfig config) {
SampleTypeTable typeTable =
new SampleTypeTable(
new PhpTypeTable(""), new PhpSampleTypeNameConverter(config.apiTypeName()));
SampleNamer namer = new PhpSampleNamer();
SampleTransformerContext context =
SampleTransformerContext.create(config, typeTable, namer, method.getName());
return createSampleView(context);
}
private SampleView createSampleView(SampleTransformerContext context) {
SampleConfig config = context.getSampleConfig();
MethodInfo methodInfo = config.methods().get(context.getMethodName());
SampleNamer namer = context.getSampleNamer();
SampleTypeTable typeTable = context.getSampleTypeTable();
SymbolTable symbolTable = SymbolTable.fromSeed(PhpTypeTable.RESERVED_IDENTIFIER_SET);
SampleView.Builder builder = SampleView.newBuilder();
String serviceVarName = symbolTable.getNewSymbol(namer.getServiceVarName(config.apiTypeName()));
String serviceTypeName = typeTable.getServiceTypeName(config.apiTypeName()).getNickname();
if (methodInfo.isPageStreaming()) {
builder.pageStreaming(createSamplePageStreamingView(context, symbolTable));
}
// Created before the fields in case there are naming conflicts in the symbol table.
SampleAuthView sampleAuthView = createSampleAuthView(context);
List<SampleFieldView> fields = new ArrayList<>();
List<String> fieldVarNames = new ArrayList<>();
for (FieldInfo field : methodInfo.fields().values()) {
SampleFieldView sampleFieldView =
SampleFieldView.newBuilder()
.name(namer.localVarName(Name.lowerCamel(field.name())))
.defaultValue(typeTable.getZeroValueAndSaveNicknameFor(field.type()))
.example(field.example())
.description(field.description())
.build();
fields.add(sampleFieldView);
fieldVarNames.add(sampleFieldView.name());
}
boolean hasRequestBody = methodInfo.requestBodyType() != null;
if (hasRequestBody) {
String requestBodyVarName = symbolTable.getNewSymbol(namer.getRequestBodyVarName());
builder.requestBodyVarName(requestBodyVarName);
builder.requestBodyTypeName(
typeTable.getTypeName(methodInfo.requestBodyType()).getNickname());
fieldVarNames.add(requestBodyVarName);
}
boolean hasResponse = methodInfo.responseType() != null;
if (hasResponse) {
builder.responseVarName(symbolTable.getNewSymbol(namer.getResponseVarName()));
}
String optParamsVarName = "";
if (methodInfo.isPageStreaming() || methodInfo.hasMediaDownload()) {
optParamsVarName = namer.localVarName(Name.lowerCamel("optParams"));
fieldVarNames.add(optParamsVarName);
}
return builder
.templateFileName(TEMPLATE_FILENAME)
.outputPath(context.getMethodName() + ".frag.php")
.apiTitle(config.apiTitle())
.apiName(config.apiName())
.apiVersion(config.apiVersion())
.appName(namer.getSampleApplicationName(config.apiCanonicalName()))
.auth(sampleAuthView)
.serviceVarName(serviceVarName)
.serviceTypeName(serviceTypeName)
.methodVerb(methodInfo.verb())
.methodNameComponents(methodInfo.nameComponents())
.hasRequestBody(hasRequestBody)
.hasResponse(hasResponse)
.fields(fields)
.fieldVarNames(fieldVarNames)
.isPageStreaming(methodInfo.isPageStreaming())
.hasMediaUpload(methodInfo.hasMediaUpload())
.hasMediaDownload(methodInfo.hasMediaDownload())
.clientVarName(namer.localVarName(Name.lowerCamel("client")))
.optParamsVarName(optParamsVarName)
.build();
}
private SampleAuthView createSampleAuthView(SampleTransformerContext context) {
SampleConfig config = context.getSampleConfig();
MethodInfo methodInfo = config.methods().get(context.getMethodName());
return SampleAuthView.newBuilder()
.type(config.authType())
.instructionsUrl(config.authInstructionsUrl())
.scopes(methodInfo.authScopes())
.isScopesSingular(methodInfo.authScopes().size() == 1)
.build();
}
private SamplePageStreamingView createSamplePageStreamingView(
SampleTransformerContext context, SymbolTable symbolTable) {
MethodInfo methodInfo = context.getSampleConfig().methods().get(context.getMethodName());
FieldInfo fieldInfo = methodInfo.pageStreamingResourceField();
SampleNamer namer = context.getSampleNamer();
SamplePageStreamingView.Builder builder = SamplePageStreamingView.newBuilder();
builder.resourceFieldName(namer.publicFieldName(Name.lowerCamel(fieldInfo.name())));
if (fieldInfo.type().isMap()) {
// Assume that the value in the map is a message.
if (!fieldInfo.type().mapValue().isMessage()) {
throw new IllegalArgumentException("expected map value to be a message");
}
builder.resourceKeyVarName(
symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel("name"))));
String resourceValueVarName =
namer.localVarName(Name.upperCamel(fieldInfo.type().mapValue().message().typeName()));
builder.resourceValueVarName(symbolTable.getNewSymbol(resourceValueVarName));
} else {
String resourceVarName =
namer.getResourceVarName(
fieldInfo.type().isMessage() ? fieldInfo.type().message().typeName() : "");
builder.resourceVarName(symbolTable.getNewSymbol(resourceVarName));
}
builder.isResourceRepeated(fieldInfo.cardinality() == Cardinality.CARDINALITY_REPEATED);
builder.isResourceMap(fieldInfo.type().isMap());
builder.pageVarName(
symbolTable.getNewSymbol(namer.localVarName(Name.lowerCamel(fieldInfo.name()))));
return builder.build();
}
}
| 1 | 20,433 | How come we need case manipulation for the nextPageToken and not for pageToken? | googleapis-gapic-generator | java |
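A stand-in illustration (not the generator's Name utility) of what the case manipulation in the added line appears to do: turn the lowerCamel response field name, e.g. "nextPageToken", into its UpperCamel form, presumably for composing a getter-style identifier, while the request-side page token name is passed through unchanged:
```java
public class CamelCaseSketch {
    // Hypothetical equivalent of Name.lowerCamel(name).toUpperCamel().
    static String toUpperCamel(String lowerCamel) {
        return Character.toUpperCase(lowerCamel.charAt(0)) + lowerCamel.substring(1);
    }

    public static void main(String[] args) {
        System.out.println(toUpperCamel("nextPageToken")); // NextPageToken
    }
}
```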
@@ -2,7 +2,8 @@ C2::Application.routes.draw do
ActiveAdmin.routes(self)
root :to => 'home#index'
get '/error' => 'home#error'
- get '/me' => 'home#me'
+ get '/me' => 'profile#show'
+ post '/me' => 'profile#update'
get '/feedback' => 'feedback#index'
get '/feedback/thanks' => 'feedback#thanks'
post '/feedback' => 'feedback#create' | 1 | C2::Application.routes.draw do
ActiveAdmin.routes(self)
root :to => 'home#index'
get '/error' => 'home#error'
get '/me' => 'home#me'
get '/feedback' => 'feedback#index'
get '/feedback/thanks' => 'feedback#thanks'
post '/feedback' => 'feedback#create'
match '/auth/:provider/callback' => 'auth#oauth_callback', via: [:get]
post '/logout' => 'auth#logout'
resources :help, only: [:index, :show]
# mandrill-rails
resource :inbox, controller: 'inbox', only: [:show, :create]
namespace :api do
scope :v1 do
namespace :ncr do
resources :work_orders, only: [:index]
end
resources :users, only: [:index]
end
end
resources :proposals, only: [:index, :show] do
member do
get 'approve' # this route has special protection to prevent the confused deputy problem
# if you are adding a new controller which performs an action, use post instead
post 'approve'
get 'cancel_form'
post 'cancel'
get 'history'
end
collection do
get 'archive'
get 'query'
end
resources :comments, only: :create
resources :attachments, only: [:create, :destroy, :show]
resources :observations, only: [:create, :destroy]
end
namespace :ncr do
resources :work_orders, except: [:index, :destroy]
get '/dashboard' => 'dashboard#index'
end
namespace :gsa18f do
resources :procurements, except: [:index, :destroy]
get '/dashboard' => 'dashboard#index'
end
mount Peek::Railtie => '/peek'
if Rails.env.development?
mount MailPreview => 'mail_view'
mount LetterOpenerWeb::Engine => 'letter_opener'
end
end
| 1 | 15,459 | why use these rather than regular named paths? | 18F-C2 | rb |
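To spell out what "regular named paths" could look like here, a sketch (assuming a ProfileController; this is not the project's actual routing) that yields URL helpers instead of bare string matches. Note that a resourceful update maps to PATCH/PUT rather than POST, so it is not a drop-in replacement for the POST '/me' route:
```ruby
C2::Application.routes.draw do
  # Option 1: a singular resource mounted at /me (generates profile_path)
  resource :profile, path: 'me', controller: 'profile', only: [:show, :update]

  # Option 2: keep the explicit verbs but name the route (generates me_path)
  # get  '/me' => 'profile#show',   as: :me
  # post '/me' => 'profile#update'
end
```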
@@ -61,6 +61,9 @@ module Bolt
{ flags: OPTIONS[:global],
banner: GROUP_HELP }
end
+ when 'guide'
+ { flags: OPTIONS[:global] + %w[format],
+ banner: GUIDE_HELP }
when 'plan'
case action
when 'convert' | 1 | # frozen_string_literal: true
# Note this file includes very few 'requires' because it expects to be used from the CLI.
require 'optparse'
module Bolt
class BoltOptionParser < OptionParser
OPTIONS = { inventory: %w[targets query rerun description],
authentication: %w[user password password-prompt private-key host-key-check ssl ssl-verify],
escalation: %w[run-as sudo-password sudo-password-prompt sudo-executable],
run_context: %w[concurrency inventoryfile save-rerun cleanup],
global_config_setters: %w[modulepath project configfile],
transports: %w[transport connect-timeout tty native-ssh ssh-command copy-command],
display: %w[format color verbose trace],
global: %w[help version debug log-level] }.freeze
ACTION_OPTS = OPTIONS.values.flatten.freeze
def get_help_text(subcommand, action = nil)
case subcommand
when 'apply'
{ flags: ACTION_OPTS + %w[noop execute compile-concurrency hiera-config],
banner: APPLY_HELP }
when 'command'
case action
when 'run'
{ flags: ACTION_OPTS + %w[env-var],
banner: COMMAND_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: COMMAND_HELP }
end
when 'file'
case action
when 'upload'
{ flags: ACTION_OPTS + %w[tmpdir],
banner: FILE_UPLOAD_HELP }
when 'download'
{ flags: ACTION_OPTS,
banner: FILE_DOWNLOAD_HELP }
else
{ flags: OPTIONS[:global],
banner: FILE_HELP }
end
when 'inventory'
case action
when 'show'
{ flags: OPTIONS[:inventory] + OPTIONS[:global] + %w[format inventoryfile boltdir configfile detail],
banner: INVENTORY_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: INVENTORY_HELP }
end
when 'group'
case action
when 'show'
{ flags: OPTIONS[:global] + %w[format inventoryfile boltdir configfile],
banner: GROUP_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: GROUP_HELP }
end
when 'plan'
case action
when 'convert'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PLAN_CONVERT_HELP }
when 'new'
{ flags: OPTIONS[:global] + %w[configfile project],
banner: PLAN_NEW_HELP }
when 'run'
{ flags: ACTION_OPTS + %w[params compile-concurrency tmpdir hiera-config],
banner: PLAN_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: PLAN_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: PLAN_HELP }
end
when 'project'
case action
when 'init'
{ flags: OPTIONS[:global] + %w[modules],
banner: PROJECT_INIT_HELP }
when 'migrate'
{ flags: OPTIONS[:global] + %w[inventoryfile boltdir configfile],
banner: PROJECT_MIGRATE_HELP }
else
{ flags: OPTIONS[:global],
banner: PROJECT_HELP }
end
when 'puppetfile'
case action
when 'install'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[puppetfile],
banner: PUPPETFILE_INSTALL_HELP }
when 'show-modules'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PUPPETFILE_SHOWMODULES_HELP }
when 'generate-types'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PUPPETFILE_GENERATETYPES_HELP }
else
{ flags: OPTIONS[:global],
banner: PUPPETFILE_HELP }
end
when 'script'
case action
when 'run'
{ flags: ACTION_OPTS + %w[tmpdir env-var],
banner: SCRIPT_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: SCRIPT_HELP }
end
when 'secret'
case action
when 'createkeys'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin force],
banner: SECRET_CREATEKEYS_HELP }
when 'decrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_DECRYPT_HELP }
when 'encrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_ENCRYPT_HELP }
else
{ flags: OPTIONS[:global],
banner: SECRET_HELP }
end
when 'task'
case action
when 'run'
{ flags: ACTION_OPTS + %w[params tmpdir noop],
banner: TASK_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: TASK_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: TASK_HELP }
end
else
{ flags: OPTIONS[:global],
banner: BANNER }
end
end
BANNER = <<~HELP
NAME
bolt
USAGE
bolt <subcommand> [action] [options]
DESCRIPTION
Bolt is an orchestration tool that automates the manual work it takes to
maintain your infrastructure.
SUBCOMMANDS
apply Apply Puppet manifest code
command Run a command remotely
file Copy files between the controller and targets
group Show the list of groups in the inventory
inventory Show the list of targets an action would run on
plan Convert, create, show, and run Bolt plans
project Create and migrate Bolt projects
puppetfile Install and list modules and generate type references
script Upload a local script and run it remotely
secret Create encryption keys and encrypt and decrypt values
task Show and run Bolt tasks
HELP
APPLY_HELP = <<~HELP
NAME
apply
USAGE
bolt apply [manifest.pp] [options]
DESCRIPTION
Apply Puppet manifest code on the specified targets.
EXAMPLES
bolt apply manifest.pp -t target
bolt apply -e "file { '/etc/puppetlabs': ensure => present }" -t target
HELP
COMMAND_HELP = <<~HELP
NAME
command
USAGE
bolt command <action> [options]
DESCRIPTION
Run a command on the specified targets.
ACTIONS
run Run a command on the specified targets.
HELP
COMMAND_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt command run <command> [options]
DESCRIPTION
Run a command on the specified targets.
EXAMPLES
bolt command run 'uptime' -t target1,target2
HELP
FILE_HELP = <<~HELP
NAME
file
USAGE
bolt file <action> [options]
DESCRIPTION
Copy files and directories between the controller and targets
ACTIONS
download Download a file or directory to the controller
upload Upload a local file or directory from the controller
HELP
FILE_DOWNLOAD_HELP = <<~HELP
NAME
download
USAGE
bolt file download <src> <dest> [options]
DESCRIPTION
Download a file or directory from one or more targets.
      Downloaded files and directories are saved to a subdirectory
matching the target's name under the destination directory. The
destination directory is expanded relative to the downloads
subdirectory of the project directory.
EXAMPLES
bolt file download /etc/ssh_config ssh_config -t all
HELP
FILE_UPLOAD_HELP = <<~HELP
NAME
upload
USAGE
bolt file upload <src> <dest> [options]
DESCRIPTION
Upload a local file or directory.
EXAMPLES
bolt file upload /tmp/source /etc/profile.d/login.sh -t target1
HELP
GROUP_HELP = <<~HELP
NAME
group
USAGE
bolt group <action> [options]
DESCRIPTION
Show the list of groups in the inventory.
ACTIONS
show Show the list of groups in the inventory
HELP
GROUP_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt group show [options]
DESCRIPTION
Show the list of groups in the inventory.
HELP
INVENTORY_HELP = <<~HELP
NAME
inventory
USAGE
bolt inventory <action> [options]
DESCRIPTION
Show the list of targets an action would run on.
ACTIONS
show Show the list of targets an action would run on
HELP
INVENTORY_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt inventory show [options]
DESCRIPTION
Show the list of targets an action would run on.
HELP
PLAN_HELP = <<~HELP
NAME
plan
USAGE
bolt plan <action> [parameters] [options]
DESCRIPTION
Convert, create, show, and run Bolt plans.
ACTIONS
convert Convert a YAML plan to a Bolt plan
new Create a new plan in the current project
run Run a plan on the specified targets
show Show available plans and plan documentation
HELP
PLAN_CONVERT_HELP = <<~HELP
NAME
convert
USAGE
bolt plan convert <path> [options]
DESCRIPTION
Convert a YAML plan to a Bolt plan.
Converting a YAML plan may result in a plan that is syntactically
correct but has different behavior. Always verify a converted plan's
functionality.
EXAMPLES
bolt plan convert path/to/plan/myplan.yaml
HELP
PLAN_NEW_HELP = <<~HELP
NAME
new
USAGE
bolt plan new <plan> [options]
DESCRIPTION
Create a new plan in the current project.
EXAMPLES
bolt plan new myproject::myplan
HELP
PLAN_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt plan run <plan> [parameters] [options]
DESCRIPTION
Run a plan on the specified targets.
EXAMPLES
bolt plan run canary --targets target1,target2 command=hostname
HELP
PLAN_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt plan show [plan] [options]
DESCRIPTION
Show available plans and plan documentation.
Omitting the name of a plan will display a list of plans available
in the Bolt project.
Providing the name of a plan will display detailed documentation for
the plan, including a list of available parameters.
EXAMPLES
Display a list of available plans
bolt plan show
Display documentation for the aggregate::count plan
bolt plan show aggregate::count
HELP
PROJECT_HELP = <<~HELP
NAME
project
USAGE
bolt project <action> [options]
DESCRIPTION
Create and migrate Bolt projects
ACTIONS
init Create a new Bolt project
migrate Migrate a Bolt project to the latest version
HELP
PROJECT_INIT_HELP = <<~HELP
NAME
init
USAGE
bolt project init [name] [options]
DESCRIPTION
Create a new Bolt project in the current working directory.
Specify a name for the Bolt project. Defaults to the basename of the current working directory.
EXAMPLES
Create a new Bolt project using the directory as the project name.
bolt project init
Create a new Bolt project with a specified name.
bolt project init myproject
Create a new Bolt project with existing modules.
bolt project init --modules puppetlabs-apt,puppetlabs-ntp
HELP
PROJECT_MIGRATE_HELP = <<~HELP
NAME
migrate
USAGE
bolt project migrate [options]
DESCRIPTION
Migrate a Bolt project to the latest version.
Loads a Bolt project's inventory file and migrates it to the latest version. The
inventory file is modified in place and will not preserve comments or formatting.
HELP
PUPPETFILE_HELP = <<~HELP
NAME
puppetfile
USAGE
bolt puppetfile <action> [options]
DESCRIPTION
Install and list modules and generate type references
ACTIONS
generate-types Generate type references to register in plans
install Install modules from a Puppetfile into a project
show-modules List modules available to the Bolt project
HELP
PUPPETFILE_GENERATETYPES_HELP = <<~HELP
NAME
generate-types
USAGE
bolt puppetfile generate-types [options]
DESCRIPTION
Generate type references to register in plans.
HELP
PUPPETFILE_INSTALL_HELP = <<~HELP
NAME
install
USAGE
bolt puppetfile install [options]
DESCRIPTION
Install modules from a Puppetfile into a project
HELP
PUPPETFILE_SHOWMODULES_HELP = <<~HELP
NAME
show-modules
USAGE
bolt puppetfile show-modules [options]
DESCRIPTION
List modules available to the Bolt project.
HELP
SCRIPT_HELP = <<~HELP
NAME
script
USAGE
bolt script <action> [options]
DESCRIPTION
Run a script on the specified targets.
ACTIONS
run Run a script on the specified targets.
HELP
SCRIPT_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt script run <script> [arguments] [options]
DESCRIPTION
Run a script on the specified targets.
Arguments passed to a script are passed literally and are not interpolated
by the shell. Any arguments containing spaces or special characters should
be quoted.
EXAMPLES
bolt script run myscript.sh 'echo hello' --targets target1,target2
HELP
SECRET_HELP = <<~HELP
NAME
secret
USAGE
bolt secret <action> [options]
DESCRIPTION
Create encryption keys and encrypt and decrypt values.
ACTIONS
createkeys Create new encryption keys
encrypt Encrypt a value
decrypt Decrypt a value
HELP
SECRET_CREATEKEYS_HELP = <<~HELP
NAME
createkeys
USAGE
bolt secret createkeys [options]
DESCRIPTION
Create new encryption keys.
HELP
SECRET_DECRYPT_HELP = <<~HELP
NAME
decrypt
USAGE
bolt secret decrypt <ciphertext> [options]
DESCRIPTION
Decrypt a value.
HELP
SECRET_ENCRYPT_HELP = <<~HELP
NAME
encrypt
USAGE
bolt secret encrypt <plaintext> [options]
DESCRIPTION
Encrypt a value.
HELP
TASK_HELP = <<~HELP
NAME
task
USAGE
bolt task <action> [options]
DESCRIPTION
Show and run Bolt tasks.
ACTIONS
run Run a Bolt task
show Show available tasks and task documentation
HELP
TASK_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt task run <task> [parameters] [options]
DESCRIPTION
Run a task on the specified targets.
Parameters take the form parameter=value.
EXAMPLES
bolt task run package --targets target1,target2 action=status name=bash
HELP
TASK_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt task show [task] [options]
DESCRIPTION
Show available tasks and task documentation.
Omitting the name of a task will display a list of tasks available
in the Bolt project.
Providing the name of a task will display detailed documentation for
the task, including a list of available parameters.
EXAMPLES
Display a list of available tasks
bolt task show
Display documentation for the canary task
bolt task show canary
HELP
attr_reader :deprecations
def initialize(options)
super()
@options = options
@deprecations = []
separator "\nINVENTORY OPTIONS"
define('-t', '--targets TARGETS',
'Identifies the targets of the command.',
'Enter a comma-separated list of target URIs or group names.',
"Or read a target list from an input file '@<file>' or stdin '-'.",
'Example: --targets localhost,target_group,ssh://nix.com:23,winrm://windows.puppet.com',
'URI format is [protocol://]host[:port]',
"SSH is the default protocol; may be #{TRANSPORTS.keys.join(', ')}",
'For Windows targets, specify the winrm:// protocol if it has not been configured',
'For SSH, port defaults to `22`',
'For WinRM, port defaults to `5985` or `5986` based on the --[no-]ssl setting') do |targets|
@options[:targets] ||= []
@options[:targets] << get_arg_input(targets)
end
define('-q', '--query QUERY', 'Query PuppetDB to determine the targets') do |query|
@options[:query] = query
end
define('--rerun FILTER', 'Retry on targets from the last run',
"'all' all targets that were part of the last run.",
"'failure' targets that failed in the last run.",
"'success' targets that succeeded in the last run.") do |rerun|
@options[:rerun] = rerun
end
define('--noop', 'See what changes Bolt will make without actually executing the changes') do |_|
@options[:noop] = true
end
define('--description DESCRIPTION',
'Description to use for the job') do |description|
@options[:description] = description
end
define('--params PARAMETERS',
"Parameters to a task or plan as json, a json file '@<file>', or on stdin '-'") do |params|
@options[:task_options] = parse_params(params)
end
define('-e', '--execute CODE',
"Puppet manifest code to apply to the targets") do |code|
@options[:code] = code
end
define('--detail', 'Show resolved configuration for the targets') do |detail|
@options[:detail] = detail
end
separator "\nAUTHENTICATION OPTIONS"
define('-u', '--user USER', 'User to authenticate as') do |user|
@options[:user] = user
end
define('-p', '--password PASSWORD',
'Password to authenticate with') do |password|
@options[:password] = password
end
define('--password-prompt', 'Prompt for user to input password') do |_password|
$stderr.print "Please enter your password: "
@options[:password] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--private-key KEY', 'Path to private ssh key to authenticate with') do |key|
@options[:'private-key'] = File.expand_path(key)
end
define('--[no-]host-key-check', 'Check host keys with SSH') do |host_key_check|
@options[:'host-key-check'] = host_key_check
end
define('--[no-]ssl', 'Use SSL with WinRM') do |ssl|
@options[:ssl] = ssl
end
define('--[no-]ssl-verify', 'Verify remote host SSL certificate with WinRM') do |ssl_verify|
@options[:'ssl-verify'] = ssl_verify
end
separator "\nESCALATION OPTIONS"
define('--run-as USER', 'User to run as using privilege escalation') do |user|
@options[:'run-as'] = user
end
define('--sudo-password PASSWORD',
'Password for privilege escalation') do |password|
@options[:'sudo-password'] = password
end
define('--sudo-password-prompt', 'Prompt for user to input escalation password') do |_password|
$stderr.print "Please enter your privilege escalation password: "
@options[:'sudo-password'] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--sudo-executable EXEC', "Specify an executable for running as another user.",
"This option is experimental.") do |exec|
@options[:'sudo-executable'] = exec
end
separator "\nRUN CONTEXT OPTIONS"
define('-c', '--concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous connections') do |concurrency|
@options[:concurrency] = concurrency
end
define('--compile-concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous manifest block compiles (default: number of cores)') do |concurrency|
@options[:'compile-concurrency'] = concurrency
end
define('--[no-]cleanup',
'Whether to clean up temporary files created on targets') do |cleanup|
@options[:cleanup] = cleanup
end
define('-m', '--modulepath MODULES',
"List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'",
'Directories are case-sensitive') do |modulepath|
# When specified from the CLI, modulepath entries are relative to pwd
@options[:modulepath] = modulepath.split(File::PATH_SEPARATOR).map do |moduledir|
File.expand_path(moduledir)
end
end
define('--project PATH', '--boltdir PATH',
'Specify what project to load config from (default: autodiscovered from current working dir)') do |path|
@options[:boltdir] = path
end
define('--configfile PATH',
'Specify where to load config from (default: ~/.puppetlabs/bolt/bolt.yaml).',
'Directory containing bolt.yaml will be used as the project directory.') do |path|
@options[:configfile] = path
end
define('--hiera-config PATH',
'Specify where to load Hiera config from (default: ~/.puppetlabs/bolt/hiera.yaml)') do |path|
@options[:'hiera-config'] = File.expand_path(path)
end
define('-i', '--inventoryfile PATH',
'Specify where to load inventory from (default: ~/.puppetlabs/bolt/inventory.yaml)') do |path|
if ENV.include?(Bolt::Inventory::ENVIRONMENT_VAR)
raise Bolt::CLIError, "Cannot pass inventory file when #{Bolt::Inventory::ENVIRONMENT_VAR} is set"
end
@options[:inventoryfile] = Pathname.new(File.expand_path(path))
end
define('--puppetfile PATH',
'Specify a Puppetfile to use when installing modules. (default: ~/.puppetlabs/bolt/Puppetfile)',
'Modules are installed in the current project.') do |path|
@options[:puppetfile_path] = Pathname.new(File.expand_path(path))
end
define('--[no-]save-rerun', 'Whether to update the rerun file after this command.') do |save|
@options[:'save-rerun'] = save
end
separator "\nREMOTE ENVIRONMENT OPTIONS"
define('--env-var ENVIRONMENT_VARIABLES', 'Environment variables to set on the target') do |envvar|
unless envvar.include?('=')
raise Bolt::CLIError, "Environment variables must be specified using 'myenvvar=key' format"
end
@options[:env_vars] ||= {}
@options[:env_vars].store(*envvar.split('=', 2))
end
separator "\nTRANSPORT OPTIONS"
define('--transport TRANSPORT', TRANSPORTS.keys.map(&:to_s),
"Specify a default transport: #{TRANSPORTS.keys.join(', ')}") do |t|
@options[:transport] = t
end
define('--[no-]native-ssh', 'Whether to shell out to native SSH or use the net-ssh Ruby library.',
'This option is experimental') do |bool|
@options[:'native-ssh'] = bool
end
define('--ssh-command EXEC', "Executable to use instead of the net-ssh Ruby library. ",
"This option is experimental.") do |exec|
@options[:'ssh-command'] = exec
end
define('--copy-command EXEC', "Command to copy files to remote hosts if using native SSH. ",
"This option is experimental.") do |exec|
@options[:'copy-command'] = exec
end
define('--connect-timeout TIMEOUT', Integer, 'Connection timeout (defaults vary)') do |timeout|
@options[:'connect-timeout'] = timeout
end
define('--[no-]tty', 'Request a pseudo TTY on targets that support it') do |tty|
@options[:tty] = tty
end
define('--tmpdir DIR', 'The directory to upload and execute temporary files on the target') do |tmpdir|
@options[:tmpdir] = tmpdir
end
separator "\nDISPLAY OPTIONS"
define('--filter FILTER', 'Filter tasks and plans by a matching substring') do |filter|
unless /^[a-z0-9_:]+$/.match(filter)
msg = "Illegal characters in filter string '#{filter}'. Filters must match a legal "\
"task or plan name."
raise Bolt::CLIError, msg
end
@options[:filter] = filter
end
define('--format FORMAT', 'Output format to use: human or json') do |format|
@options[:format] = format
end
define('--[no-]color', 'Whether to show output in color') do |color|
@options[:color] = color
end
define('-v', '--[no-]verbose', 'Display verbose logging') do |value|
@options[:verbose] = value
end
define('--trace', 'Display error stack traces') do |_|
@options[:trace] = true
end
separator "\nADDITIONAL OPTIONS"
define('--modules MODULES',
'A comma-separated list of modules to install from the Puppet Forge',
'when initializing a project. Resolves and installs all dependencies.') do |modules|
@options[:modules] = modules.split(',')
end
define('--force', 'Overwrite existing key pairs') do |_force|
@options[:force] = true
end
separator "\nGLOBAL OPTIONS"
define('-h', '--help', 'Display help') do |_|
@options[:help] = true
end
define('--version', 'Display the version') do |_|
puts Bolt::VERSION
raise Bolt::CLIExit
end
define('--debug', 'Display debug logging') do |_|
@options[:debug] = true
# We don't actually set '--log-level debug' here, but once the options are evaluated by
# the config class the end result is the same.
msg = "Command line option '--debug' is deprecated, set '--log-level debug' instead."
@deprecations << { type: 'Using --debug instead of --log-level debug', msg: msg }
end
define('--log-level LEVEL',
"Set the log level for the console. Available options are",
"debug, info, notice, warn, error, fatal, any.") do |level|
@options[:log] = { 'console' => { 'level' => level } }
end
define('--plugin PLUGIN', 'Select the plugin to use') do |plug|
@options[:plugin] = plug
end
end
def remove_excluded_opts(option_list)
# Remove any options that are not available for the specified subcommand
top.list.delete_if do |opt|
opt.respond_to?(:switch_name) && !option_list.include?(opt.switch_name)
end
# Remove any separators if all options of that type have been removed
top.list.delete_if do |opt|
i = top.list.index(opt)
opt.is_a?(String) && top.list[i + 1].is_a?(String)
end
end
def update
help_text = get_help_text(@options[:subcommand], @options[:action])
# Update the banner according to the subcommand
self.banner = help_text[:banner]
# Builds the option list for the specified subcommand and removes all excluded
# options from the help text
remove_excluded_opts(help_text[:flags])
end
def parse_params(params)
json = get_arg_input(params)
JSON.parse(json)
rescue JSON::ParserError => e
raise Bolt::CLIError, "Unable to parse --params value as JSON: #{e}"
end
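# Option values handled below can be supplied three ways: as a literal value, as '@<file>'
# to read the value from a file, or as '-' to read it from stdin.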
def get_arg_input(value)
if value.start_with?('@')
file = value.sub(/^@/, '')
read_arg_file(file)
elsif value == '-'
$stdin.read
else
value
end
end
def read_arg_file(file)
File.read(File.expand_path(file))
rescue StandardError => e
raise Bolt::FileError.new("Error attempting to read #{file}: #{e}", file)
end
end
end
| 1 | 15,671 | Hm, I don't think the extra flags are doing any harm here, but it does seem like `--help` is the only flag you could *actually* use with this command. We might eventually want to separate those out. | puppetlabs-bolt | rb |
@@ -1,3 +1,18 @@
+# This script is responsible for creating candidate sets for all users. The generated candidate sets
+# will be given as input to the recommender to assign ratings to the recordings in the candidate sets.
+# The general flow is as follows:
+#
+# Listens from the last 7 days are filtered from mapped_listens_df and the result is called the mapped_listens_subset_df.
+# Top X artists are fetched for each user from the mapped_listens_subset_df. The top_artist_df is joined
+# with recordings_df to get the dataframe of recordings belonging to the top artists. From this dataframe,
+# recordings listened to by the users in the last 7 days are filtered out so that the recommendations don't contain
+# recordings that the user has listened to in the last week. The resultant dataframe is called the top_artists_candidate_set_df.
+#
+# Artists similar to top artists are fetched from the artist_relations_df and the similar_artist_candidate_set_df is generated
+# in a manner similar to the generation of the top artist candidate set.
+#
+# The top artist and similar artist candidate set dataframes are saved to HDFS.
+
import os
import sys
import uuid | 1 | import os
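# Illustrative sketch only, not part of the patch above: the same header could be written
# as a module-level docstring so that editors and help() pick it up, for example:
#
#     """
#     Create candidate sets for all users.
#
#     The generated candidate sets are given as input to the recommender, which assigns
#     ratings to the recordings they contain.
#     """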
import sys
import uuid
import logging
import time
from datetime import datetime
from collections import defaultdict
from py4j.protocol import Py4JJavaError
import listenbrainz_spark
from listenbrainz_spark import stats, utils, path
from listenbrainz_spark.recommendations.utils import save_html
from listenbrainz_spark.exceptions import (SparkSessionNotInitializedException,
ViewNotRegisteredException,
PathNotFoundException,
FileNotFetchedException,
FileNotSavedException,  # assumed to be exported by listenbrainz_spark.exceptions; needed by save_candidate_sets()
TopArtistNotFetchedException,
SimilarArtistNotFetchedException)
from flask import current_app
import pyspark.sql.functions as func
from pyspark.sql.window import Window
from pyspark.sql.functions import col, row_number
# Some useful dataframe fields/columns.
# top_artist_df:
# [
# 'top_artist_credit_id',
# 'top_artist_name',
# 'user_name'
# ]
#
# top_artist_candidate_set_df:
# [
# 'user_id',
# 'recording_id'
# ]
#
# top_artist_candidate_set_df_html:
# [
# 'top_artist_credit_id',
# 'top_artist_name',
# 'mb_artist_credit_id',
# 'mb_artist_credit_mbids',
# 'mb_recording_mbid',
# 'msb_artist_credit_name_matchable',
# 'msb_recording_name_matchable',
# 'recording_id',
# 'user_name',
# 'user_id'
# ]
#
# similar_artist_df:
# [
# 'similar_artist_credit_id',
# 'similar_artist_name'
# 'user_name'
# ]
#
# similar_artist_candidate_set_df:
# [
# 'user_id',
# 'recording_id'
# ]
#
# similar_artist_candidate_set_df_html:
# [
# 'similar_artist_credit_id',
# 'similar_artist_name',
# 'mb_artist_credit_id',
# 'mb_artist_credit_mbids',
# 'mb_recording_mbid',
# 'msb_artist_credit_name_matchable',
# 'msb_recording_name_matchable',
# 'recording_id',
# 'user_name',
# 'user_id'
# ]
def get_dates_to_generate_candidate_sets(mapped_listens_df, recommendation_generation_window):
""" Get window to fetch listens to generate candidate sets.
Args:
mapped_listens_df (dataframe): listens mapped with msid_mbid_mapping. Refer to create_dataframe.py
for dataframe columns.
recommendation_generation_window (int): recommendations to be generated on history of given number of days.
Returns:
from_date (datetime): Date from which to start fetching listens.
to_date (datetime): Date up to which to fetch listens.
"""
# get timestamp of latest listen in HDFS
to_date = mapped_listens_df.select(func.max('listened_at').alias('listened_at')).collect()[0].listened_at
from_date = stats.offset_days(to_date, recommendation_generation_window).replace(hour=0, minute=0, second=0)
return from_date, to_date
def get_listens_to_fetch_top_artists(mapped_listens_df, from_date, to_date):
""" Get listens of past X days to fetch top artists where X = RECOMMENDATION_GENERATION_WINDOW.
Args:
mapped_listens_df (dataframe): listens mapped with msid_mbid_mapping.
from_date (datetime): Date from which to start fetching listens.
to_date (datetime): Date up to which to fetch listens.
Returns:
mapped_listens_subset (dataframe): A subset of mapped_listens_df containing user history.
"""
mapped_listens_subset = mapped_listens_df.filter(mapped_listens_df.listened_at.between(from_date, to_date))
return mapped_listens_subset
def _is_empty_dataframe(df):
""" Return True if the dataframe is empty, return False otherwise.
"""
try:
df.take(1)[0]
except IndexError:
return True
return False
def get_top_artists(mapped_listens_subset, top_artist_limit, users):
""" Get top artists listened to by users who have a listening history in
the past X days where X = RECOMMENDATION_GENERATION_WINDOW.
Args:
mapped_listens_subset (dataframe): A subset of mapped_listens_df containing user history.
top_artist_limit (int): number of top artists to fetch per user.
users: list of users to generate candidate sets for.
Returns:
if users is an empty list:
top_artist_df (dataframe): Top Y artists listened to by a user for all users where
Y = TOP_ARTISTS_LIMIT
else:
top_artist_given_users_df (dataframe): Top Y artists listened to by a user for given users where
Y = TOP_ARTISTS_LIMIT
"""
df = mapped_listens_subset.select('mb_artist_credit_id',
'msb_artist_credit_name_matchable',
'user_name') \
.groupBy('mb_artist_credit_id',
'msb_artist_credit_name_matchable',
'user_name') \
.agg(func.count('mb_artist_credit_id').alias('total_count'))
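# Rank each user's artists by listen count and keep only the top `top_artist_limit` rows per user.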
window = Window.partitionBy('user_name').orderBy(col('total_count').desc())
top_artist_df = df.withColumn('rank', row_number().over(window)) \
.where(col('rank') <= top_artist_limit) \
.select(col('mb_artist_credit_id').alias('top_artist_credit_id'),
col('msb_artist_credit_name_matchable').alias('top_artist_name'),
col('user_name'),
col('total_count'))
if users:
top_artist_given_users_df = top_artist_df.select('top_artist_credit_id',
'top_artist_name',
'user_name',
'total_count') \
.where(top_artist_df.user_name.isin(users))
if _is_empty_dataframe(top_artist_given_users_df):
current_app.logger.error('Top artists for {} not fetched'.format(users), exc_info=True)
raise TopArtistNotFetchedException('Users inactive or data missing from msid->mbid mapping')
return top_artist_given_users_df
if _is_empty_dataframe(top_artist_df):
current_app.logger.error('Top artists not fetched', exc_info=True)
raise TopArtistNotFetchedException('Users inactive or data missing from msid->mbid mapping')
return top_artist_df
def filter_top_artists_from_similar_artists(similar_artist_df, top_artist_df):
""" Filter artists from similar artist dataframe for a user who have already made it to
top artist dataframe.
Args:
similar_artist_df: Similar artist dataframe.
top_artist_df: Top artist dataframe.
Returns:
res_df: Similar artist dataframe that does not contain top artists.
"""
df = top_artist_df.select(col('top_artist_credit_id').alias('artist_credit_id'),
col('user_name').alias('user'))
condition = [
similar_artist_df.similar_artist_credit_id == df.artist_credit_id,
similar_artist_df.user_name == df.user
]
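# Left join on (artist_credit_id, user) and keep only the unmatched rows: this acts as an
# anti-join, dropping similar artists that already appear among the user's top artists.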
res_df = similar_artist_df.join(df, condition, 'left') \
.select('top_artist_credit_id',
'top_artist_name',
'similar_artist_credit_id',
'similar_artist_name',
'user_name') \
.where(col('artist_credit_id').isNull() & col('user').isNull())
return res_df
def get_similar_artists(top_artist_df, artist_relation_df, similar_artist_limit):
""" Get artists similar to top artists.
Args:
top_artist_df: Dataframe containing top artists listened to by users
artist_relation_df: Dataframe containing artists and similar artists.
For columns refer to artist_relation_schema in listenbrainz_spark/schema.py.
similar_artist_limit (int): number of similar artists to fetch per top artist.
Returns:
similar_artist_df (dataframe): Top Z artists similar to top artists where
Z = SIMILAR_ARTISTS_LIMIT.
"""
condition = [top_artist_df.top_artist_credit_id == artist_relation_df.id_0]
df1 = top_artist_df.join(artist_relation_df, condition, 'inner') \
.select(col('id_0').alias('top_artist_credit_id'),
col('name_0').alias('top_artist_name'),
col('id_1').alias('similar_artist_credit_id'),
col('name_1').alias('similar_artist_name'),
'score',
'user_name')
condition = [top_artist_df.top_artist_credit_id == artist_relation_df.id_1]
df2 = top_artist_df.join(artist_relation_df, condition, 'inner') \
.select(col('id_1').alias('top_artist_credit_id'),
col('name_1').alias('top_artist_name'),
col('id_0').alias('similar_artist_credit_id'),
col('name_0').alias('similar_artist_name'),
'score',
'user_name')
df = df1.union(df2)
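# The two joins above cover both orientations of the (id_0, id_1) artist pairs, so the union
# yields similar artists regardless of which column the top artist appears in.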
window = Window.partitionBy('top_artist_credit_id', 'user_name')\
.orderBy(col('score').desc())
similar_artist_df_html = df.withColumn('rank', row_number().over(window)) \
.where(col('rank') <= similar_artist_limit)\
.select('top_artist_credit_id',
'top_artist_name',
'similar_artist_credit_id',
'similar_artist_name',
'user_name')
similar_artist_df_html = filter_top_artists_from_similar_artists(similar_artist_df_html, top_artist_df)
# Two or more artists can have same similar artist(s) leading to non-unique recordings
# therefore we have filtered the distinct similar artists.
similar_artist_df = similar_artist_df_html.select('similar_artist_credit_id',
'similar_artist_name',
'user_name') \
.distinct()
if _is_empty_dataframe(similar_artist_df):
current_app.logger.error('Similar artists not generated.', exc_info=True)
raise SimilarArtistNotFetchedException('Artists missing from artist relation')
return similar_artist_df, similar_artist_df_html
def filter_last_x_days_recordings(candidate_set_df, mapped_listens_subset):
""" Filter recordings from candidate set that the user has listened in the last X
days where X = RECOMMENDATION_GENERATION_WINDOW.
Args:
candidate_set_df: top/similar artist candidate set.
mapped_listens_subset: dataframe containing user listening history of last X days.
Returns:
candidate_set without recordings of last X days of a user for all users.
"""
df = mapped_listens_subset.select(col('mb_recording_mbid').alias('recording_mbid'),
col('user_name').alias('user')) \
.distinct()
condition = [
candidate_set_df.mb_recording_mbid == df.recording_mbid,
candidate_set_df.user_name == df.user
]
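# Same anti-join pattern as above: keep only candidate recordings with no match among the
# user's listens from the recommendation window.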
filtered_df = candidate_set_df.join(df, condition, 'left') \
.select('*') \
.where(col('recording_mbid').isNull() & col('user').isNull())
return filtered_df.drop('recording_mbid', 'user')
def get_top_artist_candidate_set(top_artist_df, recordings_df, users_df, mapped_listens_subset):
""" Get recording ids that belong to top artists.
Args:
top_artist_df: Dataframe containing top artists listened to by users.
recordings_df: Dataframe containing distinct recordings and corresponding
mbids and names.
users_df: Dataframe containing user names and user ids.
mapped_listens_subset: dataframe containing user listening history of the last X days.
Returns:
top_artist_candidate_set_df (dataframe): recording ids that belong to top artists
corresponding to user ids.
top_artists_candidate_set_df_html (dataframe): top artist info required for html file
"""
condition = [
top_artist_df.top_artist_credit_id == recordings_df.mb_artist_credit_id
]
df = top_artist_df.join(recordings_df, condition, 'inner')
joined_df = df.join(users_df, 'user_name', 'inner') \
.select('top_artist_credit_id',
'top_artist_name',
'mb_artist_credit_id',
'mb_artist_credit_mbids',
'mb_recording_mbid',
'msb_artist_credit_name_matchable',
'msb_recording_name_matchable',
'recording_id',
'user_name',
'user_id')
top_artist_candidate_set_df_html = filter_last_x_days_recordings(joined_df, mapped_listens_subset)
top_artist_candidate_set_df = top_artist_candidate_set_df_html.select('recording_id', 'user_id', 'user_name')
return top_artist_candidate_set_df, top_artist_candidate_set_df_html
def get_similar_artist_candidate_set(similar_artist_df, recordings_df, users_df, mapped_listens_subset):
""" Get recording ids that belong to similar artists.
Args:
similar_artist_df: Dataframe containing artists similar to top artists.
recordings_df: Dataframe containing distinct recordings and corresponding
mbids and names.
users_df: Dataframe containing user names and user ids.
mapped_listens_subset: dataframe containing user listening history of the last X days.
Returns:
similar_artist_candidate_set_df (dataframe): recording ids that belong to similar artists
corresponding to user ids.
similar_artist_candidate_set_df_html (dataframe): similar artist info for html file
"""
condition = [
similar_artist_df.similar_artist_credit_id == recordings_df.mb_artist_credit_id
]
df = similar_artist_df.join(recordings_df, condition, 'inner')
joined_df = df.join(users_df, 'user_name', 'inner') \
.select('similar_artist_credit_id',
'similar_artist_name',
'mb_artist_credit_id',
'mb_artist_credit_mbids',
'mb_recording_mbid',
'msb_artist_credit_name_matchable',
'msb_recording_name_matchable',
'recording_id',
'user_name',
'user_id')
similar_artist_candidate_set_df_html = filter_last_x_days_recordings(joined_df, mapped_listens_subset)
similar_artist_candidate_set_df = similar_artist_candidate_set_df_html.select('recording_id', 'user_id', 'user_name')
return similar_artist_candidate_set_df, similar_artist_candidate_set_df_html
def save_candidate_sets(top_artist_candidate_set_df, similar_artist_candidate_set_df):
""" Save candidate sets to HDFS.
Args:
top_artist_candidate_set_df (dataframe): recording ids that belong to top artists
corresponding to user ids.
similar_artist_candidate_set_df (dataframe): recording ids that belong to similar artists
corresponding to user ids.
"""
try:
utils.save_parquet(top_artist_candidate_set_df, path.TOP_ARTIST_CANDIDATE_SET)
except FileNotSavedException as err:
current_app.logger.error(str(err), exc_info=True)
raise
try:
utils.save_parquet(similar_artist_candidate_set_df, path.SIMILAR_ARTIST_CANDIDATE_SET)
except FileNotSavedException as err:
current_app.logger.error(str(err), exc_info=True)
raise
def get_candidate_html_data(similar_artist_candidate_set_df_html, top_artist_candidate_set_df_html,
top_artist_df, similar_artist_df_html):
""" Get artists and recordings associated with users for HTML. The function is invoked
when candidate set HTML is to be generated.
Args:
similar_artist_candidate_set_df_html (dataframe): similar artists and related info.
top_artist_candidate_set_df_html (dataframe): top artists and related info.
top_artist_df (dataframe) : top artists listened to by users.
similar_artist_df_html (dataframe): similar artists and corresponding top artists
Returns:
user_data: Dictionary can be depicted as:
{
'user 1' : {
'top_artist': [],
'top_similar_artist': [],
'top_artist_candidate_set': [],
'top_similar_artist_candidate_set': []
}
.
.
.
}
"""
user_data = defaultdict(list)
for row in top_artist_df.collect():
if row.user_name not in user_data:
user_data[row.user_name] = defaultdict(list)
data = (
row.top_artist_name,
row.top_artist_credit_id,
row.total_count
)
user_data[row.user_name]['top_artist'].append(data)
for row in similar_artist_df_html.collect():
data = (
row.top_artist_name,
row.top_artist_credit_id,
row.similar_artist_name,
row.similar_artist_credit_id
)
user_data[row.user_name]['similar_artist'].append(data)
for row in top_artist_candidate_set_df_html.collect():
data = (
row.top_artist_credit_id,
row.top_artist_name,
row.mb_artist_credit_id,
row.mb_artist_credit_mbids,
row.mb_recording_mbid,
row.msb_artist_credit_name_matchable,
row.msb_recording_name_matchable,
row.recording_id
)
user_data[row.user_name]['top_artist_candidate_set'].append(data)
for row in similar_artist_candidate_set_df_html.collect():
data = (
row.similar_artist_credit_id,
row.similar_artist_name,
row.mb_artist_credit_id,
row.mb_artist_credit_mbids,
row.mb_recording_mbid,
row.msb_artist_credit_name_matchable,
row.msb_recording_name_matchable,
row.recording_id
)
user_data[row.user_name]['similar_artist_candidate_set'].append(data)
return user_data
def save_candidate_html(user_data, total_time, from_date, to_date):
""" Save user data to an HTML file.
Args:
user_data (dict): Top and similar artists associated to users.
total_time (str): time taken to generate candidate_sets
"""
date = datetime.utcnow().strftime('%Y-%m-%d')
candidate_html = 'Candidate-{}-{}.html'.format(uuid.uuid4(), date)
context = {
'user_data': user_data,
'total_time': total_time,
'from_date': from_date,
'to_date': to_date
}
save_html(candidate_html, context, 'candidate.html')
def main(recommendation_generation_window=None, top_artist_limit=None, similar_artist_limit=None,
users=None, html_flag=False):
time_initial = time.monotonic()
try:
listenbrainz_spark.init_spark_session('Candidate_set')
except SparkSessionNotInitializedException as err:
current_app.logger.error(str(err), exc_info=True)
raise
try:
mapped_listens_df = utils.read_files_from_HDFS(path.MAPPED_LISTENS)
recordings_df = utils.read_files_from_HDFS(path.RECORDINGS_DATAFRAME_PATH)
users_df = utils.read_files_from_HDFS(path.USERS_DATAFRAME_PATH)
artist_relation_df = utils.read_files_from_HDFS(path.SIMILAR_ARTIST_DATAFRAME_PATH)
except PathNotFoundException as err:
current_app.logger.error(str(err), exc_info=True)
raise
except FileNotFetchedException as err:
current_app.logger.error(str(err), exc_info=True)
raise
from_date, to_date = get_dates_to_generate_candidate_sets(mapped_listens_df, recommendation_generation_window)
current_app.logger.info('Fetching listens to get top artists...')
mapped_listens_subset = get_listens_to_fetch_top_artists(mapped_listens_df, from_date, to_date)
current_app.logger.info('Fetching top artists...')
top_artist_df = get_top_artists(mapped_listens_subset, top_artist_limit, users)
current_app.logger.info('Preparing top artists candidate set...')
top_artist_candidate_set_df, top_artist_candidate_set_df_html = get_top_artist_candidate_set(top_artist_df, recordings_df,
users_df, mapped_listens_subset)
current_app.logger.info('Fetching similar artists...')
similar_artist_df, similar_artist_df_html = get_similar_artists(top_artist_df, artist_relation_df, similar_artist_limit)
current_app.logger.info('Preparing similar artists candidate set...')
similar_artist_candidate_set_df, similar_artist_candidate_set_df_html = get_similar_artist_candidate_set(
similar_artist_df,
recordings_df,
users_df,
mapped_listens_subset)
current_app.logger.info('Saving candidate sets...')
save_candidate_sets(top_artist_candidate_set_df, similar_artist_candidate_set_df)
current_app.logger.info('Done!')
# time taken to generate candidate_sets
total_time = '{:.2f}'.format((time.monotonic() - time_initial) / 60)
if html_flag:
user_data = get_candidate_html_data(similar_artist_candidate_set_df_html, top_artist_candidate_set_df_html,
top_artist_df, similar_artist_df_html)
current_app.logger.info('Saving HTML...')
save_candidate_html(user_data, total_time, from_date, to_date)
current_app.logger.info('Done!')
message = [{
'type': 'cf_recording_candidate_sets',
'candidate_sets_upload_time': str(datetime.utcnow()),
'total_time': total_time,
'from_date': str(from_date),
'to_date': str(to_date)
}]
return message
| 1 | 17,125 | We should make this a docstring, so that editors are able to pick it up. | metabrainz-listenbrainz-server | py |
@@ -14,5 +14,13 @@ feature "User without a subscription views sample video" do
expect(current_path).to eq(video_path(video))
expect(page).to have_css("h1", text: video.name)
expect(page).not_to have_css(".locked-message")
+ expect_authed_to_access_event_fired_for(video)
+ end
+
+ def expect_authed_to_access_event_fired_for(video)
+ expect(analytics).to have_tracked("Authed to Access").with_properties(
+ video_name: video.name,
+ watchable_name: video.watchable_name,
+ )
end
end | 1 | require "rails_helper"
feature "User without a subscription views sample video" do
scenario "successfully" do
user = create(:user)
trail = create(:trail, :video)
video = trail.first_completeable
video.update accessible_without_subscription: true
visit trail_path(trail)
click_on "Start Course For Free"
expect(user.has_active_subscription?).to eq(false)
expect(current_path).to eq(video_path(video))
expect(page).to have_css("h1", text: video.name)
expect(page).not_to have_css(".locked-message")
end
end
| 1 | 16,271 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
@@ -150,10 +150,8 @@ public class SalesforceDroidGapActivity extends CordovaActivity {
webSettings.setDomStorageEnabled(true);
String cachePath = getApplicationContext().getCacheDir().getAbsolutePath();
webSettings.setAppCachePath(cachePath);
- webSettings.setAppCacheMaxSize(1024 * 1024 * 8);
webSettings.setAppCacheEnabled(true);
webSettings.setAllowFileAccess(true);
- webSettings.setSavePassword(false);
webSettings.setCacheMode(WebSettings.LOAD_DEFAULT);
EventsObservable.get().notifyEvent(EventType.GapWebViewCreateComplete, appView);
} | 1 | /*
* Copyright (c) 2011-12, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui.sfhybrid;
import java.net.URI;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.CordovaActivity;
import org.apache.cordova.CordovaWebView;
import org.apache.cordova.CordovaWebViewClient;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONObject;
import android.content.IntentFilter;
import android.os.Bundle;
import android.os.SystemClock;
import android.util.Log;
import android.view.View;
import android.webkit.CookieManager;
import android.webkit.CookieSyncManager;
import android.webkit.WebSettings;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess.NoNetworkException;
import com.salesforce.androidsdk.rest.ApiVersionStrings;
import com.salesforce.androidsdk.rest.BootConfig;
import com.salesforce.androidsdk.rest.ClientManager;
import com.salesforce.androidsdk.rest.ClientManager.AccountInfoNotFoundException;
import com.salesforce.androidsdk.rest.ClientManager.RestClientCallback;
import com.salesforce.androidsdk.rest.RestClient;
import com.salesforce.androidsdk.rest.RestClient.AsyncRequestCallback;
import com.salesforce.androidsdk.rest.RestClient.ClientInfo;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import com.salesforce.androidsdk.util.UserSwitchReceiver;
/**
* Class that defines the main activity for a PhoneGap-based application.
*/
public class SalesforceDroidGapActivity extends CordovaActivity {
// Keys in credentials map
private static final String USER_AGENT = "userAgent";
private static final String INSTANCE_URL = "instanceUrl";
private static final String LOGIN_URL = "loginUrl";
private static final String IDENTITY_URL = "identityUrl";
private static final String CLIENT_ID = "clientId";
private static final String ORG_ID = "orgId";
private static final String USER_ID = "userId";
private static final String REFRESH_TOKEN = "refreshToken";
private static final String ACCESS_TOKEN = "accessToken";
private static final String COMMUNITY_ID = "communityId";
private static final String COMMUNITY_URL = "communityUrl";
// Used in refresh REST call
private static final String API_VERSION = ApiVersionStrings.VERSION_NUMBER;
// Rest client
private RestClient client;
private ClientManager clientManager;
// Config
private BootConfig bootconfig;
private PasscodeManager passcodeManager;
private UserSwitchReceiver userSwitchReceiver;
// Web app loaded?
private boolean webAppLoaded = false;
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i("SalesforceDroidGapActivity.onCreate", "onCreate called");
super.onCreate(savedInstanceState);
init();
// Get bootconfig
bootconfig = BootConfig.getBootConfig(this);
// Get clientManager
clientManager = buildClientManager();
// Passcode manager
passcodeManager = SalesforceSDKManager.getInstance().getPasscodeManager();
userSwitchReceiver = new DroidGapUserSwitchReceiver();
registerReceiver(userSwitchReceiver, new IntentFilter(UserAccountManager.USER_SWITCH_INTENT_ACTION));
// Ensure we have a CookieSyncManager
CookieSyncManager.createInstance(this);
// Let observers know
EventsObservable.get().notifyEvent(EventType.MainActivityCreateComplete, this);
}
protected ClientManager buildClientManager() {
return new ClientManager(this, SalesforceSDKManager.getInstance().getAccountType(),
SalesforceSDKManager.getInstance().getLoginOptions(),
SalesforceSDKManager.getInstance().shouldLogoutWhenTokenRevoked());
}
@Override
public void init()
{
Log.i("SalesforceDroidGapActivity.init", "init called");
super.init();
final String uaStr = SalesforceSDKManager.getInstance().getUserAgent();
if (null != this.appView) {
WebSettings webSettings = this.appView.getSettings();
String origUserAgent = webSettings.getUserAgentString();
final String extendedUserAgentString = uaStr + " Hybrid " + (origUserAgent == null ? "" : origUserAgent);
webSettings.setUserAgentString(extendedUserAgentString);
// Configure HTML5 cache support.
webSettings.setDomStorageEnabled(true);
String cachePath = getApplicationContext().getCacheDir().getAbsolutePath();
webSettings.setAppCachePath(cachePath);
webSettings.setAppCacheMaxSize(1024 * 1024 * 8);
webSettings.setAppCacheEnabled(true);
webSettings.setAllowFileAccess(true);
webSettings.setSavePassword(false);
webSettings.setCacheMode(WebSettings.LOAD_DEFAULT);
EventsObservable.get().notifyEvent(EventType.GapWebViewCreateComplete, appView);
}
}
@Override
protected CordovaWebViewClient makeWebViewClient(CordovaWebView webView) {
return new SalesforceIceCreamWebViewClient(this, webView);
}
@Override
public void onResume() {
super.onResume();
if (passcodeManager.onResume(this)) {
// Get client (if already logged in)
try {
client = clientManager.peekRestClient();
} catch (AccountInfoNotFoundException e) {
client = null;
}
// Not logged in
if (client == null) {
onResumeNotLoggedIn();
}
// Logged in
else {
// Web app never loaded
if (!webAppLoaded) {
onResumeLoggedInNotLoaded();
}
// Web app already loaded
else {
Log.i("SalesforceDroidGapActivity.onResume", "Already logged in / web app already loaded");
}
CookieSyncManager.getInstance().startSync();
}
}
}
/**
* Restarts the activity if the user has been switched.
*/
private void restartIfUserSwitched() {
if (client != null) {
try {
RestClient currentClient = clientManager.peekRestClient();
if (currentClient != null && !currentClient.getClientInfo().userId.equals(client.getClientInfo().userId)) {
this.recreate();
}
} catch (AccountInfoNotFoundException e) {
Log.i("SalesforceDroidGapActivity.restartIfUserSwitched", "No user account found");
}
}
}
/**
* Called when resuming activity and user is not authenticated
*/
private void onResumeNotLoggedIn() {
// Need to be authenticated
if (bootconfig.shouldAuthenticate()) {
// Online
if (SalesforceSDKManager.getInstance().hasNetwork()) {
Log.i("SalesforceDroidGapActivity.onResumeNotLoggedIn", "Should authenticate / online - authenticating");
authenticate(null);
}
// Offline
else {
Log.w("SalesforceDroidGapActivity.onResumeNotLoggedIn", "Should authenticate / offline - cannot proceed");
loadErrorPage();
}
}
// Does not need to be authenticated
else {
// Local
if (bootconfig.isLocal()) {
Log.i("SalesforceDroidGapActivity.onResumeNotLoggedIn", "Should not authenticate / local start page - loading web app");
loadLocalStartPage();
}
// Remote
else {
Log.w("SalesforceDroidGapActivity.onResumeNotLoggedIn", "Should not authenticate / remote start page - cannot proceed");
loadErrorPage();
}
}
}
/**
* Called when resuming activity and user is authenticated but webview has not been loaded yet
*/
private void onResumeLoggedInNotLoaded() {
// Local
if (bootconfig.isLocal()) {
Log.i("SalesforceDroidGapActivity.onResumeLoggedInNotLoaded", "Local start page - loading web app");
loadLocalStartPage();
}
// Remote
else {
// Online
if (SalesforceSDKManager.getInstance().hasNetwork()) {
Log.i("SalesforceDroidGapActivity.onResumeLoggedInNotLoaded", "Remote start page / online - loading web app");
loadRemoteStartPage();
}
// Offline
else {
// Has cached version
if (SalesforceWebViewClientHelper.hasCachedAppHome(this)) {
Log.i("SalesforceDroidGapActivity.onResumeLoggedInNotLoaded", "Remote start page / offline / cached - loading cached web app");
loadCachedStartPage();
}
// No cached version
else {
Log.w("SalesforceDroidGapActivity.onResumeLoggedInNotLoaded", "Remote start page / offline / not cached - cannot proceed");
loadErrorPage();
}
}
}
}
@Override
public void onPause() {
super.onPause();
passcodeManager.onPause(this);
CookieSyncManager.getInstance().stopSync();
}
@Override
public void onDestroy() {
unregisterReceiver(userSwitchReceiver);
super.onDestroy();
}
@Override
public void onUserInteraction() {
passcodeManager.recordUserInteraction();
}
public BootConfig getBootConfig() {
return bootconfig;
}
/**
* Get a RestClient and refresh the auth token
* @param callbackContext when not null credentials/errors are sent through to callbackContext.success()/error()
*/
public void authenticate(final CallbackContext callbackContext) {
Log.i("SalesforceDroidGapActivity.authenticate", "authenticate called");
clientManager.getRestClient(this, new RestClientCallback() {
@Override
public void authenticatedRestClient(RestClient client) {
if (client == null) {
Log.i("SalesforceDroidGapActivity.authenticate", "authenticatedRestClient called with null client");
SalesforceSDKManager.getInstance().logout(SalesforceDroidGapActivity.this);
} else {
Log.i("SalesforceDroidGapActivity.authenticate", "authenticatedRestClient called with actual client");
SalesforceDroidGapActivity.this.client = client;
/*
* Do a cheap REST call to refresh the access token if needed.
* If the login took place a while back (e.g. the already logged
* in application was restarted), then the returned session ID
* (access token) might be stale. This is not an issue if one
* uses exclusively RestClient for calling the server because
* it takes care of refreshing the access token when needed,
* but a stale session ID will cause the WebView to redirect
* to the web login.
*/
SalesforceDroidGapActivity.this.client.sendAsync(RestRequest.getRequestForResources(API_VERSION), new AsyncRequestCallback() {
@Override
public void onSuccess(RestRequest request, RestResponse response) {
/*
* The client instance being used here needs to be
* refreshed, to ensure we use the new access token.
*/
SalesforceDroidGapActivity.this.client = SalesforceDroidGapActivity.this.clientManager.peekRestClient();
setSidCookies();
loadVFPingPage();
if (callbackContext != null) {
callbackContext.success(getJSONCredentials());
}
}
@Override
public void onError(Exception exception) {
if (callbackContext != null) {
callbackContext.error(exception.getMessage());
}
}
});
}
}
});
}
/**
* If an action causes a redirect to the login page, this method will be called.
* It causes the session to be refreshed and reloads url through the front door.
* @param url the page to load once the session has been refreshed.
*/
public void refresh(final String url) {
Log.i("SalesforceDroidGapActivity.refresh", "refresh called");
client.sendAsync(RestRequest.getRequestForResources(API_VERSION), new AsyncRequestCallback() {
@Override
public void onSuccess(RestRequest request, RestResponse response) {
Log.i("SalesforceDroidGapActivity.refresh", "Refresh succeeded");
/*
* The client instance being used here needs to be
* refreshed, to ensure we use the new access token.
*/
SalesforceDroidGapActivity.this.client = SalesforceDroidGapActivity.this.clientManager.peekRestClient();
setSidCookies();
loadVFPingPage();
final String frontDoorUrl = getFrontDoorUrl(url, true);
loadUrl(frontDoorUrl);
}
@Override
public void onError(Exception exception) {
Log.w("SalesforceDroidGapActivity.refresh", "Refresh failed - " + exception);
// Only logout if we are NOT offline
if (!(exception instanceof NoNetworkException)) {
SalesforceSDKManager.getInstance().logout(SalesforceDroidGapActivity.this);
}
}
});
}
/**
* Loads the VF ping page and sets cookies.
*/
private void loadVFPingPage() {
if (!bootconfig.isLocal()) {
final ClientInfo clientInfo = SalesforceDroidGapActivity.this.client.getClientInfo();
URI instanceUrl = null;
if (clientInfo != null) {
instanceUrl = clientInfo.getInstanceUrl();
}
setVFCookies(instanceUrl);
}
}
/**
* Sets VF domain cookies by loading the VF ping page on an invisible WebView.
*
* @param instanceUrl Instance URL.
*/
private static void setVFCookies(URI instanceUrl) {
if (instanceUrl != null) {
final WebView view = new WebView(SalesforceSDKManager.getInstance().getAppContext());
view.setVisibility(View.GONE);
view.setWebViewClient(new WebViewClient() {
@Override
public boolean shouldOverrideUrlLoading(WebView view, String url) {
final CookieSyncManager cookieSyncMgr = CookieSyncManager.getInstance();
final CookieManager cookieMgr = CookieManager.getInstance();
cookieMgr.setAcceptCookie(true);
cookieSyncMgr.sync();
return true;
}
});
view.loadUrl(instanceUrl.toString() + "/visualforce/session?url=/apexpages/utils/ping.apexp&autoPrefixVFDomain=true");
}
}
/**
* Load local start page
*/
public void loadLocalStartPage() {
assert bootconfig.isLocal();
String startPage = bootconfig.getStartPage();
Log.i("SalesforceDroidGapActivity.loadLocalStartPage", "loading: " + startPage);
loadUrl("file:///android_asset/www/" + startPage);
webAppLoaded = true;
}
/**
* Load remote start page (front-doored)
*/
public void loadRemoteStartPage() {
assert !bootconfig.isLocal();
String startPage = bootconfig.getStartPage();
Log.i("SalesforceDroidGapActivity.loadRemoteStartPage", "loading: " + startPage);
String url = getFrontDoorUrl(startPage, false);
loadUrl(url);
webAppLoaded = true;
}
/**
* Returns the front-doored URL of a URL passed in.
*
* @param url URL to be front-doored.
* @param isAbsUrl True - if the URL should be used as is, False - otherwise.
* @return Front-doored URL.
*/
public String getFrontDoorUrl(String url, boolean isAbsUrl) {
String frontDoorUrl = client.getClientInfo().getInstanceUrlAsString() + "/secur/frontdoor.jsp?";
List<NameValuePair> params = new LinkedList<NameValuePair>();
params.add(new BasicNameValuePair("sid", client.getAuthToken()));
/*
* We need to use the absolute URL in some cases and relative URL in some
* other cases, because of differences between instance URL and community
* URL. Community URL can be custom and the logic of determining which
* URL to use is in the 'resolveUrl' method in 'ClientInfo'.
*/
url = (isAbsUrl ? url : client.getClientInfo().resolveUrl(url).toString());
params.add(new BasicNameValuePair("retURL", url));
params.add(new BasicNameValuePair("display", "touch"));
frontDoorUrl += URLEncodedUtils.format(params, "UTF-8");
return frontDoorUrl;
}
/**
* Load cached start page
*/
private void loadCachedStartPage() {
String url = SalesforceWebViewClientHelper.getAppHomeUrl(this);
loadUrl(url);
webAppLoaded = true;
}
/**
* Load error page
*/
public void loadErrorPage() {
Log.i("SalesforceDroidGapActivity.getErrorPageUrl", "getErrorPageUrl called");
String errorPage = bootconfig.getErrorPage();
Log.i("SalesforceDroidGapActivity.getErrorPageUrl", "local error page: " + errorPage);
loadUrl("file:///android_asset/www/" + errorPage);
}
/**
* Set cookies on cookie manager
*/
private void setSidCookies() {
Log.i("SalesforceDroidGapActivity.setSidCookies", "setting cookies");
CookieSyncManager cookieSyncMgr = CookieSyncManager.getInstance();
CookieManager cookieMgr = CookieManager.getInstance();
cookieMgr.setAcceptCookie(true); // Required to set additional cookies that the auth process will return.
cookieMgr.removeSessionCookie();
SystemClock.sleep(250); // removeSessionCookies kicks out a thread - let it finish
String accessToken = client.getAuthToken();
addSidCookieForInstance(cookieMgr,".salesforce.com", accessToken);
cookieSyncMgr.sync();
}
private void addSidCookieForInstance(CookieManager cookieMgr, String domain, String sid) {
final ClientInfo clientInfo = SalesforceDroidGapActivity.this.client.getClientInfo();
URI instanceUrl = null;
if (clientInfo != null) {
instanceUrl = clientInfo.getInstanceUrl();
}
String host = null;
if (instanceUrl != null) {
host = instanceUrl.getHost();
}
if (host != null) {
addSidCookieForDomain(cookieMgr, host, sid);
}
}
private void addSidCookieForDomain(CookieManager cookieMgr, String domain, String sid) {
String cookieStr = "sid=" + sid;
cookieMgr.setCookie(domain, cookieStr);
}
/**
* @return credentials as JSONObject
*/
public JSONObject getJSONCredentials() {
if (client != null) {
ClientInfo clientInfo = client.getClientInfo();
Map<String, String> data = new HashMap<String, String>();
data.put(ACCESS_TOKEN, client.getAuthToken());
data.put(REFRESH_TOKEN, client.getRefreshToken());
data.put(USER_ID, clientInfo.userId);
data.put(ORG_ID, clientInfo.orgId);
data.put(CLIENT_ID, clientInfo.clientId);
data.put(LOGIN_URL, clientInfo.loginUrl.toString());
data.put(IDENTITY_URL, clientInfo.identityUrl.toString());
data.put(INSTANCE_URL, clientInfo.instanceUrl.toString());
data.put(USER_AGENT, SalesforceSDKManager.getInstance().getUserAgent());
data.put(COMMUNITY_ID, clientInfo.communityId);
data.put(COMMUNITY_URL, clientInfo.communityUrl);
return new JSONObject(data);
} else {
return null;
}
}
/**
* Exception thrown if initial web page load fails.
*/
public static class HybridAppLoadException extends RuntimeException {
public HybridAppLoadException(String msg) {
super(msg);
}
private static final long serialVersionUID = 1L;
}
/**
* Acts on the user switch event.
*
* @author bhariharan
*/
private class DroidGapUserSwitchReceiver extends UserSwitchReceiver {
@Override
protected void onUserSwitch() {
restartIfUserSwitched();
}
}
}
| 1 | 14,490 | App cache size is now managed dynamically by the `WebView`. This statement has no effect in the new framework. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -1,4 +1,6 @@
-var nn = node.nodeName.toLowerCase();
-return (
- node.hasAttribute('alt') && (nn === 'img' || nn === 'input' || nn === 'area')
-);
+const { nodeName } = virtualNode.props;
+if (['img', 'input', 'area'].includes(nodeName) === false) {
+ return false;
+}
+
+return typeof virtualNode.attr('alt') === 'string'; | 1 | var nn = node.nodeName.toLowerCase();
return (
node.hasAttribute('alt') && (nn === 'img' || nn === 'input' || nn === 'area')
);
| 1 | 15,124 | VirtualNode has a `hasAttr` function, any reason why you're not using it? | dequelabs-axe-core | js |
@@ -69,7 +69,6 @@ public class HttpAccess {
* Initializes HttpAccess. Should be called from the application.
*/
public static void init(Context app) {
- assert DEFAULT == null : "HttpAccess.init should be called once per process";
DEFAULT = new HttpAccess(app, null /* user agent will be calculated at request time */);
}
| 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.auth;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import okhttp3.ConnectionSpec;
import okhttp3.Interceptor;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.TlsVersion;
/**
* Generic HTTP Access layer - used internally by {@link com.salesforce.androidsdk.rest.RestClient}
* and {@link OAuth2}. This class watches network changes as well.
*/
public class HttpAccess {
// Timeouts.
public static final int CONNECT_TIMEOUT = 60;
public static final int READ_TIMEOUT = 20;
// User agent header name.
private static final String USER_AGENT = "User-Agent";
private String userAgent;
private OkHttpClient okHttpClient;
// Connection manager.
private final ConnectivityManager conMgr;
// Singleton instance.
public static HttpAccess DEFAULT;
/**
* Initializes HttpAccess. Should be called from the application.
*/
public static void init(Context app) {
assert DEFAULT == null : "HttpAccess.init should be called once per process";
DEFAULT = new HttpAccess(app, null /* user agent will be calculated at request time */);
}
/**
* Parameterized constructor.
*
* @param app Reference to the application.
* @param userAgent The user agent to be used with requests.
*/
public HttpAccess(Context app, String userAgent) {
this.userAgent = userAgent;
// Only null in tests.
if (app == null) {
conMgr = null;
} else {
// Gets the connectivity manager and current network type.
conMgr = (ConnectivityManager) app.getSystemService(Context.CONNECTIVITY_SERVICE);
}
}
/**
*
* @return okHttpClient.Builder with appropriate connection spec and user agent interceptor
*/
public OkHttpClient.Builder getOkHttpClientBuilder() {
ConnectionSpec connectionSpec = new ConnectionSpec.Builder(ConnectionSpec.MODERN_TLS)
.tlsVersions(TlsVersion.TLS_1_1, TlsVersion.TLS_1_2)
.build();
OkHttpClient.Builder builder = new OkHttpClient.Builder()
.connectionSpecs(Collections.singletonList(connectionSpec))
.connectTimeout(CONNECT_TIMEOUT, TimeUnit.SECONDS)
.readTimeout(READ_TIMEOUT, TimeUnit.SECONDS)
.addNetworkInterceptor(new UserAgentInterceptor());
return builder;
}
/**
*
* @return okHttpClient tied to this HttpAccess - builds one if needed
*/
public synchronized OkHttpClient getOkHttpClient() {
if (okHttpClient == null) {
okHttpClient = getOkHttpClientBuilder().build();
}
return okHttpClient;
}
/**
* Returns the status of network connectivity.
*
* @return True - if network connectivity is available, False - otherwise.
*/
public synchronized boolean hasNetwork() {
boolean isConnected = true;
if (conMgr != null) {
final NetworkInfo activeInfo = conMgr.getActiveNetworkInfo();
if (activeInfo == null || !activeInfo.isConnected()) {
isConnected = false;
}
}
return isConnected;
}
/**
* Returns the current user agent.
*
* @return User agent.
*/
public String getUserAgent() {
return userAgent;
}
/**
* Exception thrown if the device is offline, during an attempted HTTP call.
*/
public static class NoNetworkException extends IOException {
private static final long serialVersionUID = 1L;
public NoNetworkException(String msg) {
super(msg);
}
}
/**
* Interceptor that adds user agent header
*/
public static class UserAgentInterceptor implements Interceptor {
private String userAgent;
public UserAgentInterceptor() {
			// Use this constructor to have the user agent computed for each call
}
public UserAgentInterceptor(String userAgent) {
this.userAgent = userAgent;
}
@Override
public Response intercept(Chain chain) throws IOException {
Request originalRequest = chain.request();
Request requestWithUserAgent = originalRequest.newBuilder()
.header(HttpAccess.USER_AGENT, userAgent == null ? SalesforceSDKManager.getInstance().getUserAgent() : userAgent)
.build();
return chain.proceed(requestWithUserAgent);
}
}
}
| 1 | 17,992 | This now throws when running tests (maybe the move to java 11??). Do we want to keep it? | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -45,8 +45,8 @@ func EncodeSha1(str string) string {
}
func ShortSha(sha1 string) string {
- if len(sha1) == 40 {
- return sha1[:10]
+ if len(sha1) > 7 {
+ return sha1[:7]
}
return sha1
} | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (
"crypto/hmac"
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"html/template"
"math"
"net/http"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/Unknwon/com"
"github.com/Unknwon/i18n"
"github.com/gogits/chardet"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
)
// EncodeMD5 encodes string to md5 hex value.
func EncodeMD5(str string) string {
m := md5.New()
m.Write([]byte(str))
return hex.EncodeToString(m.Sum(nil))
}
// EncodeSha1 encodes a string to its sha1 hex value.
func EncodeSha1(str string) string {
h := sha1.New()
h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
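// ShortSha returns an abbreviated form of a full 40-character SHA1 string.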
func ShortSha(sha1 string) string {
if len(sha1) == 40 {
return sha1[:10]
}
return sha1
}
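// DetectEncoding returns the character encoding detected for content,
// short-circuiting to UTF-8 when the content is already valid UTF-8.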
func DetectEncoding(content []byte) (string, error) {
if utf8.Valid(content) {
log.Debug("Detected encoding: utf-8 (fast)")
return "UTF-8", nil
}
result, err := chardet.NewTextDetector().DetectBest(content)
if result.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 {
log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
return setting.Repository.AnsiCharset, err
}
log.Debug("Detected encoding: %s", result.Charset)
return result.Charset, err
}
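// BasicAuthDecode decodes a base64-encoded "username:password" pair.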
func BasicAuthDecode(encoded string) (string, string, error) {
s, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
return "", "", err
}
auth := strings.SplitN(string(s), ":", 2)
return auth[0], auth[1], nil
}
func BasicAuthEncode(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
// GetRandomString generates a random string using the specified chars.
func GetRandomString(n int, alphabets ...byte) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
if len(alphabets) == 0 {
bytes[i] = alphanum[b%byte(len(alphanum))]
} else {
bytes[i] = alphabets[b%byte(len(alphabets))]
}
}
return string(bytes)
}
// http://code.google.com/p/go/source/browse/pbkdf2/pbkdf2.go?repo=crypto
// FIXME: use https://godoc.org/golang.org/x/crypto/pbkdf2?
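// A rough equivalent using the maintained package would be (sketch, assuming
// golang.org/x/crypto is added as a dependency):
//
//	import "golang.org/x/crypto/pbkdf2"
//	dk := pbkdf2.Key(password, salt, iter, keyLen, h)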
func PBKDF2(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
prf := hmac.New(h, password)
hashLen := prf.Size()
numBlocks := (keyLen + hashLen - 1) / hashLen
var buf [4]byte
dk := make([]byte, 0, numBlocks*hashLen)
U := make([]byte, hashLen)
for block := 1; block <= numBlocks; block++ {
// N.B.: || means concatenation, ^ means XOR
// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
// U_1 = PRF(password, salt || uint(i))
prf.Reset()
prf.Write(salt)
buf[0] = byte(block >> 24)
buf[1] = byte(block >> 16)
buf[2] = byte(block >> 8)
buf[3] = byte(block)
prf.Write(buf[:4])
dk = prf.Sum(dk)
T := dk[len(dk)-hashLen:]
copy(U, T)
// U_n = PRF(password, U_(n-1))
for n := 2; n <= iter; n++ {
prf.Reset()
prf.Write(U)
U = U[:0]
U = prf.Sum(U)
for x := range U {
T[x] ^= U[x]
}
}
}
return dk[:keyLen]
}
// VerifyTimeLimitCode verifies a time limit code against the given data and lifetime in minutes.
func VerifyTimeLimitCode(data string, minutes int, code string) bool {
if len(code) <= 18 {
return false
}
// split code
start := code[:12]
lives := code[12:18]
if d, err := com.StrTo(lives).Int(); err == nil {
minutes = d
}
// right active code
retCode := CreateTimeLimitCode(data, minutes, start)
if retCode == code && minutes > 0 {
// check time is expired or not
before, _ := time.ParseInLocation("200601021504", start, time.Local)
now := time.Now()
if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() {
return true
}
}
return false
}
const TimeLimitCodeLength = 12 + 6 + 40
// CreateTimeLimitCode creates a time limit code.
// Code format: 12-character date/time string + 6-character minutes string + 40-character sha1-encoded string.
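// e.g. "202001021504" + "000030" + "<40 hex chars>" for a code created at
// 2020-01-02 15:04 and valid for 30 minutes (minutes are zero-padded via %06d).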
func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string {
format := "200601021504"
var start, end time.Time
var startStr, endStr string
if startInf == nil {
// Use now time create code
start = time.Now()
startStr = start.Format(format)
} else {
// use start string create code
startStr = startInf.(string)
start, _ = time.ParseInLocation(format, startStr, time.Local)
startStr = start.Format(format)
}
end = start.Add(time.Minute * time.Duration(minutes))
endStr = end.Format(format)
// create sha1 encode string
sh := sha1.New()
sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
encoded := hex.EncodeToString(sh.Sum(nil))
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
return code
}
// HashEmail hashes email address to MD5 string.
// https://en.gravatar.com/site/implement/hash/
func HashEmail(email string) string {
email = strings.ToLower(strings.TrimSpace(email))
h := md5.New()
h.Write([]byte(email))
return hex.EncodeToString(h.Sum(nil))
}
// AvatarLink returns avatar link by given email.
func AvatarLink(email string) string {
if setting.DisableGravatar || setting.OfflineMode {
return setting.AppSubUrl + "/img/avatar_default.png"
}
return setting.GravatarSource + HashEmail(email)
}
// Seconds-based time units
const (
Minute = 60
Hour = 60 * Minute
Day = 24 * Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
)
func computeTimeDiff(diff int64) (int64, string) {
diffStr := ""
switch {
case diff <= 0:
diff = 0
diffStr = "now"
case diff < 2:
diff = 0
diffStr = "1 second"
case diff < 1*Minute:
diffStr = fmt.Sprintf("%d seconds", diff)
diff = 0
case diff < 2*Minute:
diff -= 1 * Minute
diffStr = "1 minute"
case diff < 1*Hour:
diffStr = fmt.Sprintf("%d minutes", diff/Minute)
diff -= diff / Minute * Minute
case diff < 2*Hour:
diff -= 1 * Hour
diffStr = "1 hour"
case diff < 1*Day:
diffStr = fmt.Sprintf("%d hours", diff/Hour)
diff -= diff / Hour * Hour
case diff < 2*Day:
diff -= 1 * Day
diffStr = "1 day"
case diff < 1*Week:
diffStr = fmt.Sprintf("%d days", diff/Day)
diff -= diff / Day * Day
case diff < 2*Week:
diff -= 1 * Week
diffStr = "1 week"
case diff < 1*Month:
diffStr = fmt.Sprintf("%d weeks", diff/Week)
diff -= diff / Week * Week
case diff < 2*Month:
diff -= 1 * Month
diffStr = "1 month"
case diff < 1*Year:
diffStr = fmt.Sprintf("%d months", diff/Month)
diff -= diff / Month * Month
case diff < 2*Year:
diff -= 1 * Year
diffStr = "1 year"
default:
diffStr = fmt.Sprintf("%d years", diff/Year)
diff = 0
}
return diff, diffStr
}
// TimeSincePro calculates the time interval and generate full user-friendly string.
func TimeSincePro(then time.Time) string {
now := time.Now()
diff := now.Unix() - then.Unix()
if then.After(now) {
return "future"
}
var timeStr, diffStr string
for {
if diff == 0 {
break
}
diff, diffStr = computeTimeDiff(diff)
timeStr += ", " + diffStr
}
return strings.TrimPrefix(timeStr, ", ")
}
func timeSince(then time.Time, lang string) string {
now := time.Now()
lbl := i18n.Tr(lang, "tool.ago")
diff := now.Unix() - then.Unix()
if then.After(now) {
lbl = i18n.Tr(lang, "tool.from_now")
diff = then.Unix() - now.Unix()
}
switch {
case diff <= 0:
return i18n.Tr(lang, "tool.now")
case diff <= 2:
return i18n.Tr(lang, "tool.1s", lbl)
case diff < 1*Minute:
return i18n.Tr(lang, "tool.seconds", diff, lbl)
case diff < 2*Minute:
return i18n.Tr(lang, "tool.1m", lbl)
case diff < 1*Hour:
return i18n.Tr(lang, "tool.minutes", diff/Minute, lbl)
case diff < 2*Hour:
return i18n.Tr(lang, "tool.1h", lbl)
case diff < 1*Day:
return i18n.Tr(lang, "tool.hours", diff/Hour, lbl)
case diff < 2*Day:
return i18n.Tr(lang, "tool.1d", lbl)
case diff < 1*Week:
return i18n.Tr(lang, "tool.days", diff/Day, lbl)
case diff < 2*Week:
return i18n.Tr(lang, "tool.1w", lbl)
case diff < 1*Month:
return i18n.Tr(lang, "tool.weeks", diff/Week, lbl)
case diff < 2*Month:
return i18n.Tr(lang, "tool.1mon", lbl)
case diff < 1*Year:
return i18n.Tr(lang, "tool.months", diff/Month, lbl)
case diff < 2*Year:
return i18n.Tr(lang, "tool.1y", lbl)
default:
return i18n.Tr(lang, "tool.years", diff/Year, lbl)
}
}
func RawTimeSince(t time.Time, lang string) string {
return timeSince(t, lang)
}
// TimeSince calculates the time interval and generate user-friendly string.
func TimeSince(t time.Time, lang string) template.HTML {
return template.HTML(fmt.Sprintf(`<span class="time-since" title="%s">%s</span>`, t.Format(setting.TimeFormat), timeSince(t, lang)))
}
const (
Byte = 1
KByte = Byte * 1024
MByte = KByte * 1024
GByte = MByte * 1024
TByte = GByte * 1024
PByte = TByte * 1024
EByte = PByte * 1024
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kb": KByte,
"mb": MByte,
"gb": GByte,
"tb": TByte,
"pb": PByte,
"eb": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%dB", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := float64(s) / math.Pow(base, math.Floor(e))
f := "%.0f"
if val < 10 {
f = "%.1f"
}
return fmt.Sprintf(f+"%s", val, suffix)
}
// FileSize calculates the file size and generate user-friendly string.
func FileSize(s int64) string {
sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(uint64(s), 1024, sizes)
}
// Subtract deals with subtraction of all types of number.
func Subtract(left interface{}, right interface{}) interface{} {
var rleft, rright int64
var fleft, fright float64
var isInt bool = true
switch left.(type) {
case int:
rleft = int64(left.(int))
case int8:
rleft = int64(left.(int8))
case int16:
rleft = int64(left.(int16))
case int32:
rleft = int64(left.(int32))
case int64:
rleft = left.(int64)
case float32:
fleft = float64(left.(float32))
isInt = false
case float64:
fleft = left.(float64)
isInt = false
}
switch right.(type) {
case int:
rright = int64(right.(int))
case int8:
rright = int64(right.(int8))
case int16:
rright = int64(right.(int16))
case int32:
rright = int64(right.(int32))
case int64:
rright = right.(int64)
case float32:
		fright = float64(right.(float32))
		isInt = false
	case float64:
		fright = right.(float64)
isInt = false
}
if isInt {
return rleft - rright
} else {
return fleft + float64(rleft) - (fright + float64(rright))
}
}
// EllipsisString returns a truncated short string;
// it appends '...' at the end if the string is longer than the given length.
func EllipsisString(str string, length int) string {
if len(str) < length {
return str
}
return str[:length-3] + "..."
}
// TruncateString returns the string truncated to the given limit;
// it returns the input string unchanged if its length does not reach the limit.
func TruncateString(str string, limit int) string {
if len(str) < limit {
return str
}
return str[:limit]
}
// StringsToInt64s converts a slice of string to a slice of int64.
func StringsToInt64s(strs []string) []int64 {
ints := make([]int64, len(strs))
for i := range strs {
ints[i] = com.StrTo(strs[i]).MustInt64()
}
return ints
}
// Int64sToStrings converts a slice of int64 to a slice of string.
func Int64sToStrings(ints []int64) []string {
strs := make([]string, len(ints))
for i := range ints {
strs[i] = com.ToStr(ints[i])
}
return strs
}
// Int64sToMap converts a slice of int64 to an int64 map.
func Int64sToMap(ints []int64) map[int64]bool {
m := make(map[int64]bool)
for _, i := range ints {
m[i] = true
}
return m
}
// IsLetter reports whether the rune is a letter (category L).
// https://github.com/golang/go/blob/master/src/go/scanner/scanner.go#L257
func IsLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
func IsTextFile(data []byte) (string, bool) {
contentType := http.DetectContentType(data)
if strings.Index(contentType, "text/") != -1 {
return contentType, true
}
return contentType, false
}
func IsImageFile(data []byte) (string, bool) {
contentType := http.DetectContentType(data)
if strings.Index(contentType, "image/") != -1 {
return contentType, true
}
return contentType, false
}
func IsPDFFile(data []byte) (string, bool) {
contentType := http.DetectContentType(data)
if strings.Index(contentType, "application/pdf") != -1 {
return contentType, true
}
return contentType, false
}
| 1 | 11,761 | We can allow 7-char SHA, does not mean we want to show with 7-char in default, please change to `if len() > 10`, then cut. | gogs-gogs | go |
@@ -66,8 +66,10 @@ en_US.strings = {
loading: 'Loading...',
logOut: 'Log out',
myDevice: 'My Device',
+ noDuplicates: 'Cannot add the duplicate file %{fileName}, it already exists',
noFilesFound: 'You have no files or folders here',
noInternetConnection: 'No Internet connection',
+ noNewAlreadyUploading: 'Cannot add new files: already uploading',
openFolderNamed: 'Open folder %{name}',
pause: 'Pause',
pauseUpload: 'Pause upload', | 1 | const en_US = {}
en_US.strings = {
addBulkFilesFailed: {
'0': 'Failed to add %{smart_count} file due to an internal error',
'1': 'Failed to add %{smart_count} files due to internal errors'
},
addMore: 'Add more',
addMoreFiles: 'Add more files',
addingMoreFiles: 'Adding more files',
allowAccessDescription: 'In order to take pictures or record video with your camera, please allow camera access for this site.',
allowAccessTitle: 'Please allow access to your camera',
authenticateWith: 'Connect to %{pluginName}',
authenticateWithTitle: 'Please authenticate with %{pluginName} to select files',
back: 'Back',
browse: 'browse',
cancel: 'Cancel',
cancelUpload: 'Cancel upload',
chooseFiles: 'Choose files',
closeModal: 'Close Modal',
companionAuthError: 'Authorization required',
companionError: 'Connection with Companion failed',
companionUnauthorizeHint: 'To unauthorize to your %{provider} account, please go to %{url}',
complete: 'Complete',
connectedToInternet: 'Connected to the Internet',
copyLink: 'Copy link',
copyLinkToClipboardFallback: 'Copy the URL below',
copyLinkToClipboardSuccess: 'Link copied to clipboard',
creatingAssembly: 'Preparing upload...',
creatingAssemblyFailed: 'Transloadit: Could not create Assembly',
dashboardTitle: 'File Uploader',
dashboardWindowTitle: 'File Uploader Window (Press escape to close)',
dataUploadedOfTotal: '%{complete} of %{total}',
done: 'Done',
dropHereOr: 'Drop files here or %{browse}',
dropHint: 'Drop your files here',
dropPaste: 'Drop files here, paste or %{browse}',
dropPasteImport: 'Drop files here, paste, %{browse} or import from',
edit: 'Edit',
editFile: 'Edit file',
editing: 'Editing %{file}',
emptyFolderAdded: 'No files were added from empty folder',
encoding: 'Encoding...',
enterCorrectUrl: 'Incorrect URL: Please make sure you are entering a direct link to a file',
enterUrlToImport: 'Enter URL to import a file',
exceedsSize: 'This file exceeds maximum allowed size of',
failedToFetch: 'Companion failed to fetch this URL, please make sure it’s correct',
failedToUpload: 'Failed to upload %{file}',
fileSource: 'File source: %{name}',
filesUploadedOfTotal: {
'0': '%{complete} of %{smart_count} file uploaded',
'1': '%{complete} of %{smart_count} files uploaded',
'2': '%{complete} of %{smart_count} files uploaded'
},
filter: 'Filter',
finishEditingFile: 'Finish editing file',
folderAdded: {
'0': 'Added %{smart_count} file from %{folder}',
'1': 'Added %{smart_count} files from %{folder}',
'2': 'Added %{smart_count} files from %{folder}'
},
generatingThumbnails: 'Generating thumbnails...',
import: 'Import',
importFrom: 'Import from %{name}',
link: 'Link',
loading: 'Loading...',
logOut: 'Log out',
myDevice: 'My Device',
noFilesFound: 'You have no files or folders here',
noInternetConnection: 'No Internet connection',
openFolderNamed: 'Open folder %{name}',
pause: 'Pause',
pauseUpload: 'Pause upload',
paused: 'Paused',
poweredBy: 'Powered by',
preparingUpload: 'Preparing upload...',
processingXFiles: {
'0': 'Processing %{smart_count} file',
'1': 'Processing %{smart_count} files',
'2': 'Processing %{smart_count} files'
},
recordingLength: 'Recording length %{recording_length}',
removeFile: 'Remove file',
resetFilter: 'Reset filter',
resume: 'Resume',
resumeUpload: 'Resume upload',
retry: 'Retry',
retryUpload: 'Retry upload',
saveChanges: 'Save changes',
selectAllFilesFromFolderNamed: 'Select all files from folder %{name}',
selectFileNamed: 'Select file %{name}',
selectX: {
'0': 'Select %{smart_count}',
'1': 'Select %{smart_count}',
'2': 'Select %{smart_count}'
},
smile: 'Smile!',
startRecording: 'Begin video recording',
stopRecording: 'Stop video recording',
takePicture: 'Take a picture',
timedOut: 'Upload stalled for %{seconds} seconds, aborting.',
unselectAllFilesFromFolderNamed: 'Unselect all files from folder %{name}',
unselectFileNamed: 'Unselect file %{name}',
upload: 'Upload',
uploadComplete: 'Upload complete',
uploadFailed: 'Upload failed',
uploadPaused: 'Upload paused',
uploadXFiles: {
'0': 'Upload %{smart_count} file',
'1': 'Upload %{smart_count} files',
'2': 'Upload %{smart_count} files'
},
uploadXNewFiles: {
'0': 'Upload +%{smart_count} file',
'1': 'Upload +%{smart_count} files',
'2': 'Upload +%{smart_count} files'
},
uploading: 'Uploading',
uploadingXFiles: {
'0': 'Uploading %{smart_count} file',
'1': 'Uploading %{smart_count} files',
'2': 'Uploading %{smart_count} files'
},
xFilesSelected: {
'0': '%{smart_count} file selected',
'1': '%{smart_count} files selected',
'2': '%{smart_count} files selected'
},
xMoreFilesAdded: {
'0': '%{smart_count} more file added',
'1': '%{smart_count} more files added',
'2': '%{smart_count} more files added'
},
xTimeLeft: '%{time} left',
youCanOnlyUploadFileTypes: 'You can only upload: %{types}',
youCanOnlyUploadX: {
'0': 'You can only upload %{smart_count} file',
'1': 'You can only upload %{smart_count} files',
'2': 'You can only upload %{smart_count} files'
},
youHaveToAtLeastSelectX: {
'0': 'You have to select at least %{smart_count} file',
'1': 'You have to select at least %{smart_count} files',
'2': 'You have to select at least %{smart_count} files'
}
}
en_US.pluralize = function (n) {
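  // Plural-form index for en_US: 0 selects the singular string, 1 the plural.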
if (n === 1) {
return 0
}
return 1
}
if (typeof window !== 'undefined' && typeof window.Uppy !== 'undefined') {
window.Uppy.locales.en_US = en_US
}
module.exports = en_US
| 1 | 12,780 | this one should also have quotes i guess :) | transloadit-uppy | js |
@@ -106,12 +106,14 @@ func TestBuilderForYAML(t *testing.T) {
"Test 2": {fakeInvalidK8sResource, "", true},
}
for name, mock := range tests {
+ name := name // pin it
+ mock := mock // pin it
t.Run(name, func(t *testing.T) {
b := BuilderForYaml(mock.resourceYAML)
if mock.expectError && len(b.errs) == 0 {
t.Fatalf("Test %s failed, expected err but got 0", name)
- } else if b.unstruct.object.GetName() != mock.expectedName {
- t.Fatalf("Test %s failed, expected %v but got %v", name, mock.expectedName, b.unstruct.object.GetName())
+ } else if b.unstruct.Object.GetName() != mock.expectedName {
+ t.Fatalf("Test %s failed, expected %v but got %v", name, mock.expectedName, b.unstruct.Object.GetName())
}
})
} | 1 | // Copyright © 2018-2019 The OpenEBS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha2
import (
"testing"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
fakeK8sResource = `
apiVersion: v1
kind: Service
metadata:
labels:
app: icstcee
name: icstcee
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: icstcee
type: LoadBalancer
`
fakeInvalidK8sResource = `
apiVersion: v1
kind: Service
metadata
labels
app: icstcee
name: icstcee
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: icstcee
type: LoadBalancer
`
)
func fakePodObject() *unstructured.Unstructured {
n := &unstructured.Unstructured{}
n.SetKind("Pod")
n.SetName("fake pod")
return n
}
func fakeDeploymentObject() *unstructured.Unstructured {
n := &unstructured.Unstructured{}
n.SetKind("Deployment")
n.SetName("fake deployment")
return n
}
func fakeServiceObject() *unstructured.Unstructured {
n := &unstructured.Unstructured{}
n.SetKind("Service")
n.SetName("fake service")
return n
}
func fakeNamespaceObject() *unstructured.Unstructured {
n := &unstructured.Unstructured{}
n.SetKind("Namespace")
n.SetName("fake namespace")
return n
}
func fakeUnstructObject(name string) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetName(name)
return u
}
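// fakeK8sResourceList repeats the given resource YAML count times,
// separating the copies with the "---" document delimiter.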
func fakeK8sResourceList(resource string, count int) string {
result := ""
for i := 0; i < count; i++ {
result += resource + "---"
}
return result
}
func TestBuilderForYAML(t *testing.T) {
tests := map[string]struct {
resourceYAML, expectedName string
expectError bool
}{
"Test 1": {fakeK8sResource, "icstcee", false},
"Test 2": {fakeInvalidK8sResource, "", true},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
b := BuilderForYaml(mock.resourceYAML)
if mock.expectError && len(b.errs) == 0 {
t.Fatalf("Test %s failed, expected err but got 0", name)
} else if b.unstruct.object.GetName() != mock.expectedName {
t.Fatalf("Test %s failed, expected %v but got %v", name, mock.expectedName, b.unstruct.object.GetName())
}
})
}
}
func TestBuilderForObject(t *testing.T) {
tests := map[string]struct {
resourceName, expectedName string
}{
"Test 1": {"icstcee", "icstcee"},
"Test 2": {"icstcee1", "icstcee1"},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
mockObj := fakeUnstructObject(mock.resourceName)
b := BuilderForObject(mockObj)
if b.unstruct.object.GetName() != mock.expectedName {
t.Fatalf("Test %s failed, expected %v but got %v", name, mock.expectedName, b.unstruct.object.GetName())
}
})
}
}
func TestBuilderForYamlBuild(t *testing.T) {
tests := map[string]struct {
resourceYAML, expectedName string
expectError bool
}{
"Test 1": {fakeK8sResource, "icstcee", false},
"Test 2": {fakeInvalidK8sResource, "", true},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
b, err := BuilderForYaml(mock.resourceYAML).Build()
if mock.expectError && err == nil {
t.Fatalf("Test %s failed, expected err but got nil", name)
} else if b != nil && b.object.GetName() != mock.expectedName {
t.Fatalf("Test %s failed, expected %v but got %v", name, mock.expectedName, b.object.GetName())
}
})
}
}
func TestListBuilderForYamls(t *testing.T) {
tests := map[string]struct {
resourceYAML string
expectedResourceCount int
expectErr bool
}{
"Test 1": {fakeK8sResourceList(fakeK8sResource, 1), 1, false},
"Test 2": {fakeK8sResourceList(fakeK8sResource, 2), 2, false},
"Test 3": {fakeK8sResourceList(fakeInvalidK8sResource, 1), 0, true},
"Test 4": {fakeK8sResourceList(fakeInvalidK8sResource, 2), 0, true},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
lb := ListBuilderForYamls(mock.resourceYAML)
if mock.expectErr && len(lb.errs) == 0 {
t.Fatalf("Test %s failed, expected err but got nil", name)
} else if len(lb.list.items) != mock.expectedResourceCount {
t.Fatalf("Test %s failed, expected resource count %v but got %v", name, mock.expectedResourceCount, len(lb.list.items))
}
})
}
}
func TestListUnstructBuilderForObjects(t *testing.T) {
tests := map[string]struct {
availableResources []*unstructured.Unstructured
expectedResourceCount int
expectErr bool
}{
"Test 1": {[]*unstructured.Unstructured{fakePodObject()}, 1, false},
"Test 2": {[]*unstructured.Unstructured{fakePodObject(), fakeDeploymentObject()}, 2, false},
"Test 3": {[]*unstructured.Unstructured{fakePodObject(), fakeDeploymentObject(), fakeServiceObject()}, 3, false},
"Test 4": {[]*unstructured.Unstructured{fakePodObject(), fakeDeploymentObject(), fakeServiceObject(), fakeNamespaceObject()}, 4, false},
}
for name, mock := range tests {
t.Run(name, func(t *testing.T) {
lb := ListBuilderForObjects(mock.availableResources...)
if mock.expectErr && len(lb.errs) == 0 {
t.Fatalf("Test %s failed, expected err but got nil", name)
} else if len(lb.list.items) != mock.expectedResourceCount {
t.Fatalf("Test %s failed, expected resource count %v but got %v", name, mock.expectedResourceCount, len(lb.list.items))
}
})
}
}
| 1 | 15,527 | Using the variable on range scope `mock` in function literal (from `scopelint`) | openebs-maya | go |
@@ -1020,7 +1020,7 @@ public class Datasets extends AbstractApiBean {
PublishDatasetResult res = execCommand(new PublishDatasetCommand(ds,
createDataverseRequest(user),
isMinor));
- return res.isCompleted() ? ok(json(res.getDataset())) : accepted(json(res.getDataset()));
+ return res.isWorkflow() ? accepted(json(res.getDataset())) : ok(json(res.getDataset()));
}
} catch (WrappedResponse ex) {
return ex.getResponse(); | 1 | package edu.harvard.iq.dataverse.api;
import edu.harvard.iq.dataverse.ControlledVocabularyValue;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.DataFileServiceBean;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetFieldCompoundValue;
import edu.harvard.iq.dataverse.DatasetFieldServiceBean;
import edu.harvard.iq.dataverse.DatasetFieldType;
import edu.harvard.iq.dataverse.DatasetFieldValue;
import edu.harvard.iq.dataverse.DatasetLock;
import edu.harvard.iq.dataverse.DatasetServiceBean;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.DataverseRequestServiceBean;
import edu.harvard.iq.dataverse.DataverseServiceBean;
import edu.harvard.iq.dataverse.DataverseSession;
import edu.harvard.iq.dataverse.DvObject;
import edu.harvard.iq.dataverse.EjbDataverseEngine;
import edu.harvard.iq.dataverse.MetadataBlock;
import edu.harvard.iq.dataverse.MetadataBlockServiceBean;
import edu.harvard.iq.dataverse.PermissionServiceBean;
import edu.harvard.iq.dataverse.RoleAssignment;
import edu.harvard.iq.dataverse.UserNotification;
import edu.harvard.iq.dataverse.UserNotificationServiceBean;
import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean;
import edu.harvard.iq.dataverse.authorization.DataverseRole;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.RoleAssignee;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.batch.jobs.importer.ImportMode;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleUtil;
import edu.harvard.iq.dataverse.datacapturemodule.ScriptRequestResponse;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.datasetutility.AddReplaceFileHelper;
import edu.harvard.iq.dataverse.datasetutility.DataFileTagException;
import edu.harvard.iq.dataverse.datasetutility.NoFilesException;
import edu.harvard.iq.dataverse.datasetutility.OptionalFileParams;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.impl.AbstractSubmitToArchiveCommand;
import edu.harvard.iq.dataverse.engine.command.impl.AddLockCommand;
import edu.harvard.iq.dataverse.engine.command.impl.AssignRoleCommand;
import edu.harvard.iq.dataverse.engine.command.impl.CreateDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.CreatePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.CuratePublishedDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeleteDatasetLinkingDataverseCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DeletePrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.DestroyDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetSpecificPublishedDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetDraftDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetLatestAccessibleDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetLatestPublishedDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.GetPrivateUrlCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ImportFromFileSystemCommand;
import edu.harvard.iq.dataverse.engine.command.impl.LinkDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ListRoleAssignments;
import edu.harvard.iq.dataverse.engine.command.impl.ListVersionsCommand;
import edu.harvard.iq.dataverse.engine.command.impl.MoveDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetCommand;
import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetResult;
import edu.harvard.iq.dataverse.engine.command.impl.RemoveLockCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RequestRsyncScriptCommand;
import edu.harvard.iq.dataverse.engine.command.impl.ReturnDatasetToAuthorCommand;
import edu.harvard.iq.dataverse.engine.command.impl.SetDatasetCitationDateCommand;
import edu.harvard.iq.dataverse.engine.command.impl.SubmitDatasetForReviewCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetVersionCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetTargetURLCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetThumbnailCommand;
import edu.harvard.iq.dataverse.export.DDIExportServiceBean;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.privateurl.PrivateUrl;
import edu.harvard.iq.dataverse.S3PackageImporter;
import static edu.harvard.iq.dataverse.api.AbstractApiBean.error;
import edu.harvard.iq.dataverse.api.dto.RoleAssignmentDTO;
import edu.harvard.iq.dataverse.batch.util.LoggingUtil;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.dataaccess.S3AccessIO;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.exception.UnforcedCommandException;
import edu.harvard.iq.dataverse.engine.command.impl.GetDatasetStorageSizeCommand;
import edu.harvard.iq.dataverse.engine.command.impl.RevokeRoleCommand;
import edu.harvard.iq.dataverse.engine.command.impl.UpdateDvObjectPIDMetadataCommand;
import edu.harvard.iq.dataverse.makedatacount.DatasetExternalCitations;
import edu.harvard.iq.dataverse.makedatacount.DatasetExternalCitationsServiceBean;
import edu.harvard.iq.dataverse.makedatacount.DatasetMetrics;
import edu.harvard.iq.dataverse.makedatacount.DatasetMetricsServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountLoggingServiceBean.MakeDataCountEntry;
import edu.harvard.iq.dataverse.makedatacount.MakeDataCountUtil;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.ArchiverUtil;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.util.EjbUtil;
import edu.harvard.iq.dataverse.util.FileUtil;
import edu.harvard.iq.dataverse.util.SystemConfig;
import edu.harvard.iq.dataverse.util.json.JsonParseException;
import edu.harvard.iq.dataverse.search.IndexServiceBean;
import static edu.harvard.iq.dataverse.util.json.JsonPrinter.*;
import static edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder.jsonObjectBuilder;
import edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.sql.Timestamp;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.ejb.EJBException;
import javax.inject.Inject;
import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonArrayBuilder;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import static javax.ws.rs.core.Response.Status.BAD_REQUEST;
import javax.ws.rs.core.UriInfo;
import org.apache.solr.client.solrj.SolrServerException;
import org.glassfish.jersey.media.multipart.FormDataBodyPart;
import org.glassfish.jersey.media.multipart.FormDataContentDisposition;
import org.glassfish.jersey.media.multipart.FormDataParam;
@Path("datasets")
public class Datasets extends AbstractApiBean {
private static final Logger logger = Logger.getLogger(Datasets.class.getCanonicalName());
@Inject DataverseSession session;
@EJB
DatasetServiceBean datasetService;
@EJB
DataverseServiceBean dataverseService;
@EJB
UserNotificationServiceBean userNotificationService;
@EJB
PermissionServiceBean permissionService;
@EJB
AuthenticationServiceBean authenticationServiceBean;
@EJB
DDIExportServiceBean ddiExportService;
@EJB
DatasetFieldServiceBean datasetfieldService;
@EJB
MetadataBlockServiceBean metadataBlockService;
@EJB
DataFileServiceBean fileService;
@EJB
IngestServiceBean ingestService;
@EJB
EjbDataverseEngine commandEngine;
@EJB
IndexServiceBean indexService;
@EJB
S3PackageImporter s3PackageImporter;
@EJB
SettingsServiceBean settingsService;
// TODO: Move to AbstractApiBean
@EJB
DatasetMetricsServiceBean datasetMetricsSvc;
@EJB
DatasetExternalCitationsServiceBean datasetExternalCitationsService;
@Inject
MakeDataCountLoggingServiceBean mdcLogService;
@Inject
DataverseRequestServiceBean dvRequestService;
/**
* Used to consolidate the way we parse and handle dataset versions.
* @param <T>
*/
public interface DsVersionHandler<T> {
T handleLatest();
T handleDraft();
T handleSpecific( long major, long minor );
T handleLatestPublished();
}
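    /**
     * Fetches a dataset together with the latest version that is accessible
     * to the requesting user, logging a Make Data Count entry for released versions.
     */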
@GET
@Path("{id}")
public Response getDataset(@PathParam("id") String id, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) {
return response( req -> {
final Dataset retrieved = execCommand(new GetDatasetCommand(req, findDatasetOrDie(id)));
final DatasetVersion latest = execCommand(new GetLatestAccessibleDatasetVersionCommand(req, retrieved));
final JsonObjectBuilder jsonbuilder = json(retrieved);
//Report MDC if this is a released version (could be draft if user has access, or user may not have access at all and is not getting metadata beyond the minimum)
if((latest != null) && latest.isReleased()) {
MakeDataCountLoggingServiceBean.MakeDataCountEntry entry = new MakeDataCountEntry(uriInfo, headers, dvRequestService, retrieved);
mdcLogService.logEntry(entry);
}
return ok(jsonbuilder.add("latestVersion", (latest != null) ? json(latest) : null));
});
}
// TODO:
// This API call should, ideally, call findUserOrDie() and the GetDatasetCommand
// to obtain the dataset that we are trying to export - which would handle
// Auth in the process... For now, Auth isn't necessary - since export ONLY
// WORKS on published datasets, which are open to the world. -- L.A. 4.5
@GET
@Path("/export")
@Produces({"application/xml", "application/json", "application/html" })
public Response exportDataset(@QueryParam("persistentId") String persistentId, @QueryParam("exporter") String exporter, @Context UriInfo uriInfo, @Context HttpHeaders headers, @Context HttpServletResponse response) {
try {
Dataset dataset = datasetService.findByGlobalId(persistentId);
if (dataset == null) {
return error(Response.Status.NOT_FOUND, "A dataset with the persistentId " + persistentId + " could not be found.");
}
ExportService instance = ExportService.getInstance(settingsSvc);
InputStream is = instance.getExport(dataset, exporter);
String mediaType = instance.getMediaType(exporter);
//Export is only possible for released (non-draft) dataset versions so we can log without checking to see if this is a request for a draft
MakeDataCountLoggingServiceBean.MakeDataCountEntry entry = new MakeDataCountEntry(uriInfo, headers, dvRequestService, dataset);
mdcLogService.logEntry(entry);
return Response.ok()
.entity(is)
.type(mediaType).
build();
} catch (Exception wr) {
return error(Response.Status.FORBIDDEN, "Export Failed");
}
}
@DELETE
@Path("{id}")
public Response deleteDataset( @PathParam("id") String id) {
// Internally, "DeleteDatasetCommand" simply redirects to "DeleteDatasetVersionCommand"
// (and there's a comment that says "TODO: remove this command")
// do we need an exposed API call for it?
// And DeleteDatasetVersionCommand further redirects to DestroyDatasetCommand,
// if the dataset only has 1 version... In other words, the functionality
// currently provided by this API is covered between the "deleteDraftVersion" and
// "destroyDataset" API calls.
// (The logic below follows the current implementation of the underlying
// commands!)
return response( req -> {
Dataset doomed = findDatasetOrDie(id);
DatasetVersion doomedVersion = doomed.getLatestVersion();
User u = findUserOrDie();
boolean destroy = false;
if (doomed.getVersions().size() == 1) {
if (doomed.isReleased() && (!(u instanceof AuthenticatedUser) || !u.isSuperuser())) {
throw new WrappedResponse(error(Response.Status.UNAUTHORIZED, "Only superusers can delete published datasets"));
}
destroy = true;
} else {
if (!doomedVersion.isDraft()) {
throw new WrappedResponse(error(Response.Status.UNAUTHORIZED, "This is a published dataset with multiple versions. This API can only delete the latest version if it is a DRAFT"));
}
}
// Gather the locations of the physical files that will need to be
// deleted once the destroy command execution has been finalized:
Map<Long, String> deleteStorageLocations = fileService.getPhysicalFilesToDelete(doomedVersion, destroy);
execCommand( new DeleteDatasetCommand(req, findDatasetOrDie(id)));
// If we have gotten this far, the destroy command has succeeded,
// so we can finalize it by permanently deleting the physical files:
// (DataFileService will double-check that the datafiles no
// longer exist in the database, before attempting to delete
// the physical files)
if (!deleteStorageLocations.isEmpty()) {
fileService.finalizeFileDeletes(deleteStorageLocations);
}
return ok("Dataset " + id + " deleted");
});
}
@DELETE
@Path("{id}/destroy")
public Response destroyDataset(@PathParam("id") String id) {
return response(req -> {
// first check if dataset is released, and if so, if user is a superuser
Dataset doomed = findDatasetOrDie(id);
User u = findUserOrDie();
if (doomed.isReleased() && (!(u instanceof AuthenticatedUser) || !u.isSuperuser())) {
throw new WrappedResponse(error(Response.Status.UNAUTHORIZED, "Destroy can only be called by superusers."));
}
// Gather the locations of the physical files that will need to be
// deleted once the destroy command execution has been finalized:
Map<Long, String> deleteStorageLocations = fileService.getPhysicalFilesToDelete(doomed);
execCommand(new DestroyDatasetCommand(doomed, req));
// If we have gotten this far, the destroy command has succeeded,
// so we can finalize permanently deleting the physical files:
// (DataFileService will double-check that the datafiles no
// longer exist in the database, before attempting to delete
// the physical files)
if (!deleteStorageLocations.isEmpty()) {
fileService.finalizeFileDeletes(deleteStorageLocations);
}
return ok("Dataset " + id + " destroyed");
});
}
@DELETE
@Path("{id}/versions/{versionId}")
public Response deleteDraftVersion( @PathParam("id") String id, @PathParam("versionId") String versionId ){
if ( ! ":draft".equals(versionId) ) {
return badRequest("Only the :draft version can be deleted");
}
return response( req -> {
Dataset dataset = findDatasetOrDie(id);
DatasetVersion doomed = dataset.getLatestVersion();
if (!doomed.isDraft()) {
throw new WrappedResponse(error(Response.Status.UNAUTHORIZED, "This is NOT a DRAFT version"));
}
// Gather the locations of the physical files that will need to be
// deleted once the destroy command execution has been finalized:
Map<Long, String> deleteStorageLocations = fileService.getPhysicalFilesToDelete(doomed);
execCommand( new DeleteDatasetVersionCommand(req, dataset));
// If we have gotten this far, the delete command has succeeded -
// by either deleting the Draft version of a published dataset,
// or destroying an unpublished one.
// This means we can finalize permanently deleting the physical files:
// (DataFileService will double-check that the datafiles no
// longer exist in the database, before attempting to delete
// the physical files)
if (!deleteStorageLocations.isEmpty()) {
fileService.finalizeFileDeletes(deleteStorageLocations);
}
return ok("Draft version of dataset " + id + " deleted");
});
}
@DELETE
@Path("{datasetId}/deleteLink/{linkedDataverseId}")
public Response deleteDatasetLinkingDataverse( @PathParam("datasetId") String datasetId, @PathParam("linkedDataverseId") String linkedDataverseId) {
boolean index = true;
return response(req -> {
execCommand(new DeleteDatasetLinkingDataverseCommand(req, findDatasetOrDie(datasetId), findDatasetLinkingDataverseOrDie(datasetId, linkedDataverseId), index));
return ok("Link from Dataset " + datasetId + " to linked Dataverse " + linkedDataverseId + " deleted");
});
}
@PUT
@Path("{id}/citationdate")
public Response setCitationDate( @PathParam("id") String id, String dsfTypeName) {
return response( req -> {
if ( dsfTypeName.trim().isEmpty() ){
return badRequest("Please provide a dataset field type in the requst body.");
}
DatasetFieldType dsfType = null;
if (!":publicationDate".equals(dsfTypeName)) {
dsfType = datasetFieldSvc.findByName(dsfTypeName);
if (dsfType == null) {
return badRequest("Dataset Field Type Name " + dsfTypeName + " not found.");
}
}
execCommand(new SetDatasetCitationDateCommand(req, findDatasetOrDie(id), dsfType));
return ok("Citation Date for dataset " + id + " set to: " + (dsfType != null ? dsfType.getDisplayName() : "default"));
});
}
@DELETE
@Path("{id}/citationdate")
public Response useDefaultCitationDate( @PathParam("id") String id) {
return response( req -> {
execCommand(new SetDatasetCitationDateCommand(req, findDatasetOrDie(id), null));
return ok("Citation Date for dataset " + id + " set to default");
});
}
@GET
@Path("{id}/versions")
public Response listVersions( @PathParam("id") String id ) {
return response( req ->
ok( execCommand( new ListVersionsCommand(req, findDatasetOrDie(id)) )
.stream()
.map( d -> json(d) )
.collect(toJsonArray())));
}
@GET
@Path("{id}/versions/{versionId}")
public Response getVersion( @PathParam("id") String datasetId, @PathParam("versionId") String versionId, @Context UriInfo uriInfo, @Context HttpHeaders headers) {
return response( req -> {
DatasetVersion dsv = getDatasetVersionOrDie(req, versionId, findDatasetOrDie(datasetId), uriInfo, headers);
return (dsv == null || dsv.getId() == null) ? notFound("Dataset version not found")
: ok(json(dsv));
});
}
@GET
@Path("{id}/versions/{versionId}/files")
public Response getVersionFiles( @PathParam("id") String datasetId, @PathParam("versionId") String versionId, @Context UriInfo uriInfo, @Context HttpHeaders headers) {
return response( req -> ok( jsonFileMetadatas(
getDatasetVersionOrDie(req, versionId, findDatasetOrDie(datasetId), uriInfo, headers).getFileMetadatas())));
}
@GET
@Path("{id}/versions/{versionId}/metadata")
public Response getVersionMetadata( @PathParam("id") String datasetId, @PathParam("versionId") String versionId, @Context UriInfo uriInfo, @Context HttpHeaders headers) {
return response( req -> ok(
jsonByBlocks(
getDatasetVersionOrDie(req, versionId, findDatasetOrDie(datasetId), uriInfo, headers )
.getDatasetFields())));
}
@GET
@Path("{id}/versions/{versionNumber}/metadata/{block}")
public Response getVersionMetadataBlock( @PathParam("id") String datasetId,
@PathParam("versionNumber") String versionNumber,
@PathParam("block") String blockName,
@Context UriInfo uriInfo,
@Context HttpHeaders headers ) {
return response( req -> {
DatasetVersion dsv = getDatasetVersionOrDie(req, versionNumber, findDatasetOrDie(datasetId), uriInfo, headers );
Map<MetadataBlock, List<DatasetField>> fieldsByBlock = DatasetField.groupByBlock(dsv.getDatasetFields());
for ( Map.Entry<MetadataBlock, List<DatasetField>> p : fieldsByBlock.entrySet() ) {
if ( p.getKey().getName().equals(blockName) ) {
return ok(json(p.getKey(), p.getValue()));
}
}
return notFound("metadata block named " + blockName + " not found");
});
}
@GET
@Path("{id}/modifyRegistration")
public Response updateDatasetTargetURL(@PathParam("id") String id ) {
return response( req -> {
execCommand(new UpdateDatasetTargetURLCommand(findDatasetOrDie(id), req));
return ok("Dataset " + id + " target url updated");
});
}
@POST
@Path("/modifyRegistrationAll")
public Response updateDatasetTargetURLAll() {
return response( req -> {
datasetService.findAll().forEach( ds -> {
try {
execCommand(new UpdateDatasetTargetURLCommand(findDatasetOrDie(ds.getId().toString()), req));
} catch (WrappedResponse ex) {
Logger.getLogger(Datasets.class.getName()).log(Level.SEVERE, null, ex);
}
});
return ok("Update All Dataset target url completed");
});
}
@POST
@Path("{id}/modifyRegistrationMetadata")
public Response updateDatasetPIDMetadata(@PathParam("id") String id) {
try {
Dataset dataset = findDatasetOrDie(id);
if (!dataset.isReleased()) {
return error(Response.Status.BAD_REQUEST, BundleUtil.getStringFromBundle("datasets.api.updatePIDMetadata.failure.dataset.must.be.released"));
}
} catch (WrappedResponse ex) {
Logger.getLogger(Datasets.class.getName()).log(Level.SEVERE, null, ex);
}
return response(req -> {
execCommand(new UpdateDvObjectPIDMetadataCommand(findDatasetOrDie(id), req));
List<String> args = Arrays.asList(id);
return ok(BundleUtil.getStringFromBundle("datasets.api.updatePIDMetadata.success.for.single.dataset", args));
});
}
@GET
@Path("/modifyRegistrationPIDMetadataAll")
public Response updateDatasetPIDMetadataAll() {
return response( req -> {
datasetService.findAll().forEach( ds -> {
try {
execCommand(new UpdateDvObjectPIDMetadataCommand(findDatasetOrDie(ds.getId().toString()), req));
} catch (WrappedResponse ex) {
Logger.getLogger(Datasets.class.getName()).log(Level.SEVERE, null, ex);
}
});
return ok(BundleUtil.getStringFromBundle("datasets.api.updatePIDMetadata.success.for.update.all"));
});
}
@PUT
@Path("{id}/versions/{versionId}")
public Response updateDraftVersion( String jsonBody, @PathParam("id") String id, @PathParam("versionId") String versionId ){
if ( ! ":draft".equals(versionId) ) {
return error( Response.Status.BAD_REQUEST, "Only the :draft version can be updated");
}
try ( StringReader rdr = new StringReader(jsonBody) ) {
DataverseRequest req = createDataverseRequest(findUserOrDie());
Dataset ds = findDatasetOrDie(id);
JsonObject json = Json.createReader(rdr).readObject();
DatasetVersion incomingVersion = jsonParser().parseDatasetVersion(json);
// clear possibly stale fields from the incoming dataset version.
// creation and modification dates are updated by the commands.
incomingVersion.setId(null);
incomingVersion.setVersionNumber(null);
incomingVersion.setMinorVersionNumber(null);
incomingVersion.setVersionState(DatasetVersion.VersionState.DRAFT);
incomingVersion.setDataset(ds);
incomingVersion.setCreateTime(null);
incomingVersion.setLastUpdateTime(null);
if (!incomingVersion.getFileMetadatas().isEmpty()){
return error( Response.Status.BAD_REQUEST, "You may not add files via this api.");
}
boolean updateDraft = ds.getLatestVersion().isDraft();
DatasetVersion managedVersion;
if ( updateDraft ) {
final DatasetVersion editVersion = ds.getEditVersion();
editVersion.setDatasetFields(incomingVersion.getDatasetFields());
editVersion.setTermsOfUseAndAccess( incomingVersion.getTermsOfUseAndAccess() );
Dataset managedDataset = execCommand(new UpdateDatasetVersionCommand(ds, req));
managedVersion = managedDataset.getEditVersion();
} else {
managedVersion = execCommand(new CreateDatasetVersionCommand(req, ds, incomingVersion));
}
// DatasetVersion managedVersion = execCommand( updateDraft
// ? new UpdateDatasetVersionCommand(req, incomingVersion)
// : new CreateDatasetVersionCommand(req, ds, incomingVersion));
return ok( json(managedVersion) );
} catch (JsonParseException ex) {
logger.log(Level.SEVERE, "Semantic error parsing dataset version Json: " + ex.getMessage(), ex);
return error( Response.Status.BAD_REQUEST, "Error parsing dataset version: " + ex.getMessage() );
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
@PUT
@Path("{id}/deleteMetadata")
public Response deleteVersionMetadata(String jsonBody, @PathParam("id") String id) throws WrappedResponse {
DataverseRequest req = createDataverseRequest(findUserOrDie());
return processDatasetFieldDataDelete(jsonBody, id, req);
}
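    /**
     * Removes the metadata field values named in the request body from the
     * dataset's edit (draft) version; responds with 400 Bad Request if a value
     * to delete cannot be found.
     */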
private Response processDatasetFieldDataDelete(String jsonBody, String id, DataverseRequest req) {
try (StringReader rdr = new StringReader(jsonBody)) {
Dataset ds = findDatasetOrDie(id);
JsonObject json = Json.createReader(rdr).readObject();
DatasetVersion dsv = ds.getEditVersion();
List<DatasetField> fields = new LinkedList<>();
DatasetField singleField = null;
JsonArray fieldsJson = json.getJsonArray("fields");
if (fieldsJson == null) {
singleField = jsonParser().parseField(json, Boolean.FALSE);
fields.add(singleField);
} else {
fields = jsonParser().parseMultipleFields(json);
}
dsv.setVersionState(DatasetVersion.VersionState.DRAFT);
List<ControlledVocabularyValue> controlledVocabularyItemsToRemove = new ArrayList<ControlledVocabularyValue>();
List<DatasetFieldValue> datasetFieldValueItemsToRemove = new ArrayList<DatasetFieldValue>();
List<DatasetFieldCompoundValue> datasetFieldCompoundValueItemsToRemove = new ArrayList<DatasetFieldCompoundValue>();
for (DatasetField updateField : fields) {
boolean found = false;
for (DatasetField dsf : dsv.getDatasetFields()) {
if (dsf.getDatasetFieldType().equals(updateField.getDatasetFieldType())) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
if (updateField.getDatasetFieldType().isControlledVocabulary()) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
for (ControlledVocabularyValue cvv : updateField.getControlledVocabularyValues()) {
for (ControlledVocabularyValue existing : dsf.getControlledVocabularyValues()) {
if (existing.getStrValue().equals(cvv.getStrValue())) {
found = true;
controlledVocabularyItemsToRemove.add(existing);
}
}
if (!found) {
logger.log(Level.SEVERE, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + cvv.getStrValue() + " not found.");
return error(Response.Status.BAD_REQUEST, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + cvv.getStrValue() + " not found.");
}
}
for (ControlledVocabularyValue remove : controlledVocabularyItemsToRemove) {
dsf.getControlledVocabularyValues().remove(remove);
}
} else {
if (dsf.getSingleControlledVocabularyValue().getStrValue().equals(updateField.getSingleControlledVocabularyValue().getStrValue())) {
found = true;
dsf.setSingleControlledVocabularyValue(null);
}
}
} else {
if (!updateField.getDatasetFieldType().isCompound()) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
for (DatasetFieldValue dfv : updateField.getDatasetFieldValues()) {
for (DatasetFieldValue edsfv : dsf.getDatasetFieldValues()) {
if (edsfv.getDisplayValue().equals(dfv.getDisplayValue())) {
found = true;
datasetFieldValueItemsToRemove.add(dfv);
}
}
if (!found) {
logger.log(Level.SEVERE, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + dfv.getDisplayValue() + " not found.");
return error(Response.Status.BAD_REQUEST, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + dfv.getDisplayValue() + " not found.");
}
}
datasetFieldValueItemsToRemove.forEach((remove) -> {
dsf.getDatasetFieldValues().remove(remove);
});
} else {
if (dsf.getSingleValue().getDisplayValue().equals(updateField.getSingleValue().getDisplayValue())) {
found = true;
dsf.setSingleValue(null);
}
}
} else {
for (DatasetFieldCompoundValue dfcv : updateField.getDatasetFieldCompoundValues()) {
String deleteVal = getCompoundDisplayValue(dfcv);
for (DatasetFieldCompoundValue existing : dsf.getDatasetFieldCompoundValues()) {
String existingString = getCompoundDisplayValue(existing);
if (existingString.equals(deleteVal)) {
found = true;
datasetFieldCompoundValueItemsToRemove.add(existing);
}
}
datasetFieldCompoundValueItemsToRemove.forEach((remove) -> {
dsf.getDatasetFieldCompoundValues().remove(remove);
});
if (!found) {
logger.log(Level.SEVERE, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + deleteVal + " not found.");
return error(Response.Status.BAD_REQUEST, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + deleteVal + " not found.");
}
}
}
}
} else {
found = true;
dsf.setSingleValue(null);
dsf.setSingleControlledVocabularyValue(null);
}
break;
}
}
if (!found){
String displayValue = !updateField.getDisplayValue().isEmpty() ? updateField.getDisplayValue() : updateField.getCompoundDisplayValue();
logger.log(Level.SEVERE, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + displayValue + " not found." );
return error(Response.Status.BAD_REQUEST, "Delete metadata failed: " + updateField.getDatasetFieldType().getDisplayName() + ": " + displayValue + " not found." );
}
}
boolean updateDraft = ds.getLatestVersion().isDraft();
DatasetVersion managedVersion = updateDraft
? execCommand(new UpdateDatasetVersionCommand(ds, req)).getEditVersion()
: execCommand(new CreateDatasetVersionCommand(req, ds, dsv));
return ok(json(managedVersion));
} catch (JsonParseException ex) {
logger.log(Level.SEVERE, "Semantic error parsing dataset update Json: " + ex.getMessage(), ex);
return error(Response.Status.BAD_REQUEST, "Error processing metadata delete: " + ex.getMessage());
} catch (WrappedResponse ex) {
logger.log(Level.SEVERE, "Delete metadata error: " + ex.getMessage(), ex);
return ex.getResponse();
}
}
private String getCompoundDisplayValue (DatasetFieldCompoundValue dscv){
String returnString = "";
for (DatasetField dsf : dscv.getChildDatasetFields()) {
for (String value : dsf.getValues()) {
if (value != null) {
returnString += (returnString.isEmpty() ? "" : "; ") + value.trim();
}
}
}
return returnString;
}
@PUT
@Path("{id}/editMetadata")
public Response editVersionMetadata(String jsonBody, @PathParam("id") String id, @QueryParam("replace") Boolean replace) throws WrappedResponse{
Boolean replaceData = replace != null;
DataverseRequest req = createDataverseRequest(findUserOrDie());
return processDatasetUpdate(jsonBody, id, req, replaceData);
}
private Response processDatasetUpdate(String jsonBody, String id, DataverseRequest req, Boolean replaceData){
try (StringReader rdr = new StringReader(jsonBody)) {
Dataset ds = findDatasetOrDie(id);
JsonObject json = Json.createReader(rdr).readObject();
DatasetVersion dsv = ds.getEditVersion();
List<DatasetField> fields = new LinkedList<>();
DatasetField singleField = null;
JsonArray fieldsJson = json.getJsonArray("fields");
if( fieldsJson == null ){
singleField = jsonParser().parseField(json, Boolean.FALSE);
fields.add(singleField);
} else{
fields = jsonParser().parseMultipleFields(json);
}
String validationErrors = validateDatasetFieldValues(fields);
if (!validationErrors.isEmpty()) {
logger.log(Level.SEVERE, "Semantic error parsing dataset update Json: " + validationErrors);
return error(Response.Status.BAD_REQUEST, "Error parsing dataset update: " + validationErrors);
}
dsv.setVersionState(DatasetVersion.VersionState.DRAFT);
// Loop through the update fields and compare them to the existing version fields:
// if the field type already exists, add/replace its values; otherwise add the entire DatasetField.
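// Hedged illustration (not from the original source): the request body parsed above is expected
// to carry either a single field object or a "fields" array. One possible shape, assuming the
// standard Dataverse field serialization (the typeName/typeClass/value key names are assumptions):
//   { "fields": [ { "typeName": "subject", "typeClass": "controlledVocabulary",
//                   "multiple": true, "value": ["Chemistry"] } ] }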
for (DatasetField updateField : fields) {
boolean found = false;
for (DatasetField dsf : dsv.getDatasetFields()) {
if (dsf.getDatasetFieldType().equals(updateField.getDatasetFieldType())) {
found = true;
if (dsf.isEmpty() || dsf.getDatasetFieldType().isAllowMultiples() || replaceData) {
List priorCVV = new ArrayList<>();
String cvvDisplay = "";
if (updateField.getDatasetFieldType().isControlledVocabulary()) {
cvvDisplay = dsf.getDisplayValue();
for (ControlledVocabularyValue cvvOld : dsf.getControlledVocabularyValues()) {
priorCVV.add(cvvOld);
}
}
if (replaceData) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
dsf.setDatasetFieldCompoundValues(new ArrayList<>());
dsf.setDatasetFieldValues(new ArrayList<>());
dsf.setControlledVocabularyValues(new ArrayList<>());
priorCVV.clear();
dsf.getControlledVocabularyValues().clear();
} else {
dsf.setSingleValue("");
dsf.setSingleControlledVocabularyValue(null);
}
}
if (updateField.getDatasetFieldType().isControlledVocabulary()) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
for (ControlledVocabularyValue cvv : updateField.getControlledVocabularyValues()) {
if (!cvvDisplay.contains(cvv.getStrValue())) {
priorCVV.add(cvv);
}
}
dsf.setControlledVocabularyValues(priorCVV);
} else {
dsf.setSingleControlledVocabularyValue(updateField.getSingleControlledVocabularyValue());
}
} else {
if (!updateField.getDatasetFieldType().isCompound()) {
if (dsf.getDatasetFieldType().isAllowMultiples()) {
for (DatasetFieldValue dfv : updateField.getDatasetFieldValues()) {
if (!dsf.getDisplayValue().contains(dfv.getDisplayValue())) {
dfv.setDatasetField(dsf);
dsf.getDatasetFieldValues().add(dfv);
}
}
} else {
dsf.setSingleValue(updateField.getValue());
}
} else {
for (DatasetFieldCompoundValue dfcv : updateField.getDatasetFieldCompoundValues()) {
if (!dsf.getCompoundDisplayValue().contains(updateField.getCompoundDisplayValue())) {
dfcv.setParentDatasetField(dsf);
dsf.setDatasetVersion(dsv);
dsf.getDatasetFieldCompoundValues().add(dfcv);
}
}
}
}
} else {
if (!dsf.isEmpty() && !dsf.getDatasetFieldType().isAllowMultiples() || !replaceData) {
return error(Response.Status.BAD_REQUEST, "You may not add data to a field that already has data and does not allow multiples. Use replace=true to replace existing data (" + dsf.getDatasetFieldType().getDisplayName() + ")");
}
}
break;
}
}
if (!found) {
updateField.setDatasetVersion(dsv);
dsv.getDatasetFields().add(updateField);
}
}
boolean updateDraft = ds.getLatestVersion().isDraft();
DatasetVersion managedVersion;
if (updateDraft) {
managedVersion = execCommand(new UpdateDatasetVersionCommand(ds, req)).getEditVersion();
} else {
managedVersion = execCommand(new CreateDatasetVersionCommand(req, ds, dsv));
}
return ok(json(managedVersion));
} catch (JsonParseException ex) {
logger.log(Level.SEVERE, "Semantic error parsing dataset update Json: " + ex.getMessage(), ex);
return error(Response.Status.BAD_REQUEST, "Error parsing dataset update: " + ex.getMessage());
} catch (WrappedResponse ex) {
logger.log(Level.SEVERE, "Update metdata error: " + ex.getMessage(), ex);
return ex.getResponse();
}
}
private String validateDatasetFieldValues(List<DatasetField> fields) {
StringBuilder error = new StringBuilder();
for (DatasetField dsf : fields) {
if (dsf.getDatasetFieldType().isAllowMultiples() && dsf.getControlledVocabularyValues().isEmpty()
&& dsf.getDatasetFieldCompoundValues().isEmpty() && dsf.getDatasetFieldValues().isEmpty()) {
error.append("Empty multiple value for field: ").append(dsf.getDatasetFieldType().getDisplayName()).append(" ");
} else if (!dsf.getDatasetFieldType().isAllowMultiples() && dsf.getSingleValue().getValue().isEmpty()) {
error.append("Empty value for field: ").append(dsf.getDatasetFieldType().getDisplayName()).append(" ");
}
}
if (!error.toString().isEmpty()) {
return (error.toString());
}
return "";
}
/**
* @deprecated This was shipped as a GET but should have been a POST, see https://github.com/IQSS/dataverse/issues/2431
*/
@GET
@Path("{id}/actions/:publish")
@Deprecated
public Response publishDataseUsingGetDeprecated( @PathParam("id") String id, @QueryParam("type") String type ) {
logger.info("publishDataseUsingGetDeprecated called on id " + id + ". Encourage use of POST rather than GET, which is deprecated.");
return publishDataset(id, type);
}
@POST
@Path("{id}/actions/:publish")
public Response publishDataset(@PathParam("id") String id, @QueryParam("type") String type) {
try {
if (type == null) {
return error(Response.Status.BAD_REQUEST, "Missing 'type' parameter (either 'major','minor', or 'updatecurrent').");
}
boolean updateCurrent=false;
AuthenticatedUser user = findAuthenticatedUserOrDie();
type = type.toLowerCase();
boolean isMinor=false;
switch (type) {
case "minor":
isMinor = true;
break;
case "major":
isMinor = false;
break;
case "updatecurrent":
if(user.isSuperuser()) {
updateCurrent=true;
} else {
return error(Response.Status.FORBIDDEN, "Only superusers can update the current version");
}
break;
default:
return error(Response.Status.BAD_REQUEST, "Illegal 'type' parameter value '" + type + "'. It needs to be either 'major', 'minor', or 'updatecurrent'.");
}
Dataset ds = findDatasetOrDie(id);
if (updateCurrent) {
/*
* Note: The code here mirrors that in the
* edu.harvard.iq.dataverse.DatasetPage:updateCurrentVersion method. Any changes
* to the core logic (i.e. beyond updating the messaging about results) should
* be applied to the code there as well.
*/
String errorMsg = null;
String successMsg = null;
try {
CuratePublishedDatasetVersionCommand cmd = new CuratePublishedDatasetVersionCommand(ds, createDataverseRequest(user));
ds = commandEngine.submit(cmd);
successMsg = BundleUtil.getStringFromBundle("datasetversion.update.success");
// If configured, update archive copy as well
String className = settingsService.get(SettingsServiceBean.Key.ArchiverClassName.toString());
DatasetVersion updateVersion = ds.getLatestVersion();
AbstractSubmitToArchiveCommand archiveCommand = ArchiverUtil.createSubmitToArchiveCommand(className, createDataverseRequest(user), updateVersion);
if (archiveCommand != null) {
// Delete the record of any existing copy since it is now out of date/incorrect
updateVersion.setArchivalCopyLocation(null);
/*
* Then try to generate and submit an archival copy. Note that running this
* command within the CuratePublishedDatasetVersionCommand was causing an error:
* "The attribute [id] of class
* [edu.harvard.iq.dataverse.DatasetFieldCompoundValue] is mapped to a primary
* key column in the database. Updates are not allowed." To avoid that, and to
* simplify reporting back to the GUI whether this optional step succeeded, I've
* pulled this out as a separate submit().
*/
try {
updateVersion = commandEngine.submit(archiveCommand);
if (updateVersion.getArchivalCopyLocation() != null) {
successMsg = BundleUtil.getStringFromBundle("datasetversion.update.archive.success");
} else {
successMsg = BundleUtil.getStringFromBundle("datasetversion.update.archive.failure");
}
} catch (CommandException ex) {
successMsg = BundleUtil.getStringFromBundle("datasetversion.update.archive.failure") + " - " + ex.toString();
logger.severe(ex.getMessage());
}
}
} catch (CommandException ex) {
errorMsg = BundleUtil.getStringFromBundle("datasetversion.update.failure") + " - " + ex.toString();
logger.severe(ex.getMessage());
}
if (errorMsg != null) {
return error(Response.Status.INTERNAL_SERVER_ERROR, errorMsg);
} else {
return Response.ok(Json.createObjectBuilder()
.add("status", STATUS_OK)
.add("status_details", successMsg)
.add("data", json(ds)).build())
.type(MediaType.APPLICATION_JSON)
.build();
}
} else {
PublishDatasetResult res = execCommand(new PublishDatasetCommand(ds,
createDataverseRequest(user),
isMinor));
return res.isCompleted() ? ok(json(res.getDataset())) : accepted(json(res.getDataset()));
}
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
@POST
@Path("{id}/move/{targetDataverseAlias}")
public Response moveDataset(@PathParam("id") String id, @PathParam("targetDataverseAlias") String targetDataverseAlias, @QueryParam("forceMove") Boolean force) {
try {
User u = findUserOrDie();
Dataset ds = findDatasetOrDie(id);
Dataverse target = dataverseService.findByAlias(targetDataverseAlias);
if (target == null) {
return error(Response.Status.BAD_REQUEST, BundleUtil.getStringFromBundle("datasets.api.moveDataset.error.targetDataverseNotFound"));
}
//Command requires Super user - it will be tested by the command
execCommand(new MoveDatasetCommand(
createDataverseRequest(u), ds, target, force
));
return ok(BundleUtil.getStringFromBundle("datasets.api.moveDataset.success"));
} catch (WrappedResponse ex) {
if (ex.getCause() instanceof UnforcedCommandException) {
return ex.refineResponse(BundleUtil.getStringFromBundle("datasets.api.moveDataset.error.suggestForce"));
} else {
return ex.getResponse();
}
}
}
@PUT
@Path("{linkedDatasetId}/link/{linkingDataverseAlias}")
public Response linkDataset(@PathParam("linkedDatasetId") String linkedDatasetId, @PathParam("linkingDataverseAlias") String linkingDataverseAlias) {
try{
User u = findUserOrDie();
Dataset linked = findDatasetOrDie(linkedDatasetId);
Dataverse linking = findDataverseOrDie(linkingDataverseAlias);
if (linked == null){
return error(Response.Status.BAD_REQUEST, "Linked Dataset not found.");
}
if (linking == null){
return error(Response.Status.BAD_REQUEST, "Linking Dataverse not found.");
}
execCommand(new LinkDatasetCommand(
createDataverseRequest(u), linking, linked
));
return ok("Dataset " + linked.getId() + " linked successfully to " + linking.getAlias());
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
@GET
@Path("{id}/links")
public Response getLinks(@PathParam("id") String idSupplied ) {
try {
User u = findUserOrDie();
if (!u.isSuperuser()) {
return error(Response.Status.FORBIDDEN, "Not a superuser");
}
Dataset dataset = findDatasetOrDie(idSupplied);
long datasetId = dataset.getId();
List<Dataverse> dvsThatLinkToThisDatasetId = dataverseSvc.findDataversesThatLinkToThisDatasetId(datasetId);
JsonArrayBuilder dataversesThatLinkToThisDatasetIdBuilder = Json.createArrayBuilder();
for (Dataverse dataverse : dvsThatLinkToThisDatasetId) {
dataversesThatLinkToThisDatasetIdBuilder.add(dataverse.getAlias() + " (id " + dataverse.getId() + ")");
}
JsonObjectBuilder response = Json.createObjectBuilder();
response.add("dataverses that link to dataset id " + datasetId, dataversesThatLinkToThisDatasetIdBuilder);
return ok(response);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
/**
* Add a given assignment to a given user or group
* @param ra role assignment DTO
* @param id dataset id
* @param apiKey api key of the calling user, passed as the {@code key} query parameter
*/
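// Hedged usage sketch (not from the original source). Based on the fields read below via
// ra.getAssignee() and ra.getRole(), a request body of the following shape is assumed; the
// "@" assignee prefix and the X-Dataverse-key header name are assumptions as well:
//   curl -H "X-Dataverse-key: $API_TOKEN" -H "Content-Type: application/json" \
//        -X POST -d '{"assignee": "@someUser", "role": "curator"}' \
//        "$SERVER/api/datasets/$ID/assignments"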
@POST
@Path("{identifier}/assignments")
public Response createAssignment(RoleAssignmentDTO ra, @PathParam("identifier") String id, @QueryParam("key") String apiKey) {
try {
Dataset dataset = findDatasetOrDie(id);
RoleAssignee assignee = findAssignee(ra.getAssignee());
if (assignee == null) {
return error(Response.Status.BAD_REQUEST, BundleUtil.getStringFromBundle("datasets.api.grant.role.assignee.not.found.error"));
}
DataverseRole theRole;
Dataverse dv = dataset.getOwner();
theRole = null;
while ((theRole == null) && (dv != null)) {
for (DataverseRole aRole : rolesSvc.availableRoles(dv.getId())) {
if (aRole.getAlias().equals(ra.getRole())) {
theRole = aRole;
break;
}
}
dv = dv.getOwner();
}
if (theRole == null) {
List<String> args = Arrays.asList(ra.getRole(), dataset.getOwner().getDisplayName());
return error(Status.BAD_REQUEST, BundleUtil.getStringFromBundle("datasets.api.grant.role.not.found.error", args));
}
String privateUrlToken = null;
return ok(
json(execCommand(new AssignRoleCommand(assignee, theRole, dataset, createDataverseRequest(findUserOrDie()), privateUrlToken))));
} catch (WrappedResponse ex) {
List<String> args = Arrays.asList(ex.getMessage());
logger.log(Level.WARNING, BundleUtil.getStringFromBundle("datasets.api.grant.role.cant.create.assignment.error", args));
return ex.getResponse();
}
}
@DELETE
@Path("{identifier}/assignments/{id}")
public Response deleteAssignment(@PathParam("id") long assignmentId, @PathParam("identifier") String dsId) {
RoleAssignment ra = em.find(RoleAssignment.class, assignmentId);
if (ra != null) {
try {
findDatasetOrDie(dsId);
execCommand(new RevokeRoleCommand(ra, createDataverseRequest(findUserOrDie())));
List<String> args = Arrays.asList(ra.getRole().getName(), ra.getAssigneeIdentifier(), ra.getDefinitionPoint().accept(DvObject.NamePrinter));
return ok(BundleUtil.getStringFromBundle("datasets.api.revoke.role.success", args));
} catch (WrappedResponse ex) {
return ex.getResponse();
}
} else {
List<String> args = Arrays.asList(Long.toString(assignmentId));
return error(Status.NOT_FOUND, BundleUtil.getStringFromBundle("datasets.api.revoke.role.not.found.error", args));
}
}
@GET
@Path("{identifier}/assignments")
public Response getAssignments(@PathParam("identifier") String id) {
return response( req ->
ok( execCommand(
new ListRoleAssignments(req, findDatasetOrDie(id)))
.stream().map(ra->json(ra)).collect(toJsonArray())) );
}
@GET
@Path("{id}/privateUrl")
public Response getPrivateUrlData(@PathParam("id") String idSupplied) {
return response( req -> {
PrivateUrl privateUrl = execCommand(new GetPrivateUrlCommand(req, findDatasetOrDie(idSupplied)));
return (privateUrl != null) ? ok(json(privateUrl))
: error(Response.Status.NOT_FOUND, "Private URL not found.");
});
}
@POST
@Path("{id}/privateUrl")
public Response createPrivateUrl(@PathParam("id") String idSupplied) {
return response( req ->
ok(json(execCommand(
new CreatePrivateUrlCommand(req, findDatasetOrDie(idSupplied))))));
}
@DELETE
@Path("{id}/privateUrl")
public Response deletePrivateUrl(@PathParam("id") String idSupplied) {
return response( req -> {
Dataset dataset = findDatasetOrDie(idSupplied);
PrivateUrl privateUrl = execCommand(new GetPrivateUrlCommand(req, dataset));
if (privateUrl != null) {
execCommand(new DeletePrivateUrlCommand(req, dataset));
return ok("Private URL deleted.");
} else {
return notFound("No Private URL to delete.");
}
});
}
@GET
@Path("{id}/thumbnail/candidates")
public Response getDatasetThumbnailCandidates(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
boolean canUpdateThumbnail = false;
try {
canUpdateThumbnail = permissionSvc.requestOn(createDataverseRequest(findUserOrDie()), dataset).canIssue(UpdateDatasetThumbnailCommand.class);
} catch (WrappedResponse ex) {
logger.info("Exception thrown while trying to figure out permissions while getting thumbnail for dataset id " + dataset.getId() + ": " + ex.getLocalizedMessage());
}
if (!canUpdateThumbnail) {
return error(Response.Status.FORBIDDEN, "You are not permitted to list dataset thumbnail candidates.");
}
JsonArrayBuilder data = Json.createArrayBuilder();
boolean considerDatasetLogoAsCandidate = true;
for (DatasetThumbnail datasetThumbnail : DatasetUtil.getThumbnailCandidates(dataset, considerDatasetLogoAsCandidate, ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE)) {
JsonObjectBuilder candidate = Json.createObjectBuilder();
String base64image = datasetThumbnail.getBase64image();
if (base64image != null) {
logger.fine("found a candidate!");
candidate.add("base64image", base64image);
}
DataFile dataFile = datasetThumbnail.getDataFile();
if (dataFile != null) {
candidate.add("dataFileId", dataFile.getId());
}
data.add(candidate);
}
return ok(data);
} catch (WrappedResponse ex) {
return error(Response.Status.NOT_FOUND, "Could not find dataset based on id supplied: " + idSupplied + ".");
}
}
@GET
@Produces({"image/png"})
@Path("{id}/thumbnail")
public Response getDatasetThumbnail(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
InputStream is = DatasetUtil.getThumbnailAsInputStream(dataset, ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE);
if(is == null) {
return notFound("Thumbnail not available");
}
return Response.ok(is).build();
} catch (WrappedResponse wr) {
return notFound("Thumbnail not available");
}
}
// TODO: Rather than only supporting looking up files by their database IDs (dataFileIdSupplied), consider supporting persistent identifiers.
@POST
@Path("{id}/thumbnail/{dataFileId}")
public Response setDataFileAsThumbnail(@PathParam("id") String idSupplied, @PathParam("dataFileId") long dataFileIdSupplied) {
try {
DatasetThumbnail datasetThumbnail = execCommand(new UpdateDatasetThumbnailCommand(createDataverseRequest(findUserOrDie()), findDatasetOrDie(idSupplied), UpdateDatasetThumbnailCommand.UserIntent.setDatasetFileAsThumbnail, dataFileIdSupplied, null));
return ok("Thumbnail set to " + datasetThumbnail.getBase64image());
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@POST
@Path("{id}/thumbnail")
@Consumes(MediaType.MULTIPART_FORM_DATA)
public Response uploadDatasetLogo(@PathParam("id") String idSupplied, @FormDataParam("file") InputStream inputStream
) {
try {
DatasetThumbnail datasetThumbnail = execCommand(new UpdateDatasetThumbnailCommand(createDataverseRequest(findUserOrDie()), findDatasetOrDie(idSupplied), UpdateDatasetThumbnailCommand.UserIntent.setNonDatasetFileAsThumbnail, null, inputStream));
return ok("Thumbnail is now " + datasetThumbnail.getBase64image());
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@DELETE
@Path("{id}/thumbnail")
public Response removeDatasetLogo(@PathParam("id") String idSupplied) {
try {
DatasetThumbnail datasetThumbnail = execCommand(new UpdateDatasetThumbnailCommand(createDataverseRequest(findUserOrDie()), findDatasetOrDie(idSupplied), UpdateDatasetThumbnailCommand.UserIntent.removeThumbnail, null, null));
return ok("Dataset thumbnail removed.");
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@GET
@Path("{identifier}/dataCaptureModule/rsync")
public Response getRsync(@PathParam("identifier") String id) {
//TODO - does it make sense to switch this to dataset identifier for consistency with the rest of the DCM APIs?
if (!DataCaptureModuleUtil.rsyncSupportEnabled(settingsSvc.getValueForKey(SettingsServiceBean.Key.UploadMethods))) {
return error(Response.Status.METHOD_NOT_ALLOWED, SettingsServiceBean.Key.UploadMethods + " does not contain " + SystemConfig.FileUploadMethods.RSYNC + ".");
}
Dataset dataset = null;
try {
dataset = findDatasetOrDie(id);
AuthenticatedUser user = findAuthenticatedUserOrDie();
ScriptRequestResponse scriptRequestResponse = execCommand(new RequestRsyncScriptCommand(createDataverseRequest(user), dataset));
DatasetLock lock = datasetService.addDatasetLock(dataset.getId(), DatasetLock.Reason.DcmUpload, user.getId(), "script downloaded");
if (lock == null) {
logger.log(Level.WARNING, "Failed to lock the dataset (dataset id={0})", dataset.getId());
return error(Response.Status.FORBIDDEN, "Failed to lock the dataset (dataset id="+dataset.getId()+")");
}
return ok(scriptRequestResponse.getScript(), MediaType.valueOf(MediaType.TEXT_PLAIN));
} catch (WrappedResponse wr) {
return wr.getResponse();
} catch (EJBException ex) {
return error(Response.Status.INTERNAL_SERVER_ERROR, "Something went wrong attempting to download rsync script: " + EjbUtil.ejbExceptionToString(ex));
}
}
/**
* This api endpoint triggers the creation of a "package" file in a dataset
* after that package has been moved onto the same filesystem via the Data Capture Module.
* The package is really just a way that Dataverse interprets a folder created by DCM, seeing it as just one file.
* The "package" can be downloaded over RSAL.
*
* This endpoint currently supports both POSIX ("file") storage and AWS S3 storage in Dataverse, and acts accordingly depending on which one is active.
*
* The initial design of the DCM/Dataverse interaction was not to use packages, but to allow import of all individual files natively into Dataverse.
* But due to the possibly immense number of files (millions) the package approach was taken.
* This is relevant because the posix ("file") code contains many remnants of that development work.
* The s3 code was written later and is set to only support import as packages. It takes a lot from FileRecordWriter.
* -MAD 4.9.1
*/
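// Hedged illustration (not from the original source): the DCM is expected to POST a JSON body
// containing the keys read below ("status", "uploadFolder", "totalSize"). For example (values
// are made up):
//   { "status": "validation passed", "uploadFolder": "upload_2019_01", "totalSize": 12345 }
// "status" may also be "validation failed", which triggers the notification branch below.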
@POST
@Path("{identifier}/dataCaptureModule/checksumValidation")
public Response receiveChecksumValidationResults(@PathParam("identifier") String id, JsonObject jsonFromDcm) {
logger.log(Level.FINE, "jsonFromDcm: {0}", jsonFromDcm);
AuthenticatedUser authenticatedUser = null;
try {
authenticatedUser = findAuthenticatedUserOrDie();
} catch (WrappedResponse ex) {
return error(Response.Status.BAD_REQUEST, "Authentication is required.");
}
if (!authenticatedUser.isSuperuser()) {
return error(Response.Status.FORBIDDEN, "Superusers only.");
}
String statusMessageFromDcm = jsonFromDcm.getString("status");
try {
Dataset dataset = findDatasetOrDie(id);
if ("validation passed".equals(statusMessageFromDcm)) {
logger.log(Level.INFO, "Checksum Validation passed for DCM.");
String storageDriver = dataset.getDataverseContext().getEffectiveStorageDriverId();
String uploadFolder = jsonFromDcm.getString("uploadFolder");
int totalSize = jsonFromDcm.getInt("totalSize");
String storageDriverType = System.getProperty("dataverse.file." + storageDriver + ".type");
if (storageDriverType.equals("file")) {
logger.log(Level.INFO, "File storage driver used for (dataset id={0})", dataset.getId());
ImportMode importMode = ImportMode.MERGE;
try {
JsonObject jsonFromImportJobKickoff = execCommand(new ImportFromFileSystemCommand(createDataverseRequest(findUserOrDie()), dataset, uploadFolder, new Long(totalSize), importMode));
long jobId = jsonFromImportJobKickoff.getInt("executionId");
String message = jsonFromImportJobKickoff.getString("message");
JsonObjectBuilder job = Json.createObjectBuilder();
job.add("jobId", jobId);
job.add("message", message);
return ok(job);
} catch (WrappedResponse wr) {
String message = wr.getMessage();
return error(Response.Status.INTERNAL_SERVER_ERROR, "Uploaded files have passed checksum validation but something went wrong while attempting to put the files into Dataverse. Message was '" + message + "'.");
}
} else if(storageDriverType.equals("s3")) {
logger.log(Level.INFO, "S3 storage driver used for DCM (dataset id={0})", dataset.getId());
try {
//Where the lifting is actually done: moving the S3 files over and making Dataverse aware of the existence of the package
s3PackageImporter.copyFromS3(dataset, uploadFolder);
DataFile packageFile = s3PackageImporter.createPackageDataFile(dataset, uploadFolder, new Long(totalSize));
if (packageFile == null) {
logger.log(Level.SEVERE, "S3 File package import failed.");
return error(Response.Status.INTERNAL_SERVER_ERROR, "S3 File package import failed.");
}
DatasetLock dcmLock = dataset.getLockFor(DatasetLock.Reason.DcmUpload);
if (dcmLock == null) {
logger.log(Level.WARNING, "Dataset not locked for DCM upload");
} else {
datasetService.removeDatasetLocks(dataset, DatasetLock.Reason.DcmUpload);
dataset.removeLock(dcmLock);
}
// update version using the command engine to enforce user permissions and constraints
if (dataset.getVersions().size() == 1 && dataset.getLatestVersion().getVersionState() == DatasetVersion.VersionState.DRAFT) {
try {
Command<Dataset> cmd;
cmd = new UpdateDatasetVersionCommand(dataset, new DataverseRequest(authenticatedUser, (HttpServletRequest) null));
commandEngine.submit(cmd);
} catch (CommandException ex) {
return error(Response.Status.INTERNAL_SERVER_ERROR, "CommandException updating DatasetVersion from batch job: " + ex.getMessage());
}
} else {
String constraintError = "ConstraintException updating DatasetVersion from batch job: dataset must be a "
+ "single version in draft mode.";
logger.log(Level.SEVERE, constraintError);
}
JsonObjectBuilder job = Json.createObjectBuilder();
return ok(job);
} catch (IOException e) {
String message = e.getMessage();
return error(Response.Status.INTERNAL_SERVER_ERROR, "Uploaded files have passed checksum validation but something went wrong while attempting to move the files into Dataverse. Message was '" + message + "'.");
}
} else {
return error(Response.Status.INTERNAL_SERVER_ERROR, "Invalid storage driver in Dataverse, not compatible with dcm");
}
} else if ("validation failed".equals(statusMessageFromDcm)) {
Map<String, AuthenticatedUser> distinctAuthors = permissionService.getDistinctUsersWithPermissionOn(Permission.EditDataset, dataset);
distinctAuthors.values().forEach((value) -> {
userNotificationService.sendNotification((AuthenticatedUser) value, new Timestamp(new Date().getTime()), UserNotification.Type.CHECKSUMFAIL, dataset.getId());
});
List<AuthenticatedUser> superUsers = authenticationServiceBean.findSuperUsers();
if (superUsers != null && !superUsers.isEmpty()) {
superUsers.forEach((au) -> {
userNotificationService.sendNotification(au, new Timestamp(new Date().getTime()), UserNotification.Type.CHECKSUMFAIL, dataset.getId());
});
}
return ok("User notified about checksum validation failure.");
} else {
return error(Response.Status.BAD_REQUEST, "Unexpected status cannot be processed: " + statusMessageFromDcm);
}
} catch (WrappedResponse ex) {
return ex.getResponse();
}
}
@POST
@Path("{id}/submitForReview")
public Response submitForReview(@PathParam("id") String idSupplied) {
try {
Dataset updatedDataset = execCommand(new SubmitDatasetForReviewCommand(createDataverseRequest(findUserOrDie()), findDatasetOrDie(idSupplied)));
JsonObjectBuilder result = Json.createObjectBuilder();
boolean inReview = updatedDataset.isLockedFor(DatasetLock.Reason.InReview);
result.add("inReview", inReview);
result.add("message", "Dataset id " + updatedDataset.getId() + " has been submitted for review.");
return ok(result);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@POST
@Path("{id}/returnToAuthor")
public Response returnToAuthor(@PathParam("id") String idSupplied, String jsonBody) {
if (jsonBody == null || jsonBody.isEmpty()) {
return error(Response.Status.BAD_REQUEST, "You must supply JSON to this API endpoint and it must contain a reason for returning the dataset (field: reasonForReturn).");
}
StringReader rdr = new StringReader(jsonBody);
JsonObject json = Json.createReader(rdr).readObject();
try {
Dataset dataset = findDatasetOrDie(idSupplied);
String reasonForReturn = null;
reasonForReturn = json.getString("reasonForReturn");
// TODO: Once we add a box for the curator to type into, pass the reason for return to the ReturnDatasetToAuthorCommand and delete this check and call to setReturnReason on the API side.
if (reasonForReturn == null || reasonForReturn.isEmpty()) {
return error(Response.Status.BAD_REQUEST, "You must enter a reason for returning a dataset to the author(s).");
}
AuthenticatedUser authenticatedUser = findAuthenticatedUserOrDie();
Dataset updatedDataset = execCommand(new ReturnDatasetToAuthorCommand(createDataverseRequest(authenticatedUser), dataset, reasonForReturn ));
JsonObjectBuilder result = Json.createObjectBuilder();
result.add("inReview", false);
result.add("message", "Dataset id " + updatedDataset.getId() + " has been sent back to the author(s).");
return ok(result);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@GET
@Path("{id}/uploadsid")
public Response getUploadUrl(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
boolean canUpdateDataset = false;
try {
canUpdateDataset = permissionSvc.requestOn(createDataverseRequest(findUserOrDie()), dataset).canIssue(UpdateDatasetVersionCommand.class);
} catch (WrappedResponse ex) {
logger.info("Exception thrown while trying to figure out permissions while getting upload URL for dataset id " + dataset.getId() + ": " + ex.getLocalizedMessage());
}
if (!canUpdateDataset) {
return error(Response.Status.FORBIDDEN, "You are not permitted to upload files to this dataset.");
}
S3AccessIO<?> s3io = FileUtil.getS3AccessForDirectUpload(dataset);
if(s3io == null) {
return error(Response.Status.NOT_FOUND,"Direct upload not supported for files in this dataset: " + dataset.getId());
}
String url = null;
String storageIdentifier = null;
try {
url = s3io.generateTemporaryS3UploadUrl();
storageIdentifier = FileUtil.getStorageIdentifierFromLocation(s3io.getStorageLocation());
} catch (IOException io) {
logger.warning(io.getMessage());
throw new WrappedResponse(io, error( Response.Status.INTERNAL_SERVER_ERROR, "Could not create direct upload request"));
}
JsonObjectBuilder response = Json.createObjectBuilder()
.add("url", url)
.add("storageIdentifier", storageIdentifier );
return ok(response);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
/**
* Add a File to an existing Dataset
*
* @param idSupplied
* @param jsonData
* @param fileInputStream
* @param contentDispositionHeader
* @param formDataBodyPart
* @return
*/
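// Hedged usage sketch (not from the original source): this endpoint consumes multipart/form-data
// with a "file" part and a "jsonData" part (see the @FormDataParam annotations below). The
// X-Dataverse-key header name and the jsonData keys shown are assumptions:
//   curl -H "X-Dataverse-key: $API_TOKEN" -X POST \
//        -F "file=@data.tab" -F 'jsonData={"description": "My file", "restrict": "false"}' \
//        "$SERVER/api/datasets/$ID/add"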
@POST
@Path("{id}/add")
@Consumes(MediaType.MULTIPART_FORM_DATA)
public Response addFileToDataset(@PathParam("id") String idSupplied,
@FormDataParam("jsonData") String jsonData,
@FormDataParam("file") InputStream fileInputStream,
@FormDataParam("file") FormDataContentDisposition contentDispositionHeader,
@FormDataParam("file") final FormDataBodyPart formDataBodyPart
){
if (!systemConfig.isHTTPUpload()) {
return error(Response.Status.SERVICE_UNAVAILABLE, BundleUtil.getStringFromBundle("file.api.httpDisabled"));
}
// -------------------------------------
// (1) Get the user from the API key
// -------------------------------------
User authUser;
try {
authUser = findUserOrDie();
} catch (WrappedResponse ex) {
return error(Response.Status.FORBIDDEN,
BundleUtil.getStringFromBundle("file.addreplace.error.auth")
);
}
// -------------------------------------
// (2) Get the Dataset Id
//
// -------------------------------------
Dataset dataset;
try {
dataset = findDatasetOrDie(idSupplied);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
//------------------------------------
// (2a) Make sure dataset does not have package file
//
// --------------------------------------
for (DatasetVersion dv : dataset.getVersions()) {
if (dv.isHasPackageFile()) {
return error(Response.Status.FORBIDDEN,
BundleUtil.getStringFromBundle("file.api.alreadyHasPackageFile")
);
}
}
// (2b) Load up optional params via JSON
//---------------------------------------
OptionalFileParams optionalFileParams = null;
msgt("(api) jsonData: " + jsonData);
try {
optionalFileParams = new OptionalFileParams(jsonData);
} catch (DataFileTagException ex) {
return error( Response.Status.BAD_REQUEST, ex.getMessage());
}
// -------------------------------------
// (3) Get the file name and content type
// -------------------------------------
String newFilename = null;
String newFileContentType = null;
String newStorageIdentifier = null;
if (null == contentDispositionHeader) {
if (optionalFileParams.hasStorageIdentifier()) {
newStorageIdentifier = optionalFileParams.getStorageIdentifier();
// ToDo - check that storageIdentifier is valid
if (optionalFileParams.hasFileName()) {
newFilename = optionalFileParams.getFileName();
if (optionalFileParams.hasMimetype()) {
newFileContentType = optionalFileParams.getMimeType();
}
}
} else {
return error(BAD_REQUEST,
"You must upload a file or provide a storageidentifier, filename, and mimetype.");
}
} else {
newFilename = contentDispositionHeader.getFileName();
newFileContentType = formDataBodyPart.getMediaType().toString();
}
//-------------------
// (4) Create the AddReplaceFileHelper object
//-------------------
msg("ADD!");
DataverseRequest dvRequest2 = createDataverseRequest(authUser);
AddReplaceFileHelper addFileHelper = new AddReplaceFileHelper(dvRequest2,
ingestService,
datasetService,
fileService,
permissionSvc,
commandEngine,
systemConfig);
//-------------------
// (5) Run "runAddFileByDataset"
//-------------------
addFileHelper.runAddFileByDataset(dataset,
newFilename,
newFileContentType,
newStorageIdentifier,
fileInputStream,
optionalFileParams);
if (addFileHelper.hasError()){
return error(addFileHelper.getHttpErrorCode(), addFileHelper.getErrorMessagesAsString("\n"));
}else{
String successMsg = BundleUtil.getStringFromBundle("file.addreplace.success.add");
try {
//msgt("as String: " + addFileHelper.getSuccessResult());
/**
* @todo We need a consistent, sane way to communicate a human
* readable message to an API client suitable for human
* consumption. Imagine if the UI were built in Angular or React
* and we want to return a message from the API as-is to the
* user. Human readable.
*/
logger.fine("successMsg: " + successMsg);
String duplicateWarning = addFileHelper.getDuplicateFileWarning();
if (duplicateWarning != null && !duplicateWarning.isEmpty()) {
return ok(addFileHelper.getDuplicateFileWarning(), addFileHelper.getSuccessResultAsJsonObjectBuilder());
} else {
return ok(addFileHelper.getSuccessResultAsJsonObjectBuilder());
}
//"Look at that! You added a file! (hey hey, it may have worked)");
} catch (NoFilesException ex) {
Logger.getLogger(Files.class.getName()).log(Level.SEVERE, null, ex);
return error(Response.Status.BAD_REQUEST, "NoFileException! Serious Error! See administrator!");
}
}
} // end: addFileToDataset
private void msg(String m){
//System.out.println(m);
logger.fine(m);
}
private void dashes(){
msg("----------------");
}
private void msgt(String m){
dashes(); msg(m); dashes();
}
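/**
 * Resolves a version identifier string into a call on the given handler. Accepted values, as
 * implemented in the switch below: ":latest", ":draft", ":latest-published", a major version
 * such as "1" (minor defaults to 0), or "major.minor" such as "1.2". Anything else is rejected
 * with a BAD_REQUEST wrapped response.
 */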
public static <T> T handleVersion( String versionId, DsVersionHandler<T> hdl )
throws WrappedResponse {
switch (versionId) {
case ":latest": return hdl.handleLatest();
case ":draft": return hdl.handleDraft();
case ":latest-published": return hdl.handleLatestPublished();
default:
try {
String[] versions = versionId.split("\\.");
switch (versions.length) {
case 1:
return hdl.handleSpecific(Long.parseLong(versions[0]), 0L);
case 2:
return hdl.handleSpecific( Long.parseLong(versions[0]), Long.parseLong(versions[1]) );
default:
throw new WrappedResponse(error( Response.Status.BAD_REQUEST, "Illegal version identifier '" + versionId + "'"));
}
} catch ( NumberFormatException nfe ) {
throw new WrappedResponse( error( Response.Status.BAD_REQUEST, "Illegal version identifier '" + versionId + "'") );
}
}
}
private DatasetVersion getDatasetVersionOrDie( final DataverseRequest req, String versionNumber, final Dataset ds, UriInfo uriInfo, HttpHeaders headers) throws WrappedResponse {
DatasetVersion dsv = execCommand( handleVersion(versionNumber, new DsVersionHandler<Command<DatasetVersion>>(){
@Override
public Command<DatasetVersion> handleLatest() {
return new GetLatestAccessibleDatasetVersionCommand(req, ds);
}
@Override
public Command<DatasetVersion> handleDraft() {
return new GetDraftDatasetVersionCommand(req, ds);
}
@Override
public Command<DatasetVersion> handleSpecific(long major, long minor) {
return new GetSpecificPublishedDatasetVersionCommand(req, ds, major, minor);
}
@Override
public Command<DatasetVersion> handleLatestPublished() {
return new GetLatestPublishedDatasetVersionCommand(req, ds);
}
}));
if ( dsv == null || dsv.getId() == null ) {
throw new WrappedResponse( notFound("Dataset version " + versionNumber + " of dataset " + ds.getId() + " not found") );
}
if (dsv.isReleased()) {
MakeDataCountLoggingServiceBean.MakeDataCountEntry entry = new MakeDataCountEntry(uriInfo, headers, dvRequestService, ds);
mdcLogService.logEntry(entry);
}
return dsv;
}
@GET
@Path("{identifier}/locks")
public Response getLocks(@PathParam("identifier") String id, @QueryParam("type") DatasetLock.Reason lockType) {
Dataset dataset = null;
try {
dataset = findDatasetOrDie(id);
Set<DatasetLock> locks;
if (lockType == null) {
locks = dataset.getLocks();
} else {
// request for a specific type lock:
DatasetLock lock = dataset.getLockFor(lockType);
locks = new HashSet<>();
if (lock != null) {
locks.add(lock);
}
}
return ok(locks.stream().map(lock -> json(lock)).collect(toJsonArray()));
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@DELETE
@Path("{identifier}/locks")
public Response deleteLocks(@PathParam("identifier") String id, @QueryParam("type") DatasetLock.Reason lockType) {
return response(req -> {
try {
AuthenticatedUser user = findAuthenticatedUserOrDie();
if (!user.isSuperuser()) {
return error(Response.Status.FORBIDDEN, "This API end point can be used by superusers only.");
}
Dataset dataset = findDatasetOrDie(id);
if (lockType == null) {
Set<DatasetLock.Reason> locks = new HashSet<>();
for (DatasetLock lock : dataset.getLocks()) {
locks.add(lock.getReason());
}
if (!locks.isEmpty()) {
for (DatasetLock.Reason locktype : locks) {
execCommand(new RemoveLockCommand(req, dataset, locktype));
// refresh the dataset:
dataset = findDatasetOrDie(id);
}
// kick off dataset reindexing, in case the locks removed
// affected the search card:
try {
indexService.indexDataset(dataset, true);
} catch (IOException | SolrServerException e) {
String failureLogText = "Post lock removal indexing failed. You can kickoff a re-index of this dataset with: \r\n curl http://localhost:8080/api/admin/index/datasets/" + dataset.getId().toString();
failureLogText += "\r\n" + e.getLocalizedMessage();
LoggingUtil.writeOnSuccessFailureLog(null, failureLogText, dataset);
}
return ok("locks removed");
}
return ok("dataset not locked");
}
// request for a specific type lock:
DatasetLock lock = dataset.getLockFor(lockType);
if (lock != null) {
execCommand(new RemoveLockCommand(req, dataset, lock.getReason()));
// refresh the dataset:
dataset = findDatasetOrDie(id);
// ... and kick off dataset reindexing, in case the lock removed
// affected the search card:
try {
indexService.indexDataset(dataset, true);
} catch (IOException | SolrServerException e) {
String failureLogText = "Post lock removal indexing failed. You can kickoff a re-index of this dataset with: \r\n curl http://localhost:8080/api/admin/index/datasets/" + dataset.getId().toString();
failureLogText += "\r\n" + e.getLocalizedMessage();
LoggingUtil.writeOnSuccessFailureLog(null, failureLogText, dataset);
}
return ok("lock type " + lock.getReason() + " removed");
}
return ok("no lock type " + lockType + " on the dataset");
} catch (WrappedResponse wr) {
return wr.getResponse();
}
});
}
@POST
@Path("{identifier}/lock/{type}")
public Response lockDataset(@PathParam("identifier") String id, @PathParam("type") DatasetLock.Reason lockType) {
return response(req -> {
try {
AuthenticatedUser user = findAuthenticatedUserOrDie();
if (!user.isSuperuser()) {
return error(Response.Status.FORBIDDEN, "This API end point can be used by superusers only.");
}
Dataset dataset = findDatasetOrDie(id);
DatasetLock lock = dataset.getLockFor(lockType);
if (lock != null) {
return error(Response.Status.FORBIDDEN, "dataset already locked with lock type " + lockType);
}
lock = new DatasetLock(lockType, user);
execCommand(new AddLockCommand(req, dataset, lock));
// refresh the dataset:
dataset = findDatasetOrDie(id);
// ... and kick off dataset reindexing:
try {
indexService.indexDataset(dataset, true);
} catch (IOException | SolrServerException e) {
String failureLogText = "Post add lock indexing failed. You can kickoff a re-index of this dataset with: \r\n curl http://localhost:8080/api/admin/index/datasets/" + dataset.getId().toString();
failureLogText += "\r\n" + e.getLocalizedMessage();
LoggingUtil.writeOnSuccessFailureLog(null, failureLogText, dataset);
}
return ok("dataset locked with lock type " + lockType);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
});
}
@GET
@Path("{id}/makeDataCount/citations")
public Response getMakeDataCountCitations(@PathParam("id") String idSupplied) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
JsonArrayBuilder datasetsCitations = Json.createArrayBuilder();
List<DatasetExternalCitations> externalCitations = datasetExternalCitationsService.getDatasetExternalCitationsByDataset(dataset);
for (DatasetExternalCitations citation : externalCitations ){
JsonObjectBuilder candidateObj = Json.createObjectBuilder();
/**
* In the future we can imagine storing and presenting more
* information about the citation such as the title of the paper
* and the names of the authors. For now, we'll at least give
* the URL of the citation so people can click and find out more
* about the citation.
*/
candidateObj.add("citationUrl", citation.getCitedByUrl());
datasetsCitations.add(candidateObj);
}
return ok(datasetsCitations);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
@GET
@Path("{id}/makeDataCount/{metric}")
public Response getMakeDataCountMetricCurrentMonth(@PathParam("id") String idSupplied, @PathParam("metric") String metricSupplied, @QueryParam("country") String country) {
String nullCurrentMonth = null;
return getMakeDataCountMetric(idSupplied, metricSupplied, nullCurrentMonth, country);
}
@GET
@Path("{identifier}/storagesize")
public Response getStorageSize(@PathParam("identifier") String dvIdtf, @QueryParam("includeCached") boolean includeCached,
@Context UriInfo uriInfo, @Context HttpHeaders headers) throws WrappedResponse {
return response(req -> ok(MessageFormat.format(BundleUtil.getStringFromBundle("datasets.api.datasize.storage"),
execCommand(new GetDatasetStorageSizeCommand(req, findDatasetOrDie(dvIdtf), includeCached,GetDatasetStorageSizeCommand.Mode.STORAGE, null)))));
}
@GET
@Path("{identifier}/versions/{versionId}/downloadsize")
public Response getDownloadSize(@PathParam("identifier") String dvIdtf, @PathParam("versionId") String version,
@Context UriInfo uriInfo, @Context HttpHeaders headers) throws WrappedResponse {
return response(req -> ok(MessageFormat.format(BundleUtil.getStringFromBundle("datasets.api.datasize.download"),
execCommand(new GetDatasetStorageSizeCommand(req, findDatasetOrDie(dvIdtf), false, GetDatasetStorageSizeCommand.Mode.DOWNLOAD, getDatasetVersionOrDie(req, version , findDatasetOrDie(dvIdtf), uriInfo, headers))))));
}
@GET
@Path("{id}/makeDataCount/{metric}/{yyyymm}")
public Response getMakeDataCountMetric(@PathParam("id") String idSupplied, @PathParam("metric") String metricSupplied, @PathParam("yyyymm") String yyyymm, @QueryParam("country") String country) {
try {
Dataset dataset = findDatasetOrDie(idSupplied);
NullSafeJsonBuilder jsonObjectBuilder = jsonObjectBuilder();
MakeDataCountUtil.MetricType metricType = null;
try {
metricType = MakeDataCountUtil.MetricType.fromString(metricSupplied);
} catch (IllegalArgumentException ex) {
return error(Response.Status.BAD_REQUEST, ex.getMessage());
}
String monthYear = null;
if (yyyymm != null) {
// We add "-01" because we store "2018-05-01" rather than "2018-05" in the "monthyear" column.
// Dates come to us as "2018-05-01" in the SUSHI JSON ("begin-date") and we decided to store them as-is.
monthYear = yyyymm + "-01";
}
DatasetMetrics datasetMetrics = datasetMetricsSvc.getDatasetMetricsByDatasetForDisplay(dataset, monthYear, country);
if (datasetMetrics == null) {
return ok("No metrics available for dataset " + dataset.getId() + " for " + yyyymm + " for country code " + country + ".");
} else if (datasetMetrics.getDownloadsTotal() + datasetMetrics.getViewsTotal() == 0) {
return ok("No metrics available for dataset " + dataset.getId() + " for " + yyyymm + " for country code " + country + ".");
}
Long viewsTotalRegular = null;
Long viewsUniqueRegular = null;
Long downloadsTotalRegular = null;
Long downloadsUniqueRegular = null;
Long viewsTotalMachine = null;
Long viewsUniqueMachine = null;
Long downloadsTotalMachine = null;
Long downloadsUniqueMachine = null;
Long viewsTotal = null;
Long viewsUnique = null;
Long downloadsTotal = null;
Long downloadsUnique = null;
switch (metricSupplied) {
case "viewsTotal":
viewsTotal = datasetMetrics.getViewsTotal();
break;
case "viewsTotalRegular":
viewsTotalRegular = datasetMetrics.getViewsTotalRegular();
break;
case "viewsTotalMachine":
viewsTotalMachine = datasetMetrics.getViewsTotalMachine();
break;
case "viewsUnique":
viewsUnique = datasetMetrics.getViewsUnique();
break;
case "viewsUniqueRegular":
viewsUniqueRegular = datasetMetrics.getViewsUniqueRegular();
break;
case "viewsUniqueMachine":
viewsUniqueMachine = datasetMetrics.getViewsUniqueMachine();
break;
case "downloadsTotal":
downloadsTotal = datasetMetrics.getDownloadsTotal();
break;
case "downloadsTotalRegular":
downloadsTotalRegular = datasetMetrics.getDownloadsTotalRegular();
break;
case "downloadsTotalMachine":
downloadsTotalMachine = datasetMetrics.getDownloadsTotalMachine();
break;
case "downloadsUnique":
downloadsUnique = datasetMetrics.getDownloadsUnique();
break;
case "downloadsUniqueRegular":
downloadsUniqueRegular = datasetMetrics.getDownloadsUniqueRegular();
break;
case "downloadsUniqueMachine":
downloadsUniqueMachine = datasetMetrics.getDownloadsUniqueMachine();
break;
default:
break;
}
/**
* TODO: Think more about the JSON output and the API design.
* getDatasetMetricsByDatasetMonthCountry returns a single row right
* now, by country. We could return multiple metrics (viewsTotal,
* viewsUnique, downloadsTotal, and downloadsUnique) by country.
*/
jsonObjectBuilder.add("viewsTotalRegular", viewsTotalRegular);
jsonObjectBuilder.add("viewsUniqueRegular", viewsUniqueRegular);
jsonObjectBuilder.add("downloadsTotalRegular", downloadsTotalRegular);
jsonObjectBuilder.add("downloadsUniqueRegular", downloadsUniqueRegular);
jsonObjectBuilder.add("viewsTotalMachine", viewsTotalMachine);
jsonObjectBuilder.add("viewsUniqueMachine", viewsUniqueMachine);
jsonObjectBuilder.add("downloadsTotalMachine", downloadsTotalMachine);
jsonObjectBuilder.add("downloadsUniqueMachine", downloadsUniqueMachine);
jsonObjectBuilder.add("viewsTotal", viewsTotal);
jsonObjectBuilder.add("viewsUnique", viewsUnique);
jsonObjectBuilder.add("downloadsTotal", downloadsTotal);
jsonObjectBuilder.add("downloadsUnique", downloadsUnique);
return ok(jsonObjectBuilder);
} catch (WrappedResponse wr) {
return wr.getResponse();
}
}
}
| 1 | 42,843 | Does this result in a 200 when the dataset is still in progress (publishing not yet finalized)? Seems like 202 is the right code for that (as it was), and the test should be watching for a 202? | IQSS-dataverse | java
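A minimal test sketch illustrating the point above, assuming a REST-assured style test (the apiToken, serverUrl and datasetId variables, and the X-Dataverse-key header name, are assumptions; the endpoint path is taken from the publishDataset method above):

// Inside a JUnit test method, with io.restassured on the classpath:
io.restassured.response.Response publish = io.restassured.RestAssured.given()
        .header("X-Dataverse-key", apiToken)
        .post(serverUrl + "/api/datasets/" + datasetId + "/actions/:publish?type=major");
// A publish that has not been finalized yet should surface as 202 Accepted
// (the accepted(...) branch in publishDataset above), not 200 OK.
publish.then().assertThat().statusCode(202);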
@@ -65,6 +65,7 @@ public class SmartStorePlugin extends ForcePlugin {
public static final String LIKE_KEY = "likeKey";
public static final String MATCH_KEY = "matchKey";
public static final String SMART_SQL = "smartSql";
+ public static final String ORDER_PATH = "orderPath";
public static final String ORDER = "order";
public static final String PAGE_SIZE = "pageSize";
public static final String QUERY_TYPE = "queryType"; | 1 | /*
* Copyright (c) 2011-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.phonegap;
import android.app.Activity;
import android.util.Log;
import android.util.SparseArray;
import com.salesforce.androidsdk.phonegap.ForcePlugin;
import com.salesforce.androidsdk.phonegap.JavaScriptPluginVersion;
import com.salesforce.androidsdk.smartstore.app.SalesforceSDKManagerWithSmartStore;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
import com.salesforce.androidsdk.smartstore.ui.SmartStoreInspectorActivity;
import net.sqlcipher.database.SQLiteDatabase;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.PluginResult;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* PhoneGap plugin for smart store.
*/
public class SmartStorePlugin extends ForcePlugin {
// Keys in json from/to javascript
public static final String BEGIN_KEY = "beginKey";
public static final String END_KEY = "endKey";
public static final String INDEX_PATH = "indexPath";
public static final String LIKE_KEY = "likeKey";
public static final String MATCH_KEY = "matchKey";
public static final String SMART_SQL = "smartSql";
public static final String ORDER = "order";
public static final String PAGE_SIZE = "pageSize";
public static final String QUERY_TYPE = "queryType";
static final String TOTAL_ENTRIES = "totalEntries";
static final String TOTAL_PAGES = "totalPages";
static final String RE_INDEX_DATA = "reIndexData";
static final String CURRENT_PAGE_INDEX = "currentPageIndex";
static final String CURRENT_PAGE_ORDERED_ENTRIES = "currentPageOrderedEntries";
static final String CURSOR_ID = "cursorId";
private static final String TYPE = "type";
private static final String SOUP_NAME = "soupName";
private static final String PATH = "path";
private static final String PATHS = "paths";
private static final String QUERY_SPEC = "querySpec";
private static final String EXTERNAL_ID_PATH = "externalIdPath";
private static final String ENTRIES = "entries";
private static final String ENTRY_IDS = "entryIds";
private static final String INDEX = "index";
private static final String INDEXES = "indexes";
private static final String IS_GLOBAL_STORE = "isGlobalStore";
// Map of cursor id to StoreCursor, per database.
private static Map<SQLiteDatabase, SparseArray<StoreCursor>> STORE_CURSORS = new HashMap<SQLiteDatabase, SparseArray<StoreCursor>>();
private synchronized static SparseArray<StoreCursor> getSmartStoreCursors(SmartStore store) {
final SQLiteDatabase db = store.getDatabase();
if (!STORE_CURSORS.containsKey(db)) {
STORE_CURSORS.put(db, new SparseArray<StoreCursor>());
}
return STORE_CURSORS.get(db);
}
/**
* Supported plugin actions that the client can take.
*/
enum Action {
pgAlterSoup,
pgClearSoup,
pgCloseCursor,
pgGetDatabaseSize,
pgGetSoupIndexSpecs,
pgMoveCursorToPageIndex,
pgQuerySoup,
pgRegisterSoup,
pgReIndexSoup,
pgRemoveFromSoup,
pgRemoveSoup,
pgRetrieveSoupEntries,
pgRunSmartQuery,
pgShowInspector,
pgSoupExists,
pgUpsertSoupEntries
}
@Override
public boolean execute(String actionStr, JavaScriptPluginVersion jsVersion,
final JSONArray args, final CallbackContext callbackContext) throws JSONException {
final long start = System.currentTimeMillis();
// Figure out action
final Action action;
try {
action = Action.valueOf(actionStr);
} catch (IllegalArgumentException e) {
Log.e("SmartStorePlugin.execute", "Unknown action " + actionStr);
return false;
}
// Do not run SmartStore actions on the main thread
cordova.getThreadPool().execute(new Runnable() {
@Override
public void run() {
// All SmartStore actions need to be serialized
synchronized(SmartStorePlugin.class) {
try {
switch(action) {
case pgAlterSoup: alterSoup(args, callbackContext); break;
case pgClearSoup: clearSoup(args, callbackContext); break;
case pgCloseCursor: closeCursor(args, callbackContext); break;
case pgGetDatabaseSize: getDatabaseSize(args, callbackContext); break;
case pgGetSoupIndexSpecs: getSoupIndexSpecs(args, callbackContext); break;
case pgMoveCursorToPageIndex: moveCursorToPageIndex(args, callbackContext); break;
case pgQuerySoup: querySoup(args, callbackContext); break;
case pgRegisterSoup: registerSoup(args, callbackContext); break;
case pgReIndexSoup: reIndexSoup(args, callbackContext); break;
case pgRemoveFromSoup: removeFromSoup(args, callbackContext); break;
case pgRemoveSoup: removeSoup(args, callbackContext); break;
case pgRetrieveSoupEntries: retrieveSoupEntries(args, callbackContext); break;
case pgRunSmartQuery: runSmartQuery(args, callbackContext); break;
case pgShowInspector: showInspector(args, callbackContext); break;
case pgSoupExists: soupExists(args, callbackContext); break;
case pgUpsertSoupEntries: upsertSoupEntries(args, callbackContext); break;
default: throw new SmartStoreException("No handler for action " + action);
}
} catch (Exception e) {
Log.w("SmartStorePlugin.execute", e.getMessage(), e);
callbackContext.error(e.getMessage());
}
Log.d("SmartStorePlugin.execute", "Total time for " + action + "->" + (System.currentTimeMillis() - start));
}
}
});
Log.d("SmartStorePlugin.execute", "Main thread time for " + action + "->" + (System.currentTimeMillis() - start));
return true;
}
/**
* Native implementation of pgRemoveFromSoup
* @param args
* @param callbackContext
* @throws JSONException
*/
private void removeFromSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray jsonSoupEntryIds = arg0.getJSONArray(ENTRY_IDS);
Long[] soupEntryIds = new Long[jsonSoupEntryIds.length()];
for (int i = 0; i < jsonSoupEntryIds.length(); i++) {
soupEntryIds[i] = jsonSoupEntryIds.getLong(i);
}
// Run remove
smartStore.delete(soupName, soupEntryIds);
callbackContext.success();
}
/**
* Native implementation of pgRetrieveSoupEntries
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void retrieveSoupEntries(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray jsonSoupEntryIds = arg0.getJSONArray(ENTRY_IDS);
Long[] soupEntryIds = new Long[jsonSoupEntryIds.length()];
for (int i = 0; i < jsonSoupEntryIds.length(); i++) {
soupEntryIds[i] = jsonSoupEntryIds.getLong(i);
}
// Run retrieve
JSONArray result = smartStore.retrieve(soupName, soupEntryIds);
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, result);
callbackContext.sendPluginResult(pluginResult);
}
/**
* Native implementation of pgCloseCursor
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void closeCursor(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
Integer cursorId = arg0.getInt(CURSOR_ID);
final SmartStore smartStore = getSmartStore(arg0);
// Drop cursor from storeCursors map
getSmartStoreCursors(smartStore).remove(cursorId);
callbackContext.success();
}
/**
* Native implementation of pgMoveCursorToPageIndex
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void moveCursorToPageIndex(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
Integer cursorId = arg0.getInt(CURSOR_ID);
Integer index = arg0.getInt(INDEX);
final SmartStore smartStore = getSmartStore(arg0);
// Get cursor
final StoreCursor storeCursor = getSmartStoreCursors(smartStore).get(cursorId);
if (storeCursor == null) {
callbackContext.error("Invalid cursor id");
return;
}
// Change page
storeCursor.moveToPageIndex(index);
// Build json result
JSONObject result = storeCursor.getData(smartStore);
// Done
callbackContext.success(result);
}
/**
* Native implementation of pgShowInspector
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void showInspector(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
boolean isGlobal = getIsGlobal(arg0);
Activity activity = cordova.getActivity();
activity.startActivity(SmartStoreInspectorActivity.getIntent(activity, isGlobal, null));
}
/**
* Native implementation of pgSoupExists
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void soupExists(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Check whether the soup exists
boolean exists = smartStore.hasSoup(soupName);
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, exists);
callbackContext.sendPluginResult(pluginResult);
}
/**
* Native implementation of pgUpsertSoupEntries
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void upsertSoupEntries(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray entriesJson = arg0.getJSONArray(ENTRIES);
String externalIdPath = arg0.getString(EXTERNAL_ID_PATH);
List<JSONObject> entries = new ArrayList<JSONObject>();
for (int i = 0; i < entriesJson.length(); i++) {
entries.add(entriesJson.getJSONObject(i));
}
// Run upsert
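// All entries are written inside a single transaction: if any upsert throws, endTransaction()
// without setTransactionSuccessful() rolls the whole batch back.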
smartStore.beginTransaction();
try {
JSONArray results = new JSONArray();
for (JSONObject entry : entries) {
results.put(smartStore.upsert(soupName, entry, externalIdPath, false));
}
smartStore.setTransactionSuccessful();
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, results);
callbackContext.sendPluginResult(pluginResult);
} finally {
smartStore.endTransaction();
}
}
/**
* Native implementation of pgRegisterSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void registerSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.isNull(SOUP_NAME) ? null : arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
List<IndexSpec> indexSpecs = new ArrayList<IndexSpec>();
JSONArray indexesJson = arg0.getJSONArray(INDEXES);
for (int i = 0; i < indexesJson.length(); i++) {
JSONObject indexJson = indexesJson.getJSONObject(i);
indexSpecs.add(new IndexSpec(indexJson.getString(PATH), SmartStore.Type.valueOf(indexJson.getString(TYPE))));
}
// Run register
smartStore.registerSoup(soupName, indexSpecs.toArray(new IndexSpec[0]));
callbackContext.success(soupName);
}
/**
* Native implementation of pgQuerySoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void querySoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONObject querySpecJson = arg0.getJSONObject(QUERY_SPEC);
QuerySpec querySpec = QuerySpec.fromJSON(soupName, querySpecJson);
if (querySpec.queryType == QueryType.smart) {
throw new RuntimeException("Smart queries can only be run through runSmartQuery");
}
// Run query
runQuery(smartStore, querySpec, callbackContext);
}
/**
 * Native implementation of pgRunSmartQuery
* @param args
* @param callbackContext
*/
private void runSmartQuery(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
JSONObject querySpecJson = arg0.getJSONObject(QUERY_SPEC);
final SmartStore smartStore = getSmartStore(arg0);
QuerySpec querySpec = QuerySpec.fromJSON(null, querySpecJson);
if (querySpec.queryType != QueryType.smart) {
throw new RuntimeException("runSmartQuery can only run smart queries");
}
// Run query
runQuery(smartStore, querySpec, callbackContext);
}
/**
 * Helper for querySoup and runSmartQuery
* @param querySpec
* @param callbackContext
* @throws JSONException
*/
private void runQuery(SmartStore smartStore, QuerySpec querySpec,
CallbackContext callbackContext) throws JSONException {
// Build store cursor
final StoreCursor storeCursor = new StoreCursor(smartStore, querySpec);
getSmartStoreCursors(smartStore).put(storeCursor.cursorId, storeCursor);
// Build json result
JSONObject result = storeCursor.getData(smartStore);
// Done
callbackContext.success(result);
}
/**
* Native implementation of pgRemoveSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void removeSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Run remove
smartStore.dropSoup(soupName);
callbackContext.success();
}
/**
* Native implementation of pgClearSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void clearSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Run clear
smartStore.clearSoup(soupName);
callbackContext.success();
}
/**
* Native implementation of pgGetDatabaseSize
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void getDatabaseSize(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
final JSONObject arg0 = args.optJSONObject(0);
final SmartStore smartStore = getSmartStore(arg0);
int databaseSize = smartStore.getDatabaseSize();
callbackContext.success(databaseSize);
}
/**
* Native implementation of pgAlterSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void alterSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
List<IndexSpec> indexSpecs = new ArrayList<IndexSpec>();
JSONArray indexesJson = arg0.getJSONArray(INDEXES);
for (int i = 0; i < indexesJson.length(); i++) {
JSONObject indexJson = indexesJson.getJSONObject(i);
indexSpecs.add(new IndexSpec(indexJson.getString(PATH), SmartStore.Type.valueOf(indexJson.getString(TYPE))));
}
boolean reIndexData = arg0.getBoolean(RE_INDEX_DATA);
// Run alter
smartStore.alterSoup(soupName, indexSpecs.toArray(new IndexSpec[0]), reIndexData);
callbackContext.success(soupName);
}
/**
* Native implementation of pgReIndexSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void reIndexSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
List<String> indexPaths = new ArrayList<String>();
JSONArray indexPathsJson = arg0.getJSONArray(PATHS);
for (int i = 0; i < indexPathsJson.length(); i++) {
indexPaths.add(indexPathsJson.getString(i));
}
// Run reIndex
smartStore.reIndexSoup(soupName, indexPaths.toArray(new String[0]), true);
callbackContext.success(soupName);
}
/**
* Native implementation of pgGetSoupIndexSpecs
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void getSoupIndexSpecs(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Get soup index specs
IndexSpec[] indexSpecs = smartStore.getSoupIndexSpecs(soupName);
JSONArray indexSpecsJson = new JSONArray();
for (int i = 0; i < indexSpecs.length; i++) {
JSONObject indexSpecJson = new JSONObject();
IndexSpec indexSpec = indexSpecs[i];
indexSpecJson.put(PATH, indexSpec.path);
indexSpecJson.put(TYPE, indexSpec.type);
indexSpecsJson.put(indexSpecJson);
}
callbackContext.success(indexSpecsJson);
}
/**
* Return smartstore to use
* @param arg0 first argument passed in plugin call
* @return
*/
private SmartStore getSmartStore(JSONObject arg0) {
boolean isGlobal = getIsGlobal(arg0);
return (isGlobal
? SalesforceSDKManagerWithSmartStore.getInstance().getGlobalSmartStore()
: SalesforceSDKManagerWithSmartStore.getInstance().getSmartStore());
}
/**
* Return the value of the isGlobalStore argument
* @param arg0
* @return
*/
private boolean getIsGlobal(JSONObject arg0) {
return arg0 != null ? arg0.optBoolean(IS_GLOBAL_STORE, false) : false;
}
}
| 1 | 14,749 | Previously you could only order by the field you were searching by. But for full-text search, you can search across all indexed fields, and it didn't make sense not to have a sorting. For backward compatibility, the javascript code uses indexPath as the orderPath when no orderPath is provided. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -38,7 +38,7 @@ export function flushMounts() {
* Apply differences in a given vnode (and it's deep children) to a real DOM Node.
* @param {import('../dom').PreactElement} dom A DOM node to mutate into the shape of a `vnode`
* @param {import('../vnode').VNode} vnode A VNode (with descendants forming a tree) representing
- * the desired DOM structure
+ * the desired DOM structure
* @param {object} context The current context
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {Element} parent ? | 1 | import { ATTR_KEY } from '../constants';
import { isSameNodeType, isNamedNode } from './index';
import { buildComponentFromVNode } from './component';
import { createNode, setAccessor } from '../dom/index';
import { unmountComponent } from './component';
import options from '../options';
import { applyRef } from '../util';
import { removeNode } from '../dom/index';
/**
* Queue of components that have been mounted and are awaiting componentDidMount
* @type {Array<import('../component').Component>}
*/
export const mounts = [];
/** Diff recursion count, used to track the end of the diff cycle. */
export let diffLevel = 0;
/** Global flag indicating if the diff is currently within an SVG */
let isSvgMode = false;
/** Global flag indicating if the diff is performing hydration */
let hydrating = false;
/** Invoke queued componentDidMount lifecycle methods */
export function flushMounts() {
let c, i;
for (i=0; i<mounts.length; ++i) {
c = mounts[i];
if (options.afterMount) options.afterMount(c);
if (c.componentDidMount) c.componentDidMount();
}
mounts.length = 0;
}
/**
* Apply differences in a given vnode (and it's deep children) to a real DOM Node.
* @param {import('../dom').PreactElement} dom A DOM node to mutate into the shape of a `vnode`
* @param {import('../vnode').VNode} vnode A VNode (with descendants forming a tree) representing
* the desired DOM structure
* @param {object} context The current context
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {Element} parent ?
* @param {boolean} componentRoot ?
* @returns {import('../dom').PreactElement} The created/mutated element
* @private
*/
export function diff(dom, vnode, context, mountAll, parent, componentRoot) {
// diffLevel having been 0 here indicates initial entry into the diff (not a subdiff)
if (!diffLevel++) {
// when first starting the diff, check if we're diffing an SVG or within an SVG
isSvgMode = parent!=null && parent.ownerSVGElement!==undefined;
// hydration is indicated by the existing element to be diffed not having a prop cache
hydrating = dom!=null && !(ATTR_KEY in dom);
}
let ret = idiff(dom, vnode, context, mountAll, componentRoot);
// append the element if it's a new parent
if (parent && ret.parentNode!==parent) parent.appendChild(ret);
// diffLevel being reduced to 0 means we're exiting the diff
if (!--diffLevel) {
hydrating = false;
// invoke queued componentDidMount lifecycle methods
if (!componentRoot) flushMounts();
}
return ret;
}
/**
* Internals of `diff()`, separated to allow bypassing diffLevel / mount flushing.
* @param {import('../dom').PreactElement} dom A DOM node to mutate into the shape of a `vnode`
* @param {import('../vnode').VNode} vnode A VNode (with descendants forming a tree) representing the desired DOM structure
* @param {object} context The current context
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {boolean} [componentRoot] ?
* @private
*/
function idiff(dom, vnode, context, mountAll, componentRoot) {
let out = dom,
prevSvgMode = isSvgMode;
// empty values (null, undefined, booleans) render as empty Text nodes
if (vnode==null || typeof vnode==='boolean') vnode = '';
// Fast case: Strings & Numbers create/update Text nodes.
if (typeof vnode==='string' || typeof vnode==='number') {
// update if it's already a Text node:
if (dom && dom.splitText!==undefined && dom.parentNode && (!dom._component || componentRoot)) {
/* istanbul ignore if */ /* Browser quirk that can't be covered: https://github.com/developit/preact/commit/fd4f21f5c45dfd75151bd27b4c217d8003aa5eb9 */
if (dom.nodeValue!=vnode) {
dom.nodeValue = vnode;
}
}
else {
// it wasn't a Text node: replace it with one and recycle the old Element
out = document.createTextNode(vnode);
if (dom) {
if (dom.parentNode) dom.parentNode.replaceChild(out, dom);
recollectNodeTree(dom, true);
}
}
out[ATTR_KEY] = true;
return out;
}
// If the VNode represents a Component, perform a component diff:
let vnodeName = vnode.nodeName;
if (typeof vnodeName==='function') {
return buildComponentFromVNode(dom, vnode, context, mountAll);
}
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvgMode = vnodeName==='svg' ? true : vnodeName==='foreignObject' ? false : isSvgMode;
// If there's no existing element or it's the wrong type, create a new one:
vnodeName = String(vnodeName);
if (!dom || !isNamedNode(dom, vnodeName)) {
out = createNode(vnodeName, isSvgMode);
if (dom) {
// move children into the replacement node
while (dom.firstChild) out.appendChild(dom.firstChild);
// if the previous Element was mounted into the DOM, replace it inline
if (dom.parentNode) dom.parentNode.replaceChild(out, dom);
// recycle the old element (skips non-Element node types)
recollectNodeTree(dom, true);
}
}
let fc = out.firstChild,
props = out[ATTR_KEY],
vchildren = vnode.children;
if (props==null) {
props = out[ATTR_KEY] = {};
for (let a=out.attributes, i=a.length; i--; ) props[a[i].name] = a[i].value;
}
// Optimization: fast-path for elements containing a single TextNode:
if (!hydrating && vchildren && vchildren.length===1 && typeof vchildren[0]==='string' && fc!=null && fc.splitText!==undefined && fc.nextSibling==null) {
if (fc.nodeValue!=vchildren[0]) {
fc.nodeValue = vchildren[0];
}
}
// otherwise, if there are existing or new children, diff them:
else if (vchildren && vchildren.length || fc!=null) {
innerDiffNode(out, vchildren, context, mountAll, hydrating || props.dangerouslySetInnerHTML!=null);
}
// Apply attributes/props from VNode to the DOM Element:
diffAttributes(out, vnode.attributes, props);
// restore previous SVG mode: (in case we're exiting an SVG namespace)
isSvgMode = prevSvgMode;
return out;
}
/**
* Apply child and attribute changes between a VNode and a DOM Node to the DOM.
* @param {import('../dom').PreactElement} dom Element whose children should be compared & mutated
* @param {Array<import('../vnode').VNode>} vchildren Array of VNodes to compare to `dom.childNodes`
* @param {object} context Implicitly descendant context object (from most
* recent `getChildContext()`)
* @param {boolean} mountAll Whether or not to immediately mount all components
* @param {boolean} isHydrating if `true`, consumes externally created elements
* similar to hydration
*/
function innerDiffNode(dom, vchildren, context, mountAll, isHydrating) {
let originalChildren = dom.childNodes,
children = [],
keyed = {},
keyedLen = 0,
min = 0,
len = originalChildren.length,
childrenLen = 0,
vlen = vchildren ? vchildren.length : 0,
j, c, f, vchild, child;
// Build up a map of keyed children and an Array of unkeyed children:
if (len!==0) {
for (let i=0; i<len; i++) {
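// A child is treated as keyed when it (or its owning component) carries a key; the rest are only
// reusable if they have a prop cache, are text nodes (skipping whitespace-only ones during
// hydration), or when hydrating externally created elements.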
let child = originalChildren[i],
props = child[ATTR_KEY],
key = vlen && props ? child._component ? child._component.__key : props.key : null;
if (key!=null) {
keyedLen++;
keyed[key] = child;
}
else if (props || (child.splitText!==undefined ? (isHydrating ? child.nodeValue.trim() : true) : isHydrating)) {
children[childrenLen++] = child;
}
}
}
if (vlen!==0) {
for (let i=0; i<vlen; i++) {
vchild = vchildren[i];
child = null;
// attempt to find a node based on key matching
let key = vchild.key;
if (key!=null) {
if (keyedLen && keyed[key]!==undefined) {
child = keyed[key];
keyed[key] = undefined;
keyedLen--;
}
}
// attempt to pluck a node of the same type from the existing children
else if (min<childrenLen) {
for (j=min; j<childrenLen; j++) {
if (children[j]!==undefined && isSameNodeType(c = children[j], vchild, isHydrating)) {
child = c;
children[j] = undefined;
if (j===childrenLen-1) childrenLen--;
if (j===min) min++;
break;
}
}
}
// morph the matched/found/created DOM child to match vchild (deep)
child = idiff(child, vchild, context, mountAll);
f = originalChildren[i];
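// Move the diffed child into position: append when this slot is empty, drop the stale node when
// the child already sits immediately after it, otherwise insert the child before the current occupant.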
if (child && child!==dom && child!==f) {
if (f==null) {
dom.appendChild(child);
}
else if (child===f.nextSibling) {
removeNode(f);
}
else {
dom.insertBefore(child, f);
}
}
}
}
// remove unused keyed children:
if (keyedLen) {
for (let i in keyed) if (keyed[i]!==undefined) recollectNodeTree(keyed[i], false);
}
// remove orphaned unkeyed children:
while (min<=childrenLen) {
if ((child = children[childrenLen--])!==undefined) recollectNodeTree(child, false);
}
}
/**
* Recursively recycle (or just unmount) a node and its descendants.
* @param {import('../dom').PreactElement} node DOM node to start
* unmount/removal from
* @param {boolean} [unmountOnly=false] If `true`, only triggers unmount
* lifecycle, skips removal
*/
export function recollectNodeTree(node, unmountOnly) {
let component = node._component;
if (component) {
// if node is owned by a Component, unmount that component (ends up recursing back here)
unmountComponent(component);
}
else {
// If the node's VNode had a ref function, invoke it with null here.
// (this is part of the React spec, and smart for unsetting references)
if (node[ATTR_KEY]!=null) applyRef(node[ATTR_KEY].ref, null);
if (unmountOnly===false || node[ATTR_KEY]==null) {
removeNode(node);
}
removeChildren(node);
}
}
/**
* Recollect/unmount all children.
* - we use .lastChild here because it causes less reflow than .firstChild
* - it's also cheaper than accessing the .childNodes Live NodeList
*/
export function removeChildren(node) {
node = node.lastChild;
while (node) {
let next = node.previousSibling;
recollectNodeTree(node, true);
node = next;
}
}
/**
* Apply differences in attributes from a VNode to the given DOM Element.
* @param {import('../dom').PreactElement} dom Element with attributes to diff `attrs` against
* @param {object} attrs The desired end-state key-value attribute pairs
* @param {object} old Current/previous attributes (from previous VNode or
* element's prop cache)
*/
function diffAttributes(dom, attrs, old) {
let name;
// remove attributes no longer present on the vnode by setting them to undefined
for (name in old) {
if (!(attrs && attrs[name]!=null) && old[name]!=null) {
setAccessor(dom, name, old[name], old[name] = undefined, isSvgMode);
}
}
// add new & update changed attributes
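// value and checked are compared against the live DOM property rather than the cached prop,
// because user interaction can change them without going through the virtual DOM.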
for (name in attrs) {
if (name!=='children' && name!=='innerHTML' && (!(name in old) || attrs[name]!==(name==='value' || name==='checked' ? dom[name] : old[name]))) {
setAccessor(dom, name, old[name], old[name] = attrs[name], isSvgMode);
}
}
}
| 1 | 12,288 | VIM didn't like your whitespace. | preactjs-preact | js |
@@ -3,14 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
-using System;
-using System.Collections.Generic;
-using System.IO;
-using System.Linq;
-using System.Reflection;
-using System.Threading;
-using System.Threading.Tasks;
-using Datadog.Trace.TestHelpers;
+using Datadog.Trace.ClrProfiler;
using Xunit;
using Xunit.Abstractions;
using Xunit.Sdk; | 1 | // <copyright file="CustomTestFramework.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
using Xunit.Sdk;
[assembly: TestFramework("Datadog.Trace.ClrProfiler.IntegrationTests.CustomTestFramework", "Datadog.Trace.ClrProfiler.IntegrationTests")]
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
public class CustomTestFramework : XunitTestFramework
{
public CustomTestFramework(IMessageSink messageSink)
: base(messageSink)
{
var targetPath = GetProfilerTargetFolder();
if (targetPath != null)
{
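// Overwrite the assembly containing the Instrumentation type inside the profiler's
// target-framework folder with the locally built one, so the build instrumented for code
// coverage is the one the profiler actually loads.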
var file = typeof(Instrumentation).Assembly.Location;
var destination = Path.Combine(targetPath, Path.GetFileName(file));
File.Copy(file, destination, true);
messageSink.OnMessage(new DiagnosticMessage("Replaced {0} with {1} to setup code coverage", destination, file));
return;
}
var message = "Could not find the target framework directory";
messageSink.OnMessage(new DiagnosticMessage(message));
throw new DirectoryNotFoundException(message);
}
internal static string GetProfilerTargetFolder()
{
var targetFrameworkDirectory = GetTargetFrameworkDirectory();
var paths = EnvironmentHelper.GetProfilerPathCandidates(null).ToArray();
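// Probe every candidate profiler location for a subfolder matching the TFM the tests are
// running under and use the first one that exists on disk.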
foreach (var path in paths)
{
var baseDirectory = Path.GetDirectoryName(path);
var finalDirectory = Path.Combine(baseDirectory, targetFrameworkDirectory);
if (Directory.Exists(finalDirectory))
{
return finalDirectory;
}
}
return null;
}
protected override ITestFrameworkExecutor CreateExecutor(AssemblyName assemblyName)
{
return new CustomExecutor(assemblyName, SourceInformationProvider, DiagnosticMessageSink);
}
private static string GetTargetFrameworkDirectory()
{
// The conditions look weird, but it seems like _OR_GREATER is not supported yet in all environments
// We can trim all the additional conditions when this is fixed
#if NETCOREAPP3_1_OR_GREATER || NETCOREAPP3_1 || NET50
return "netcoreapp3.1";
#elif NETCOREAPP || NETSTANDARD
return "netstandard2.0";
#elif NET461_OR_GREATER || NET461 || NET47 || NET471 || NET472 || NET48
return "net461";
#elif NET45_OR_GREATER || NET45 || NET451 || NET452 || NET46
return "net45";
#else
#error Unexpected TFM
#endif
}
private class CustomExecutor : XunitTestFrameworkExecutor
{
public CustomExecutor(AssemblyName assemblyName, ISourceInformationProvider sourceInformationProvider, IMessageSink diagnosticMessageSink)
: base(assemblyName, sourceInformationProvider, diagnosticMessageSink)
{
}
protected override async void RunTestCases(IEnumerable<IXunitTestCase> testCases, IMessageSink executionMessageSink, ITestFrameworkExecutionOptions executionOptions)
{
using (var assemblyRunner = new CustomAssemblyRunner(TestAssembly, testCases, DiagnosticMessageSink, executionMessageSink, executionOptions))
{
await assemblyRunner.RunAsync();
}
}
}
private class CustomAssemblyRunner : XunitTestAssemblyRunner
{
public CustomAssemblyRunner(ITestAssembly testAssembly, IEnumerable<IXunitTestCase> testCases, IMessageSink diagnosticMessageSink, IMessageSink executionMessageSink, ITestFrameworkExecutionOptions executionOptions)
: base(testAssembly, testCases, diagnosticMessageSink, executionMessageSink, executionOptions)
{
}
protected override Task<RunSummary> RunTestCollectionAsync(IMessageBus messageBus, ITestCollection testCollection, IEnumerable<IXunitTestCase> testCases, CancellationTokenSource cancellationTokenSource)
{
return new CustomTestCollectionRunner(testCollection, testCases, DiagnosticMessageSink, messageBus, TestCaseOrderer, new ExceptionAggregator(Aggregator), cancellationTokenSource).RunAsync();
}
}
private class CustomTestCollectionRunner : XunitTestCollectionRunner
{
private readonly IMessageSink _diagnosticMessageSink;
public CustomTestCollectionRunner(ITestCollection testCollection, IEnumerable<IXunitTestCase> testCases, IMessageSink diagnosticMessageSink, IMessageBus messageBus, ITestCaseOrderer testCaseOrderer, ExceptionAggregator aggregator, CancellationTokenSource cancellationTokenSource)
: base(testCollection, testCases, diagnosticMessageSink, messageBus, testCaseOrderer, aggregator, cancellationTokenSource)
{
_diagnosticMessageSink = diagnosticMessageSink;
}
protected override Task<RunSummary> RunTestClassAsync(ITestClass testClass, IReflectionTypeInfo @class, IEnumerable<IXunitTestCase> testCases)
{
return new CustomTestClassRunner(testClass, @class, testCases, _diagnosticMessageSink, MessageBus, TestCaseOrderer, new ExceptionAggregator(Aggregator), CancellationTokenSource, CollectionFixtureMappings)
.RunAsync();
}
}
private class CustomTestClassRunner : XunitTestClassRunner
{
public CustomTestClassRunner(ITestClass testClass, IReflectionTypeInfo @class, IEnumerable<IXunitTestCase> testCases, IMessageSink diagnosticMessageSink, IMessageBus messageBus, ITestCaseOrderer testCaseOrderer, ExceptionAggregator aggregator, CancellationTokenSource cancellationTokenSource, IDictionary<Type, object> collectionFixtureMappings)
: base(testClass, @class, testCases, diagnosticMessageSink, messageBus, testCaseOrderer, aggregator, cancellationTokenSource, collectionFixtureMappings)
{
}
protected override Task<RunSummary> RunTestMethodAsync(ITestMethod testMethod, IReflectionMethodInfo method, IEnumerable<IXunitTestCase> testCases, object[] constructorArguments)
{
return new CustomTestMethodRunner(testMethod, this.Class, method, testCases, this.DiagnosticMessageSink, this.MessageBus, new ExceptionAggregator(this.Aggregator), this.CancellationTokenSource, constructorArguments)
.RunAsync();
}
}
private class CustomTestMethodRunner : XunitTestMethodRunner
{
private readonly IMessageSink _diagnosticMessageSink;
public CustomTestMethodRunner(ITestMethod testMethod, IReflectionTypeInfo @class, IReflectionMethodInfo method, IEnumerable<IXunitTestCase> testCases, IMessageSink diagnosticMessageSink, IMessageBus messageBus, ExceptionAggregator aggregator, CancellationTokenSource cancellationTokenSource, object[] constructorArguments)
: base(testMethod, @class, method, testCases, diagnosticMessageSink, messageBus, aggregator, cancellationTokenSource, constructorArguments)
{
_diagnosticMessageSink = diagnosticMessageSink;
}
protected override async Task<RunSummary> RunTestCaseAsync(IXunitTestCase testCase)
{
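// Emit STARTED/SUCCESS/FAILURE diagnostics around every test case so a hang or failure can be
// traced back to a specific test and its parameters.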
var parameters = string.Empty;
if (testCase.TestMethodArguments != null)
{
parameters = string.Join(", ", testCase.TestMethodArguments.Select(a => a?.ToString() ?? "null"));
}
var test = $"{TestMethod.TestClass.Class.Name}.{TestMethod.Method.Name}({parameters})";
_diagnosticMessageSink.OnMessage(new DiagnosticMessage($"STARTED: {test}"));
try
{
var result = await base.RunTestCaseAsync(testCase);
var status = result.Failed > 0 ? "FAILURE" : "SUCCESS";
_diagnosticMessageSink.OnMessage(new DiagnosticMessage($"{status}: {test} ({result.Time}s)"));
return result;
}
catch (Exception ex)
{
_diagnosticMessageSink.OnMessage(new DiagnosticMessage($"ERROR: {test} ({ex.Message})"));
throw;
}
}
}
}
}
| 1 | 21,524 | Are you sure the namespace should change? | DataDog-dd-trace-dotnet | .cs |
@@ -12,7 +12,7 @@ namespace MvvmCross.ViewModels
void Initialize();
- void Startup(object hint);
+ object Startup(object hint);
void Reset();
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using MvvmCross.Plugin;
namespace MvvmCross.ViewModels
{
public interface IMvxApplication : IMvxViewModelLocatorCollection
{
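// Application lifecycle surface: plugin loading, one-time initialization, startup with an optional hint object, and reset.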
void LoadPlugins(IMvxPluginManager pluginManager);
void Initialize();
void Startup(object hint);
void Reset();
}
public interface IMvxApplication<THint> : IMvxApplication
{
THint Startup(THint hint);
}
}
| 1 | 14,337 | @martijn00 I'm not sure why we're keeping the object parameter and return type since this can be done by using MvxApplication<THint> with THint set to object | MvvmCross-MvvmCross | .cs |
@@ -31,6 +31,7 @@ import com.pingcap.tikv.meta.TiColumnInfo.InternalTypeHolder;
// https://dev.mysql.com/doc/refman/8.0/en/time.html
public class TimeType extends DataType {
+ public static final TimeType TIME = new TimeType(MySQLType.TypeDuration);
public static final MySQLType[] subTypes = new MySQLType[] {MySQLType.TypeDuration};
protected static final long NANOSECOND = 1;
protected static final long MICROSECOND = 1000 * NANOSECOND; | 1 | /*
*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.pingcap.tikv.types;
import com.pingcap.tidb.tipb.ExprType;
import com.pingcap.tikv.codec.Codec;
import com.pingcap.tikv.codec.Codec.IntegerCodec;
import com.pingcap.tikv.codec.CodecDataInput;
import com.pingcap.tikv.codec.CodecDataOutput;
import com.pingcap.tikv.exception.ConvertNotSupportException;
import com.pingcap.tikv.exception.ConvertOverflowException;
import com.pingcap.tikv.exception.TypeException;
import com.pingcap.tikv.meta.Collation;
import com.pingcap.tikv.meta.TiColumnInfo.InternalTypeHolder;
// https://dev.mysql.com/doc/refman/8.0/en/time.html
public class TimeType extends DataType {
public static final MySQLType[] subTypes = new MySQLType[] {MySQLType.TypeDuration};
protected static final long NANOSECOND = 1;
protected static final long MICROSECOND = 1000 * NANOSECOND;
protected static final long MILLISECOND = 1000 * MICROSECOND;
protected static final long SECOND = 1000 * MILLISECOND;
protected static final long MINUTE = 60 * SECOND;
protected static final long HOUR = 60 * MINUTE;
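// Time-unit sizes expressed in nanoseconds.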
@SuppressWarnings("unused")
protected TimeType(InternalTypeHolder holder) {
super(holder);
}
@SuppressWarnings("unused")
protected TimeType(MySQLType type, int flag, int len, int decimal) {
super(type, flag, len, decimal, "", Collation.DEF_COLLATION_CODE);
}
@SuppressWarnings("unused")
protected TimeType(MySQLType tp) {
super(tp);
}
@Override
protected Object decodeNotNull(int flag, CodecDataInput cdi) {
if (flag == Codec.VARINT_FLAG) {
return IntegerCodec.readVarLong(cdi);
} else if (flag == Codec.DURATION_FLAG) {
return IntegerCodec.readLong(cdi);
}
throw new TypeException("Invalid TimeType flag: " + flag);
}
@Override
protected Object doConvertToTiDBType(Object value)
throws ConvertNotSupportException, ConvertOverflowException {
throw new ConvertNotSupportException(value.getClass().getName(), this.getClass().getName());
}
@Override
protected void encodeKey(CodecDataOutput cdo, Object value) {
IntegerCodec.writeDuration(cdo, Converter.convertToLong(value));
}
@Override
protected void encodeValue(CodecDataOutput cdo, Object value) {
// per tidb's implementation, comparable is not needed.
encodeKey(cdo, value);
}
@Override
protected void encodeProto(CodecDataOutput cdo, Object value) {
// in TiDB, a duration is first flattened into an int64 and then encoded.
if (value instanceof Long) {
IntegerCodec.writeLong(cdo, (long) value);
} else {
long val = Converter.convertStrToDuration(Converter.convertToString(value));
IntegerCodec.writeLong(cdo, val);
}
}
@Override
public ExprType getProtoExprType() {
return ExprType.MysqlDuration;
}
@Override
public Object getOriginDefaultValueNonNull(String value, long version) {
return Converter.convertStrToDuration(value);
}
}
| 1 | 10,686 | why do we create a time type here? | pingcap-tispark | java |
@@ -1,4 +1,4 @@
-define(["jQuery", "datetime", "loading", "libraryMenu", "listViewStyle", "paper-icon-button-light"], function ($, datetime, loading, libraryMenu) {
+define(["jQuery", "datetime", "loading", "libraryMenu", "css!components/listview/listview", "paper-icon-button-light"], function ($, datetime, loading, libraryMenu) {
"use strict";
function populateRatings(allParentalRatings, page) { | 1 | define(["jQuery", "datetime", "loading", "libraryMenu", "listViewStyle", "paper-icon-button-light"], function ($, datetime, loading, libraryMenu) {
"use strict";
function populateRatings(allParentalRatings, page) {
var html = "";
html += "<option value=''></option>";
var i;
var length;
var rating;
var ratings = [];
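// Consecutive ratings sharing the same numeric value are merged into one option whose label
// joins the individual names with "/".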
for (i = 0, length = allParentalRatings.length; i < length; i++) {
rating = allParentalRatings[i];
if (ratings.length) {
var lastRating = ratings[ratings.length - 1];
if (lastRating.Value === rating.Value) {
lastRating.Name += "/" + rating.Name;
continue;
}
}
ratings.push({
Name: rating.Name,
Value: rating.Value
});
}
for (i = 0, length = ratings.length; i < length; i++) {
rating = ratings[i];
html += "<option value='" + rating.Value + "'>" + rating.Name + "</option>";
}
$("#selectMaxParentalRating", page).html(html);
}
function loadUnratedItems(page, user) {
var items = [{
name: Globalize.translate("OptionBlockBooks"),
value: "Book"
}, {
name: Globalize.translate("OptionBlockChannelContent"),
value: "ChannelContent"
}, {
name: Globalize.translate("OptionBlockLiveTvChannels"),
value: "LiveTvChannel"
}, {
name: Globalize.translate("OptionBlockMovies"),
value: "Movie"
}, {
name: Globalize.translate("OptionBlockMusic"),
value: "Music"
}, {
name: Globalize.translate("OptionBlockTrailers"),
value: "Trailer"
}, {
name: Globalize.translate("OptionBlockTvShows"),
value: "Series"
}];
var html = "";
html += '<h3 class="checkboxListLabel">' + Globalize.translate("HeaderBlockItemsWithNoRating") + "</h3>";
html += '<div class="checkboxList paperList checkboxList-paperList">';
for (var i = 0, length = items.length; i < length; i++) {
var item = items[i];
var checkedAttribute = -1 != user.Policy.BlockUnratedItems.indexOf(item.value) ? ' checked="checked"' : "";
html += '<label><input type="checkbox" is="emby-checkbox" class="chkUnratedItem" data-itemtype="' + item.value + '"' + checkedAttribute + "><span>" + item.name + "</span></label>";
}
html += "</div>";
$(".blockUnratedItems", page).html(html).trigger("create");
}
function loadUser(page, user, allParentalRatings) {
page.querySelector(".username").innerHTML = user.Name;
libraryMenu.setTitle(user.Name);
loadUnratedItems(page, user);
loadBlockedTags(page, user.Policy.BlockedTags);
populateRatings(allParentalRatings, page);
var ratingValue = "";
if (user.Policy.MaxParentalRating) {
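// Walk the rating list and keep the last value that does not exceed the user's configured maximum.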
for (var i = 0, length = allParentalRatings.length; i < length; i++) {
var rating = allParentalRatings[i];
if (user.Policy.MaxParentalRating >= rating.Value) {
ratingValue = rating.Value;
}
}
}
$("#selectMaxParentalRating", page).val(ratingValue);
if (user.Policy.IsAdministrator) {
$(".accessScheduleSection", page).hide();
} else {
$(".accessScheduleSection", page).show();
}
renderAccessSchedule(page, user.Policy.AccessSchedules || []);
loading.hide();
}
function loadBlockedTags(page, tags) {
var html = tags.map(function (h) {
var li = '<div class="listItem">';
li += '<div class="listItemBody">';
li += '<h3 class="listItemBodyText">';
li += h;
li += "</h3>";
li += "</div>";
li += '<button type="button" is="paper-icon-button-light" class="blockedTag btnDeleteTag listItemButton" data-tag="' + h + '"><i class="material-icons">delete</i></button>';
return li += "</div>";
}).join("");
if (html) {
html = '<div class="paperList">' + html + "</div>";
}
var elem = $(".blockedTags", page).html(html).trigger("create");
$(".btnDeleteTag", elem).on("click", function () {
var tag = this.getAttribute("data-tag");
var newTags = tags.filter(function (t) {
return t != tag;
});
loadBlockedTags(page, newTags);
});
}
function deleteAccessSchedule(page, schedules, index) {
schedules.splice(index, 1);
renderAccessSchedule(page, schedules);
}
function renderAccessSchedule(page, schedules) {
var html = "";
var index = 0;
html += schedules.map(function (a) {
var itemHtml = "";
itemHtml += '<div class="liSchedule listItem" data-day="' + a.DayOfWeek + '" data-start="' + a.StartHour + '" data-end="' + a.EndHour + '">';
itemHtml += '<div class="listItemBody two-line">';
itemHtml += '<h3 class="listItemBodyText">';
itemHtml += Globalize.translate("Option" + a.DayOfWeek);
itemHtml += "</h3>";
itemHtml += '<div class="listItemBodyText secondary">' + getDisplayTime(a.StartHour) + " - " + getDisplayTime(a.EndHour) + "</div>";
itemHtml += "</div>";
itemHtml += '<button type="button" is="paper-icon-button-light" class="btnDelete listItemButton" data-index="' + index + '"><i class="material-icons">delete</i></button>';
itemHtml += "</div>";
index++;
return itemHtml;
}).join("");
var accessScheduleList = page.querySelector(".accessScheduleList");
accessScheduleList.innerHTML = html;
$(".btnDelete", accessScheduleList).on("click", function () {
deleteAccessSchedule(page, schedules, parseInt(this.getAttribute("data-index")));
});
}
function onSaveComplete(page) {
loading.hide();
require(["toast"], function (toast) {
toast(Globalize.translate("SettingsSaved"));
});
}
function saveUser(user, page) {
user.Policy.MaxParentalRating = $("#selectMaxParentalRating", page).val() || null;
user.Policy.BlockUnratedItems = $(".chkUnratedItem", page).get().filter(function (i) {
return i.checked;
}).map(function (i) {
return i.getAttribute("data-itemtype");
});
user.Policy.AccessSchedules = getSchedulesFromPage(page);
user.Policy.BlockedTags = getBlockedTagsFromPage(page);
ApiClient.updateUserPolicy(user.Id, user.Policy).then(function () {
onSaveComplete(page);
});
}
function getDisplayTime(hours) {
var minutes = 0;
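// Hours can be fractional (e.g. 21.5 for 21:30); turn the fractional part into minutes for display.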
var pct = hours % 1;
if (pct) {
minutes = parseInt(60 * pct);
}
return datetime.getDisplayTime(new Date(2000, 1, 1, hours, minutes, 0, 0));
}
function showSchedulePopup(page, schedule, index) {
schedule = schedule || {};
require(["components/accessschedule/accessschedule"], function (accessschedule) {
accessschedule.show({
schedule: schedule
}).then(function (updatedSchedule) {
var schedules = getSchedulesFromPage(page);
if (-1 == index) {
index = schedules.length;
}
schedules[index] = updatedSchedule;
renderAccessSchedule(page, schedules);
});
});
}
function getSchedulesFromPage(page) {
return $(".liSchedule", page).map(function () {
return {
DayOfWeek: this.getAttribute("data-day"),
StartHour: this.getAttribute("data-start"),
EndHour: this.getAttribute("data-end")
};
}).get();
}
function getBlockedTagsFromPage(page) {
return $(".blockedTag", page).map(function () {
return this.getAttribute("data-tag");
}).get();
}
function showBlockedTagPopup(page) {
require(["prompt"], function (prompt) {
prompt({
label: Globalize.translate("LabelTag")
}).then(function (value) {
var tags = getBlockedTagsFromPage(page);
if (-1 == tags.indexOf(value)) {
tags.push(value);
loadBlockedTags(page, tags);
}
});
});
}
window.UserParentalControlPage = {
onSubmit: function () {
var page = $(this).parents(".page");
loading.show();
var userId = getParameterByName("userId");
ApiClient.getUser(userId).then(function (result) {
saveUser(result, page);
});
return false;
}
};
$(document).on("pageinit", "#userParentalControlPage", function () {
var page = this;
$(".btnAddSchedule", page).on("click", function () {
showSchedulePopup(page, {}, -1);
});
$(".btnAddBlockedTag", page).on("click", function () {
showBlockedTagPopup(page);
});
$(".userParentalControlForm").off("submit", UserParentalControlPage.onSubmit).on("submit", UserParentalControlPage.onSubmit);
}).on("pageshow", "#userParentalControlPage", function () {
var page = this;
loading.show();
var userId = getParameterByName("userId");
var promise1 = ApiClient.getUser(userId);
var promise2 = ApiClient.getParentalRatings();
Promise.all([promise1, promise2]).then(function (responses) {
loadUser(page, responses[0], responses[1]);
});
});
});
| 1 | 12,938 | Does this style actually need to get loaded in all of these components? | jellyfin-jellyfin-web | js |
@@ -30,6 +30,9 @@ import io.servicecomb.foundation.ssl.SSLCustom;
import io.servicecomb.foundation.ssl.SSLOption;
import io.servicecomb.foundation.ssl.SSLOptionFactory;
import io.servicecomb.foundation.vertx.VertxTLSBuilder;
+import io.servicecomb.transport.rest.vertx.accesslog.AccessLogConfiguration;
+import io.servicecomb.transport.rest.vertx.accesslog.impl.AccessLogHandlerImpl;
+import io.servicecomb.transport.rest.vertx.accesslog.parser.impl.DefaultAccessLogPatternParser;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Context;
import io.vertx.core.Future; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.transport.rest.vertx;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.servicecomb.core.Endpoint;
import io.servicecomb.core.transport.AbstractTransport;
import io.servicecomb.foundation.common.net.URIEndpointObject;
import io.servicecomb.foundation.common.utils.SPIServiceUtils;
import io.servicecomb.foundation.ssl.SSLCustom;
import io.servicecomb.foundation.ssl.SSLOption;
import io.servicecomb.foundation.ssl.SSLOptionFactory;
import io.servicecomb.foundation.vertx.VertxTLSBuilder;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServer;
import io.vertx.core.http.HttpServerOptions;
import io.vertx.ext.web.Router;
public class RestServerVerticle extends AbstractVerticle {
private static final Logger LOGGER = LoggerFactory.getLogger(RestServerVerticle.class);
private static final String SSL_KEY = "rest.provider";
private static final int ACCEPT_BACKLOG = 2048;
private static final int SEND_BUFFER_SIZE = 4096;
private static final int RECEIVE_BUFFER_SIZE = 4096;
private Endpoint endpoint;
private URIEndpointObject endpointObject;
@Override
public void init(Vertx vertx, Context context) {
super.init(vertx, context);
this.endpoint = (Endpoint) context.config().getValue(AbstractTransport.ENDPOINT_KEY);
this.endpointObject = (URIEndpointObject) endpoint.getAddress();
}
@Override
public void start(Future<Void> startFuture) throws Exception {
super.start();
// If no local listen address is configured, there is no need to listen; this instance only acts as a client.
if (endpointObject == null) {
LOGGER.warn("rest listen address is not configured, will not start.");
startFuture.complete();
return;
}
Router mainRouter = Router.router(vertx);
initDispatcher(mainRouter);
HttpServer httpServer = createHttpServer();
httpServer.requestHandler(mainRouter::accept);
startListen(httpServer, startFuture);
}
private void initDispatcher(Router mainRouter) {
List<VertxHttpDispatcher> dispatchers = SPIServiceUtils.getSortedService(VertxHttpDispatcher.class);
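// Dispatchers are discovered via SPI and mounted on the router in their sorted (priority) order.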
for (VertxHttpDispatcher dispatcher : dispatchers) {
LOGGER.info("init vertx http dispatcher {}", dispatcher.getClass().getName());
dispatcher.init(mainRouter);
}
}
private void startListen(HttpServer server, Future<Void> startFuture) {
server.listen(endpointObject.getPort(), endpointObject.getHostOrIp(), ar -> {
if (ar.succeeded()) {
LOGGER.info("rest listen success. address={}:{}",
endpointObject.getHostOrIp(),
ar.result().actualPort());
startFuture.complete();
return;
}
String msg = String.format("rest listen failed, address=%s:%d",
endpointObject.getHostOrIp(),
endpointObject.getPort());
LOGGER.error(msg, ar.cause());
startFuture.fail(ar.cause());
});
}
private HttpServer createHttpServer() {
HttpServerOptions serverOptions = createDefaultHttpServerOptions();
return vertx.createHttpServer(serverOptions);
}
private HttpServerOptions createDefaultHttpServerOptions() {
HttpServerOptions serverOptions = new HttpServerOptions();
serverOptions.setAcceptBacklog(ACCEPT_BACKLOG);
serverOptions.setSendBufferSize(SEND_BUFFER_SIZE);
serverOptions.setReceiveBufferSize(RECEIVE_BUFFER_SIZE);
serverOptions.setUsePooledBuffers(true);
serverOptions.setIdleTimeout(TransportConfig.getConnectionIdleTimeoutInSeconds());
if (endpointObject.isSslEnabled()) {
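// Prefer a custom SSLOptionFactory configured for the "rest.provider" key; otherwise build the
// SSL options from the yaml configuration.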
SSLOptionFactory factory =
SSLOptionFactory.createSSLOptionFactory(SSL_KEY, null);
SSLOption sslOption;
if (factory == null) {
sslOption = SSLOption.buildFromYaml(SSL_KEY);
} else {
sslOption = factory.createSSLOption();
}
SSLCustom sslCustom = SSLCustom.createSSLCustom(sslOption.getSslCustomClass());
VertxTLSBuilder.buildNetServerOptions(sslOption, sslCustom, serverOptions);
}
return serverOptions;
}
}
| 1 | 8,030 | import but not used so you did not resolve compile warnings? | apache-servicecomb-java-chassis | java |
@@ -865,15 +865,10 @@ BlockType_t Creature::blockHit(Creature* attacker, CombatType_t combatType, int3
}
if (checkArmor) {
- int32_t armorValue = getArmor();
- if (armorValue > 1) {
- double armorFormula = armorValue * 0.475;
- int32_t armorReduction = static_cast<int32_t>(std::ceil(armorFormula));
- damage -= uniform_random(
- armorReduction,
- armorReduction + static_cast<int32_t>(std::floor(armorFormula))
- );
- } else if (armorValue == 1) {
+ int32_t armor = getArmor();
+ if (armor > 3) {
+ damage -= uniform_random(armor / 2, armor - (armor % 2 + 1));
+ } else if (armor > 0) {
--damage;
}
| 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2016 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "creature.h"
#include "game.h"
#include "monster.h"
#include "configmanager.h"
#include "scheduler.h"
double Creature::speedA = 857.36;
double Creature::speedB = 261.29;
double Creature::speedC = -4795.01;
extern Game g_game;
extern ConfigManager g_config;
extern CreatureEvents* g_creatureEvents;
Creature::Creature() :
localMapCache(), isInternalRemoved(false)
{
referenceCounter = 0;
id = 0;
tile = nullptr;
direction = DIRECTION_SOUTH;
master = nullptr;
lootDrop = true;
skillLoss = true;
health = 1000;
healthMax = 1000;
mana = 0;
lastStep = 0;
lastStepCost = 1;
baseSpeed = 220;
varSpeed = 0;
followCreature = nullptr;
hasFollowPath = false;
eventWalk = 0;
cancelNextWalk = false;
forceUpdateFollowPath = false;
isMapLoaded = false;
isUpdatingPath = false;
attackedCreature = nullptr;
lastHitCreatureId = 0;
blockCount = 0;
blockTicks = 0;
walkUpdateTicks = 0;
creatureCheck = false;
inCheckCreaturesVector = false;
scriptEventsBitField = 0;
hiddenHealth = false;
skull = SKULL_NONE;
onIdleStatus();
}
Creature::~Creature()
{
for (Creature* summon : summons) {
summon->setAttackedCreature(nullptr);
summon->setMaster(nullptr);
summon->decrementReferenceCounter();
}
for (Condition* condition : conditions) {
condition->endCondition(this);
delete condition;
}
}
bool Creature::canSee(const Position& myPos, const Position& pos, int32_t viewRangeX, int32_t viewRangeY)
{
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (pos.z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (Position::getDistanceZ(myPos, pos) > 2) {
return false;
}
}
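// The visible window shifts by one tile per floor of difference, so offset the range check by
// the z distance between the two positions.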
const int_fast32_t offsetz = myPos.getZ() - pos.getZ();
return (pos.getX() >= myPos.getX() - viewRangeX + offsetz) && (pos.getX() <= myPos.getX() + viewRangeX + offsetz)
&& (pos.getY() >= myPos.getY() - viewRangeY + offsetz) && (pos.getY() <= myPos.getY() + viewRangeY + offsetz);
}
bool Creature::canSee(const Position& pos) const
{
return canSee(getPosition(), pos, Map::maxViewportX, Map::maxViewportY);
}
bool Creature::canSeeCreature(const Creature* creature) const
{
if (!canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
void Creature::setSkull(Skulls_t newSkull)
{
skull = newSkull;
g_game.updateCreatureSkull(this);
}
int64_t Creature::getTimeSinceLastMove() const
{
if (lastStep) {
return OTSYS_TIME() - lastStep;
}
return std::numeric_limits<int64_t>::max();
}
int32_t Creature::getWalkDelay(Direction dir) const
{
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration(dir);
return stepDuration - (ct - lastStep);
}
int32_t Creature::getWalkDelay() const
{
//Used for auto-walking
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration() * lastStepCost;
return stepDuration - (ct - lastStep);
}
void Creature::onThink(uint32_t interval)
{
if (!isMapLoaded && useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (followCreature && master != followCreature && !canSeeCreature(followCreature)) {
onCreatureDisappear(followCreature, false);
}
if (attackedCreature && master != attackedCreature && !canSeeCreature(attackedCreature)) {
onCreatureDisappear(attackedCreature, false);
}
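// Every full second of accumulated think time restores one blockCount charge, capped at two.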
blockTicks += interval;
if (blockTicks >= 1000) {
blockCount = std::min<uint32_t>(blockCount + 1, 2);
blockTicks = 0;
}
if (followCreature) {
walkUpdateTicks += interval;
if (forceUpdateFollowPath || walkUpdateTicks >= 2000) {
walkUpdateTicks = 0;
forceUpdateFollowPath = false;
isUpdatingPath = true;
}
}
if (isUpdatingPath) {
isUpdatingPath = false;
goToFollowCreature();
}
//scripting event - onThink
const CreatureEventList& thinkEvents = getCreatureEvents(CREATURE_EVENT_THINK);
for (CreatureEvent* thinkEvent : thinkEvents) {
thinkEvent->executeOnThink(this, interval);
}
}
void Creature::onAttacking(uint32_t interval)
{
if (!attackedCreature) {
return;
}
onAttacked();
attackedCreature->onAttacked();
if (g_game.isSightClear(getPosition(), attackedCreature->getPosition(), true)) {
doAttacking(interval);
}
}
void Creature::onIdleStatus()
{
if (getHealth() > 0) {
damageMap.clear();
lastHitCreatureId = 0;
}
}
void Creature::onWalk()
{
if (getWalkDelay() <= 0) {
Direction dir;
uint32_t flags = FLAG_IGNOREFIELDDAMAGE;
if (getNextStep(dir, flags)) {
ReturnValue ret = g_game.internalMoveCreature(this, dir, flags);
if (ret != RETURNVALUE_NOERROR) {
if (Player* player = getPlayer()) {
player->sendCancelMessage(ret);
player->sendCancelWalk();
}
forceUpdateFollowPath = true;
}
} else {
if (listWalkDir.empty()) {
onWalkComplete();
}
stopEventWalk();
}
}
if (cancelNextWalk) {
listWalkDir.clear();
onWalkAborted();
cancelNextWalk = false;
}
if (eventWalk != 0) {
eventWalk = 0;
addEventWalk();
}
}
void Creature::onWalk(Direction& dir)
{
if (hasCondition(CONDITION_DRUNK)) {
uint32_t r = uniform_random(0, 20);
if (r <= DIRECTION_DIAGONAL_MASK) {
if (r < DIRECTION_DIAGONAL_MASK) {
dir = static_cast<Direction>(r);
}
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_SAY, "Hicks!", false);
}
}
}
bool Creature::getNextStep(Direction& dir, uint32_t&)
{
if (listWalkDir.empty()) {
return false;
}
dir = listWalkDir.front();
listWalkDir.pop_front();
onWalk(dir);
return true;
}
void Creature::startAutoWalk(const std::forward_list<Direction>& listDir)
{
listWalkDir = listDir;
size_t size = 0;
for (auto it = listDir.begin(); it != listDir.end() && size <= 1; ++it) {
size++;
}
addEventWalk(size == 1);
}
void Creature::addEventWalk(bool firstStep)
{
cancelNextWalk = false;
if (getStepSpeed() <= 0) {
return;
}
if (eventWalk != 0) {
return;
}
int64_t ticks = getEventStepTicks(firstStep);
if (ticks <= 0) {
return;
}
// Take first step right away, but still queue the next
if (ticks == 1) {
g_game.checkCreatureWalk(getID());
}
eventWalk = g_scheduler.addEvent(createSchedulerTask(ticks, std::bind(&Game::checkCreatureWalk, &g_game, getID())));
}
void Creature::stopEventWalk()
{
if (eventWalk != 0) {
g_scheduler.stopEvent(eventWalk);
eventWalk = 0;
}
}
void Creature::updateMapCache()
{
Tile* tile;
const Position& myPos = getPosition();
Position pos(0, 0, myPos.z);
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
pos.x = myPos.getX() + x;
pos.y = myPos.getY() + y;
tile = g_game.map.getTile(pos);
updateTileCache(tile, pos);
}
}
}
void Creature::updateTileCache(const Tile* tile, int32_t dx, int32_t dy)
{
if (std::abs(dx) <= maxWalkCacheWidth && std::abs(dy) <= maxWalkCacheHeight) {
localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx] = tile && tile->queryAdd(0, *this, 1, FLAG_PATHFINDING | FLAG_IGNOREFIELDDAMAGE) == RETURNVALUE_NOERROR;
}
}
void Creature::updateTileCache(const Tile* tile, const Position& pos)
{
const Position& myPos = getPosition();
if (pos.z == myPos.z) {
int32_t dx = Position::getOffsetX(pos, myPos);
int32_t dy = Position::getOffsetY(pos, myPos);
updateTileCache(tile, dx, dy);
}
}
int32_t Creature::getWalkCache(const Position& pos) const
{
if (!useCacheMap()) {
return 2;
}
const Position& myPos = getPosition();
if (myPos.z != pos.z) {
return 0;
}
if (pos == myPos) {
return 1;
}
int32_t dx = Position::getOffsetX(pos, myPos);
if (std::abs(dx) <= maxWalkCacheWidth) {
int32_t dy = Position::getOffsetY(pos, myPos);
if (std::abs(dy) <= maxWalkCacheHeight) {
if (localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx]) {
return 1;
} else {
return 0;
}
}
}
//out of range
return 2;
}
void Creature::onAddTileItem(const Tile* tile, const Position& pos)
{
if (isMapLoaded && pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
void Creature::onUpdateTileItem(const Tile* tile, const Position& pos, const Item*,
const ItemType& oldType, const Item*, const ItemType& newType)
{
if (!isMapLoaded) {
return;
}
if (oldType.blockSolid || oldType.blockPathFind || newType.blockPathFind || newType.blockSolid) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item*)
{
if (!isMapLoaded) {
return;
}
if (iType.blockSolid || iType.blockPathFind || iType.isGroundTile()) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onCreatureAppear(Creature* creature, bool)
{
if (creature == this) {
if (useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onRemoveCreature(Creature* creature, bool)
{
onCreatureDisappear(creature, true);
if (creature == this) {
if (master && !master->isRemoved()) {
master->removeSummon(this);
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onCreatureDisappear(const Creature* creature, bool isLogout)
{
if (attackedCreature == creature) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(isLogout);
}
if (followCreature == creature) {
setFollowCreature(nullptr);
onFollowCreatureDisappear(isLogout);
}
}
void Creature::onChangeZone(ZoneType_t zone)
{
if (attackedCreature && zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
if (creature == this) {
lastStep = OTSYS_TIME();
lastStepCost = 1;
if (!teleport) {
if (oldPos.z != newPos.z) {
//floor change extra cost
lastStepCost = 2;
} else if (Position::getDistanceX(newPos, oldPos) >= 1 && Position::getDistanceY(newPos, oldPos) >= 1) {
//diagonal extra cost
lastStepCost = 3;
}
} else {
stopEventWalk();
}
if (!summons.empty()) {
//check if any of our summons is out of range (+/- 2 floors or 30 tiles away)
std::forward_list<Creature*> despawnList;
for (Creature* summon : summons) {
const Position& pos = summon->getPosition();
if (Position::getDistanceZ(newPos, pos) > 2 || (std::max<int32_t>(Position::getDistanceX(newPos, pos), Position::getDistanceY(newPos, pos)) > 30)) {
despawnList.push_front(summon);
}
}
for (Creature* despawnCreature : despawnList) {
g_game.removeCreature(despawnCreature, true);
}
}
if (newTile->getZone() != oldTile->getZone()) {
onChangeZone(getZone());
}
//update map cache
if (isMapLoaded) {
if (teleport || oldPos.z != newPos.z) {
updateMapCache();
} else {
Tile* tile;
const Position& myPos = getPosition();
Position pos;
if (oldPos.y > newPos.y) { //north
//shift y south
for (int32_t y = mapWalkHeight - 1; --y >= 0;) {
memcpy(localMapCache[y + 1], localMapCache[y], sizeof(localMapCache[y]));
}
//update 0
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
tile = g_game.map.getTile(myPos.getX() + x, myPos.getY() - maxWalkCacheHeight, myPos.z);
updateTileCache(tile, x, -maxWalkCacheHeight);
}
} else if (oldPos.y < newPos.y) { // south
//shift y north
for (int32_t y = 0; y <= mapWalkHeight - 2; ++y) {
memcpy(localMapCache[y], localMapCache[y + 1], sizeof(localMapCache[y]));
}
//update mapWalkHeight - 1
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
tile = g_game.map.getTile(myPos.getX() + x, myPos.getY() + maxWalkCacheHeight, myPos.z);
updateTileCache(tile, x, maxWalkCacheHeight);
}
}
if (oldPos.x < newPos.x) { // east
//shift x west
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = 0; x <= mapWalkWidth - 2; ++x) {
localMapCache[y][x] = localMapCache[y][x + 1];
}
}
//update mapWalkWidth - 1
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
tile = g_game.map.getTile(myPos.x + maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(tile, maxWalkCacheWidth, y);
}
} else if (oldPos.x > newPos.x) { // west
//shift x east
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = mapWalkWidth - 1; --x >= 0;) {
localMapCache[y][x + 1] = localMapCache[y][x];
}
}
//update 0
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
tile = g_game.map.getTile(myPos.x - maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(tile, -maxWalkCacheWidth, y);
}
}
updateTileCache(oldTile, oldPos);
}
}
} else {
if (isMapLoaded) {
const Position& myPos = getPosition();
if (newPos.z == myPos.z) {
updateTileCache(newTile, newPos);
}
if (oldPos.z == myPos.z) {
updateTileCache(oldTile, oldPos);
}
}
}
if (creature == followCreature || (creature == this && followCreature)) {
if (hasFollowPath) {
isUpdatingPath = true;
}
if (newPos.z != oldPos.z || !canSee(followCreature->getPosition())) {
onCreatureDisappear(followCreature, false);
}
}
if (creature == attackedCreature || (creature == this && attackedCreature)) {
if (newPos.z != oldPos.z || !canSee(attackedCreature->getPosition())) {
onCreatureDisappear(attackedCreature, false);
} else {
if (hasExtraSwing()) {
//our target is moving, let's see if we can get in a hit
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
if (newTile->getZone() != oldTile->getZone()) {
onAttackedCreatureChangeZone(attackedCreature->getZone());
}
}
}
}
void Creature::onDeath()
{
bool lastHitUnjustified = false;
bool mostDamageUnjustified = false;
Creature* lastHitCreature = g_game.getCreatureByID(lastHitCreatureId);
Creature* lastHitCreatureMaster;
if (lastHitCreature) {
lastHitUnjustified = lastHitCreature->onKilledCreature(this);
lastHitCreatureMaster = lastHitCreature->getMaster();
} else {
lastHitCreatureMaster = nullptr;
}
Creature* mostDamageCreature = nullptr;
const int64_t timeNow = OTSYS_TIME();
const uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
int32_t mostDamage = 0;
std::map<Creature*, uint64_t> experienceMap;
for (const auto& it : damageMap) {
if (Creature* attacker = g_game.getCreatureByID(it.first)) {
CountBlock_t cb = it.second;
if ((cb.total > mostDamage && (timeNow - cb.ticks <= inFightTicks))) {
mostDamage = cb.total;
mostDamageCreature = attacker;
}
if (attacker != this) {
uint64_t gainExp = getGainedExperience(attacker);
if (Player* player = attacker->getPlayer()) {
Party* party = player->getParty();
if (party && party->getLeader() && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
attacker = party->getLeader();
}
}
auto tmpIt = experienceMap.find(attacker);
if (tmpIt == experienceMap.end()) {
experienceMap[attacker] = gainExp;
} else {
tmpIt->second += gainExp;
}
}
}
}
for (const auto& it : experienceMap) {
it.first->onGainExperience(it.second, this);
}
if (mostDamageCreature) {
if (mostDamageCreature != lastHitCreature && mostDamageCreature != lastHitCreatureMaster) {
Creature* mostDamageCreatureMaster = mostDamageCreature->getMaster();
if (lastHitCreature != mostDamageCreatureMaster && (lastHitCreatureMaster == nullptr || mostDamageCreatureMaster != lastHitCreatureMaster)) {
mostDamageUnjustified = mostDamageCreature->onKilledCreature(this, false);
}
}
}
bool droppedCorpse = dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
death(lastHitCreature);
if (master) {
master->removeSummon(this);
}
if (droppedCorpse) {
g_game.removeCreature(this, false);
}
}
bool Creature::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (!lootDrop && getMonster()) {
if (master) {
//scripting event - onDeath
const CreatureEventList& deathEvents = getCreatureEvents(CREATURE_EVENT_DEATH);
for (CreatureEvent* deathEvent : deathEvents) {
deathEvent->executeOnDeath(this, nullptr, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
}
g_game.addMagicEffect(getPosition(), CONST_ME_POFF);
} else {
Item* splash;
switch (getRace()) {
case RACE_VENOM:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_BLOOD);
break;
default:
splash = nullptr;
break;
}
Tile* tile = getTile();
if (splash) {
g_game.internalAddItem(tile, splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(splash);
}
Item* corpse = getCorpse(lastHitCreature, mostDamageCreature);
if (corpse) {
g_game.internalAddItem(tile, corpse, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(corpse);
}
//scripting event - onDeath
for (CreatureEvent* deathEvent : getCreatureEvents(CREATURE_EVENT_DEATH)) {
deathEvent->executeOnDeath(this, corpse, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
if (corpse) {
dropLoot(corpse->getContainer(), lastHitCreature);
}
}
return true;
}
bool Creature::hasBeenAttacked(uint32_t attackerId)
{
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
return false;
}
return (OTSYS_TIME() - it->second.ticks) <= g_config.getNumber(ConfigManager::PZ_LOCKED);
}
Item* Creature::getCorpse(Creature*, Creature*)
{
return Item::CreateItem(getLookCorpse());
}
void Creature::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
int32_t oldHealth = health;
if (healthChange > 0) {
health += std::min<int32_t>(healthChange, getMaxHealth() - health);
} else {
health = std::max<int32_t>(0, health + healthChange);
}
if (sendHealthChange && oldHealth != health) {
g_game.addCreatureHealth(this);
}
}
void Creature::changeMana(int32_t manaChange)
{
if (manaChange > 0) {
mana += std::min<int32_t>(manaChange, getMaxMana() - mana);
} else {
mana = std::max<int32_t>(0, mana + manaChange);
}
}
void Creature::gainHealth(Creature* healer, int32_t healthGain)
{
changeHealth(healthGain);
if (healer) {
healer->onTargetCreatureGainHealth(this, healthGain);
}
}
void Creature::drainHealth(Creature* attacker, int32_t damage)
{
changeHealth(-damage, false);
if (attacker) {
attacker->onAttackedCreatureDrainHealth(this, damage);
}
}
void Creature::drainMana(Creature* attacker, int32_t manaLoss)
{
onAttacked();
changeMana(-manaLoss);
if (attacker) {
addDamagePoints(attacker, manaLoss);
}
}
BlockType_t Creature::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false */, bool checkArmor /* = false */, bool /* field = false */)
{
BlockType_t blockType = BLOCK_NONE;
if (isImmune(combatType)) {
damage = 0;
blockType = BLOCK_IMMUNITY;
} else if (checkDefense || checkArmor) {
bool hasDefense = false;
if (blockCount > 0) {
--blockCount;
hasDefense = true;
}
if (checkDefense && hasDefense) {
int32_t defense = getDefense();
damage -= uniform_random(defense / 2, defense);
if (damage <= 0) {
damage = 0;
blockType = BLOCK_DEFENSE;
checkArmor = false;
}
}
if (checkArmor) {
int32_t armorValue = getArmor();
if (armorValue > 1) {
double armorFormula = armorValue * 0.475;
int32_t armorReduction = static_cast<int32_t>(std::ceil(armorFormula));
damage -= uniform_random(
armorReduction,
armorReduction + static_cast<int32_t>(std::floor(armorFormula))
);
} else if (armorValue == 1) {
--damage;
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
if (hasDefense && blockType != BLOCK_NONE) {
onBlockHit();
}
}
if (attacker) {
attacker->onAttackedCreature(this);
attacker->onAttackedCreatureBlockHit(blockType);
}
onAttacked();
return blockType;
}
bool Creature::setAttackedCreature(Creature* creature)
{
if (creature) {
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
attackedCreature = nullptr;
return false;
}
attackedCreature = creature;
onAttackedCreature(attackedCreature);
attackedCreature->onAttacked();
} else {
attackedCreature = nullptr;
}
for (Creature* summon : summons) {
summon->setAttackedCreature(creature);
}
return true;
}
void Creature::getPathSearchParams(const Creature*, FindPathParams& fpp) const
{
fpp.fullPathSearch = !hasFollowPath;
fpp.clearSight = true;
fpp.maxSearchDist = 12;
fpp.minTargetDist = 1;
fpp.maxTargetDist = 1;
}
void Creature::goToFollowCreature()
{
if (followCreature) {
FindPathParams fpp;
getPathSearchParams(followCreature, fpp);
Monster* monster = getMonster();
if (monster && !monster->getMaster() && (monster->isFleeing() || fpp.maxTargetDist > 1)) {
Direction dir = DIRECTION_NONE;
if (monster->isFleeing()) {
monster->getDistanceStep(followCreature->getPosition(), dir, true);
} else { //maxTargetDist > 1
if (!monster->getDistanceStep(followCreature->getPosition(), dir)) {
// if we can't get anything then let the A* calculate
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
return;
}
}
if (dir != DIRECTION_NONE) {
listWalkDir.clear();
listWalkDir.push_front(dir);
hasFollowPath = true;
startAutoWalk(listWalkDir);
}
} else {
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
}
}
onFollowCreatureComplete(followCreature);
}
bool Creature::setFollowCreature(Creature* creature)
{
if (creature) {
if (followCreature == creature) {
return true;
}
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
followCreature = nullptr;
return false;
}
if (!listWalkDir.empty()) {
listWalkDir.clear();
onWalkAborted();
}
hasFollowPath = false;
forceUpdateFollowPath = false;
followCreature = creature;
isUpdatingPath = true;
} else {
isUpdatingPath = false;
followCreature = nullptr;
}
onFollowCreature(creature);
return true;
}
double Creature::getDamageRatio(Creature* attacker) const
{
uint32_t totalDamage = 0;
uint32_t attackerDamage = 0;
for (const auto& it : damageMap) {
const CountBlock_t& cb = it.second;
totalDamage += cb.total;
if (it.first == attacker->getID()) {
attackerDamage += cb.total;
}
}
if (totalDamage == 0) {
return 0;
}
return (static_cast<double>(attackerDamage) / totalDamage);
}
uint64_t Creature::getGainedExperience(Creature* attacker) const
{
return std::floor(getDamageRatio(attacker) * getLostExperience());
}
void Creature::addDamagePoints(Creature* attacker, int32_t damagePoints)
{
if (damagePoints <= 0) {
return;
}
uint32_t attackerId = attacker->id;
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
CountBlock_t cb;
cb.ticks = OTSYS_TIME();
cb.total = damagePoints;
damageMap[attackerId] = cb;
} else {
it->second.total += damagePoints;
it->second.ticks = OTSYS_TIME();
}
lastHitCreatureId = attackerId;
}
void Creature::onAddCondition(ConditionType_t type)
{
if (type == CONDITION_PARALYZE && hasCondition(CONDITION_HASTE)) {
removeCondition(CONDITION_HASTE);
} else if (type == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
removeCondition(CONDITION_PARALYZE);
}
}
void Creature::onAddCombatCondition(ConditionType_t)
{
//
}
void Creature::onEndCondition(ConditionType_t)
{
//
}
void Creature::onTickCondition(ConditionType_t type, bool& bRemove)
{
const MagicField* field = getTile()->getFieldItem();
if (!field) {
return;
}
switch (type) {
case CONDITION_FIRE:
bRemove = (field->getCombatType() != COMBAT_FIREDAMAGE);
break;
case CONDITION_ENERGY:
bRemove = (field->getCombatType() != COMBAT_ENERGYDAMAGE);
break;
case CONDITION_POISON:
bRemove = (field->getCombatType() != COMBAT_EARTHDAMAGE);
break;
case CONDITION_FREEZING:
bRemove = (field->getCombatType() != COMBAT_ICEDAMAGE);
break;
case CONDITION_DAZZLED:
bRemove = (field->getCombatType() != COMBAT_HOLYDAMAGE);
break;
case CONDITION_CURSED:
bRemove = (field->getCombatType() != COMBAT_DEATHDAMAGE);
break;
case CONDITION_DROWN:
bRemove = (field->getCombatType() != COMBAT_DROWNDAMAGE);
break;
case CONDITION_BLEEDING:
bRemove = (field->getCombatType() != COMBAT_PHYSICALDAMAGE);
break;
default:
break;
}
}
void Creature::onCombatRemoveCondition(Condition* condition)
{
removeCondition(condition);
}
void Creature::onAttacked()
{
//
}
void Creature::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
target->addDamagePoints(this, points);
}
bool Creature::onKilledCreature(Creature* target, bool)
{
if (master) {
master->onKilledCreature(target);
}
//scripting event - onKill
const CreatureEventList& killEvents = getCreatureEvents(CREATURE_EVENT_KILL);
for (CreatureEvent* killEvent : killEvents) {
killEvent->executeOnKill(this, target);
}
return false;
}
void Creature::onGainExperience(uint64_t gainExp, Creature* target)
{
if (gainExp == 0 || !master) {
return;
}
gainExp /= 2;
master->onGainExperience(gainExp, target);
SpectatorVec list;
g_game.map.getSpectators(list, position, false, true);
if (list.empty()) {
return;
}
TextMessage message(MESSAGE_EXPERIENCE_OTHERS, ucfirst(getNameDescription()) + " gained " + std::to_string(gainExp) + (gainExp != 1 ? " experience points." : " experience point."));
message.position = position;
message.primary.color = TEXTCOLOR_WHITE_EXP;
message.primary.value = gainExp;
for (Creature* spectator : list) {
spectator->getPlayer()->sendTextMessage(message);
}
}
void Creature::addSummon(Creature* creature)
{
creature->setDropLoot(false);
creature->setLossSkill(false);
creature->setMaster(this);
creature->incrementReferenceCounter();
summons.push_back(creature);
}
void Creature::removeSummon(Creature* creature)
{
auto cit = std::find(summons.begin(), summons.end(), creature);
if (cit != summons.end()) {
creature->setDropLoot(false);
creature->setLossSkill(true);
creature->setMaster(nullptr);
creature->decrementReferenceCounter();
summons.erase(cit);
}
}
bool Creature::addCondition(Condition* condition, bool force/* = false*/)
{
if (condition == nullptr) {
return false;
}
if (!force && condition->getType() == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceAddCondition, &g_game, getID(), condition)));
return false;
}
}
Condition* prevCond = getCondition(condition->getType(), condition->getId(), condition->getSubId());
if (prevCond) {
prevCond->addCondition(this, condition);
delete condition;
return true;
}
if (condition->startCondition(this)) {
conditions.push_back(condition);
onAddCondition(condition->getType());
return true;
}
delete condition;
return false;
}
bool Creature::addCombatCondition(Condition* condition)
{
//Caution: condition variable could be deleted after the call to addCondition
ConditionType_t type = condition->getType();
if (!addCondition(condition)) {
return false;
}
onAddCombatCondition(type);
return true;
}
void Creature::removeCondition(ConditionType_t type, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCondition(ConditionType_t type, ConditionId_t conditionId, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type || condition->getId() != conditionId) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCombatCondition(ConditionType_t type)
{
std::vector<Condition*> removeConditions;
for (Condition* condition : conditions) {
if (condition->getType() == type) {
removeConditions.push_back(condition);
}
}
for (Condition* condition : removeConditions) {
onCombatRemoveCondition(condition);
}
}
void Creature::removeCondition(Condition* condition, bool force/* = false*/)
{
auto it = std::find(conditions.begin(), conditions.end(), condition);
if (it == conditions.end()) {
return;
}
if (!force && condition->getType() == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), condition->getType())));
return;
}
}
conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
}
Condition* Creature::getCondition(ConditionType_t type) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type) {
return condition;
}
}
return nullptr;
}
Condition* Creature::getCondition(ConditionType_t type, ConditionId_t conditionId, uint32_t subId/* = 0*/) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type && condition->getId() == conditionId && condition->getSubId() == subId) {
return condition;
}
}
return nullptr;
}
void Creature::executeConditions(uint32_t interval)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (!condition->executeCondition(this, interval)) {
ConditionType_t type = condition->getType();
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
} else {
++it;
}
}
}
bool Creature::hasCondition(ConditionType_t type, uint32_t subId/* = 0*/) const
{
if (isSuppress(type)) {
return false;
}
int64_t timeNow = OTSYS_TIME();
for (Condition* condition : conditions) {
if (condition->getType() != type || condition->getSubId() != subId) {
continue;
}
if (condition->getEndTime() >= timeNow) {
return true;
}
}
return false;
}
bool Creature::isImmune(CombatType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getDamageImmunities());
}
bool Creature::isImmune(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionImmunities());
}
bool Creature::isSuppress(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionSuppressions());
}
int64_t Creature::getStepDuration(Direction dir) const
{
int64_t stepDuration = getStepDuration();
if ((dir & DIRECTION_DIAGONAL_MASK) != 0) {
stepDuration *= 3;
}
return stepDuration;
}
int64_t Creature::getStepDuration() const
{
if (isRemoved()) {
return 0;
}
uint32_t calculatedStepSpeed;
uint32_t groundSpeed;
int32_t stepSpeed = getStepSpeed();
if (stepSpeed > -Creature::speedB) {
calculatedStepSpeed = floor((Creature::speedA * log((stepSpeed / 2) + Creature::speedB) + Creature::speedC) + 0.5);
if (calculatedStepSpeed <= 0) {
calculatedStepSpeed = 1;
}
} else {
calculatedStepSpeed = 1;
}
Item* ground = tile->getGround();
if (ground) {
groundSpeed = Item::items[ground->getID()].speed;
if (groundSpeed == 0) {
groundSpeed = 150;
}
} else {
groundSpeed = 150;
}
double duration = std::floor(1000 * groundSpeed / calculatedStepSpeed);
int64_t stepDuration = std::ceil(duration / 50) * 50;
const Monster* monster = getMonster();
if (monster && monster->isTargetNearby() && !monster->isFleeing() && !monster->getMaster()) {
stepDuration *= 2;
}
return stepDuration;
}
int64_t Creature::getEventStepTicks(bool onlyDelay) const
{
int64_t ret = getWalkDelay();
if (ret <= 0) {
int64_t stepDuration = getStepDuration();
if (onlyDelay && stepDuration > 0) {
ret = 1;
} else {
ret = stepDuration * lastStepCost;
}
}
return ret;
}
void Creature::getCreatureLight(LightInfo& light) const
{
light = internalLight;
}
void Creature::setNormalCreatureLight()
{
internalLight.level = 0;
internalLight.color = 0;
}
bool Creature::registerCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (hasEventRegistered(type)) {
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent == event) {
return false;
}
}
} else {
scriptEventsBitField |= static_cast<uint32_t>(1) << type;
}
eventsList.push_back(event);
return true;
}
bool Creature::unregisterCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (!hasEventRegistered(type)) {
return false;
}
bool resetTypeBit = true;
auto it = eventsList.begin(), end = eventsList.end();
while (it != end) {
CreatureEvent* curEvent = *it;
if (curEvent == event) {
it = eventsList.erase(it);
continue;
}
if (curEvent->getEventType() == type) {
resetTypeBit = false;
}
++it;
}
if (resetTypeBit) {
scriptEventsBitField &= ~(static_cast<uint32_t>(1) << type);
}
return true;
}
CreatureEventList Creature::getCreatureEvents(CreatureEventType_t type)
{
CreatureEventList tmpEventList;
if (!hasEventRegistered(type)) {
return tmpEventList;
}
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent->getEventType() == type) {
tmpEventList.push_back(creatureEvent);
}
}
return tmpEventList;
}
bool FrozenPathingConditionCall::isInRange(const Position& startPos, const Position& testPos,
const FindPathParams& fpp) const
{
if (fpp.fullPathSearch) {
if (testPos.x > targetPos.x + fpp.maxTargetDist) {
return false;
}
if (testPos.x < targetPos.x - fpp.maxTargetDist) {
return false;
}
if (testPos.y > targetPos.y + fpp.maxTargetDist) {
return false;
}
if (testPos.y < targetPos.y - fpp.maxTargetDist) {
return false;
}
} else {
int_fast32_t dx = Position::getOffsetX(startPos, targetPos);
int32_t dxMax = (dx >= 0 ? fpp.maxTargetDist : 0);
if (testPos.x > targetPos.x + dxMax) {
return false;
}
int32_t dxMin = (dx <= 0 ? fpp.maxTargetDist : 0);
if (testPos.x < targetPos.x - dxMin) {
return false;
}
int_fast32_t dy = Position::getOffsetY(startPos, targetPos);
int32_t dyMax = (dy >= 0 ? fpp.maxTargetDist : 0);
if (testPos.y > targetPos.y + dyMax) {
return false;
}
int32_t dyMin = (dy <= 0 ? fpp.maxTargetDist : 0);
if (testPos.y < targetPos.y - dyMin) {
return false;
}
}
return true;
}
bool FrozenPathingConditionCall::operator()(const Position& startPos, const Position& testPos,
const FindPathParams& fpp, int32_t& bestMatchDist) const
{
if (!isInRange(startPos, testPos, fpp)) {
return false;
}
if (fpp.clearSight && !g_game.isSightClear(testPos, targetPos, true)) {
return false;
}
int32_t testDist = std::max<int32_t>(Position::getDistanceX(targetPos, testPos), Position::getDistanceY(targetPos, testPos));
if (fpp.maxTargetDist == 1) {
if (testDist < fpp.minTargetDist || testDist > fpp.maxTargetDist) {
return false;
}
return true;
} else if (testDist <= fpp.maxTargetDist) {
if (testDist < fpp.minTargetDist) {
return false;
}
if (testDist == fpp.maxTargetDist) {
bestMatchDist = 0;
return true;
} else if (testDist > bestMatchDist) {
//not quite what we want, but the best so far
bestMatchDist = testDist;
return true;
}
}
return false;
}
bool Creature::isInvisible() const
{
return std::find_if(conditions.begin(), conditions.end(), [] (const Condition* condition) {
return condition->getType() == CONDITION_INVISIBLE;
}) != conditions.end();
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, const FindPathParams& fpp) const
{
return g_game.map.getPathMatching(*this, dirList, FrozenPathingConditionCall(targetPos), fpp);
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, int32_t minTargetDist, int32_t maxTargetDist, bool fullPathSearch /*= true*/, bool clearSight /*= true*/, int32_t maxSearchDist /*= 0*/) const
{
FindPathParams fpp;
fpp.fullPathSearch = fullPathSearch;
fpp.maxSearchDist = maxSearchDist;
fpp.clearSight = clearSight;
fpp.minTargetDist = minTargetDist;
fpp.maxTargetDist = maxTargetDist;
return getPathTo(targetPos, dirList, fpp);
}
| 1 | 12,966 | This is so much more concise and beautiful than the previous formula | otland-forgottenserver | cpp |
@@ -41,9 +41,19 @@ import org.hyperledger.besu.tests.acceptance.dsl.transaction.perm.PermissioningT
import org.hyperledger.besu.tests.acceptance.dsl.transaction.privacy.PrivacyTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.web3.Web3Transactions;
+import java.io.File;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.ThreadContext;
import org.junit.After;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
public class AcceptanceTestBase {
+ protected final Logger LOG = LogManager.getLogger();
protected final Accounts accounts;
protected final AccountTransactions accountTransactions; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl;
import org.hyperledger.besu.tests.acceptance.dsl.account.Accounts;
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Blockchain;
import org.hyperledger.besu.tests.acceptance.dsl.condition.admin.AdminConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.clique.CliqueConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.eth.EthConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.ibft2.Ibft2Conditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.login.LoginConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.net.NetConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.perm.PermissioningConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.priv.PrivConditions;
import org.hyperledger.besu.tests.acceptance.dsl.condition.web3.Web3Conditions;
import org.hyperledger.besu.tests.acceptance.dsl.contract.ContractVerifier;
import org.hyperledger.besu.tests.acceptance.dsl.node.cluster.Cluster;
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.BesuNodeFactory;
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.permissioning.PermissionedNodeBuilder;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.account.AccountTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.admin.AdminTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.clique.CliqueTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.contract.ContractTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.eth.EthTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.ibft2.Ibft2Transactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.miner.MinerTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.net.NetTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.perm.PermissioningTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.privacy.PrivacyTransactions;
import org.hyperledger.besu.tests.acceptance.dsl.transaction.web3.Web3Transactions;
import org.junit.After;
public class AcceptanceTestBase {
protected final Accounts accounts;
protected final AccountTransactions accountTransactions;
protected final AdminConditions admin;
protected final AdminTransactions adminTransactions;
protected final Blockchain blockchain;
protected final CliqueConditions clique;
protected final CliqueTransactions cliqueTransactions;
protected final Cluster cluster;
protected final ContractVerifier contractVerifier;
protected final ContractTransactions contractTransactions;
protected final EthConditions eth;
protected final EthTransactions ethTransactions;
protected final Ibft2Transactions ibftTwoTransactions;
protected final Ibft2Conditions ibftTwo;
protected final LoginConditions login;
protected final NetConditions net;
protected final BesuNodeFactory besu;
protected final PermissioningConditions perm;
protected final PermissionedNodeBuilder permissionedNodeBuilder;
protected final PermissioningTransactions permissioningTransactions;
protected final MinerTransactions minerTransactions;
protected final Web3Conditions web3;
protected final PrivConditions priv;
protected final PrivacyTransactions privacyTransactions;
protected AcceptanceTestBase() {
ethTransactions = new EthTransactions();
accounts = new Accounts(ethTransactions);
adminTransactions = new AdminTransactions();
cliqueTransactions = new CliqueTransactions();
ibftTwoTransactions = new Ibft2Transactions();
accountTransactions = new AccountTransactions(accounts);
permissioningTransactions = new PermissioningTransactions();
privacyTransactions = new PrivacyTransactions();
contractTransactions = new ContractTransactions();
minerTransactions = new MinerTransactions();
blockchain = new Blockchain(ethTransactions);
clique = new CliqueConditions(ethTransactions, cliqueTransactions);
eth = new EthConditions(ethTransactions);
ibftTwo = new Ibft2Conditions(ibftTwoTransactions);
login = new LoginConditions();
net = new NetConditions(new NetTransactions());
cluster = new Cluster(net);
perm = new PermissioningConditions(permissioningTransactions);
priv = new PrivConditions(privacyTransactions);
admin = new AdminConditions(adminTransactions);
web3 = new Web3Conditions(new Web3Transactions());
besu = new BesuNodeFactory();
contractVerifier = new ContractVerifier(accounts.getPrimaryBenefactor());
permissionedNodeBuilder = new PermissionedNodeBuilder();
}
@After
public void tearDownAcceptanceTestBase() {
cluster.close();
}
}
| 1 | 21,547 | Is there any reason not to have the `static` modifier for the logger? _(that would be in keeping with the reference being uppercase)_ | hyperledger-besu | java |
@@ -27,11 +27,18 @@ import (
// DialogCreator creates new dialog between consumer and provider, using given contact information
type DialogCreator func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error)
+// SessionCreationConfig are the parameters that get sent to the provider
+type SessionCreationConfig interface{}
+
+// ConsumerConfig are the parameters used for the initiation of connection
+type ConsumerConfig interface{}
+
// Connection represents a connection
type Connection interface {
- Start() error
+ Start(ConnectOptions) error
Wait() error
Stop()
+ GetSessionConfig() (SessionCreationConfig, error)
}
// StateChannel is the channel we receive state change events on | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package connection
import (
"github.com/mysteriumnetwork/node/communication"
"github.com/mysteriumnetwork/node/consumer"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market"
)
// DialogCreator creates new dialog between consumer and provider, using given contact information
type DialogCreator func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error)
// Connection represents a connection
type Connection interface {
Start() error
Wait() error
Stop()
}
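// Illustrative sketch only (not part of the original file): a minimal stub
// satisfying the Connection interface above, showing the expected lifecycle
// (Start, then Wait blocks until Stop). The type and constructor names are
// hypothetical.
type stubConnection struct {
	done chan struct{}
}

func newStubConnection() *stubConnection {
	return &stubConnection{done: make(chan struct{})}
}

func (c *stubConnection) Start() error { return nil }

// Wait blocks until Stop closes the done channel.
func (c *stubConnection) Wait() error {
	<-c.done
	return nil
}

func (c *stubConnection) Stop() { close(c.done) }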
// StateChannel is the channel we receive state change events on
type StateChannel chan State
// StatisticsChannel is the channel we receive stats change events on
type StatisticsChannel chan consumer.SessionStatistics
// PromiseIssuer issues promises from consumer to provider.
// Consumer signs those promises.
type PromiseIssuer interface {
Start(proposal market.ServiceProposal) error
Stop() error
}
// PromiseIssuerCreator creates new PromiseIssuer given context
type PromiseIssuerCreator func(issuerID identity.Identity, dialog communication.Dialog) PromiseIssuer
// Manager interface provides methods to manage connection
type Manager interface {
// Connect creates new connection from given consumer to provider, reports error if connection already exists
Connect(consumerID identity.Identity, proposal market.ServiceProposal, params ConnectParams) error
// Status queries current status of connection
Status() ConnectionStatus
// Disconnect closes established connection, reports error if no connection
Disconnect() error
}
| 1 | 12,986 | It's not about session creation config. It's about passing consumer config parameters to underlying transport - nothing to do with session itself | mysteriumnetwork-node | go |
@@ -97,7 +97,7 @@ type ConfigLocal struct {
kbpki KBPKI
renamer ConflictRenamer
registry metrics.Registry
- loggerFn func(prefix string) logger.Logger
+ loggerFn func(prefix string, overrideEnableDebug bool) logger.Logger
noBGFlush bool // logic opposite so the default value is the common setting
rwpWaitTime time.Duration
diskLimiter DiskLimiter | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/cache"
"github.com/keybase/kbfs/ioutil"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
metrics "github.com/rcrowley/go-metrics"
"github.com/shirou/gopsutil/mem"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/storage"
"golang.org/x/net/context"
"golang.org/x/net/trace"
)
const (
// Max supported size of a directory entry name.
maxNameBytesDefault = 255
// Maximum supported plaintext size of a directory in KBFS. TODO:
// increase this once we support levels of indirection for
// directories.
maxDirBytesDefault = MaxBlockSizeBytesDefault
// Default time after setting the rekey bit before prompting for a
// paper key.
rekeyWithPromptWaitTimeDefault = 10 * time.Minute
// see Config doc for the purpose of DelayedCancellationGracePeriod
delayedCancellationGracePeriodDefault = 2 * time.Second
// How often do we check for stuff to reclaim?
qrPeriodDefault = 1 * time.Minute
// How long must something be unreferenced before we reclaim it?
qrUnrefAgeDefault = 1 * time.Minute
// How old must the most recent TLF revision be before another
// device can run QR on that TLF? This is large, to avoid
// unnecessary conflicts on the TLF between devices.
qrMinHeadAgeDefault = 24 * time.Hour
// tlfValidDurationDefault is the default for tlf validity before redoing identify.
tlfValidDurationDefault = 6 * time.Hour
// bgFlushDirOpBatchSizeDefault is the default for how many
// directory operations should be batched together in a single
// background flush.
bgFlushDirOpBatchSizeDefault = 100
// bgFlushPeriodDefault is the default for how long to wait for a
// batch to fill up before syncing a set of changes to the servers.
bgFlushPeriodDefault = 1 * time.Second
keyBundlesCacheCapacityBytes = 10 * cache.MB
// folder name for persisted config parameters.
syncedTlfConfigFolderName = "synced_tlf_config"
// By default, this will be the block type given to all blocks
// that aren't explicitly some other type.
defaultBlockTypeDefault = keybase1.BlockType_DATA
)
// ConfigLocal implements the Config interface using purely local
// server objects (no KBFS operations used RPCs).
type ConfigLocal struct {
lock sync.RWMutex
kbfs KBFSOps
keyman KeyManager
rep Reporter
kcache KeyCache
kbcache kbfsmd.KeyBundleCache
bcache BlockCache
dirtyBcache DirtyBlockCache
diskBlockCache DiskBlockCache
codec kbfscodec.Codec
mdops MDOps
kops KeyOps
crypto Crypto
mdcache MDCache
bops BlockOps
mdserv MDServer
bserv BlockServer
keyserv KeyServer
service KeybaseService
bsplit BlockSplitter
notifier Notifier
clock Clock
kbpki KBPKI
renamer ConflictRenamer
registry metrics.Registry
loggerFn func(prefix string) logger.Logger
noBGFlush bool // logic opposite so the default value is the common setting
rwpWaitTime time.Duration
diskLimiter DiskLimiter
syncedTlfs map[tlf.ID]bool
defaultBlockType keybase1.BlockType
kbfsService *KBFSService
kbCtx Context
maxNameBytes uint32
maxDirBytes uint64
rekeyQueue RekeyQueue
storageRoot string
diskCacheMode DiskCacheMode
traceLock sync.RWMutex
traceEnabled bool
qrPeriod time.Duration
qrUnrefAge time.Duration
qrMinHeadAge time.Duration
delayedCancellationGracePeriod time.Duration
// allKnownConfigsForTesting is used for testing, and contains all created
// Config objects in this test.
allKnownConfigsForTesting *[]Config
// tlfValidDuration is the time TLFs are valid before redoing identification.
tlfValidDuration time.Duration
// bgFlushDirOpBatchSize indicates how many directory operations
// should be batched together in a single background flush.
bgFlushDirOpBatchSize int
// bgFlushPeriod indicates how long to wait for a batch to fill up
// before syncing a set of changes to the servers.
bgFlushPeriod time.Duration
// metadataVersion is the version to use when creating new metadata.
metadataVersion kbfsmd.MetadataVer
mode InitMode
quotaUsage map[keybase1.UserOrTeamID]*EventuallyConsistentQuotaUsage
rekeyFSMLimiter *OngoingWorkLimiter
}
// DiskCacheMode represents the mode of initialization for the disk cache.
type DiskCacheMode int
var _ flag.Value = (*DiskCacheMode)(nil)
const (
// DiskCacheModeOff indicates to leave off the disk cache.
DiskCacheModeOff DiskCacheMode = iota
// DiskCacheModeLocal indicates to use a local disk cache.
DiskCacheModeLocal
// DiskCacheModeRemote indicates to use a remote disk cache.
DiskCacheModeRemote
)
// String outputs a human-readable description of this DiskBlockCacheMode.
func (m DiskCacheMode) String() string {
switch m {
case DiskCacheModeOff:
return "off"
case DiskCacheModeLocal:
return "local"
case DiskCacheModeRemote:
return "remote"
}
return "unknown"
}
// Set parses a string representing a disk block cache initialization mode,
// and outputs the mode value corresponding to that string. Defaults to
// DiskCacheModeOff.
func (m *DiskCacheMode) Set(s string) error {
*m = DiskCacheModeOff
switch strings.ToLower(strings.TrimSpace(s)) {
case "local":
*m = DiskCacheModeLocal
case "remote":
*m = DiskCacheModeRemote
}
return nil
}
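// Illustrative sketch only (not part of the original file): because
// DiskCacheMode implements flag.Value via the String and Set methods above,
// it can be registered as a command-line flag. The flag name and usage
// string here are hypothetical.
func exampleRegisterDiskCacheModeFlag() *DiskCacheMode {
	mode := DiskCacheModeOff
	flag.Var(&mode, "disk-cache-mode", "disk cache mode: off, local or remote")
	return &mode
}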
var _ Config = (*ConfigLocal)(nil)
// LocalUser represents a fake KBFS user, useful for testing.
type LocalUser struct {
UserInfo
Asserts []string
// Index into UserInfo.CryptPublicKeys.
CurrentCryptPublicKeyIndex int
// Index into UserInfo.VerifyingKeys.
CurrentVerifyingKeyIndex int
// Unverified keys.
UnverifiedKeys []keybase1.PublicKey
}
// GetCurrentCryptPublicKey returns this LocalUser's public encryption key.
func (lu *LocalUser) GetCurrentCryptPublicKey() kbfscrypto.CryptPublicKey {
return lu.CryptPublicKeys[lu.CurrentCryptPublicKeyIndex]
}
// GetCurrentVerifyingKey returns this LocalUser's public signing key.
func (lu *LocalUser) GetCurrentVerifyingKey() kbfscrypto.VerifyingKey {
return lu.VerifyingKeys[lu.CurrentVerifyingKeyIndex]
}
func verifyingKeysToPublicKeys(
keys []kbfscrypto.VerifyingKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.KID(),
IsSibkey: true,
}
}
return publicKeys
}
func cryptPublicKeysToPublicKeys(
keys []kbfscrypto.CryptPublicKey) []keybase1.PublicKey {
publicKeys := make([]keybase1.PublicKey, len(keys))
for i, key := range keys {
publicKeys[i] = keybase1.PublicKey{
KID: key.KID(),
IsSibkey: false,
}
}
return publicKeys
}
// GetPublicKeys returns all of this LocalUser's public encryption keys.
func (lu *LocalUser) GetPublicKeys() []keybase1.PublicKey {
sibkeys := verifyingKeysToPublicKeys(lu.VerifyingKeys)
subkeys := cryptPublicKeysToPublicKeys(lu.CryptPublicKeys)
return append(sibkeys, subkeys...)
}
// Helper functions to get various keys for a local user suitable
// for use with CryptoLocal. Each function will always return the same
// key for a given user.
// MakeLocalUserSigningKeyOrBust returns a unique signing key for this user.
func MakeLocalUserSigningKeyOrBust(
name libkb.NormalizedUsername) kbfscrypto.SigningKey {
return kbfscrypto.MakeFakeSigningKeyOrBust(
string(name) + " signing key")
}
// MakeLocalUserVerifyingKeyOrBust makes a new verifying key
// corresponding to the signing key for this user.
func MakeLocalUserVerifyingKeyOrBust(
name libkb.NormalizedUsername) kbfscrypto.VerifyingKey {
return MakeLocalUserSigningKeyOrBust(name).GetVerifyingKey()
}
// MakeLocalUserCryptPrivateKeyOrBust returns a unique private
// encryption key for this user.
func MakeLocalUserCryptPrivateKeyOrBust(
name libkb.NormalizedUsername) kbfscrypto.CryptPrivateKey {
return kbfscrypto.MakeFakeCryptPrivateKeyOrBust(
string(name) + " crypt key")
}
// MakeLocalUserCryptPublicKeyOrBust returns the public key
// corresponding to the crypt private key for this user.
func MakeLocalUserCryptPublicKeyOrBust(
name libkb.NormalizedUsername) kbfscrypto.CryptPublicKey {
return MakeLocalUserCryptPrivateKeyOrBust(name).GetPublicKey()
}
// MakeLocalTLFCryptKeyOrBust returns a unique private symmetric key
// for a TLF.
func MakeLocalTLFCryptKeyOrBust(
name string, keyGen kbfsmd.KeyGen) kbfscrypto.TLFCryptKey {
// Put the key gen first to make it more likely to fit into the
// 32-character "random" seed.
return kbfscrypto.MakeFakeTLFCryptKeyOrBust(
string(name) + " " + string(keyGen) + " crypt key ")
}
// MakeLocalUsers is a helper function to generate a list of
// LocalUsers suitable to use with KeybaseDaemonLocal.
func MakeLocalUsers(users []libkb.NormalizedUsername) []LocalUser {
localUsers := make([]LocalUser, len(users))
for i := 0; i < len(users); i++ {
verifyingKey := MakeLocalUserVerifyingKeyOrBust(users[i])
cryptPublicKey := MakeLocalUserCryptPublicKeyOrBust(users[i])
localUsers[i] = LocalUser{
UserInfo: UserInfo{
Name: users[i],
UID: keybase1.MakeTestUID(uint32(i + 1)),
VerifyingKeys: []kbfscrypto.VerifyingKey{verifyingKey},
CryptPublicKeys: []kbfscrypto.CryptPublicKey{cryptPublicKey},
KIDNames: map[keybase1.KID]string{
verifyingKey.KID(): "dev1",
},
},
CurrentCryptPublicKeyIndex: 0,
CurrentVerifyingKeyIndex: 0,
}
}
return localUsers
}
func makeLocalTeams(
teams []libkb.NormalizedUsername, startingIndex int, ty tlf.Type) (
localTeams []TeamInfo) {
localTeams = make([]TeamInfo, len(teams))
for index := 0; index < len(teams); index++ {
i := index + startingIndex
cryptKey := MakeLocalTLFCryptKeyOrBust(
buildCanonicalPathForTlfType(
tlf.SingleTeam, string(teams[index])),
kbfsmd.FirstValidKeyGen)
localTeams[index] = TeamInfo{
Name: teams[index],
TID: keybase1.MakeTestTeamID(uint32(i+1), ty == tlf.Public),
CryptKeys: map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey{
kbfsmd.FirstValidKeyGen: cryptKey,
},
LatestKeyGen: kbfsmd.FirstValidKeyGen,
}
// If this is a subteam, set the root ID.
if strings.Contains(string(teams[index]), ".") {
parts := strings.SplitN(string(teams[index]), ".", 2)
for j := 0; j < index; j++ {
if parts[0] == string(localTeams[j].Name) {
localTeams[index].RootID = localTeams[j].TID
break
}
}
}
}
return localTeams
}
// MakeLocalTeams is a helper function to generate a list of local
// teams suitable to use with KeybaseDaemonLocal. Any subteams must come
// after their root team names in the `teams` slice.
func MakeLocalTeams(teams []libkb.NormalizedUsername) []TeamInfo {
return makeLocalTeams(teams, 0, tlf.Private)
}
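// Illustrative sketch only (not part of the original file): per the doc
// comment above, a root team must appear in the slice before any of its
// subteams so the subteam's RootID can be resolved. The team names are
// hypothetical.
func exampleMakeLocalTeams() []TeamInfo {
	return MakeLocalTeams([]libkb.NormalizedUsername{"keybase", "keybase.devops"})
}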
// getDefaultCleanBlockCacheCapacity returns the default clean block
// cache capacity. If we can get the total RAM of the system, we cap at
// the smaller of <1/8 of total memory> and
// <MaxBlockSizeBytesDefault * DefaultBlocksInMemCache>; otherwise, we
// fall back to the latter.
func getDefaultCleanBlockCacheCapacity() uint64 {
capacity := uint64(MaxBlockSizeBytesDefault) * DefaultBlocksInMemCache
vmstat, err := mem.VirtualMemory()
if err == nil {
ramBased := vmstat.Total / 8
if ramBased < capacity {
capacity = ramBased
}
}
return capacity
}
// NewConfigLocal constructs a new ConfigLocal with some default
// components that don't depend on a logger. The caller will have to
// fill in the rest.
//
// TODO: Now that NewConfigLocal takes loggerFn, add more default
// components.
func NewConfigLocal(mode InitMode, loggerFn func(module string) logger.Logger,
storageRoot string, diskCacheMode DiskCacheMode, kbCtx Context) *ConfigLocal {
config := &ConfigLocal{
loggerFn: loggerFn,
storageRoot: storageRoot,
mode: mode,
diskCacheMode: diskCacheMode,
kbCtx: kbCtx,
}
if diskCacheMode == DiskCacheModeLocal {
config.loadSyncedTlfsLocked()
}
config.SetClock(wallClock{})
config.SetReporter(NewReporterSimple(config.Clock(), 10))
config.SetConflictRenamer(WriterDeviceDateConflictRenamer{config})
config.ResetCaches()
config.SetCodec(kbfscodec.NewMsgpack())
config.SetKeyOps(&KeyOpsStandard{config})
config.SetRekeyQueue(NewRekeyQueueStandard(config))
config.maxNameBytes = maxNameBytesDefault
config.maxDirBytes = maxDirBytesDefault
config.rwpWaitTime = rekeyWithPromptWaitTimeDefault
config.delayedCancellationGracePeriod = delayedCancellationGracePeriodDefault
config.qrPeriod = qrPeriodDefault
config.qrUnrefAge = qrUnrefAgeDefault
config.qrMinHeadAge = qrMinHeadAgeDefault
// Don't bother creating the registry if UseNilMetrics is set, or
// if we're in minimal mode.
if !metrics.UseNilMetrics && config.Mode() != InitMinimal {
registry := metrics.NewRegistry()
config.SetMetricsRegistry(registry)
}
config.tlfValidDuration = tlfValidDurationDefault
config.bgFlushDirOpBatchSize = bgFlushDirOpBatchSizeDefault
config.bgFlushPeriod = bgFlushPeriodDefault
config.metadataVersion = defaultClientMetadataVer
config.defaultBlockType = defaultBlockTypeDefault
config.quotaUsage =
make(map[keybase1.UserOrTeamID]*EventuallyConsistentQuotaUsage)
switch config.mode.Mode() {
case InitDefault:
// In normal desktop app, we limit to 16 routines.
config.rekeyFSMLimiter = NewOngoingWorkLimiter(16)
case InitMinimal:
// This is likely mobile. Limit it to 4.
config.rekeyFSMLimiter = NewOngoingWorkLimiter(4)
case InitSingleOp:
// Just block all rekeys and don't bother cleaning up requests since the process is short lived anyway.
config.rekeyFSMLimiter = NewOngoingWorkLimiter(0)
default:
panic(fmt.Sprintf("😱 unknown init mode %v", config.mode))
}
return config
}
// KBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBFSOps() KBFSOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbfs
}
// SetKBFSOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBFSOps(k KBFSOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbfs = k
}
// KBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KBPKI() KBPKI {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbpki
}
// CurrentSessionGetter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) CurrentSessionGetter() CurrentSessionGetter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbpki
}
// SetKBPKI implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKBPKI(k KBPKI) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbpki = k
}
// KeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyManager() KeyManager {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyman
}
// SetKeyManager implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyManager(k KeyManager) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyman = k
}
// KeyGetter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) keyGetter() blockKeyGetter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyman
}
// Reporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Reporter() Reporter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.rep
}
// SetReporter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetReporter(r Reporter) {
c.lock.Lock()
defer c.lock.Unlock()
c.rep = r
}
// KeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyCache() KeyCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kcache
}
// SetKeyCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyCache(k KeyCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.kcache = k
}
// KeyBundleCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyBundleCache() kbfsmd.KeyBundleCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kbcache
}
// SetKeyBundleCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyBundleCache(k kbfsmd.KeyBundleCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.kbcache = k
}
// BlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockCache() BlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bcache
}
// SetBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockCache(b BlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.bcache = b
}
// DirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DirtyBlockCache() DirtyBlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.dirtyBcache
}
// SetDirtyBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDirtyBlockCache(d DirtyBlockCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.dirtyBcache = d
}
// DiskBlockCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DiskBlockCache() DiskBlockCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.diskBlockCache
}
// DiskLimiter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DiskLimiter() DiskLimiter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.diskLimiter
}
// Crypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Crypto() Crypto {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// Signer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Signer() kbfscrypto.Signer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// SetCrypto implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCrypto(cr Crypto) {
c.lock.Lock()
defer c.lock.Unlock()
c.crypto = cr
}
// CryptoPure implements the Config interface for ConfigLocal.
func (c *ConfigLocal) cryptoPure() cryptoPure {
c.lock.RLock()
defer c.lock.RUnlock()
return c.crypto
}
// Codec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Codec() kbfscodec.Codec {
c.lock.RLock()
defer c.lock.RUnlock()
return c.codec
}
// SetCodec implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetCodec(co kbfscodec.Codec) {
c.lock.Lock()
defer c.lock.Unlock()
c.codec = co
RegisterOps(c.codec)
}
// MDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDOps() MDOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdops
}
// SetMDOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDOps(m MDOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdops = m
}
// KeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyOps() KeyOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.kops
}
// SetKeyOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyOps(k KeyOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.kops = k
}
// MDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDCache() MDCache {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdcache
}
// SetMDCache implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDCache(m MDCache) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = m
}
// BlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockOps() BlockOps {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bops
}
// SetBlockOps implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockOps(b BlockOps) {
c.lock.Lock()
defer c.lock.Unlock()
c.bops = b
}
// MDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MDServer() MDServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.mdserv
}
// SetMDServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMDServer(m MDServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.mdserv = m
}
// BlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockServer() BlockServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bserv
}
// SetBlockServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockServer(b BlockServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.bserv = b
}
// KeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeyServer() KeyServer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.keyserv
}
// SetKeyServer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeyServer(k KeyServer) {
c.lock.Lock()
defer c.lock.Unlock()
c.keyserv = k
}
// KeybaseService implements the Config interface for ConfigLocal.
func (c *ConfigLocal) KeybaseService() KeybaseService {
c.lock.RLock()
defer c.lock.RUnlock()
return c.service
}
// SetKeybaseService implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetKeybaseService(k KeybaseService) {
c.lock.Lock()
defer c.lock.Unlock()
c.service = k
}
// BlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BlockSplitter() BlockSplitter {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bsplit
}
// SetBlockSplitter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBlockSplitter(b BlockSplitter) {
c.lock.Lock()
defer c.lock.Unlock()
c.bsplit = b
}
// Notifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Notifier() Notifier {
c.lock.RLock()
defer c.lock.RUnlock()
return c.notifier
}
// SetNotifier implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetNotifier(n Notifier) {
c.lock.Lock()
defer c.lock.Unlock()
c.notifier = n
}
// Clock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Clock() Clock {
c.lock.RLock()
defer c.lock.RUnlock()
return c.clock
}
// SetClock implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetClock(cl Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = cl
}
// ConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ConflictRenamer() ConflictRenamer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.renamer
}
// SetConflictRenamer implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetConflictRenamer(cr ConflictRenamer) {
c.lock.Lock()
defer c.lock.Unlock()
c.renamer = cr
}
// MetadataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MetadataVersion() kbfsmd.MetadataVer {
c.lock.RLock()
defer c.lock.RUnlock()
return c.metadataVersion
}
// SetMetadataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMetadataVersion(mdVer kbfsmd.MetadataVer) {
c.lock.Lock()
defer c.lock.Unlock()
c.metadataVersion = mdVer
}
// DataVersion implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DataVersion() DataVer {
return AtLeastTwoLevelsOfChildrenDataVer
}
// DefaultBlockType implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DefaultBlockType() keybase1.BlockType {
c.lock.RLock()
defer c.lock.RUnlock()
return c.defaultBlockType
}
// SetDefaultBlockType implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDefaultBlockType(blockType keybase1.BlockType) {
c.lock.Lock()
defer c.lock.Unlock()
c.defaultBlockType = blockType
}
// DoBackgroundFlushes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DoBackgroundFlushes() bool {
if c.Mode() == InitMinimal {
// Don't do background flushes when in minimal mode, since
// there shouldn't be any data writes.
return false
}
c.lock.RLock()
defer c.lock.RUnlock()
return !c.noBGFlush
}
// SetDoBackgroundFlushes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDoBackgroundFlushes(doBGFlush bool) {
c.lock.Lock()
defer c.lock.Unlock()
c.noBGFlush = !doBGFlush
}
// RekeyWithPromptWaitTime implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) RekeyWithPromptWaitTime() time.Duration {
c.lock.Lock()
defer c.lock.Unlock()
return c.rwpWaitTime
}
// SetRekeyWithPromptWaitTime implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) SetRekeyWithPromptWaitTime(d time.Duration) {
c.lock.RLock()
defer c.lock.RUnlock()
c.rwpWaitTime = d
}
// Mode implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Mode() InitMode {
// We return the mode with the test flag masked out.
return c.mode.Mode()
}
// IsTestMode implements the Config interface for ConfigLocal.
func (c *ConfigLocal) IsTestMode() bool {
return c.mode.HasFlags(InitTest)
}
// DelayedCancellationGracePeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) DelayedCancellationGracePeriod() time.Duration {
return c.delayedCancellationGracePeriod
}
// SetDelayedCancellationGracePeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetDelayedCancellationGracePeriod(d time.Duration) {
c.delayedCancellationGracePeriod = d
}
// QuotaReclamationPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) QuotaReclamationPeriod() time.Duration {
return c.qrPeriod
}
// QuotaReclamationMinUnrefAge implements the Config interface for ConfigLocal.
func (c *ConfigLocal) QuotaReclamationMinUnrefAge() time.Duration {
return c.qrUnrefAge
}
// QuotaReclamationMinHeadAge implements the Config interface for ConfigLocal.
func (c *ConfigLocal) QuotaReclamationMinHeadAge() time.Duration {
return c.qrMinHeadAge
}
// ReqsBufSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ReqsBufSize() int {
return 20
}
// MaxNameBytes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaxNameBytes() uint32 {
return c.maxNameBytes
}
// MaxDirBytes implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaxDirBytes() uint64 {
return c.maxDirBytes
}
// StorageRoot implements the Config interface for ConfigLocal.
func (c *ConfigLocal) StorageRoot() string {
return c.storageRoot
}
func (c *ConfigLocal) resetCachesWithoutShutdown() DirtyBlockCache {
c.lock.Lock()
defer c.lock.Unlock()
c.mdcache = NewMDCacheStandard(defaultMDCacheCapacity)
c.kcache = NewKeyCacheStandard(defaultMDCacheCapacity)
c.kbcache = kbfsmd.NewKeyBundleCacheLRU(keyBundlesCacheCapacityBytes)
log := c.MakeLogger("")
var capacity uint64
if c.bcache == nil {
capacity = getDefaultCleanBlockCacheCapacity()
log.Debug("setting default clean block cache capacity to %d",
capacity)
} else {
capacity = c.bcache.GetCleanBytesCapacity()
log.Debug("setting clean block cache capacity based on existing value %d",
capacity)
}
c.bcache = NewBlockCacheStandard(10000, capacity)
if c.Mode() == InitMinimal {
// No blocks will be dirtied in minimal mode, so don't bother
// with the dirty block cache.
return nil
}
oldDirtyBcache := c.dirtyBcache
// TODO: we should probably fail or re-schedule this reset if
// there is anything dirty in the dirty block cache.
// The minimum number of bytes we'll try to sync in parallel.
// This should be roughly the minimum amount of bytes we expect
// our worst supported connection to send within the timeout
// forced on us by the upper layer (19 seconds on OS X). With the
// current default of a single block, this minimum works out to
// ~1MB, so we can support a connection speed as low as ~54 KB/s.
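	// (Arithmetic check on the figure above: with a ~1 MB minimum and the
	// 19-second timeout, roughly 1024 KB / 19 s ≈ 54 KB/s.)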
minSyncBufferSize := int64(MaxBlockSizeBytesDefault)
// The maximum number of bytes we can try to sync at once (also limits the
// amount of memory used by dirty blocks). We use the same value from clean
// block cache capacity here.
maxSyncBufferSize := int64(capacity)
// Start off conservatively to avoid getting immediate timeouts on
// slow connections.
startSyncBufferSize := minSyncBufferSize
dbcLog := c.MakeLogger("DBC")
c.dirtyBcache = NewDirtyBlockCacheStandard(c.clock, dbcLog,
minSyncBufferSize, maxSyncBufferSize, startSyncBufferSize)
return oldDirtyBcache
}
// ResetCaches implements the Config interface for ConfigLocal.
func (c *ConfigLocal) ResetCaches() {
oldDirtyBcache := c.resetCachesWithoutShutdown()
jServer, err := GetJournalServer(c)
if err == nil {
if err := c.journalizeBcaches(jServer); err != nil {
if log := c.MakeLogger(""); log != nil {
log.CWarningf(nil, "Error journalizing dirty block cache: %+v", err)
}
}
}
if oldDirtyBcache != nil {
// Shutdown outside of the lock so it doesn't block other
// access to this config.
if err := oldDirtyBcache.Shutdown(); err != nil {
if log := c.MakeLogger(""); log != nil {
log.CWarningf(nil,
"Error shutting down old dirty block cache: %+v", err)
}
}
}
}
// MakeLogger implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MakeLogger(module string) logger.Logger {
// No need to lock since c.loggerFn is initialized once at
// construction. Also resetCachesWithoutShutdown would deadlock.
return c.loggerFn(module)
}
// MetricsRegistry implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MetricsRegistry() metrics.Registry {
return c.registry
}
// SetRekeyQueue implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetRekeyQueue(r RekeyQueue) {
c.rekeyQueue = r
}
// RekeyQueue implements the Config interface for ConfigLocal.
func (c *ConfigLocal) RekeyQueue() RekeyQueue {
return c.rekeyQueue
}
// SetMetricsRegistry implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetMetricsRegistry(r metrics.Registry) {
c.registry = r
}
// SetTraceOptions implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTraceOptions(enabled bool) {
c.traceLock.Lock()
defer c.traceLock.Unlock()
c.traceEnabled = enabled
}
// MaybeStartTrace implements the Config interface for ConfigLocal.
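// An illustrative call site (the family/title strings here are hypothetical)
// brackets an operation with MaybeStartTrace and MaybeFinishTrace:
//
//	ctx = config.MaybeStartTrace(ctx, "FBO", "Lookup")
//	defer func() { config.MaybeFinishTrace(ctx, err) }()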
func (c *ConfigLocal) MaybeStartTrace(
ctx context.Context, family, title string) context.Context {
traceEnabled := func() bool {
c.traceLock.RLock()
defer c.traceLock.RUnlock()
return c.traceEnabled
}()
if !traceEnabled {
return ctx
}
tr := trace.New(family, title)
tr.SetMaxEvents(25)
ctx = trace.NewContext(ctx, tr)
return ctx
}
// MaybeFinishTrace implements the Config interface for ConfigLocal.
func (c *ConfigLocal) MaybeFinishTrace(ctx context.Context, err error) {
if tr, ok := trace.FromContext(ctx); ok {
if err != nil {
tr.LazyPrintf("err=%+v", err)
tr.SetError()
}
tr.Finish()
}
}
// SetTLFValidDuration implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTLFValidDuration(r time.Duration) {
c.tlfValidDuration = r
}
// TLFValidDuration implements the Config interface for ConfigLocal.
func (c *ConfigLocal) TLFValidDuration() time.Duration {
return c.tlfValidDuration
}
// SetBGFlushDirOpBatchSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBGFlushDirOpBatchSize(s int) {
c.lock.Lock()
defer c.lock.Unlock()
c.bgFlushDirOpBatchSize = s
}
// BGFlushDirOpBatchSize implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BGFlushDirOpBatchSize() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bgFlushDirOpBatchSize
}
// SetBGFlushPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetBGFlushPeriod(p time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.bgFlushPeriod = p
}
// BGFlushPeriod implements the Config interface for ConfigLocal.
func (c *ConfigLocal) BGFlushPeriod() time.Duration {
c.lock.RLock()
defer c.lock.RUnlock()
return c.bgFlushPeriod
}
// Shutdown implements the Config interface for ConfigLocal.
func (c *ConfigLocal) Shutdown(ctx context.Context) error {
c.RekeyQueue().Shutdown()
if c.CheckStateOnShutdown() && c.allKnownConfigsForTesting != nil {
// Before we do anything, wait for all archiving and
// journaling to finish.
for _, config := range *c.allKnownConfigsForTesting {
kbfsOps, ok := config.KBFSOps().(*KBFSOpsStandard)
if !ok {
continue
}
for _, fbo := range kbfsOps.ops {
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
log := config.MakeLogger("")
if err := WaitForTLFJournal(ctx, config, fbo.id(),
log); err != nil {
return err
}
// The above wait could have resulted in some MD
// flushes, so now we have to wait on any archives as
// well. We only need one more check for this, since
// archives don't produce MDs.
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := WaitForTLFJournal(ctx, config, fbo.id(),
log); err != nil {
return err
}
}
}
}
var errorList []error
err := c.KBFSOps().Shutdown(ctx)
if err != nil {
errorList = append(errorList, err)
// Continue with shutdown regardless of err.
err = nil
}
c.BlockOps().Shutdown()
c.MDServer().Shutdown()
c.KeyServer().Shutdown()
c.KeybaseService().Shutdown()
c.BlockServer().Shutdown(ctx)
c.Crypto().Shutdown()
c.Reporter().Shutdown()
dirtyBcache := c.DirtyBlockCache()
if dirtyBcache != nil {
err = dirtyBcache.Shutdown()
}
if err != nil {
errorList = append(errorList, err)
}
dbc := c.DiskBlockCache()
if dbc != nil {
dbc.Shutdown(ctx)
}
kbfsServ := c.kbfsService
if kbfsServ != nil {
kbfsServ.Shutdown()
}
if len(errorList) == 1 {
return errorList[0]
} else if len(errorList) > 1 {
// Aggregate errors
return errors.Errorf("Multiple errors on shutdown: %+v", errorList)
}
return nil
}
// CheckStateOnShutdown implements the Config interface for ConfigLocal.
func (c *ConfigLocal) CheckStateOnShutdown() bool {
if md, ok := c.MDServer().(mdServerLocal); ok {
return !md.isShutdown()
}
return false
}
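// journalizeBcaches hands the current dirty block cache and block cache over
// to the journal server as delegates, then swaps in the journal-aware
// wrappers (with a fixed-size dirty cache for journal flushes) so that writes
// are staged through the journal.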
func (c *ConfigLocal) journalizeBcaches(jServer *JournalServer) error {
syncCache, ok := c.DirtyBlockCache().(*DirtyBlockCacheStandard)
if !ok {
return errors.Errorf("Dirty bcache unexpectedly type %T", syncCache)
}
jServer.delegateDirtyBlockCache = syncCache
// Make a dirty block cache specifically for the journal
// server. Since this doesn't rely directly on the network,
// there's no need for an adaptive sync buffer size, so we
// always set the min and max to the same thing.
maxSyncBufferSize := int64(ForcedBranchSquashBytesThresholdDefault)
log := c.MakeLogger("DBCJ")
journalCache := NewDirtyBlockCacheStandard(c.clock, log,
maxSyncBufferSize, maxSyncBufferSize, maxSyncBufferSize)
c.SetDirtyBlockCache(jServer.dirtyBlockCache(journalCache))
jServer.delegateBlockCache = c.BlockCache()
c.SetBlockCache(jServer.blockCache())
return nil
}
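// getQuotaUsage returns the eventually consistent quota usage tracker for
// chargedTo, creating a per-user or per-team tracker on first use. It takes
// the read lock for the fast path and re-checks under the write lock, so
// concurrent callers end up sharing a single tracker.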
func (c *ConfigLocal) getQuotaUsage(
chargedTo keybase1.UserOrTeamID) *EventuallyConsistentQuotaUsage {
c.lock.RLock()
quota, ok := c.quotaUsage[chargedTo]
if ok {
c.lock.RUnlock()
return quota
}
c.lock.RUnlock()
c.lock.Lock()
defer c.lock.Unlock()
quota, ok = c.quotaUsage[chargedTo]
if !ok {
if chargedTo.IsTeamOrSubteam() {
quota = NewEventuallyConsistentTeamQuotaUsage(
c, chargedTo.AsTeamOrBust(), "BDL")
} else {
quota = NewEventuallyConsistentQuotaUsage(c, "BDL")
}
c.quotaUsage[chargedTo] = quota
}
return quota
}
// EnableDiskLimiter fills in c.diskLimiter for use in journaling and
// disk caching. The limiter is wired up to the per-user/team quota
// usage trackers via c.getQuotaUsage.
func (c *ConfigLocal) EnableDiskLimiter(configRoot string) error {
if c.diskLimiter != nil {
return errors.New("c.diskLimiter is already non-nil")
}
params := makeDefaultBackpressureDiskLimiterParams(
configRoot, c.getQuotaUsage)
log := c.MakeLogger("")
log.Debug("Setting disk storage byte limit to %d and file limit to %d",
params.byteLimit, params.fileLimit)
os.MkdirAll(configRoot, 0700)
diskLimiter, err := newBackpressureDiskLimiter(log, params)
if err != nil {
return err
}
c.diskLimiter = diskLimiter
return nil
}
// EnableJournaling creates a JournalServer and attaches it to
// this config. journalRoot must be non-empty. Errors returned are
// non-fatal.
func (c *ConfigLocal) EnableJournaling(
ctx context.Context, journalRoot string,
bws TLFJournalBackgroundWorkStatus) error {
jServer, err := GetJournalServer(c)
if err == nil {
// Journaling shouldn't be enabled twice for the same
// config.
return errors.New("Trying to enable journaling twice")
}
// TODO: Sanity-check the root directory, e.g. create
// it if it doesn't exist, make sure that it doesn't
// point to /keybase itself, etc.
log := c.MakeLogger("")
branchListener := c.KBFSOps().(branchChangeListener)
flushListener := c.KBFSOps().(mdFlushListener)
// Make sure the journal root exists.
err = ioutil.MkdirAll(journalRoot, 0700)
if err != nil {
return err
}
jServer = makeJournalServer(c, log, journalRoot, c.BlockCache(),
c.DirtyBlockCache(), c.BlockServer(), c.MDOps(), branchListener,
flushListener)
c.SetBlockServer(jServer.blockServer())
c.SetMDOps(jServer.mdOps())
bcacheErr := c.journalizeBcaches(jServer)
enableErr := func() error {
// If this fails, then existing journals will be
// enabled when we receive the login notification.
session, err := c.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
err = jServer.EnableExistingJournals(
ctx, session.UID, session.VerifyingKey, bws)
if err != nil {
return err
}
return nil
}()
switch {
case bcacheErr != nil && enableErr != nil:
return errors.Errorf(
"Got errors %+v and %+v", bcacheErr, enableErr)
case bcacheErr != nil:
return bcacheErr
case enableErr != nil:
return enableErr
}
return nil
}
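// resetDiskBlockCacheLocked replaces c.diskBlockCache with a freshly opened
// wrapped cache rooted at c.storageRoot. The caller must hold c.lock.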
func (c *ConfigLocal) resetDiskBlockCacheLocked() error {
dbc, err := newDiskBlockCacheWrapped(c, c.storageRoot)
if err != nil {
return err
}
c.diskBlockCache = dbc
return nil
}
// MakeDiskBlockCacheIfNotExists implements the Config interface for
// ConfigLocal.
func (c *ConfigLocal) MakeDiskBlockCacheIfNotExists() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.diskBlockCache != nil {
return nil
}
switch c.diskCacheMode {
case DiskCacheModeOff:
return nil
case DiskCacheModeLocal:
return c.resetDiskBlockCacheLocked()
case DiskCacheModeRemote:
dbc, err := NewDiskBlockCacheRemote(c.kbCtx, c)
if err != nil {
return err
}
c.diskBlockCache = dbc
return nil
}
return nil
}
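// openConfigLevelDB opens the levelDB stored under the given name in the
// storage root; it holds small per-config state such as the synced-TLF list.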
func (c *ConfigLocal) openConfigLevelDB(configName string) (*levelDb, error) {
dbPath := filepath.Join(c.storageRoot, configName)
stor, err := storage.OpenFile(dbPath, false)
if err != nil {
return nil, err
}
return openLevelDB(stor)
}
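// loadSyncedTlfsLocked populates c.syncedTlfs from the config levelDB,
// deleting any stored keys that no longer parse as TLF IDs. The caller must
// hold c.lock.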
func (c *ConfigLocal) loadSyncedTlfsLocked() (err error) {
syncedTlfs := make(map[tlf.ID]bool)
if c.IsTestMode() {
c.syncedTlfs = syncedTlfs
return nil
}
if c.storageRoot == "" {
return errors.New("empty storageRoot specified for non-test run")
}
ldb, err := c.openConfigLevelDB(syncedTlfConfigFolderName)
if err != nil {
return err
}
defer ldb.Close()
iter := ldb.NewIterator(nil, nil)
defer iter.Release()
log := c.MakeLogger("")
// If there are any un-parseable IDs, delete them.
deleteBatch := new(leveldb.Batch)
for iter.Next() {
key := string(iter.Key())
tlfID, err := tlf.ParseID(key)
if err != nil {
log.Debug("deleting TLF %s from synced TLF list", key)
deleteBatch.Delete(iter.Key())
continue
}
syncedTlfs[tlfID] = true
}
c.syncedTlfs = syncedTlfs
return ldb.Write(deleteBatch, nil)
}
// IsSyncedTlf implements the isSyncedTlfGetter interface for ConfigLocal.
func (c *ConfigLocal) IsSyncedTlf(tlfID tlf.ID) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.syncedTlfs[tlfID]
}
// SetTlfSyncState implements the Config interface for ConfigLocal.
func (c *ConfigLocal) SetTlfSyncState(tlfID tlf.ID, isSynced bool) error {
c.lock.Lock()
defer c.lock.Unlock()
if isSynced {
diskCacheWrapped, ok := c.diskBlockCache.(*diskBlockCacheWrapped)
if !ok {
return errors.Errorf("invalid disk cache type to set TLF sync "+
"state: %T", c.diskBlockCache)
}
if !diskCacheWrapped.IsSyncCacheEnabled() {
return errors.New("sync block cache is not enabled")
}
}
if !c.IsTestMode() {
if c.storageRoot == "" {
return errors.New("empty storageRoot specified for non-test run")
}
ldb, err := c.openConfigLevelDB(syncedTlfConfigFolderName)
if err != nil {
return err
}
defer ldb.Close()
tlfBytes, err := tlfID.MarshalText()
if err != nil {
return err
}
if isSynced {
err = ldb.Put(tlfBytes, nil, nil)
} else {
err = ldb.Delete(tlfBytes, nil)
}
if err != nil {
return err
}
}
c.syncedTlfs[tlfID] = isSynced
<-c.bops.TogglePrefetcher(true)
return nil
}
// PrefetchStatus implements the Config interface for ConfigLocal.
func (c *ConfigLocal) PrefetchStatus(ctx context.Context, tlfID tlf.ID,
ptr BlockPointer) PrefetchStatus {
_, prefetchStatus, _, err := c.BlockCache().GetWithPrefetch(ptr)
if err != nil {
prefetchStatus = NoPrefetch
dbc := c.DiskBlockCache()
if dbc != nil {
_, _, prefetchStatus, err = dbc.Get(ctx, tlfID, ptr.ID)
if err != nil {
prefetchStatus = NoPrefetch
}
}
}
return prefetchStatus
}
// GetRekeyFSMLimiter implements the Config interface for ConfigLocal.
func (c *ConfigLocal) GetRekeyFSMLimiter() *OngoingWorkLimiter {
return c.rekeyFSMLimiter
}
// SetKBFSService sets the KBFSService for this ConfigLocal.
func (c *ConfigLocal) SetKBFSService(k *KBFSService) {
c.lock.Lock()
defer c.lock.Unlock()
if c.kbfsService != nil {
c.kbfsService.Shutdown()
}
c.kbfsService = k
}
| 1 | 18,513 | Why is there an "enable" in the param name? Couldn't it just be `overrideDebug`? Here are everywhere else. | keybase-kbfs | go |
@@ -1,4 +1,8 @@
class VideosController < ApplicationController
+ def index
+ @videos = Video.published.recently_published_first
+ end
+
def show
@video = Video.find(params[:id])
@offering = Offering.new(@video.watchable, current_user) | 1 | class VideosController < ApplicationController
def show
@video = Video.find(params[:id])
@offering = Offering.new(@video.watchable, current_user)
if @offering.user_has_license?
render "show_licensed"
elsif @video.preview_wistia_id.present?
render "show"
else
redirect_to @video.watchable
end
end
end
| 1 | 12,698 | `published.recently_published_first` reads a little oddly to me. Is this the same thing as `Video.published.ordered`? | thoughtbot-upcase | rb |
@@ -20,7 +20,11 @@ var createIntegrationPreprocessor = function(logger) {
// and add the test data to it
var htmlpath = file.originalPath.replace(extRegex, '.html');
var html = fs.readFileSync(htmlpath, 'utf-8');
- var test = JSON.parse(content);
+ try {
+ var test = JSON.parse(content);
+ } catch (e) {
+ throw new Error('Unable to parse content of ' + file.originalPath)
+ }
test.content = html;
var result = template.replace('{}; /*tests*/', JSON.stringify(test)); | 1 | var path = require('path');
var fs = require('fs');
var extRegex = /\.json$/;
var template = fs.readFileSync(path.join(__dirname, 'runner.js'), 'utf-8');
/**
* Turn each rule.json integration test JSON into a js file using
* the runner.js script. This allow us to load the JSON files in
* the karma config and they'll run as js files.
*/
var createIntegrationPreprocessor = function(logger) {
var log = logger.create('preprocessor.integration');
return function(content, file, done) {
try {
log.debug('Processing "%s".', file.originalPath);
file.path = file.originalPath.replace(extRegex, '.js');
// turn the json file into the a test file using the js test template
// and add the test data to it
var htmlpath = file.originalPath.replace(extRegex, '.html');
var html = fs.readFileSync(htmlpath, 'utf-8');
var test = JSON.parse(content);
test.content = html;
var result = template.replace('{}; /*tests*/', JSON.stringify(test));
done(null, result);
} catch (e) {
console.log('e:', e);
done(e, null);
}
};
};
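// Illustrative example (file names are hypothetical): given
// "integration/foo.json" with a sibling "integration/foo.html", the
// preprocessor reads both, stores the HTML on the parsed test object as
// `content`, and emits "integration/foo.js" -- a copy of runner.js with the
// test object substituted for the `{}; /*tests*/` placeholder.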
createIntegrationPreprocessor.$inject = ['logger'];
// PUBLISH DI MODULE
module.exports = {
'preprocessor:integration': ['factory', createIntegrationPreprocessor]
};
| 1 | 16,490 | Encountered this because I had a stray comma. Figured I'd tweak it a bit. | dequelabs-axe-core | js |
@@ -306,6 +306,13 @@ var _ = infrastructure.DatastoreDescribe("service loop prevention; with 2 nodes"
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
+ // Expect to see empty cali-cidr-block chains. (Allowing time for a Felix
+ // restart.) This ensures that the cali-cidr-block chain has been cleared
+ // before we try a test ping.
+ for _, felix := range felixes {
+ Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(BeEmpty())
+ }
+
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true) | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build fvtests
package fv_test
import (
"context"
"regexp"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/felix/fv/containers"
"github.com/projectcalico/felix/fv/infrastructure"
"github.com/projectcalico/felix/fv/utils"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/options"
)
var _ = infrastructure.DatastoreDescribe("service loop prevention; with 2 nodes", []apiconfig.DatastoreType{apiconfig.EtcdV3, apiconfig.Kubernetes}, func(getInfra infrastructure.InfraFactory) {
var (
infra infrastructure.DatastoreInfra
felixes []*infrastructure.Felix
client client.Interface
)
BeforeEach(func() {
infra = getInfra()
options := infrastructure.DefaultTopologyOptions()
options.IPIPEnabled = false
felixes, client = infrastructure.StartNNodeTopology(2, options, infra)
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
for _, felix := range felixes {
felix.Exec("iptables-save", "-c")
felix.Exec("ipset", "list")
felix.Exec("ip", "r")
felix.Exec("ip", "a")
}
}
for _, felix := range felixes {
felix.Stop()
}
if CurrentGinkgoTestDescription().Failed {
infra.DumpErrorData()
}
infra.Stop()
})
updateFelixConfig := func(deltaFn func(*api.FelixConfiguration)) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cfg, err := client.FelixConfigurations().Get(ctx, "default", options.GetOptions{})
if _, doesNotExist := err.(errors.ErrorResourceDoesNotExist); doesNotExist {
cfg = api.NewFelixConfiguration()
cfg.Name = "default"
deltaFn(cfg)
_, err = client.FelixConfigurations().Create(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
} else {
Expect(err).NotTo(HaveOccurred())
deltaFn(cfg)
_, err = client.FelixConfigurations().Update(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
updateBGPConfig := func(deltaFn func(*api.BGPConfiguration)) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cfg, err := client.BGPConfigurations().Get(ctx, "default", options.GetOptions{})
if _, doesNotExist := err.(errors.ErrorResourceDoesNotExist); doesNotExist {
cfg = api.NewBGPConfiguration()
cfg.Name = "default"
deltaFn(cfg)
_, err = client.BGPConfigurations().Create(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
} else {
Expect(err).NotTo(HaveOccurred())
deltaFn(cfg)
_, err = client.BGPConfigurations().Update(ctx, cfg, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
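	// getCIDRBlockRules returns a closure (suitable for polling with
	// Eventually) that runs the given save command (iptables-save or
	// ip6tables-save) on the Felix and returns just the rules appended to
	// the cali-cidr-block chain.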
getCIDRBlockRules := func(felix *infrastructure.Felix, saveCommand string) func() []string {
return func() []string {
out, err := felix.ExecOutput(saveCommand, "-t", "filter")
Expect(err).NotTo(HaveOccurred())
var cidrBlockLines []string
for _, line := range strings.Split(out, "\n") {
if strings.Contains(line, "-A cali-cidr-block") {
cidrBlockLines = append(cidrBlockLines, line)
}
}
return cidrBlockLines
}
}
tryRoutingLoop := func(expectLoop bool) {
// Run containers to model a default gateway, and an external client connecting to
// services within the cluster via that gateway.
externalGW := containers.Run("external-gw",
containers.RunOpts{AutoRemove: true},
"--privileged", // So that we can add routes inside the container.
utils.Config.BusyboxImage,
"/bin/sh", "-c", "sleep 1000")
defer externalGW.Stop()
externalClient := containers.Run("external-client",
containers.RunOpts{AutoRemove: true},
"--privileged", // So that we can add routes inside the container.
utils.Config.BusyboxImage,
"/bin/sh", "-c", "sleep 1000")
defer externalClient.Stop()
// Add a service CIDR route in those containers, similar to the routes that they
// would have via BGP per our service advertisement feature. (This should really be
// an ECMP route to both Felixes, but busybox's ip can't program ECMP routes, and a
// non-ECMP route is sufficient to demonstrate the looping issue.)
externalClient.Exec("ip", "r", "a", "10.96.0.0/17", "via", externalGW.IP)
externalGW.Exec("ip", "r", "a", "10.96.0.0/17", "via", felixes[0].IP)
// Configure the external gateway client to forward, in order to create the
// conditions for looping.
externalClient.Exec("sysctl", "-w", "net.ipv4.ip_forward=1")
externalGW.Exec("sysctl", "-w", "net.ipv4.ip_forward=1")
// Also tell Felix to route that CIDR to the external gateway.
felixes[0].ExecMayFail("ip", "r", "d", "10.96.0.0/17")
felixes[0].Exec("ip", "r", "a", "10.96.0.0/17", "via", externalGW.IP)
felixes[0].Exec("iptables", "-P", "FORWARD", "ACCEPT")
// Start monitoring all packets, on the Felix, to or from a specific (but
// unused) service IP.
tcpdumpF := felixes[0].AttachTCPDump("eth0")
tcpdumpF.AddMatcher("serviceIPPackets", regexp.MustCompile("10\\.96\\.0\\.19"))
tcpdumpF.Start()
defer tcpdumpF.Stop()
// Send a single ping from the external client to the unused service IP.
err := externalClient.ExecMayFail("ping", "-c", "1", "-W", "1", "10.96.0.19")
Expect(err).To(HaveOccurred())
countServiceIPPackets := func() int {
// Return the number of packets observed to/from 10.96.0.19.
return tcpdumpF.MatchCount("serviceIPPackets")
}
if expectLoop {
// Tcpdump should see more than 2 packets, because of looping. Note: 2
// packets would be Felix receiving the ping and then forwarding it out
// again. I want to check here that it's also looped around again by the
// gateway, resulting in MORE THAN 2 packets.
Eventually(countServiceIPPackets).Should(BeNumerically(">", 2))
} else {
// Tcpdump should see just 1 packet, the request, with no response (because
// we DROP) and no looping.
Eventually(countServiceIPPackets).Should(BeNumerically("==", 1))
}
}
It("programs iptables as expected to block service routing loops", func() {
By("configuring service cluster IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = []api.ServiceClusterIPBlock{
{
CIDR: "10.96.0.0/17",
},
{
CIDR: "fd5f::/119",
},
}
})
// Default ServiceLoopPrevention is Drop, so expect to see rules in cali-cidr-block
// chains with DROP. (Felix handles BGPConfiguration without restarting, so this
// should be quick.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j DROP"),
))
}
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Reject")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Reject"
})
// Expect to see rules in cali-cidr-block chains with REJECT. (Allowing time for a
// Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j REJECT"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j REJECT"),
))
}
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
// Expect to see empty cali-cidr-block chains. (Allowing time for a Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(BeEmpty())
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(BeEmpty())
}
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("configuring ServiceLoopPrevention=Drop")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Drop"
})
// Expect to see rules in cali-cidr-block chains with DROP. (Allowing time for a
// Felix restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 10\\.96\\.0\\.0/17 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save"), "8s", "0.5s").Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5f::/119 .* -j DROP"),
))
}
By("updating the service CIDRs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = []api.ServiceClusterIPBlock{
{
CIDR: "1.1.0.0/16",
},
{
CIDR: "fd5e::/119",
},
}
})
// Expect to see rules in cali-cidr-block chains with DROP and the updated CIDRs.
// (BGPConfiguration change is handled without needing a restart.)
for _, felix := range felixes {
Eventually(getCIDRBlockRules(felix, "iptables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d 1\\.1\\.0\\.0/16 .* -j DROP"),
))
Eventually(getCIDRBlockRules(felix, "ip6tables-save")).Should(ConsistOf(
MatchRegexp("-A cali-cidr-block -d fd5e::/119 .* -j DROP"),
))
}
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceClusterIPs = nil
})
})
It("ServiceExternalIPs also blocks service routing loop", func() {
By("configuring service external IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceExternalIPs = []api.ServiceExternalIPBlock{
{
CIDR: "10.96.0.0/17",
},
}
})
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceExternalIPs = nil
})
})
It("ServiceLoadBalancerIPs also blocks service routing loop", func() {
By("configuring service LB IPs")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceLoadBalancerIPs = []api.ServiceLoadBalancerIPBlock{
{
CIDR: "10.96.0.0/17",
},
}
})
By("test that we don't get a routing loop")
tryRoutingLoop(false)
By("configuring ServiceLoopPrevention=Disabled")
updateFelixConfig(func(cfg *api.FelixConfiguration) {
cfg.Spec.ServiceLoopPrevention = "Disabled"
})
By("test that we DO get a routing loop")
// (In order to test that the tryRoutingLoop setup is genuine.)
tryRoutingLoop(true)
By("resetting BGP config")
updateBGPConfig(func(cfg *api.BGPConfiguration) {
cfg.Spec.ServiceLoadBalancerIPs = nil
})
})
})
| 1 | 19,564 | qq: Should this include the iptables6-save sim. to the inverse checks above? | projectcalico-felix | c |
@@ -195,8 +195,16 @@ func (cfg *Config) Merge(rhs Config) *Config {
for i := 0; i < left.NumField(); i++ {
leftField := left.Field(i)
- if utils.ZeroOrNil(leftField.Interface()) {
- leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
+ switch leftField.Interface().(type) {
+ case BooleanDefaultFalse, BooleanDefaultTrue:
+ str, _ := json.Marshal(reflect.ValueOf(leftField.Interface()).Interface())
+ if string(str) == "null" {
+ leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
+ }
+ default:
+ if utils.ZeroOrNil(leftField.Interface()) {
+ leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
+ }
}
}
| 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
"time"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/cihub/seelog"
)
const (
// http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DockerReservedPort = 2375
DockerReservedSSLPort = 2376
// DockerTagSeparator is the charactor used to separate names and tag in docker
DockerTagSeparator = ":"
// DefaultDockerTag is the default tag used by docker
DefaultDockerTag = "latest"
SSHPort = 22
// AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent.
AgentIntrospectionPort = 51678
// AgentCredentialsPort is used to serve the credentials for tasks.
AgentCredentialsPort = 51679
// AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server
AgentPrometheusExpositionPort = 51680
// defaultConfigFileName is the default (json-formatted) config file
defaultConfigFileName = "/etc/ecs_container_agent/config.json"
// DefaultClusterName is the name of the default cluster.
DefaultClusterName = "default"
// DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to
// clean up task's containers.
DefaultTaskCleanupWaitDuration = 3 * time.Hour
// DefaultPollingMetricsWaitDuration specifies the default value for polling metrics wait duration
// This is only used when PollMetrics is set to true
DefaultPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval / 2
// defaultDockerStopTimeout specifies the value for container stop timeout duration
defaultDockerStopTimeout = 30 * time.Second
// DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to
// remove the images pulled by agent.
DefaultImageCleanupTimeInterval = 30 * time.Minute
// DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs
// image cleanup.
DefaultNumImagesToDeletePerCycle = 5
// DefaultNumNonECSContainersToDeletePerCycle specifies the default number of nonecs containers to delete when agent performs
// nonecs containers cleanup.
DefaultNumNonECSContainersToDeletePerCycle = 5
// DefaultImageDeletionAge specifies the default value for minimum amount of elapsed time after an image
// has been pulled before it can be deleted.
DefaultImageDeletionAge = 1 * time.Hour
// DefaultNonECSImageDeletionAge specifies the default value for minimum amount of elapsed time after an image
// has been created before it can be deleted
DefaultNonECSImageDeletionAge = 1 * time.Hour
// minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up
// a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field.
minimumTaskCleanupWaitDuration = 1 * time.Minute
// minimumImagePullInactivityTimeout specifies the minimum amount of time for that an image can be
// 'stuck' in the pull / unpack step. Very small values are unsafe and lead to high failure rate.
minimumImagePullInactivityTimeout = 1 * time.Minute
// minimumPollingMetricsWaitDuration specifies the minimum duration to wait before polling for new stats
// from docker. This is only used when PollMetrics is set to true
minimumPollingMetricsWaitDuration = 5 * time.Second
// maximumPollingMetricsWaitDuration specifies the maximum duration to wait before polling for new stats
// from docker. This is only used when PollMetrics is set to true
maximumPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval
// minimumDockerStopTimeout specifies the minimum value for docker StopContainer API
minimumDockerStopTimeout = 1 * time.Second
// minimumImageCleanupInterval specifies the minimum time for agent to wait before performing
// image cleanup.
minimumImageCleanupInterval = 10 * time.Minute
// minimumNumImagesToDeletePerCycle specifies the minimum number of images that to be deleted when
// performing image cleanup.
minimumNumImagesToDeletePerCycle = 1
// defaultCNIPluginsPath is the default path where cni binaries are located
defaultCNIPluginsPath = "/amazon-ecs-cni-plugins"
// DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required
DefaultMinSupportedCNIVersion = "0.3.0"
	// pauseContainerTarballPath is the path to the pause container tarball
pauseContainerTarballPath = "/images/amazon-ecs-pause.tar"
	// DefaultTaskMetadataSteadyStateRate is set as 40. This was arrived at from our
	// benchmarking results, where the task endpoint can handle 4000 rps effectively.
	// At that rate, each of 100 containers will be able to send out 40 rps.
DefaultTaskMetadataSteadyStateRate = 40
// DefaultTaskMetadataBurstRate is set to handle 60 burst requests at once
DefaultTaskMetadataBurstRate = 60
	// Known cached image names
CachedImageNamePauseContainer = "amazon/amazon-ecs-pause:0.1.0"
CachedImageNameAgentContainer = "amazon/amazon-ecs-agent:latest"
// DefaultNvidiaRuntime is the name of the runtime to pass Nvidia GPUs to containers
DefaultNvidiaRuntime = "nvidia"
// defaultCgroupCPUPeriod is set to 100 ms to set isCFS period and quota for task limits
defaultCgroupCPUPeriod = 100 * time.Millisecond
maximumCgroupCPUPeriod = 100 * time.Millisecond
minimumCgroupCPUPeriod = 8 * time.Millisecond
// DefaultContainerMetricsPublishInterval is the default interval that we publish
// metrics to the ECS telemetry backend (TACS)
DefaultContainerMetricsPublishInterval = 20 * time.Second
)
const (
// ImagePullDefaultBehavior specifies the behavior that if an image pull API call fails,
// agent tries to start from the Docker image cache anyway, assuming that the image has not changed.
ImagePullDefaultBehavior ImagePullBehaviorType = iota
// ImagePullAlwaysBehavior specifies the behavior that if an image pull API call fails,
// the task fails instead of using cached image.
ImagePullAlwaysBehavior
// ImagePullOnceBehavior specifies the behavior that agent will only attempt to pull
// the same image once, once an image is pulled, local image cache will be used
// for all the containers.
ImagePullOnceBehavior
// ImagePullPreferCachedBehavior specifies the behavior that agent will only attempt to pull
// the image if there is no cached image.
ImagePullPreferCachedBehavior
)
const (
// When ContainerInstancePropagateTagsFromNoneType is specified, no DescribeTags
// API call will be made.
ContainerInstancePropagateTagsFromNoneType ContainerInstancePropagateTagsFromType = iota
// When ContainerInstancePropagateTagsFromEC2InstanceType is specified, agent will
// make DescribeTags API call to get tags remotely.
ContainerInstancePropagateTagsFromEC2InstanceType
)
var (
// DefaultPauseContainerImageName is the name of the pause container image. The linker's
// load flags are used to populate this value from the Makefile
DefaultPauseContainerImageName = ""
// DefaultPauseContainerTag is the tag for the pause container image. The linker's load
// flags are used to populate this value from the Makefile
DefaultPauseContainerTag = ""
)
// Merge merges two configs, preferring the values on the left. Any nil or
// zero values in the left config are overridden by the corresponding values
// from the right.
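// For example (illustrative, using the merge order applied in NewConfig), an
// explicitly set environment value survives every later merge, while zero
// fields are progressively filled in:
//
//	cfg := &envConfig          // e.g. Cluster == "from-env"
//	cfg.Merge(fcfg)            // Cluster stays "from-env"; zero fields taken from the file
//	cfg.Merge(DefaultConfig()) // remaining zero fields fall back to the defaults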
func (cfg *Config) Merge(rhs Config) *Config {
left := reflect.ValueOf(cfg).Elem()
right := reflect.ValueOf(&rhs).Elem()
for i := 0; i < left.NumField(); i++ {
leftField := left.Field(i)
if utils.ZeroOrNil(leftField.Interface()) {
leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
}
}
return cfg //make it chainable
}
// NewConfig returns a config struct created by merging environment variables,
// a config file, and EC2 Metadata info.
// The 'config' struct it returns can be used, even if an error is returned. An
// error is returned, however, if the config is incomplete in some way that is
// considered fatal.
func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) {
var errs []error
envConfig, err := environmentConfig() //Environment overrides all else
if err != nil {
errs = append(errs, err)
}
config := &envConfig
if config.complete() {
// No need to do file / network IO
return config, nil
}
fcfg, err := fileConfig()
if err != nil {
errs = append(errs, err)
}
config.Merge(fcfg)
config.Merge(userDataConfig(ec2client))
if config.AWSRegion == "" {
if config.NoIID {
// get it from AWS SDK if we don't have instance identity document
awsRegion, err := ec2client.Region()
if err != nil {
errs = append(errs, err)
}
config.AWSRegion = awsRegion
} else {
// Get it from metadata only if we need to (network io)
config.Merge(ec2MetadataConfig(ec2client))
}
}
return config, config.mergeDefaultConfig(errs)
}
func (config *Config) mergeDefaultConfig(errs []error) error {
config.trimWhitespace()
config.Merge(DefaultConfig())
err := config.validateAndOverrideBounds()
if err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
return apierrors.NewMultiError(errs...)
}
return nil
}
// trimWhitespace trims whitespace from all string cfg values with the
// `trim` tag
func (cfg *Config) trimWhitespace() {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if !cfgField.CanInterface() {
continue
}
trimTag := cfgStructField.Field(i).Tag.Get("trim")
if len(trimTag) == 0 {
continue
}
if cfgField.Kind() != reflect.String {
seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i)
continue
}
str := cfgField.Interface().(string)
cfgField.SetString(strings.TrimSpace(str))
}
}
// validateAndOverrideBounds performs validation over members of the Config struct
// and check the value against the minimum required value.
func (cfg *Config) validateAndOverrideBounds() error {
err := cfg.checkMissingAndDepreciated()
if err != nil {
return err
}
if cfg.DockerStopTimeout < minimumDockerStopTimeout {
return fmt.Errorf("config: invalid value for docker container stop timeout: %v", cfg.DockerStopTimeout.String())
}
if cfg.ContainerStartTimeout < minimumContainerStartTimeout {
return fmt.Errorf("config: invalid value for docker container start timeout: %v", cfg.ContainerStartTimeout.String())
}
var badDrivers []string
for _, driver := range cfg.AvailableLoggingDrivers {
_, ok := dockerclient.LoggingDriverMinimumVersion[driver]
if !ok {
badDrivers = append(badDrivers, string(driver))
}
}
if len(badDrivers) > 0 {
return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", "))
}
// If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration,
// print a warning and override it
if cfg.TaskCleanupWaitDuration < minimumTaskCleanupWaitDuration {
seelog.Warnf("Invalid value for ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), cfg.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration)
cfg.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration
}
if cfg.ImagePullInactivityTimeout < minimumImagePullInactivityTimeout {
seelog.Warnf("Invalid value for image pull inactivity timeout duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", defaultImagePullInactivityTimeout.String(), cfg.ImagePullInactivityTimeout, minimumImagePullInactivityTimeout)
cfg.ImagePullInactivityTimeout = defaultImagePullInactivityTimeout
}
if cfg.ImageCleanupInterval < minimumImageCleanupInterval {
seelog.Warnf("Invalid value for ECS_IMAGE_CLEANUP_INTERVAL, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), cfg.ImageCleanupInterval, minimumImageCleanupInterval)
cfg.ImageCleanupInterval = DefaultImageCleanupTimeInterval
}
if cfg.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle {
seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overridden with the default value: %d. Parsed value: %d, minimum value: %d.", DefaultImageDeletionAge, cfg.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle)
cfg.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle
}
if cfg.TaskMetadataSteadyStateRate <= 0 || cfg.TaskMetadataBurstRate <= 0 {
seelog.Warnf("Invalid values for rate limits, will be overridden with default values: %d,%d.", DefaultTaskMetadataSteadyStateRate, DefaultTaskMetadataBurstRate)
cfg.TaskMetadataSteadyStateRate = DefaultTaskMetadataSteadyStateRate
cfg.TaskMetadataBurstRate = DefaultTaskMetadataBurstRate
}
// check the PollMetrics specific configurations
cfg.pollMetricsOverrides()
cfg.platformOverrides()
return nil
}
func (cfg *Config) pollMetricsOverrides() {
if cfg.PollMetrics {
if cfg.PollingMetricsWaitDuration < minimumPollingMetricsWaitDuration {
seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is less than the minimum of %s. Setting polling interval to minimum.",
cfg.PollingMetricsWaitDuration, minimumPollingMetricsWaitDuration)
cfg.PollingMetricsWaitDuration = minimumPollingMetricsWaitDuration
}
if cfg.PollingMetricsWaitDuration > maximumPollingMetricsWaitDuration {
seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is greater than the maximum of %s. Setting polling interval to maximum.",
cfg.PollingMetricsWaitDuration, maximumPollingMetricsWaitDuration)
cfg.PollingMetricsWaitDuration = maximumPollingMetricsWaitDuration
}
}
}
// checkMissingAndDepreciated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal,
// warn. Fatal will result in an error being returned, warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDepreciated() error {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
fatalFields := []string{}
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if utils.ZeroOrNil(cfgField.Interface()) {
missingTag := cfgStructField.Field(i).Tag.Get("missing")
if len(missingTag) == 0 {
continue
}
switch missingTag {
case "warn":
seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
case "fatal":
seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
default:
seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag)
}
} else {
// present
deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
if len(deprecatedTag) == 0 {
continue
}
seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag)
}
}
if len(fatalFields) > 0 {
return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
}
return nil
}
// complete returns true if all fields of the config are populated / nonzero
func (cfg *Config) complete() bool {
cfgElem := reflect.ValueOf(cfg).Elem()
for i := 0; i < cfgElem.NumField(); i++ {
if utils.ZeroOrNil(cfgElem.Field(i).Interface()) {
return false
}
}
return true
}
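// fileConfig reads configuration from the JSON file named by
// ECS_AGENT_CONFIG_FILE_PATH (defaulting to defaultConfigFileName). A missing
// or empty file is treated as an empty config rather than an error.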
func fileConfig() (Config, error) {
fileName := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), defaultConfigFileName)
cfg := Config{}
file, err := os.Open(fileName)
if err != nil {
return cfg, nil
}
data, err := ioutil.ReadAll(file)
if err != nil {
seelog.Errorf("Unable to read cfg file, err %v", err)
return cfg, err
}
if strings.TrimSpace(string(data)) == "" {
// empty file, not an error
return cfg, nil
}
err = json.Unmarshal(data, &cfg)
if err != nil {
seelog.Criticalf("Error reading cfg json data, err %v", err)
return cfg, err
}
// Handle any deprecated keys correctly here
if utils.ZeroOrNil(cfg.Cluster) && !utils.ZeroOrNil(cfg.ClusterArn) {
cfg.Cluster = cfg.ClusterArn
}
return cfg, nil
}
// userDataConfig reads configuration JSON from instance's userdata. It doesn't
// return any error as it's entirely optional to configure the ECS agent using
// this method.
// Example:
// {"ECSAgentConfiguration":{"Cluster":"default"}}
func userDataConfig(ec2Client ec2.EC2MetadataClient) Config {
type userDataParser struct {
Config Config `json:"ECSAgentConfiguration"`
}
parsedUserData := userDataParser{
Config: Config{},
}
userData, err := ec2Client.GetUserData()
if err != nil {
seelog.Warnf("Unable to fetch user data: %v", err)
// Unable to read userdata from instance metadata. Just
// return early
return parsedUserData.Config
}
// In the future, if we want to support base64 encoded config,
// we'd need to add logic to decode the string here.
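// A minimal sketch of what that could look like (hypothetical, not wired in;
// it would also need the encoding/base64 import):
//
//	if decoded, decodeErr := base64.StdEncoding.DecodeString(userData); decodeErr == nil {
//		userData = string(decoded)
//	}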
err = json.Unmarshal([]byte(userData), &parsedUserData)
if err != nil {
seelog.Debugf("Non-json user data, skip merging into agent config: %v", err)
// Unable to parse userdata as a valid JSON. Return the
// empty config
return Config{}
}
return parsedUserData.Config
}
// environmentConfig reads the given configs from the environment and attempts
// to convert them to the given type
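//
// For example (illustrative values only, not defaults):
//
//	ECS_CLUSTER=my-cluster ECS_DISABLE_METRICS=true
//
// would populate Cluster and DisableMetrics in the returned Config.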
func environmentConfig() (Config, error) {
dataDir := os.Getenv("ECS_DATADIR")
steadyStateRate, burstRate := parseTaskMetadataThrottles()
var errs []error
instanceAttributes, errs := parseInstanceAttributes(errs)
containerInstanceTags, errs := parseContainerInstanceTags(errs)
additionalLocalRoutes, errs := parseAdditionalLocalRoutes(errs)
var err error
if len(errs) > 0 {
err = apierrors.NewMultiError(errs...)
}
return Config{
Cluster: os.Getenv("ECS_CLUSTER"),
APIEndpoint: os.Getenv("ECS_BACKEND_HOST"),
AWSRegion: os.Getenv("AWS_DEFAULT_REGION"),
DockerEndpoint: os.Getenv("DOCKER_HOST"),
ReservedPorts: parseReservedPorts("ECS_RESERVED_PORTS"),
ReservedPortsUDP: parseReservedPorts("ECS_RESERVED_PORTS_UDP"),
DataDir: dataDir,
Checkpoint: parseCheckpoint(dataDir),
EngineAuthType: os.Getenv("ECS_ENGINE_AUTH_TYPE"),
EngineAuthData: NewSensitiveRawMessage([]byte(os.Getenv("ECS_ENGINE_AUTH_DATA"))),
UpdatesEnabled: utils.ParseBool(os.Getenv("ECS_UPDATES_ENABLED"), false),
UpdateDownloadDir: os.Getenv("ECS_UPDATE_DOWNLOAD_DIR"),
DisableMetrics: utils.ParseBool(os.Getenv("ECS_DISABLE_METRICS"), false),
ReservedMemory: parseEnvVariableUint16("ECS_RESERVED_MEMORY"),
AvailableLoggingDrivers: parseAvailableLoggingDrivers(),
PrivilegedDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_PRIVILEGED"), false),
SELinuxCapable: utils.ParseBool(os.Getenv("ECS_SELINUX_CAPABLE"), false),
AppArmorCapable: utils.ParseBool(os.Getenv("ECS_APPARMOR_CAPABLE"), false),
TaskCleanupWaitDuration: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
TaskENIEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_ENI"), false),
TaskIAMRoleEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE"), false),
DeleteNonECSImagesEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP"), false),
TaskCPUMemLimit: parseTaskCPUMemLimitEnabled(),
DockerStopTimeout: parseDockerStopTimeout(),
ContainerStartTimeout: parseContainerStartTimeout(),
ImagePullInactivityTimeout: parseImagePullInactivityTimeout(),
CredentialsAuditLogFile: os.Getenv("ECS_AUDIT_LOGFILE"),
CredentialsAuditLogDisabled: utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false),
TaskIAMRoleEnabledForNetworkHost: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false),
ImageCleanupDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_IMAGE_CLEANUP"), false),
MinimumImageDeletionAge: parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE"),
NonECSMinimumImageDeletionAge: parseEnvVariableDuration("NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE"),
ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"),
NumImagesToDeletePerCycle: parseNumImagesToDeletePerCycle(),
NumNonECSContainersToDeletePerCycle: parseNumNonECSContainersToDeletePerCycle(),
ImagePullBehavior: parseImagePullBehavior(),
ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE"),
InstanceAttributes: instanceAttributes,
CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"),
AWSVPCBlockInstanceMetdata: utils.ParseBool(os.Getenv("ECS_AWSVPC_BLOCK_IMDS"), false),
AWSVPCAdditionalLocalRoutes: additionalLocalRoutes,
ContainerMetadataEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_CONTAINER_METADATA"), false),
DataDirOnHost: os.Getenv("ECS_HOST_DATA_DIR"),
OverrideAWSLogsExecutionRole: utils.ParseBool(os.Getenv("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE"), false),
CgroupPath: os.Getenv("ECS_CGROUP_PATH"),
TaskMetadataSteadyStateRate: steadyStateRate,
TaskMetadataBurstRate: burstRate,
SharedVolumeMatchFullConfig: utils.ParseBool(os.Getenv("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG"), false),
ContainerInstanceTags: containerInstanceTags,
ContainerInstancePropagateTagsFrom: parseContainerInstancePropagateTagsFrom(),
PollMetrics: utils.ParseBool(os.Getenv("ECS_POLL_METRICS"), true),
PollingMetricsWaitDuration: parseEnvVariableDuration("ECS_POLLING_METRICS_WAIT_DURATION"),
DisableDockerHealthCheck: utils.ParseBool(os.Getenv("ECS_DISABLE_DOCKER_HEALTH_CHECK"), false),
GPUSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_GPU_SUPPORT"), false),
InferentiaSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_INF_SUPPORT"), false),
NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"),
TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false),
CgroupCPUPeriod: parseCgroupCPUPeriod(),
SpotInstanceDrainingEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_SPOT_INSTANCE_DRAINING"), false),
GMSACapable: parseGMSACapability(),
VolumePluginCapabilities: parseVolumePluginCapabilities(),
}, err
}
func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config {
iid, err := ec2client.InstanceIdentityDocument()
if err != nil {
seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error())
return Config{}
}
return Config{AWSRegion: iid.Region}
}
// String returns a lossy string representation of the config suitable for human readable display.
// Consequently, it *should not* return any sensitive information.
func (cfg *Config) String() string {
return fmt.Sprintf(
"Cluster: %v, "+
" Region: %v, "+
" DataDir: %v,"+
" Checkpoint: %v, "+
"AuthType: %v, "+
"UpdatesEnabled: %v, "+
"DisableMetrics: %v, "+
"PollMetrics: %v, "+
"PollingMetricsWaitDuration: %v, "+
"ReservedMem: %v, "+
"TaskCleanupWaitDuration: %v, "+
"DockerStopTimeout: %v, "+
"ContainerStartTimeout: %v, "+
"TaskCPUMemLimit: %v, "+
"%s",
cfg.Cluster,
cfg.AWSRegion,
cfg.DataDir,
cfg.Checkpoint,
cfg.EngineAuthType,
cfg.UpdatesEnabled,
cfg.DisableMetrics,
cfg.PollMetrics,
cfg.PollingMetricsWaitDuration,
cfg.ReservedMemory,
cfg.TaskCleanupWaitDuration,
cfg.DockerStopTimeout,
cfg.ContainerStartTimeout,
cfg.TaskCPUMemLimit,
cfg.platformString(),
)
}
| 1 | 24,719 | I'm open to explicitly checking the NotSet value here, but the JSON logic already handles it, so I kept it this way. | aws-amazon-ecs-agent | go |
@@ -27,6 +27,7 @@ extern "C" {
#include "ScriptingEnvironment.h"
#include "../typedefs.h"
#include "../Util/OpenMPWrapper.h"
+#include "../Util/Lua.h"
ScriptingEnvironment::ScriptingEnvironment() {}
ScriptingEnvironment::ScriptingEnvironment(const char * fileName) { | 1 | /*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
extern "C" {
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
}
#include "ScriptingEnvironment.h"
#include "../typedefs.h"
#include "../Util/OpenMPWrapper.h"
ScriptingEnvironment::ScriptingEnvironment() {}
ScriptingEnvironment::ScriptingEnvironment(const char * fileName) {
INFO("Using script " << fileName);
// Create a new lua state
for(int i = 0; i < omp_get_max_threads(); ++i)
luaStateVector.push_back(luaL_newstate());
// Connect LuaBind to this lua state for all threads
#pragma omp parallel
{
lua_State * myLuaState = getLuaStateForThreadID(omp_get_thread_num());
luabind::open(myLuaState);
// open utility libraries, including the string library
luaL_openlibs(myLuaState);
// Add our function to the state's global scope
luabind::module(myLuaState) [
luabind::def("print", LUA_print<std::string>),
luabind::def("parseMaxspeed", parseMaxspeed),
luabind::def("durationIsValid", durationIsValid),
luabind::def("parseDuration", parseDuration)
];
//#pragma omp critical
// {
// if(0 != luaL_dostring(
// myLuaState,
// "print('Initializing LUA engine')\n"
// )) {
// ERR(lua_tostring(myLuaState,-1)<< " occurred in scripting block");
// }
// }
luabind::module(myLuaState) [
luabind::class_<HashTable<std::string, std::string> >("keyVals")
.def("Add", &HashTable<std::string, std::string>::Add)
.def("Find", &HashTable<std::string, std::string>::Find)
.def("Holds", &HashTable<std::string, std::string>::Holds)
];
luabind::module(myLuaState) [
luabind::class_<ImportNode>("Node")
.def(luabind::constructor<>())
.def_readwrite("lat", &ImportNode::lat)
.def_readwrite("lon", &ImportNode::lon)
.def_readwrite("id", &ImportNode::id)
.def_readwrite("bollard", &ImportNode::bollard)
.def_readwrite("traffic_light", &ImportNode::trafficLight)
.def_readwrite("tags", &ImportNode::keyVals)
];
luabind::module(myLuaState) [
luabind::class_<_Way>("Way")
.def(luabind::constructor<>())
.def_readwrite("name", &_Way::name)
.def_readwrite("speed", &_Way::speed)
.def_readwrite("type", &_Way::type)
.def_readwrite("access", &_Way::access)
.def_readwrite("roundabout", &_Way::roundabout)
.def_readwrite("is_duration_set", &_Way::isDurationSet)
.def_readwrite("is_access_restricted", &_Way::isAccessRestricted)
.def_readwrite("ignore_in_grid", &_Way::ignoreInGrid)
.def_readwrite("tags", &_Way::keyVals)
.def_readwrite("direction", &_Way::direction)
.enum_("constants")
[
luabind::value("notSure", 0),
luabind::value("oneway", 1),
luabind::value("bidirectional", 2),
luabind::value("opposite", 3)
]
];
// Now call our function in a lua script
//#pragma omp critical
// {
// INFO("Parsing speedprofile from " << fileName );
// }
if(0 != luaL_dofile(myLuaState, fileName) ) {
ERR(lua_tostring(myLuaState,-1)<< " occurred in scripting block");
}
}
}
ScriptingEnvironment::~ScriptingEnvironment() {
for(unsigned i = 0; i < luaStateVector.size(); ++i) {
// release the per-thread Lua state
lua_close(luaStateVector[i]);
}
}
lua_State * ScriptingEnvironment::getLuaStateForThreadID(const int id) {
return luaStateVector[id];
}
| 1 | 12,289 | The naming of the include file appears to be unfortunate. | Project-OSRM-osrm-backend | cpp |
@@ -1018,7 +1018,8 @@ class BarPlot(BarsMixin, ColorbarPlot, LegendPlot):
if self.show_legend and any(len(l) for l in labels) and (sdim or not self.multi_level):
leg_spec = self.legend_specs[self.legend_position]
if self.legend_cols: leg_spec['ncol'] = self.legend_cols
- axis.legend(title=title, **leg_spec)
+ self.legend_opts.update(**leg_spec)
+ axis.legend(title=title, **self.legend_opts)
return bars, xticks, ax_dims
| 1 | from __future__ import absolute_import, division, unicode_literals
import param
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from matplotlib.collections import LineCollection
from matplotlib.dates import DateFormatter, date2num
from ...core.dimension import Dimension, dimension_name
from ...core.options import Store, abbreviated_exception
from ...core.util import (
match_spec, basestring, isfinite, dt_to_int, dt64_to_dt, search_indices,
unique_array, isscalar, isdatetime
)
from ...element import Raster, HeatMap
from ...operation import interpolate_curve
from ...util.transform import dim
from ..plot import PlotSelector
from ..mixins import AreaMixin, BarsMixin, SpikesMixin
from ..util import compute_sizes, get_sideplot_ranges, get_min_distance
from .element import ElementPlot, ColorbarPlot, LegendPlot
from .path import PathPlot
from .plot import AdjoinedPlot, mpl_rc_context
from .util import mpl_version
class ChartPlot(ElementPlot):
"""
Baseclass to plot Chart elements.
"""
class CurvePlot(ChartPlot):
"""
CurvePlot can plot Curve and ViewMaps of Curve, which can be
displayed as a single frame or animation. Axes, titles and legends
are automatically generated from dim_info.
If the dimension is set to cyclic in the dim_info it will rotate
the curve so that minimum y values are at the minimum x value to
make the plots easier to interpret.
"""
autotick = param.Boolean(default=False, doc="""
Whether to let matplotlib automatically compute tick marks
or to allow the user to control tick marks.""")
interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
'steps-pre', 'steps-post'],
default='linear', doc="""
Defines how the samples of the Curve are interpolated,
default is 'linear', other options include 'steps-mid',
'steps-pre' and 'steps-post'.""")
relative_labels = param.Boolean(default=False, doc="""
If plotted quantity is cyclic and center_cyclic is enabled,
will compute tick labels relative to the center.""")
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
show_grid = param.Boolean(default=False, doc="""
Enable axis grid.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker', 'ms']
_nonvectorized_styles = style_opts
_plot_methods = dict(single='plot')
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
if 'steps' in self.interpolation:
element = interpolate_curve(element, interpolation=self.interpolation)
xs = element.dimension_values(0)
ys = element.dimension_values(1)
dims = element.dimensions()
if isdatetime(xs):
dimtype = element.get_dimension_type(0)
dt_format = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
dims[0] = dims[0].clone(value_format=DateFormatter(dt_format))
coords = (ys, xs) if self.invert_axes else (xs, ys)
return coords, style, {'dimensions': dims}
def init_artists(self, ax, plot_args, plot_kwargs):
xs, ys = plot_args
if isdatetime(xs):
artist = ax.plot_date(xs, ys, '-', **plot_kwargs)[0]
else:
artist = ax.plot(xs, ys, **plot_kwargs)[0]
return {'artist': artist}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
artist.set_xdata(xs)
artist.set_ydata(ys)
return axis_kwargs
class ErrorPlot(ColorbarPlot):
"""
ErrorPlot plots the ErrorBar Element type and supporting
both horizontal and vertical error bars via the 'horizontal'
plot option.
"""
style_opts = ['edgecolor', 'elinewidth', 'capsize', 'capthick',
'barsabove', 'lolims', 'uplims', 'xlolims',
'errorevery', 'xuplims', 'alpha', 'linestyle',
'linewidth', 'markeredgecolor', 'markeredgewidth',
'markerfacecolor', 'markersize', 'solid_capstyle',
'solid_joinstyle', 'dashes', 'color']
_plot_methods = dict(single='errorbar')
def init_artists(self, ax, plot_data, plot_kwargs):
handles = ax.errorbar(*plot_data, **plot_kwargs)
bottoms, tops = None, None
if mpl_version >= str('2.0'):
_, caps, verts = handles
if caps:
bottoms, tops = caps
else:
_, (bottoms, tops), verts = handles
return {'bottoms': bottoms, 'tops': tops, 'verts': verts[0], 'artist': verts[0]}
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
color = style.get('color')
if isinstance(color, np.ndarray):
style['ecolor'] = color
if 'edgecolor' in style:
style['ecolor'] = style.pop('edgecolor')
c = style.get('c')
if isinstance(c, np.ndarray):
with abbreviated_exception():
raise ValueError('Mapping a continuous or categorical '
'dimension to a color on a ErrorBarPlot '
'is not supported by the {backend} backend. '
'To map a dimension to a color supply '
'an explicit list of rgba colors.'.format(
backend=self.renderer.backend
)
)
style['fmt'] = 'none'
dims = element.dimensions()
xs, ys = (element.dimension_values(i) for i in range(2))
err = element.array(dimensions=dims[2:4])
err_key = 'xerr' if element.horizontal ^ self.invert_axes else 'yerr'
coords = (ys, xs) if self.invert_axes else (xs, ys)
style[err_key] = err.T if len(dims) > 3 else err[:, 0]
return coords, style, {}
def update_handles(self, key, axis, element, ranges, style):
bottoms = self.handles['bottoms']
tops = self.handles['tops']
verts = self.handles['verts']
_, style, axis_kwargs = self.get_data(element, ranges, style)
xs, ys, neg_error = (element.dimension_values(i) for i in range(3))
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
if element.horizontal:
bxs, bys = xs - neg_error, ys
txs, tys = xs + pos_error, ys
else:
bxs, bys = xs, ys - neg_error
txs, tys = xs, ys + pos_error
if self.invert_axes:
bxs, bys = bys, bxs
txs, tys = tys, txs
new_arrays = np.moveaxis(np.array([[bxs, bys], [txs, tys]]), 2, 0)
verts.set_paths(new_arrays)
if bottoms:
bottoms.set_xdata(bxs)
bottoms.set_ydata(bys)
if tops:
tops.set_xdata(txs)
tops.set_ydata(tys)
if 'ecolor' in style:
verts.set_edgecolors(style['ecolor'])
if 'linewidth' in style:
verts.set_linewidths(style['linewidth'])
return axis_kwargs
class AreaPlot(AreaMixin, ChartPlot):
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
style_opts = ['color', 'facecolor', 'alpha', 'edgecolor', 'linewidth',
'hatch', 'linestyle', 'joinstyle',
'fill', 'capstyle', 'interpolate']
_nonvectorized_styles = style_opts
_plot_methods = dict(single='fill_between')
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
xs = element.dimension_values(0)
ys = [element.dimension_values(vdim) for vdim in element.vdims]
return tuple([xs]+ys), style, {}
def init_artists(self, ax, plot_data, plot_kwargs):
fill_fn = ax.fill_betweenx if self.invert_axes else ax.fill_between
stack = fill_fn(*plot_data, **plot_kwargs)
return {'artist': stack}
class SideAreaPlot(AdjoinedPlot, AreaPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare' 'left-bare' and 'right-bare'.""")
class SpreadPlot(AreaPlot):
"""
SpreadPlot plots the Spread Element type.
"""
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
def __init__(self, element, **params):
super(SpreadPlot, self).__init__(element, **params)
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
xs = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
return (xs, mean-neg_error, mean+pos_error), style, {}
def get_extents(self, element, ranges, range_type='combined'):
return ChartPlot.get_extents(self, element, ranges, range_type)
class HistogramPlot(ColorbarPlot):
"""
HistogramPlot can plot DataHistograms and ViewMaps of
DataHistograms, which can be displayed as a single frame or
animation.
"""
style_opts = ['alpha', 'color', 'align', 'visible', 'facecolor',
'edgecolor', 'log', 'capsize', 'error_kw', 'hatch',
'linewidth']
_nonvectorized_styles = ['alpha', 'log', 'error_kw', 'hatch', 'visible', 'align']
def __init__(self, histograms, **params):
self.center = False
self.cyclic = False
super(HistogramPlot, self).__init__(histograms, **params)
if self.invert_axes:
self.axis_settings = ['ylabel', 'xlabel', 'yticks']
else:
self.axis_settings = ['xlabel', 'ylabel', 'xticks']
val_dim = self.hmap.last.get_dimension(1)
self.cyclic_range = val_dim.range if val_dim.cyclic else None
@mpl_rc_context
def initialize_plot(self, ranges=None):
hist = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
el_ranges = match_spec(hist, ranges)
# Get plot ranges and values
dims = hist.dimensions()[:2]
edges, hvals, widths, lims, is_datetime = self._process_hist(hist)
if is_datetime and not dims[0].value_format:
dt_format = Dimension.type_formatters[np.datetime64]
dims[0] = dims[0].clone(value_format=DateFormatter(dt_format))
style = self.style[self.cyclic_index]
if self.invert_axes:
self.offset_linefn = self.handles['axis'].axvline
self.plotfn = self.handles['axis'].barh
else:
self.offset_linefn = self.handles['axis'].axhline
self.plotfn = self.handles['axis'].bar
with abbreviated_exception():
style = self._apply_transforms(hist, ranges, style)
if 'vmin' in style:
raise ValueError('Mapping a continuous dimension to a '
'color on a HistogramPlot is not '
'supported by the {backend} backend. '
'To map a dimension to a color supply '
'an explicit list of rgba colors.'.format(
backend=self.renderer.backend
)
)
# Plot bars and make any adjustments
legend = hist.label if self.show_legend else ''
bars = self.plotfn(edges, hvals, widths, zorder=self.zorder, label=legend, align='edge', **style)
self.handles['artist'] = self._update_plot(self.keys[-1], hist, bars, lims, ranges) # Indexing top
ticks = self._compute_ticks(hist, edges, widths, lims)
ax_settings = self._process_axsettings(hist, lims, ticks)
ax_settings['dimensions'] = dims
return self._finalize_axis(self.keys[-1], ranges=el_ranges, element=hist, **ax_settings)
def _process_hist(self, hist):
"""
Get data from histogram, including bin_ranges and values.
"""
self.cyclic = hist.get_dimension(0).cyclic
x = hist.kdims[0]
edges = hist.interface.coords(hist, x, edges=True)
values = hist.dimension_values(1)
hist_vals = np.array(values)
xlim = hist.range(0)
ylim = hist.range(1)
is_datetime = isdatetime(edges)
if is_datetime:
edges = np.array([dt64_to_dt(e) if isinstance(e, np.datetime64) else e for e in edges])
edges = date2num(edges)
xlim = tuple(dt_to_int(v, 'D') for v in xlim)
widths = np.diff(edges)
return edges[:-1], hist_vals, widths, xlim+ylim, is_datetime
def _compute_ticks(self, element, edges, widths, lims):
"""
Compute the ticks either as cyclic values in degrees or as roughly
evenly spaced bin centers.
"""
if self.xticks is None or not isinstance(self.xticks, int):
return None
if self.cyclic:
x0, x1, _, _ = lims
xvals = np.linspace(x0, x1, self.xticks)
labels = ["%.0f" % np.rad2deg(x) + '\N{DEGREE SIGN}' for x in xvals]
elif self.xticks:
dim = element.get_dimension(0)
inds = np.linspace(0, len(edges), self.xticks, dtype=np.int)
edges = list(edges) + [edges[-1] + widths[-1]]
xvals = [edges[i] for i in inds]
labels = [dim.pprint_value(v) for v in xvals]
return [xvals, labels]
def get_extents(self, element, ranges, range_type='combined'):
ydim = element.get_dimension(1)
s0, s1 = ranges[ydim.name]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[ydim.name]['soft'] = (s0, s1)
return super(HistogramPlot, self).get_extents(element, ranges, range_type)
def _process_axsettings(self, hist, lims, ticks):
"""
Get axis settings options including ticks, x- and y-labels
and limits.
"""
axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
return axis_settings
def _update_plot(self, key, hist, bars, lims, ranges):
"""
Process bars can be subclassed to manually adjust bars
after being plotted.
"""
return bars
def _update_artists(self, key, hist, edges, hvals, widths, lims, ranges):
"""
Update all the artists in the histogram. Subclassable to
allow updating of further artists.
"""
plot_vals = zip(self.handles['artist'], edges, hvals, widths)
for bar, edge, height, width in plot_vals:
if self.invert_axes:
bar.set_y(edge)
bar.set_width(height)
bar.set_height(width)
else:
bar.set_x(edge)
bar.set_height(height)
bar.set_width(width)
def update_handles(self, key, axis, element, ranges, style):
# Process values, axes and style
edges, hvals, widths, lims, _ = self._process_hist(element)
ticks = self._compute_ticks(element, edges, widths, lims)
ax_settings = self._process_axsettings(element, lims, ticks)
self._update_artists(key, element, edges, hvals, widths, lims, ranges)
return ax_settings
class SideHistogramPlot(AdjoinedPlot, HistogramPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
offset = param.Number(default=0.2, bounds=(0,1), doc="""
Histogram value offset for a colorbar.""")
show_grid = param.Boolean(default=False, doc="""
Whether to overlay a grid on the axis.""")
def _process_hist(self, hist):
"""
Subclassed to offset histogram by defined amount.
"""
edges, hvals, widths, lims, isdatetime = super(SideHistogramPlot, self)._process_hist(hist)
offset = self.offset * lims[3]
hvals *= 1-self.offset
hvals += offset
lims = lims[0:3] + (lims[3] + offset,)
return edges, hvals, widths, lims, isdatetime
def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
super(SideHistogramPlot, self)._update_artists(n, element, edges, hvals, widths, lims, ranges)
self._update_plot(n, element, self.handles['artist'], lims, ranges)
def _update_plot(self, key, element, bars, lims, ranges):
"""
Process the bars and draw the offset line as necessary. If a
color map is set in the style of the 'main' ViewableElement object, color
the bars appropriately, respecting the required normalization
settings.
"""
main = self.adjoined.main
_, y1 = element.range(1)
offset = self.offset * y1
range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
# Check if plot is colormapped
plot_type = Store.registry['matplotlib'].get(type(range_item))
if isinstance(plot_type, PlotSelector):
plot_type = plot_type.get_plot_class(range_item)
opts = self.lookup_options(range_item, 'plot')
if plot_type and issubclass(plot_type, ColorbarPlot):
cidx = opts.options.get('color_index', None)
if cidx is None:
opts = self.lookup_options(range_item, 'style')
cidx = opts.kwargs.get('color', None)
if cidx not in range_item:
cidx = None
cdim = None if cidx is None else range_item.get_dimension(cidx)
else:
cdim = None
# Get colormapping options
if isinstance(range_item, (HeatMap, Raster)) or cdim:
style = self.lookup_options(range_item, 'style')[self.cyclic_index]
cmap = cm.get_cmap(style.get('cmap'))
main_range = style.get('clims', main_range)
else:
cmap = None
if offset and ('offset_line' not in self.handles):
self.handles['offset_line'] = self.offset_linefn(offset,
linewidth=1.0,
color='k')
elif offset:
self._update_separator(offset)
if cmap is not None:
self._colorize_bars(cmap, bars, element, main_range, dim)
return bars
def _colorize_bars(self, cmap, bars, element, main_range, dim):
"""
Use the given cmap to color the bars, applying the correct
color ranges as necessary.
"""
cmap_range = main_range[1] - main_range[0]
lower_bound = main_range[0]
colors = np.array(element.dimension_values(dim))
colors = (colors - lower_bound) / (cmap_range)
for c, bar in zip(colors, bars):
bar.set_facecolor(cmap(c))
bar.set_clip_on(False)
def _update_separator(self, offset):
"""
Compute colorbar offset and update separator line
if map is non-zero.
"""
offset_line = self.handles['offset_line']
if offset == 0:
offset_line.set_visible(False)
else:
offset_line.set_visible(True)
if self.invert_axes:
offset_line.set_xdata(offset)
else:
offset_line.set_ydata(offset)
class PointPlot(ChartPlot, ColorbarPlot):
"""
Note that the 'cmap', 'vmin' and 'vmax' style arguments control
how point magnitudes are rendered to different colors.
"""
show_grid = param.Boolean(default=False, doc="""
Whether to draw grid lines at the tick positions.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of size style mapping, e.g. `size=dim('size')`""")
scaling_method = param.ObjectSelector(default="area",
objects=["width", "area"],
doc="""
Deprecated in favor of size style mapping, e.g.
size=dim('size')**2.""")
scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
Scaling factor which is applied to either the width or area
of each point, depending on the value of `scaling_method`.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'size', 'visible',
'cmap', 'vmin', 'vmax', 'norm']
_nonvectorized_styles = ['alpha', 'marker', 'cmap', 'vmin', 'vmax',
'norm', 'visible']
_disabled_opts = ['size']
_plot_methods = dict(single='scatter')
def get_data(self, element, ranges, style):
xs, ys = (element.dimension_values(i) for i in range(2))
self._compute_styles(element, ranges, style)
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
return (ys, xs) if self.invert_axes else (xs, ys), style, {}
def _compute_styles(self, element, ranges, style):
cdim = element.get_dimension(self.color_index)
color = style.pop('color', None)
cmap = style.get('cmap', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim and cmap:
cs = element.dimension_values(self.color_index)
# Check if numeric otherwise treat as categorical
if cs.dtype.kind in 'uif':
style['c'] = cs
else:
style['c'] = search_indices(cs, unique_array(cs))
self._norm_kwargs(element, ranges, style, cdim)
elif color is not None:
style['color'] = color
style['edgecolors'] = style.pop('edgecolors', style.pop('edgecolor', 'none'))
ms = style.get('s', mpl.rcParams['lines.markersize'])
sdim = element.get_dimension(self.size_index)
if sdim and ((isinstance(ms, basestring) and ms in element) or isinstance(ms, dim)):
self.param.warning(
"Cannot declare style mapping for 's' option and "
"declare a size_index; ignoring the size_index.")
sdim = None
if sdim:
sizes = element.dimension_values(self.size_index)
sizes = compute_sizes(sizes, self.size_fn, self.scaling_factor,
self.scaling_method, ms)
if sizes is None:
eltype = type(element).__name__
self.param.warning(
'%s dimension is not numeric, cannot use to '
'scale %s size.' % (sdim.pprint_label, eltype))
else:
style['s'] = sizes
style['edgecolors'] = style.pop('edgecolors', 'none')
def update_handles(self, key, axis, element, ranges, style):
paths = self.handles['artist']
(xs, ys), style, _ = self.get_data(element, ranges, style)
xdim, ydim = element.dimensions()[:2]
if 'factors' in ranges.get(xdim.name, {}):
factors = list(ranges[xdim.name]['factors'])
xs = [factors.index(x) for x in xs if x in factors]
if 'factors' in ranges.get(ydim.name, {}):
factors = list(ranges[ydim.name]['factors'])
ys = [factors.index(y) for y in ys if y in factors]
paths.set_offsets(np.column_stack([xs, ys]))
if 's' in style:
sizes = style['s']
if isscalar(sizes):
sizes = [sizes]
paths.set_sizes(sizes)
if 'vmin' in style:
paths.set_clim((style['vmin'], style['vmax']))
if 'c' in style:
paths.set_array(style['c'])
if 'norm' in style:
paths.norm = style['norm']
if 'linewidth' in style:
paths.set_linewidths(style['linewidth'])
if 'edgecolors' in style:
paths.set_edgecolors(style['edgecolors'])
if 'facecolors' in style:
paths.set_facecolors(style['facecolors'])
class VectorFieldPlot(ColorbarPlot):
"""
Renders vector fields in sheet coordinates. The vectors are
expressed in polar coordinates and may be displayed according to
angle alone (with some common, arbitrary arrow length) or may be
true polar vectors.
The color or magnitude can be mapped onto any dimension using the
color_index and size_index.
The length of the arrows is controlled by the 'scale' style
option. The scaling of the arrows may also be controlled via the
normalize_lengths and rescale_lengths plot option, which will
normalize the lengths to a maximum of 1 and scale them according
to the minimum distance respectively.
"""
arrow_heads = param.Boolean(default=True, doc="""
Whether or not to draw arrow heads. If arrowheads are enabled,
they may be customized with the 'headlength' and
'headaxislength' style options.""")
magnitude = param.ClassSelector(class_=(basestring, dim), doc="""
Dimension or dimension value transform that declares the magnitude
of each vector. Magnitude is expected to be scaled between 0-1,
by default the magnitudes are rescaled relative to the minimum
distance between vectors, this can be disabled with the
rescale_lengths option.""")
padding = param.ClassSelector(default=0.05, class_=(int, float, tuple))
rescale_lengths = param.Boolean(default=True, doc="""
Whether the lengths will be rescaled to take into account the
smallest non-zero distance between two vectors.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of dimension value transform on color option,
e.g. `color=dim('Magnitude')`.
""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of the magnitude option, e.g.
`magnitude=dim('Magnitude')`.
""")
normalize_lengths = param.Boolean(default=True, doc="""
Deprecated in favor of rescaling length using dimension value
transforms using the magnitude option, e.g.
`dim('Magnitude').norm()`.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'visible', 'cmap',
'scale', 'headlength', 'headaxislength', 'pivot',
'width', 'headwidth', 'norm']
_nonvectorized_styles = ['alpha', 'marker', 'cmap', 'visible', 'norm',
'pivot', 'headlength', 'headaxislength',
'headwidth']
_plot_methods = dict(single='quiver')
def _get_magnitudes(self, element, style, ranges):
size_dim = element.get_dimension(self.size_index)
mag_dim = self.magnitude
if size_dim and mag_dim:
self.param.warning(
"Cannot declare style mapping for 'magnitude' option "
"and declare a size_index; ignoring the size_index.")
elif size_dim:
mag_dim = size_dim
elif isinstance(mag_dim, basestring):
mag_dim = element.get_dimension(mag_dim)
if mag_dim is not None:
if isinstance(mag_dim, dim):
magnitudes = mag_dim.apply(element, flat=True)
else:
magnitudes = element.dimension_values(mag_dim)
_, max_magnitude = ranges[dimension_name(mag_dim)]['combined']
if self.normalize_lengths and max_magnitude != 0:
magnitudes = magnitudes / max_magnitude
else:
magnitudes = np.ones(len(element))
return magnitudes
def get_data(self, element, ranges, style):
# Compute coordinates
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
xs = element.dimension_values(xidx) if len(element.data) else []
ys = element.dimension_values(yidx) if len(element.data) else []
# Compute vector angle and magnitude
radians = element.dimension_values(2) if len(element.data) else []
if self.invert_axes: radians = radians+1.5*np.pi
angles = list(np.rad2deg(radians))
magnitudes = self._get_magnitudes(element, style, ranges)
input_scale = style.pop('scale', 1.0)
if self.rescale_lengths:
min_dist = get_min_distance(element)
input_scale = input_scale / min_dist
args = (xs, ys, magnitudes, [0.0] * len(element))
# Compute color
cdim = element.get_dimension(self.color_index)
color = style.get('color', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim:
colors = element.dimension_values(self.color_index)
style['c'] = colors
cdim = element.get_dimension(self.color_index)
self._norm_kwargs(element, ranges, style, cdim)
style.pop('color', None)
# Process style
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
style.update(dict(scale=input_scale, angles=angles, units='x', scale_units='x'))
if 'vmin' in style:
style['clim'] = (style.pop('vmin'), style.pop('vmax'))
if 'c' in style:
style['array'] = style.pop('c')
if 'pivot' not in style:
style['pivot'] = 'mid'
if not self.arrow_heads:
style['headaxislength'] = 0
return args, style, {}
def update_handles(self, key, axis, element, ranges, style):
args, style, axis_kwargs = self.get_data(element, ranges, style)
# Set magnitudes, angles and colors if supplied.
quiver = self.handles['artist']
quiver.set_offsets(np.column_stack(args[:2]))
quiver.U = args[2]
quiver.angles = style['angles']
if 'color' in style:
quiver.set_facecolors(style['color'])
quiver.set_edgecolors(style['color'])
if 'array' in style:
quiver.set_array(style['array'])
if 'clim' in style:
quiver.set_clim(style['clim'])
if 'linewidth' in style:
quiver.set_linewidths(style['linewidth'])
return axis_kwargs
class BarPlot(BarsMixin, ColorbarPlot, LegendPlot):
bar_padding = param.Number(default=0.2, doc="""
Defines the padding between groups.""")
multi_level = param.Boolean(default=True, doc="""
Whether the Bars should be grouped into a second categorical axis level.""")
stacked = param.Boolean(default=False, doc="""
Whether the bars should be stacked or grouped.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
'log', 'facecolor', 'capsize', 'error_kw', 'hatch']
_nonvectorized_styles = ['visible']
legend_specs = dict(LegendPlot.legend_specs, **{
'top': dict(bbox_to_anchor=(0., 1.02, 1., .102),
ncol=3, loc=3, mode="expand", borderaxespad=0.),
'bottom': dict(ncol=3, mode="expand", loc=2,
bbox_to_anchor=(0., -0.4, 1., .102),
borderaxespad=0.1)})
def _get_values(self, element, ranges):
"""
Get unique index value for each bar
"""
gvals, cvals = self._get_coords(element, ranges, as_string=False)
kdims = element.kdims
if element.ndims == 1:
dimensions = kdims + [None, None]
values = {'group': gvals, 'stack': [None]}
elif self.stacked:
stack_dim = kdims[1]
dimensions = [kdims[0], None, stack_dim]
if stack_dim.values:
stack_order = stack_dim.values
elif stack_dim in ranges and ranges[stack_dim.name].get('factors'):
stack_order = ranges[stack_dim.name]['factors']
else:
stack_order = element.dimension_values(1, False)
stack_order = list(stack_order)
values = {'group': gvals, 'stack': stack_order}
else:
dimensions = kdims + [None]
values = {'group': gvals, 'category': cvals}
return dimensions, values
@mpl_rc_context
def initialize_plot(self, ranges=None):
element = self.hmap.last
vdim = element.vdims[0]
axis = self.handles['axis']
key = self.keys[-1]
style = dict(zorder=self.zorder, **self.style[self.cyclic_index])
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(element, ranges)
self.handles['artist'], xticks, xdims = self._create_bars(axis, element, ranges, style)
kwargs = {'yticks': xticks} if self.invert_axes else {'xticks': xticks}
return self._finalize_axis(key, ranges=ranges, element=element,
dimensions=[xdims, vdim], **kwargs)
def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
"""
Apply ticks with appropriate offsets.
"""
alignments = None
ticks = xticks or yticks
if ticks is not None:
ticks, labels, alignments = zip(*sorted(ticks, key=lambda x: x[0]))
ticks = (list(ticks), list(labels))
if xticks:
xticks = ticks
elif yticks:
yticks = ticks
super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
if alignments:
if xticks:
for t, y in zip(axis.get_xticklabels(), alignments):
t.set_y(y)
elif yticks:
for t, x in zip(axis.get_yticklabels(), alignments):
t.set_x(x)
def _create_bars(self, axis, element, ranges, style):
# Get values dimensions, and style information
(gdim, cdim, sdim), values = self._get_values(element, ranges)
style_dim = None
if sdim:
cats = values['stack']
style_dim = sdim
elif cdim:
cats = values['category']
style_dim = cdim
if style_dim:
style_map = {style_dim.pprint_value(v): self.style[i]
for i, v in enumerate(cats)}
else:
style_map = {None: {}}
# Compute widths
width = (1-(2.*self.bar_padding)) / len(values.get('category', [None]))
if self.invert_axes:
plot_fn = 'barh'
x, y, w, bottom = 'y', 'width', 'height', 'left'
else:
plot_fn = 'bar'
x, y, w, bottom = 'x', 'height', 'width', 'bottom'
# Iterate over group, category and stack dimension values
# computing xticks and drawing bars and applying styles
xticks, labels, bar_data = [], [], {}
for gidx, grp in enumerate(values.get('group', [None])):
sel_key = {}
label = None
if grp is not None:
grp_label = gdim.pprint_value(grp)
sel_key[gdim.name] = [grp]
yalign = -0.04 if cdim and self.multi_level else 0
xticks.append((gidx+0.5, grp_label, yalign))
for cidx, cat in enumerate(values.get('category', [None])):
xpos = gidx+self.bar_padding+(cidx*width)
if cat is not None:
label = cdim.pprint_value(cat)
sel_key[cdim.name] = [cat]
if self.multi_level:
xticks.append((xpos+width/2., label, 0))
prev = 0
for stk in values.get('stack', [None]):
if stk is not None:
label = sdim.pprint_value(stk)
sel_key[sdim.name] = [stk]
el = element.select(**sel_key)
vals = el.dimension_values(element.vdims[0].name)
val = float(vals[0]) if len(vals) else np.NaN
xval = xpos+width/2.
if label in bar_data:
group = bar_data[label]
group[x].append(xval)
group[y].append(val)
group[bottom].append(prev)
else:
bar_style = dict(style, **style_map.get(label, {}))
with abbreviated_exception():
bar_style = self._apply_transforms(el, ranges, bar_style)
bar_data[label] = {
x:[xval], y: [val], w: width, bottom: [prev],
'label': label,
}
bar_data[label].update(bar_style)
prev += val if isfinite(val) else 0
if label is not None:
labels.append(label)
# Draw bars
bars = [getattr(axis, plot_fn)(**bar_spec) for bar_spec in bar_data.values()]
# Generate legend and axis labels
ax_dims = [gdim]
title = ''
if sdim:
title = sdim.pprint_label
ax_dims.append(sdim)
elif cdim:
title = cdim.pprint_label
if self.multi_level:
ax_dims.append(cdim)
if self.show_legend and any(len(l) for l in labels) and (sdim or not self.multi_level):
leg_spec = self.legend_specs[self.legend_position]
if self.legend_cols: leg_spec['ncol'] = self.legend_cols
axis.legend(title=title, **leg_spec)
return bars, xticks, ax_dims
class SpikesPlot(SpikesMixin, PathPlot, ColorbarPlot):
aspect = param.Parameter(default='square', doc="""
The aspect ratio mode of the plot. Allows setting an
explicit aspect ratio as width/height as well as
'square' and 'equal' options.""")
color_index = param.ClassSelector(default=None, allow_None=True,
class_=(basestring, int), doc="""
Index of the dimension from which the color will the drawn""")
spike_length = param.Number(default=0.1, doc="""
The length of each spike if Spikes object is one dimensional.""")
padding = param.ClassSelector(default=(0, 0.1), class_=(int, float, tuple))
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
style_opts = PathPlot.style_opts + ['cmap']
def init_artists(self, ax, plot_args, plot_kwargs):
if 'c' in plot_kwargs:
plot_kwargs['array'] = plot_kwargs.pop('c')
if 'vmin' in plot_kwargs and 'vmax' in plot_kwargs:
plot_kwargs['clim'] = plot_kwargs.pop('vmin'), plot_kwargs.pop('vmax')
line_segments = LineCollection(*plot_args, **plot_kwargs)
ax.add_collection(line_segments)
return {'artist': line_segments}
def get_data(self, element, ranges, style):
dimensions = element.dimensions(label=True)
ndims = len(dimensions)
opts = self.lookup_options(element, 'plot').options
pos = self.position
if ndims > 1 and 'spike_length' not in opts:
data = element.columns([0, 1])
xs, ys = data[dimensions[0]], data[dimensions[1]]
data = [[(x, pos), (x, pos+y)] for x, y in zip(xs, ys)]
else:
xs = element.array([0])
height = self.spike_length
data = [[(x[0], pos), (x[0], pos+height)] for x in xs]
if self.invert_axes:
data = [(line[0][::-1], line[1][::-1]) for line in data]
dims = element.dimensions()
clean_spikes = []
for spike in data:
xs, ys = zip(*spike)
cols = []
for i, vs in enumerate((xs, ys)):
vs = np.array(vs)
if isdatetime(vs):
dt_format = Dimension.type_formatters.get(
type(vs[0]),
Dimension.type_formatters[np.datetime64]
)
vs = date2num(vs)
dims[i] = dims[i].clone(value_format=DateFormatter(dt_format))
cols.append(vs)
clean_spikes.append(np.column_stack(cols))
cdim = element.get_dimension(self.color_index)
color = style.get('color', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim:
style['array'] = element.dimension_values(cdim)
self._norm_kwargs(element, ranges, style, cdim)
if 'spike_length' in opts:
axis_dims = (element.dimensions()[0], None)
elif len(element.dimensions()) == 1:
axis_dims = (element.dimensions()[0], None)
else:
axis_dims = (element.dimensions()[0], element.dimensions()[1])
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
return (clean_spikes,), style, {'dimensions': axis_dims}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(data,), kwargs, axis_kwargs = self.get_data(element, ranges, style)
artist.set_paths(data)
artist.set_visible(style.get('visible', True))
if 'color' in kwargs:
artist.set_edgecolors(kwargs['color'])
if 'array' in kwargs or 'c' in kwargs:
artist.set_array(kwargs.get('array', kwargs.get('c')))
if 'vmin' in kwargs:
artist.set_clim((kwargs['vmin'], kwargs['vmax']))
if 'norm' in kwargs:
artist.norm = kwargs['norm']
if 'linewidth' in kwargs:
artist.set_linewidths(kwargs['linewidth'])
return axis_kwargs
class SideSpikesPlot(AdjoinedPlot, SpikesPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
subplot_size = param.Number(default=0.1, doc="""
The size subplots as expressed as a fraction of the main plot.""")
spike_length = param.Number(default=1, doc="""
The length of each spike if Spikes object is one dimensional.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare' 'left-bare' and 'right-bare'.""")
| 1 | 23,808 | I'm a bit worried about this. At minimum, you should make a copy of the dict here to avoid modifying a user-supplied variable. | holoviz-holoviews | py |
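A minimal sketch of the suggested fix (an illustration only, not the patch as merged; `legend_opts` here is the user-supplied option dict from the patch above):

    leg_spec = dict(self.legend_specs[self.legend_position])
    if self.legend_cols:
        leg_spec['ncol'] = self.legend_cols
    # Merge into a fresh dict so the user-supplied legend_opts is never mutated.
    axis.legend(title=title, **dict(self.legend_opts, **leg_spec))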
@@ -455,6 +455,11 @@ func (r *AWSMachinePoolReconciler) reconcileLaunchTemplate(machinePoolScope *sco
// userdata, OR we've discovered a new AMI ID.
if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID || launchTemplateUserDataHash != bootstrapDataHash {
machinePoolScope.Info("creating new version for launch template", "existing", launchTemplate, "incoming", machinePoolScope.AWSMachinePool.Spec.AWSLaunchTemplate)
+ // There is a limit to the number of Launch Template Versions.
+ // We ensure that the number of versions does not grow without bound by following a simple rule: For each version we create, we delete one old version.
+ if err := ec2svc.PruneLaunchTemplateVersions(machinePoolScope.AWSMachinePool.Status.LaunchTemplateID); err != nil {
+ return err
+ }
if err := ec2svc.CreateLaunchTemplateVersion(machinePoolScope, imageID, bootstrapData); err != nil {
return err
} | 1 | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"reflect"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4"
"sigs.k8s.io/cluster-api-provider-aws/controllers"
ekscontrolplane "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4"
infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services"
asg "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/autoscaling"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AWSMachinePoolReconciler reconciles an AWSMachinePool object.
type AWSMachinePoolReconciler struct {
client.Client
Recorder record.EventRecorder
WatchFilterValue string
asgServiceFactory func(cloud.ClusterScoper) services.ASGInterface
ec2ServiceFactory func(scope.EC2Scope) services.EC2MachineInterface
}
func (r *AWSMachinePoolReconciler) getASGService(scope cloud.ClusterScoper) services.ASGInterface {
if r.asgServiceFactory != nil {
return r.asgServiceFactory(scope)
}
return asg.NewService(scope)
}
func (r *AWSMachinePoolReconciler) getEC2Service(scope scope.EC2Scope) services.EC2MachineInterface {
if r.ec2ServiceFactory != nil {
return r.ec2ServiceFactory(scope)
}
return ec2.NewService(scope)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// Reconcile is the reconciliation loop for AWSMachinePool.
func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the AWSMachinePool.
awsMachinePool := &infrav1exp.AWSMachinePool{}
err := r.Get(ctx, req.NamespacedName, awsMachinePool)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
// Fetch the CAPI MachinePool
machinePool, err := getOwnerMachinePool(ctx, r.Client, awsMachinePool.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machinePool == nil {
log.Info("MachinePool Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
log = log.WithValues("machinePool", machinePool.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta)
if err != nil {
log.Info("MachinePool is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
log = log.WithValues("cluster", cluster.Name)
infraCluster, err := r.getInfraCluster(ctx, log, cluster, awsMachinePool)
if err != nil {
return ctrl.Result{}, errors.New("error getting infra provider cluster or control plane object")
}
if infraCluster == nil {
log.Info("AWSCluster or AWSManagedControlPlane is not ready yet")
return ctrl.Result{}, nil
}
// Create the machine pool scope
machinePoolScope, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{
Client: r.Client,
Cluster: cluster,
MachinePool: machinePool,
InfraCluster: infraCluster,
AWSMachinePool: awsMachinePool,
})
if err != nil {
log.Error(err, "failed to create scope")
return ctrl.Result{}, err
}
// Always close the scope when exiting this function so we can persist any AWSMachine changes.
defer func() {
// set Ready condition before AWSMachinePool is patched
conditions.SetSummary(machinePoolScope.AWSMachinePool,
conditions.WithConditions(
infrav1exp.ASGReadyCondition,
infrav1exp.LaunchTemplateReadyCondition,
),
conditions.WithStepCounterIfOnly(
infrav1exp.ASGReadyCondition,
infrav1exp.LaunchTemplateReadyCondition,
),
)
if err := machinePoolScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
switch infraScope := infraCluster.(type) {
case *scope.ManagedControlPlaneScope:
if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machinePoolScope, infraScope, infraScope)
}
return r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
case *scope.ClusterScope:
if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machinePoolScope, infraScope, infraScope)
}
return r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope)
default:
return ctrl.Result{}, errors.New("infraCluster has unknown type")
}
}
// SetupWithManager registers the AWSMachinePool controller with the manager and
// sets up its watches.
func (r *AWSMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&infrav1exp.AWSMachinePool{}).
Watches(
&source.Kind{Type: &capiv1exp.MachinePool{}},
handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AWSMachinePool"))),
).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Complete(r)
}
func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) (ctrl.Result, error) {
clusterScope.Info("Reconciling AWSMachinePool")
// If the AWSMachinePool is in an error state, return early.
if machinePoolScope.HasFailed() {
machinePoolScope.Info("Error state detected, skipping reconciliation")
// TODO: If we are in a failed state, delete the secret regardless of instance state
return ctrl.Result{}, nil
}
// If the AWSMachinePool doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machinePoolScope.AWSMachinePool, infrav1exp.MachinePoolFinalizer)
// Register finalizer immediately to avoid orphaning AWS resources
if err := machinePoolScope.PatchObject(); err != nil {
return ctrl.Result{}, err
}
if !machinePoolScope.Cluster.Status.InfrastructureReady {
machinePoolScope.Info("Cluster infrastructure is not ready yet")
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
return ctrl.Result{}, nil
}
// Make sure bootstrap data is available and populated
if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
machinePoolScope.Info("Bootstrap data secret reference is not yet available")
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
return ctrl.Result{}, nil
}
if err := r.reconcileLaunchTemplate(machinePoolScope, ec2Scope); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err)
machinePoolScope.Error(err, "failed to reconcile launch template")
return ctrl.Result{}, err
}
// set the LaunchTemplateReady condition
conditions.MarkTrue(machinePoolScope.AWSMachinePool, infrav1exp.LaunchTemplateReadyCondition)
// Initialize ASG client
asgsvc := r.getASGService(clusterScope)
// Find existing ASG
asg, err := r.findASG(machinePoolScope, asgsvc)
if err != nil {
conditions.MarkUnknown(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition, infrav1exp.ASGNotFoundReason, err.Error())
return ctrl.Result{}, err
}
if asg == nil {
// Create new ASG
if _, err := r.createPool(machinePoolScope, clusterScope); err != nil {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition, infrav1exp.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, err.Error())
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
if err := r.updatePool(machinePoolScope, clusterScope, asg); err != nil {
machinePoolScope.Error(err, "error updating AWSMachinePool")
return ctrl.Result{}, err
}
err = r.reconcileTags(machinePoolScope, clusterScope, ec2Scope)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "error updating tags")
}
// Make sure Spec.ProviderID is always set.
machinePoolScope.AWSMachinePool.Spec.ProviderID = asg.ID
providerIDList := make([]string, len(asg.Instances))
for i, ec2 := range asg.Instances {
providerIDList[i] = fmt.Sprintf("aws:///%s/%s", ec2.AvailabilityZone, ec2.ID)
}
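	// Each entry above uses the aws:///<availability-zone>/<instance-id> provider ID format.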
machinePoolScope.SetAnnotation("cluster-api-provider-aws", "true")
machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList
machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList))
machinePoolScope.AWSMachinePool.Status.Ready = true
conditions.MarkTrue(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition)
err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances)
if err != nil {
machinePoolScope.Info("Failed updating instances", "instances", asg.Instances)
}
return ctrl.Result{}, nil
}
func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) (ctrl.Result, error) {
clusterScope.Info("Handling deleted AWSMachinePool")
ec2Svc := r.getEC2Service(ec2Scope)
asgSvc := r.getASGService(clusterScope)
asg, err := r.findASG(machinePoolScope, asgSvc)
if err != nil {
return ctrl.Result{}, err
}
if asg == nil {
machinePoolScope.V(2).Info("Unable to locate ASG")
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "NoASGFound", "Unable to find matching ASG")
} else {
machinePoolScope.SetASGStatus(asg.Status)
switch asg.Status {
case infrav1exp.ASGStatusDeleteInProgress:
// ASG is already deleting
machinePoolScope.SetNotReady()
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.ASGReadyCondition, infrav1exp.ASGDeletionInProgress, clusterv1.ConditionSeverityWarning, "")
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "DeletionInProgress", "ASG deletion in progress: %q", asg.Name)
machinePoolScope.Info("ASG is already deleting", "name", asg.Name)
default:
machinePoolScope.Info("Deleting ASG", "id", asg.Name, "status", asg.Status)
if err := asgSvc.DeleteASGAndWait(asg.Name); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedDelete", "Failed to delete ASG %q: %v", asg.Name, err)
return ctrl.Result{}, errors.Wrap(err, "failed to delete ASG")
}
}
}
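	// The ASG (if any) has been deleted or is deleting; now clean up the launch
	// template that backed it before removing the finalizer.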
launchTemplateID := machinePoolScope.AWSMachinePool.Status.LaunchTemplateID
launchTemplate, _, err := ec2Svc.GetLaunchTemplate(machinePoolScope.Name())
if err != nil {
return ctrl.Result{}, err
}
if launchTemplate == nil {
machinePoolScope.V(2).Info("Unable to locate launch template")
		r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "NoLaunchTemplateFound", "Unable to find matching launch template")
controllerutil.RemoveFinalizer(machinePoolScope.AWSMachinePool, infrav1exp.MachinePoolFinalizer)
return ctrl.Result{}, nil
}
machinePoolScope.Info("deleting launch template", "name", launchTemplate.Name)
if err := ec2Svc.DeleteLaunchTemplate(launchTemplateID); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedDelete", "Failed to delete launch template %q: %v", launchTemplate.Name, err)
		return ctrl.Result{}, errors.Wrap(err, "failed to delete launch template")
}
machinePoolScope.Info("successfully deleted AutoScalingGroup and Launch Template")
// remove finalizer
controllerutil.RemoveFinalizer(machinePoolScope.AWSMachinePool, infrav1exp.MachinePoolFinalizer)
return ctrl.Result{}, nil
}
func (r *AWSMachinePoolReconciler) updatePool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, existingASG *infrav1exp.AutoScalingGroup) error {
if asgNeedsUpdates(machinePoolScope, existingASG) {
machinePoolScope.Info("updating AutoScalingGroup")
asgSvc := r.getASGService(clusterScope)
if err := asgSvc.UpdateASG(machinePoolScope); err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedUpdate", "Failed to update ASG: %v", err)
return errors.Wrap(err, "unable to update ASG")
}
}
return nil
}
func (r *AWSMachinePoolReconciler) createPool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper) (*infrav1exp.AutoScalingGroup, error) {
clusterScope.Info("Initializing ASG client")
asgsvc := r.getASGService(clusterScope)
machinePoolScope.Info("Creating Autoscaling Group")
asg, err := asgsvc.CreateASG(machinePoolScope)
if err != nil {
return nil, errors.Wrapf(err, "failed to create AWSMachinePool")
}
return asg, nil
}
func (r *AWSMachinePoolReconciler) findASG(machinePoolScope *scope.MachinePoolScope, asgsvc services.ASGInterface) (*infrav1exp.AutoScalingGroup, error) {
// Query the instance using tags.
asg, err := asgsvc.GetASGByName(machinePoolScope)
if err != nil {
return nil, errors.Wrapf(err, "failed to query AWSMachinePool by name")
}
return asg, nil
}
func (r *AWSMachinePoolReconciler) reconcileLaunchTemplate(machinePoolScope *scope.MachinePoolScope, ec2Scope scope.EC2Scope) error {
bootstrapData, err := machinePoolScope.GetRawBootstrapData()
if err != nil {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error())
}
bootstrapDataHash := userdata.ComputeHash(bootstrapData)
ec2svc := r.getEC2Service(ec2Scope)
machinePoolScope.Info("checking for existing launch template")
launchTemplate, launchTemplateUserDataHash, err := ec2svc.GetLaunchTemplate(machinePoolScope.Name())
if err != nil {
conditions.MarkUnknown(machinePoolScope.AWSMachinePool, infrav1exp.LaunchTemplateReadyCondition, infrav1exp.LaunchTemplateNotFoundReason, err.Error())
return err
}
imageID, err := ec2svc.DiscoverLaunchTemplateAMI(machinePoolScope)
if err != nil {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.LaunchTemplateReadyCondition, infrav1exp.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
return err
}
if launchTemplate == nil {
machinePoolScope.Info("no existing launch template found, creating")
launchTemplateID, err := ec2svc.CreateLaunchTemplate(machinePoolScope, imageID, bootstrapData)
if err != nil {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.LaunchTemplateReadyCondition, infrav1exp.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
return err
}
machinePoolScope.SetLaunchTemplateIDStatus(launchTemplateID)
return machinePoolScope.PatchObject()
}
// LaunchTemplateID is set during LaunchTemplate creation, but for a scenario such as `clusterctl move`, status fields become blank.
// If launchTemplate already exists but LaunchTemplateID field in the status is empty, get the ID and update the status.
if machinePoolScope.AWSMachinePool.Status.LaunchTemplateID == "" {
launchTemplateID, err := ec2svc.GetLaunchTemplateID(machinePoolScope.Name())
if err != nil {
conditions.MarkUnknown(machinePoolScope.AWSMachinePool, infrav1exp.LaunchTemplateReadyCondition, infrav1exp.LaunchTemplateNotFoundReason, err.Error())
return err
}
machinePoolScope.SetLaunchTemplateIDStatus(launchTemplateID)
return machinePoolScope.PatchObject()
}
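	// Tags applied on previous reconciles are stored as JSON in the
	// TagsLastAppliedAnnotation on the AWSMachinePool; diffing them against the
	// currently desired additional tags tells us whether a new launch template
	// version is needed.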
annotation, err := r.machinePoolAnnotationJSON(machinePoolScope.AWSMachinePool, TagsLastAppliedAnnotation)
if err != nil {
return err
}
// Check if the instance tags were changed. If they were, create a new LaunchTemplate.
tagsChanged, _, _, _ := tagsChanged(annotation, machinePoolScope.AdditionalTags()) // nolint:dogsled
needsUpdate, err := ec2svc.LaunchTemplateNeedsUpdate(machinePoolScope, &machinePoolScope.AWSMachinePool.Spec.AWSLaunchTemplate, launchTemplate)
if err != nil {
return err
}
	// If there is a change: before changing the template, check whether an instance refresh is already ongoing,
	// because only one instance refresh can be "InProgress" at a time. If the template is updated while a refresh
	// cannot be started, that change will not trigger a refresh. Do not start an instance refresh if only the userdata changed.
if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID {
asgSvc := r.getASGService(ec2Scope)
canStart, err := asgSvc.CanStartASGInstanceRefresh(machinePoolScope)
if err != nil {
return err
}
if !canStart {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.InstanceRefreshStartedCondition, infrav1exp.InstanceRefreshNotReadyReason, clusterv1.ConditionSeverityWarning, "")
return errors.New("Cannot start a new instance refresh. Unfinished instance refresh exist")
}
}
// Create a new launch template version if there's a difference in configuration, tags,
// userdata, OR we've discovered a new AMI ID.
if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID || launchTemplateUserDataHash != bootstrapDataHash {
machinePoolScope.Info("creating new version for launch template", "existing", launchTemplate, "incoming", machinePoolScope.AWSMachinePool.Spec.AWSLaunchTemplate)
if err := ec2svc.CreateLaunchTemplateVersion(machinePoolScope, imageID, bootstrapData); err != nil {
return err
}
}
// After creating a new version of launch template, instance refresh is required
// to trigger a rolling replacement of all previously launched instances.
// If ONLY the userdata changed, previously launched instances continue to use the old launch
// template.
//
// FIXME(dlipovetsky,sedefsavas): If the controller terminates, or the StartASGInstanceRefresh returns an error,
// this conditional will not evaluate to true the next reconcile. If any machines use an older
// Launch Template version, and the difference between the older and current versions is _more_
// than userdata, we should start an Instance Refresh.
if needsUpdate || tagsChanged || *imageID != *launchTemplate.AMI.ID {
machinePoolScope.Info("starting instance refresh", "number of instances", machinePoolScope.MachinePool.Spec.Replicas)
asgSvc := r.getASGService(ec2Scope)
if err := asgSvc.StartASGInstanceRefresh(machinePoolScope); err != nil {
conditions.MarkFalse(machinePoolScope.AWSMachinePool, infrav1exp.InstanceRefreshStartedCondition, infrav1exp.InstanceRefreshFailedReason, clusterv1.ConditionSeverityError, err.Error())
return err
}
conditions.MarkTrue(machinePoolScope.AWSMachinePool, infrav1exp.InstanceRefreshStartedCondition)
}
return nil
}
func (r *AWSMachinePoolReconciler) reconcileTags(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error {
ec2Svc := r.getEC2Service(ec2Scope)
asgSvc := r.getASGService(clusterScope)
launchTemplateID := machinePoolScope.AWSMachinePool.Status.LaunchTemplateID
asgName := machinePoolScope.Name()
additionalTags := machinePoolScope.AdditionalTags()
tagsChanged, err := r.ensureTags(ec2Svc, asgSvc, machinePoolScope.AWSMachinePool, &launchTemplateID, &asgName, additionalTags)
if err != nil {
return err
}
if tagsChanged {
r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, "UpdatedTags", "updated tags on resources")
}
return nil
}
// asgNeedsUpdates compares the incoming AWSMachinePool spec against the existing ASG and reports whether an update is needed.
func asgNeedsUpdates(machinePoolScope *scope.MachinePoolScope, existingASG *infrav1exp.AutoScalingGroup) bool {
	// Compare desired replicas by value: both fields are *int32, so comparing the
	// pointers directly would flag a difference on every reconcile.
	if machinePoolScope.MachinePool.Spec.Replicas != nil &&
		(existingASG.DesiredCapacity == nil || *machinePoolScope.MachinePool.Spec.Replicas != *existingASG.DesiredCapacity) {
return true
}
if machinePoolScope.AWSMachinePool.Spec.MaxSize != existingASG.MaxSize {
return true
}
if machinePoolScope.AWSMachinePool.Spec.MinSize != existingASG.MinSize {
return true
}
if machinePoolScope.AWSMachinePool.Spec.CapacityRebalance != existingASG.CapacityRebalance {
return true
}
if !reflect.DeepEqual(machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy, existingASG.MixedInstancesPolicy) {
machinePoolScope.Info("got a mixed diff here", "incoming", machinePoolScope.AWSMachinePool.Spec.MixedInstancesPolicy, "existing", existingASG.MixedInstancesPolicy)
return true
}
// todo subnet diff
return false
}
// getOwnerMachinePool returns the MachinePool object owning the current resource.
func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1exp.MachinePool, error) {
for _, ref := range obj.OwnerReferences {
if ref.Kind != "MachinePool" {
continue
}
gv, err := schema.ParseGroupVersion(ref.APIVersion)
if err != nil {
return nil, errors.WithStack(err)
}
if gv.Group == capiv1exp.GroupVersion.Group {
return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
}
}
return nil, nil
}
// getMachinePoolByName finds and returns a MachinePool object using the specified params.
func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1exp.MachinePool, error) {
m := &capiv1exp.MachinePool{}
key := client.ObjectKey{Name: name, Namespace: namespace}
if err := c.Get(ctx, key, m); err != nil {
return nil, err
}
return m, nil
}
func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc {
return func(o client.Object) []reconcile.Request {
m, ok := o.(*capiv1exp.MachinePool)
if !ok {
panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
}
gk := gvk.GroupKind()
// Return early if the GroupKind doesn't match what we expect
infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind()
if gk != infraGK {
return nil
}
return []reconcile.Request{
{
NamespacedName: client.ObjectKey{
Namespace: m.Namespace,
Name: m.Spec.Template.Spec.InfrastructureRef.Name,
},
},
}
}
}
func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log logr.Logger, cluster *clusterv1.Cluster, awsMachinePool *infrav1exp.AWSMachinePool) (scope.EC2Scope, error) {
var clusterScope *scope.ClusterScope
var managedControlPlaneScope *scope.ManagedControlPlaneScope
var err error
if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind {
controlPlane := &ekscontrolplane.AWSManagedControlPlane{}
controlPlaneName := client.ObjectKey{
Namespace: awsMachinePool.Namespace,
Name: cluster.Spec.ControlPlaneRef.Name,
}
if err := r.Get(ctx, controlPlaneName, controlPlane); err != nil {
// AWSManagedControlPlane is not ready
return nil, nil // nolint:nilerr
}
managedControlPlaneScope, err = scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
Client: r.Client,
Logger: log,
Cluster: cluster,
ControlPlane: controlPlane,
ControllerName: "awsManagedControlPlane",
})
if err != nil {
return nil, err
}
return managedControlPlaneScope, nil
}
awsCluster := &infrav1.AWSCluster{}
infraClusterName := client.ObjectKey{
Namespace: awsMachinePool.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, infraClusterName, awsCluster); err != nil {
// AWSCluster is not ready
return nil, nil // nolint:nilerr
}
// Create the cluster scope
clusterScope, err = scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: log,
Cluster: cluster,
AWSCluster: awsCluster,
ControllerName: "awsmachine",
})
if err != nil {
return nil, err
}
return clusterScope, nil
}
| 1 | 19,519 | Is it better to create one before pruning? In case creation fails we don't want to delete the previous one. We create a new one, it is tagged as latest, so the previous can be deleted. `CreateLaunchTemplateVersion` returns the version created, how about directly trying to delete the previous version? Assuming the numbering is strictly increasing. | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -27,13 +27,13 @@ class ContainerInformationTab extends Tab {
private final WinePrefixContainerDTO container;
- private Consumer<ContainerDTO> onDeletePrefix;
+ private Consumer<ContainerDTO> onDeleteContainer;
private Consumer<ContainerDTO> onOpenFileBrowser;
- ContainerInformationTab(WinePrefixContainerDTO container) {
+ ContainerInformationTab(ContainerDTO container) {
super(tr("Information"));
- this.container = container;
+ this.container = (WinePrefixContainerDTO) container; // TODO: use generic container
this.setClosable(false);
| 1 | package org.phoenicis.javafx.views.mainwindow.containers;
import javafx.scene.control.Button;
import javafx.scene.control.Label;
import javafx.scene.control.Tab;
import javafx.scene.layout.GridPane;
import javafx.scene.layout.Priority;
import javafx.scene.layout.Region;
import javafx.scene.layout.VBox;
import javafx.scene.text.Text;
import org.phoenicis.containers.dto.ContainerDTO;
import org.phoenicis.containers.dto.WinePrefixContainerDTO;
import org.phoenicis.javafx.views.common.TextWithStyle;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.phoenicis.configuration.localisation.Localisation.tr;
/**
* Created by marc on 27.05.17.
*/
class ContainerInformationTab extends Tab {
private static final String CAPTION_TITLE_CSS_CLASS = "captionTitle";
private static final String CONFIGURATION_PANE_CSS_CLASS = "containerConfigurationPane";
private static final String TITLE_CSS_CLASS = "title";
private final WinePrefixContainerDTO container;
private Consumer<ContainerDTO> onDeletePrefix;
private Consumer<ContainerDTO> onOpenFileBrowser;
ContainerInformationTab(WinePrefixContainerDTO container) {
super(tr("Information"));
this.container = container;
this.setClosable(false);
this.populate();
}
private void populate() {
final VBox informationPane = new VBox();
final Text title = new TextWithStyle(tr("Information"), TITLE_CSS_CLASS);
informationPane.getStyleClass().add(CONFIGURATION_PANE_CSS_CLASS);
informationPane.getChildren().add(title);
final GridPane informationContentPane = new GridPane();
informationContentPane.getStyleClass().add("grid");
informationContentPane.add(new TextWithStyle(tr("Name:"), CAPTION_TITLE_CSS_CLASS), 0, 0);
Label name = new Label(container.getName());
name.setWrapText(true);
informationContentPane.add(name, 1, 0);
informationContentPane.add(new TextWithStyle(tr("Path:"), CAPTION_TITLE_CSS_CLASS), 0, 1);
Label path = new Label(container.getPath());
path.setWrapText(true);
informationContentPane.add(path, 1, 1);
informationContentPane.add(new TextWithStyle(tr("Installed shortcuts:"), CAPTION_TITLE_CSS_CLASS), 0, 2);
Label installedShortcuts = new Label(container.getInstalledShortcuts().stream()
.map(shortcutDTO -> shortcutDTO.getInfo().getName()).collect(Collectors.joining("; ")));
installedShortcuts.setWrapText(true);
informationContentPane.add(installedShortcuts, 1, 2);
// TODO: find generic solution which works for all container types
informationContentPane.add(new TextWithStyle(tr("Wine version:"), CAPTION_TITLE_CSS_CLASS), 0, 3);
Label version = new Label(container.getVersion());
version.setWrapText(true);
informationContentPane.add(version, 1, 3);
informationContentPane.add(new TextWithStyle(tr("Wine architecture:"), CAPTION_TITLE_CSS_CLASS), 0, 4);
Label architecture = new Label(container.getArchitecture());
architecture.setWrapText(true);
informationContentPane.add(architecture, 1, 4);
informationContentPane.add(new TextWithStyle(tr("Wine distribution:"), CAPTION_TITLE_CSS_CLASS), 0, 5);
Label distribution = new Label(container.getDistribution());
distribution.setWrapText(true);
informationContentPane.add(distribution, 1, 5);
Region spacer = new Region();
spacer.setPrefHeight(20);
VBox.setVgrow(spacer, Priority.NEVER);
// changing engine does not work currently
// disabled combobox to avoid confusion of users
/*
* ComboBox<EngineVersionDTO> changeEngineComboBox = new ComboBox<EngineVersionDTO>(
* FXCollections.observableList(engineVersions));
* changeEngineComboBox.setConverter(new StringConverter<EngineVersionDTO>() {
*
* @Override
* public String toString(EngineVersionDTO object) {
* return object.getVersion();
* }
*
* @Override
* public EngineVersionDTO fromString(String string) {
* return engineVersions.stream().filter(engineVersion -> engineVersion.getVersion().equals(string))
* .findFirst().get();
* }
* });
* changeEngineComboBox.getSelectionModel().select(engineVersions.stream()
* .filter(engineVersion -> engineVersion.getVersion().equals(container.getVersion())).findFirst().get());
*/
Button deleteButton = new Button(tr("Delete container"));
deleteButton.setOnMouseClicked(event -> this.onDeletePrefix.accept(container));
Region buttonSpacer = new Region();
buttonSpacer.setPrefHeight(20);
VBox.setVgrow(buttonSpacer, Priority.NEVER);
Button openFileBrowserButton = new Button(tr("Open in file browser"));
openFileBrowserButton.setOnMouseClicked(event -> this.onOpenFileBrowser.accept(container));
informationPane.getChildren().addAll(informationContentPane, spacer, /* changeEngineComboBox, */ deleteButton,
buttonSpacer, openFileBrowserButton);
this.setContent(informationPane);
}
protected void setOnDeletePrefix(Consumer<ContainerDTO> onDeletePrefix) {
this.onDeletePrefix = onDeletePrefix;
}
protected void setOnOpenFileBrowser(Consumer<ContainerDTO> onOpenFileBrowser) {
this.onOpenFileBrowser = onOpenFileBrowser;
}
}
| 1 | 13,008 | Do we require the specific implementation information here? | PhoenicisOrg-phoenicis | java |
@@ -0,0 +1,13 @@
+class DomainBlacklist < ActiveRecord::Base
+ validates :domain, uniqueness: { case_sensitive: false }
+
+ class << self
+ def email_banned?(email_address)
+ contains?(email_address.split('@').last)
+ end
+
+ def contains?(domain)
+ exists?(['lower(domain) LIKE ?', "%#{domain.downcase}%"])
+ end
+ end
+end | 1 | 1 | 6,576 | How about `exists?(['domain ~* ?', domain.downcase])` ? | blackducksoftware-ohloh-ui | rb |
|
@@ -729,6 +729,15 @@ func newContextForLongPoll(c *cli.Context) (context.Context, context.CancelFunc)
return newContextWithTimeout(c, defaultContextTimeoutForLongPoll)
}
+func newContextForBackground(c *cli.Context) (context.Context, context.CancelFunc) {
+ if c.GlobalIsSet(FlagContextTimeout) {
+ timeout := time.Duration(c.GlobalInt(FlagContextTimeout)) * time.Second
+ return rpc.NewContextWithTimeoutAndCLIHeaders(timeout)
+ }
+
+ return rpc.NewContextWithCLIHeaders()
+}
+
func newContextWithTimeout(c *cli.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if c.GlobalIsSet(FlagContextTimeout) {
timeout = time.Duration(c.GlobalInt(FlagContextTimeout)) * time.Second | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cli
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"regexp"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/gogo/protobuf/proto"
"github.com/urfave/cli"
"github.com/valyala/fastjson"
commonpb "go.temporal.io/temporal-proto/common"
eventpb "go.temporal.io/temporal-proto/event"
filterpb "go.temporal.io/temporal-proto/filter"
tasklistpb "go.temporal.io/temporal-proto/tasklist"
sdkclient "go.temporal.io/temporal/client"
"github.com/temporalio/temporal/common/codec"
"github.com/temporalio/temporal/common/payload"
"github.com/temporalio/temporal/common/rpc"
)
// GetHistory helper method to iterate over all pages and return complete list of history events
func GetHistory(ctx context.Context, workflowClient sdkclient.Client, workflowID, runID string) (*eventpb.History, error) {
iter := workflowClient.GetWorkflowHistory(ctx, workflowID, runID, false,
filterpb.HistoryEventFilterType_AllEvent)
var events []*eventpb.HistoryEvent
for iter.HasNext() {
event, err := iter.Next()
if err != nil {
return nil, err
}
events = append(events, event)
}
history := &eventpb.History{}
history.Events = events
return history, nil
}
// HistoryEventToString convert HistoryEvent to string
func HistoryEventToString(e *eventpb.HistoryEvent, printFully bool, maxFieldLength int) string {
data := getEventAttributes(e)
return anyToString(data, printFully, maxFieldLength)
}
func anyToString(d interface{}, printFully bool, maxFieldLength int) string {
v := reflect.ValueOf(d)
switch v.Kind() {
case reflect.Ptr:
return anyToString(v.Elem().Interface(), printFully, maxFieldLength)
case reflect.Struct:
var buf bytes.Buffer
t := reflect.TypeOf(d)
buf.WriteString("{")
for i := 0; i < v.NumField(); i++ {
f := v.Field(i)
if f.Kind() == reflect.Invalid {
continue
}
fieldValue := valueToString(f, printFully, maxFieldLength)
if len(fieldValue) == 0 {
continue
}
if buf.Len() > 1 {
buf.WriteString(", ")
}
fieldName := t.Field(i).Name
if !isAttributeName(fieldName) {
if !printFully {
fieldValue = trimTextAndBreakWords(fieldValue, maxFieldLength)
} else if maxFieldLength != 0 { // for command run workflow and observe history
fieldValue = trimText(fieldValue, maxFieldLength)
}
}
if fieldName == "Reason" || fieldName == "Details" || fieldName == "Cause" {
buf.WriteString(fmt.Sprintf("%s:%s", color.RedString(fieldName), color.MagentaString(fieldValue)))
} else {
buf.WriteString(fmt.Sprintf("%s:%s", fieldName, fieldValue))
}
}
buf.WriteString("}")
return buf.String()
default:
return fmt.Sprint(d)
}
}
func valueToString(v reflect.Value, printFully bool, maxFieldLength int) string {
switch v.Kind() {
case reflect.Ptr:
return valueToString(v.Elem(), printFully, maxFieldLength)
case reflect.Struct:
return anyToString(v.Interface(), printFully, maxFieldLength)
case reflect.Invalid:
return ""
case reflect.Slice:
if v.Type().Elem().Kind() == reflect.Uint8 {
n := string(v.Bytes())
if n != "" && n[len(n)-1] == '\n' {
return fmt.Sprintf("[%v]", n[:len(n)-1])
}
return fmt.Sprintf("[%v]", n)
}
return fmt.Sprintf("[len=%d]", v.Len())
case reflect.Map:
str := "map{"
for i, key := range v.MapKeys() {
str += key.String() + ":"
val := v.MapIndex(key)
switch typedV := val.Interface().(type) {
case []byte:
str += string(typedV)
case *commonpb.Payload:
var data string
err := payload.Decode(typedV, &data)
if err == nil {
str += data
} else {
str += anyToString(*typedV, printFully, maxFieldLength)
}
default:
str += val.String()
}
if i != len(v.MapKeys())-1 {
str += ", "
}
}
str += "}"
return str
default:
return fmt.Sprint(v.Interface())
}
}
// limit the maximum length for each field
func trimText(input string, maxFieldLength int) string {
if len(input) > maxFieldLength {
input = fmt.Sprintf("%s ... %s", input[:maxFieldLength/2], input[(len(input)-maxFieldLength/2):])
}
return input
}
// limit the maximum length for each field, and break long words for table item correctly wrap words
func trimTextAndBreakWords(input string, maxFieldLength int) string {
input = trimText(input, maxFieldLength)
return breakLongWords(input, maxWordLength)
}
// Long words make output in a table cell look bad;
// breaking long text "ltltltltllt..." into "ltlt ltlt lt..." lets the table autowrap work so that the output stays readable.
func breakLongWords(input string, maxWordLength int) string {
if len(input) <= maxWordLength {
return input
}
cnt := 0
for i := 0; i < len(input); i++ {
if cnt == maxWordLength {
cnt = 0
input = input[:i] + " " + input[i:]
continue
}
cnt++
if input[i] == ' ' {
cnt = 0
}
}
return input
}
// ColorEvent takes an event and return string with color
// Event with color mapping rules:
// Failed - red
// Timeout - yellow
// Canceled - magenta
// Completed - green
// Started - blue
// Others - default (white/black)
func ColorEvent(e *eventpb.HistoryEvent) string {
var data string
switch e.GetEventType() {
case eventpb.EventType_WorkflowExecutionStarted:
data = color.BlueString(e.EventType.String())
case eventpb.EventType_WorkflowExecutionCompleted:
data = color.GreenString(e.EventType.String())
case eventpb.EventType_WorkflowExecutionFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_WorkflowExecutionTimedOut:
data = color.YellowString(e.EventType.String())
case eventpb.EventType_DecisionTaskScheduled:
data = e.EventType.String()
case eventpb.EventType_DecisionTaskStarted:
data = e.EventType.String()
case eventpb.EventType_DecisionTaskCompleted:
data = e.EventType.String()
case eventpb.EventType_DecisionTaskTimedOut:
data = color.YellowString(e.EventType.String())
case eventpb.EventType_ActivityTaskScheduled:
data = e.EventType.String()
case eventpb.EventType_ActivityTaskStarted:
data = e.EventType.String()
case eventpb.EventType_ActivityTaskCompleted:
data = e.EventType.String()
case eventpb.EventType_ActivityTaskFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ActivityTaskTimedOut:
data = color.YellowString(e.EventType.String())
case eventpb.EventType_ActivityTaskCancelRequested:
data = e.EventType.String()
case eventpb.EventType_RequestCancelActivityTaskFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ActivityTaskCanceled:
data = e.EventType.String()
case eventpb.EventType_TimerStarted:
data = e.EventType.String()
case eventpb.EventType_TimerFired:
data = e.EventType.String()
case eventpb.EventType_CancelTimerFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_TimerCanceled:
data = color.MagentaString(e.EventType.String())
case eventpb.EventType_WorkflowExecutionCancelRequested:
data = e.EventType.String()
case eventpb.EventType_WorkflowExecutionCanceled:
data = color.MagentaString(e.EventType.String())
case eventpb.EventType_RequestCancelExternalWorkflowExecutionInitiated:
data = e.EventType.String()
case eventpb.EventType_RequestCancelExternalWorkflowExecutionFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ExternalWorkflowExecutionCancelRequested:
data = e.EventType.String()
case eventpb.EventType_MarkerRecorded:
data = e.EventType.String()
case eventpb.EventType_WorkflowExecutionSignaled:
data = e.EventType.String()
case eventpb.EventType_WorkflowExecutionTerminated:
data = e.EventType.String()
case eventpb.EventType_WorkflowExecutionContinuedAsNew:
data = e.EventType.String()
case eventpb.EventType_StartChildWorkflowExecutionInitiated:
data = e.EventType.String()
case eventpb.EventType_StartChildWorkflowExecutionFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionStarted:
data = color.BlueString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionCompleted:
data = color.GreenString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionCanceled:
data = color.MagentaString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionTimedOut:
data = color.YellowString(e.EventType.String())
case eventpb.EventType_ChildWorkflowExecutionTerminated:
data = e.EventType.String()
case eventpb.EventType_SignalExternalWorkflowExecutionInitiated:
data = e.EventType.String()
case eventpb.EventType_SignalExternalWorkflowExecutionFailed:
data = color.RedString(e.EventType.String())
case eventpb.EventType_ExternalWorkflowExecutionSignaled:
data = e.EventType.String()
case eventpb.EventType_UpsertWorkflowSearchAttributes:
data = e.EventType.String()
default:
data = e.EventType.String()
}
return data
}
func getEventAttributes(e *eventpb.HistoryEvent) interface{} {
var data interface{}
switch e.GetEventType() {
case eventpb.EventType_WorkflowExecutionStarted:
data = e.GetWorkflowExecutionStartedEventAttributes()
case eventpb.EventType_WorkflowExecutionCompleted:
data = e.GetWorkflowExecutionCompletedEventAttributes()
case eventpb.EventType_WorkflowExecutionFailed:
data = e.GetWorkflowExecutionFailedEventAttributes()
case eventpb.EventType_WorkflowExecutionTimedOut:
data = e.GetWorkflowExecutionTimedOutEventAttributes()
case eventpb.EventType_DecisionTaskScheduled:
data = e.GetDecisionTaskScheduledEventAttributes()
case eventpb.EventType_DecisionTaskStarted:
data = e.GetDecisionTaskStartedEventAttributes()
case eventpb.EventType_DecisionTaskCompleted:
data = e.GetDecisionTaskCompletedEventAttributes()
case eventpb.EventType_DecisionTaskTimedOut:
data = e.GetDecisionTaskTimedOutEventAttributes()
case eventpb.EventType_ActivityTaskScheduled:
data = e.GetActivityTaskScheduledEventAttributes()
case eventpb.EventType_ActivityTaskStarted:
data = e.GetActivityTaskStartedEventAttributes()
case eventpb.EventType_ActivityTaskCompleted:
data = e.GetActivityTaskCompletedEventAttributes()
case eventpb.EventType_ActivityTaskFailed:
data = e.GetActivityTaskFailedEventAttributes()
case eventpb.EventType_ActivityTaskTimedOut:
data = e.GetActivityTaskTimedOutEventAttributes()
case eventpb.EventType_ActivityTaskCancelRequested:
data = e.GetActivityTaskCancelRequestedEventAttributes()
case eventpb.EventType_RequestCancelActivityTaskFailed:
data = e.GetRequestCancelActivityTaskFailedEventAttributes()
case eventpb.EventType_ActivityTaskCanceled:
data = e.GetActivityTaskCanceledEventAttributes()
case eventpb.EventType_TimerStarted:
data = e.GetTimerStartedEventAttributes()
case eventpb.EventType_TimerFired:
data = e.GetTimerFiredEventAttributes()
case eventpb.EventType_CancelTimerFailed:
data = e.GetCancelTimerFailedEventAttributes()
case eventpb.EventType_TimerCanceled:
data = e.GetTimerCanceledEventAttributes()
case eventpb.EventType_WorkflowExecutionCancelRequested:
data = e.GetWorkflowExecutionCancelRequestedEventAttributes()
case eventpb.EventType_WorkflowExecutionCanceled:
data = e.GetWorkflowExecutionCanceledEventAttributes()
case eventpb.EventType_RequestCancelExternalWorkflowExecutionInitiated:
data = e.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes()
case eventpb.EventType_RequestCancelExternalWorkflowExecutionFailed:
data = e.GetRequestCancelExternalWorkflowExecutionFailedEventAttributes()
case eventpb.EventType_ExternalWorkflowExecutionCancelRequested:
data = e.GetExternalWorkflowExecutionCancelRequestedEventAttributes()
case eventpb.EventType_MarkerRecorded:
data = e.GetMarkerRecordedEventAttributes()
case eventpb.EventType_WorkflowExecutionSignaled:
data = e.GetWorkflowExecutionSignaledEventAttributes()
case eventpb.EventType_WorkflowExecutionTerminated:
data = e.GetWorkflowExecutionTerminatedEventAttributes()
case eventpb.EventType_WorkflowExecutionContinuedAsNew:
data = e.GetWorkflowExecutionContinuedAsNewEventAttributes()
case eventpb.EventType_StartChildWorkflowExecutionInitiated:
data = e.GetStartChildWorkflowExecutionInitiatedEventAttributes()
case eventpb.EventType_StartChildWorkflowExecutionFailed:
data = e.GetStartChildWorkflowExecutionFailedEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionStarted:
data = e.GetChildWorkflowExecutionStartedEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionCompleted:
data = e.GetChildWorkflowExecutionCompletedEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionFailed:
data = e.GetChildWorkflowExecutionFailedEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionCanceled:
data = e.GetChildWorkflowExecutionCanceledEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionTimedOut:
data = e.GetChildWorkflowExecutionTimedOutEventAttributes()
case eventpb.EventType_ChildWorkflowExecutionTerminated:
data = e.GetChildWorkflowExecutionTerminatedEventAttributes()
case eventpb.EventType_SignalExternalWorkflowExecutionInitiated:
data = e.GetSignalExternalWorkflowExecutionInitiatedEventAttributes()
case eventpb.EventType_SignalExternalWorkflowExecutionFailed:
data = e.GetSignalExternalWorkflowExecutionFailedEventAttributes()
case eventpb.EventType_ExternalWorkflowExecutionSignaled:
data = e.GetExternalWorkflowExecutionSignaledEventAttributes()
default:
data = e
}
return data
}
func isAttributeName(name string) bool {
for _, eventTypeName := range eventpb.EventType_name {
if name == eventTypeName+"EventAttributes" {
return true
}
}
return false
}
func getCurrentUserFromEnv() string {
for _, n := range envKeysForUserName {
if len(os.Getenv(n)) > 0 {
return os.Getenv(n)
}
}
return "unkown"
}
func prettyPrintJSONObject(o interface{}) {
var b []byte
var err error
if pb, ok := o.(proto.Message); ok {
encoder := codec.NewJSONPBIndentEncoder(" ")
b, err = encoder.Encode(pb)
} else {
b, err = json.MarshalIndent(o, "", " ")
}
if err != nil {
fmt.Printf("Error when try to print pretty: %v\n", err)
fmt.Println(o)
}
_, _ = os.Stdout.Write(b)
fmt.Println()
}
func mapKeysToArray(m map[string]string) []string {
var out []string
for k := range m {
out = append(out, k)
}
return out
}
func printError(msg string, err error) {
if err != nil {
fmt.Printf("%s %s\n%s %+v\n", colorRed("Error:"), msg, colorMagenta("Error Details:"), err)
if os.Getenv(showErrorStackEnv) != `` {
fmt.Printf("Stack trace:\n")
debug.PrintStack()
} else {
fmt.Printf("('export %s=1' to see stack traces)\n", showErrorStackEnv)
}
} else {
fmt.Printf("%s %s\n", colorRed("Error:"), msg)
}
}
// ErrorAndExit print easy to understand error msg first then error detail in a new line
func ErrorAndExit(msg string, err error) {
printError(msg, err)
osExit(1)
}
func getWorkflowClient(c *cli.Context) sdkclient.Client {
namespace := getRequiredGlobalOption(c, FlagNamespace)
return cFactory.SDKClient(c, namespace)
}
func getWorkflowClientWithOptionalNamespace(c *cli.Context) sdkclient.Client {
if !c.GlobalIsSet(FlagNamespace) {
_ = c.GlobalSet(FlagNamespace, "system-namespace")
}
return getWorkflowClient(c)
}
func getRequiredOption(c *cli.Context, optionName string) string {
value := c.String(optionName)
if len(value) == 0 {
ErrorAndExit(fmt.Sprintf("Option %s is required", optionName), nil)
}
return value
}
func getRequiredInt64Option(c *cli.Context, optionName string) int64 {
if !c.IsSet(optionName) {
ErrorAndExit(fmt.Sprintf("Option %s is required", optionName), nil)
}
return c.Int64(optionName)
}
func getRequiredIntOption(c *cli.Context, optionName string) int {
if !c.IsSet(optionName) {
ErrorAndExit(fmt.Sprintf("Option %s is required", optionName), nil)
}
return c.Int(optionName)
}
func getRequiredGlobalOption(c *cli.Context, optionName string) string {
value := c.GlobalString(optionName)
if len(value) == 0 {
ErrorAndExit(fmt.Sprintf("Global option %s is required", optionName), nil)
}
return value
}
func convertTime(unixNano int64, onlyTime bool) string {
t := time.Unix(0, unixNano)
var result string
if onlyTime {
result = t.Format(defaultTimeFormat)
} else {
result = t.Format(defaultDateTimeFormat)
}
return result
}
func parseTime(timeStr string, defaultValue int64, now time.Time) int64 {
if len(timeStr) == 0 {
return defaultValue
}
// try to parse
parsedTime, err := time.Parse(defaultDateTimeFormat, timeStr)
if err == nil {
return parsedTime.UnixNano()
}
// treat as raw time
resultValue, err := strconv.ParseInt(timeStr, 10, 64)
if err == nil {
return resultValue
}
// treat as time range format
parsedTime, err = parseTimeRange(timeStr, now)
if err != nil {
ErrorAndExit(fmt.Sprintf("Cannot parse time '%s', use UTC format '2006-01-02T15:04:05Z', "+
"time range or raw UnixNano directly. See help for more details.", timeStr), err)
}
return parsedTime.UnixNano()
}
// parseTimeRange parses a given time duration string (in format X<time-duration>) and
// returns parsed timestamp given that duration in the past from current time.
// All valid values must contain a number followed by a time-duration, from the following list (long form/short form):
// - second/s
// - minute/m
// - hour/h
// - day/d
// - week/w
// - month/M
// - year/y
// For example, possible input values, and their result:
// - "3d" or "3day" --> three days --> time.Now().Add(-3 * 24 * time.Hour)
// - "2m" or "2minute" --> two minutes --> time.Now().Add(-2 * time.Minute)
// - "1w" or "1week" --> one week --> time.Now().Add(-7 * 24 * time.Hour)
// - "30s" or "30second" --> thirty seconds --> time.Now().Add(-30 * time.Second)
// Note: Duration strings are case-sensitive, and should be used as mentioned above only.
// Limitation: Value of numerical multiplier, X should be in b/w 0 - 1e6 (1 million), boundary values excluded i.e.
// 0 < X < 1e6. Also, the maximum time in the past can be 1 January 1970 00:00:00 UTC (epoch time),
// so giving "1000y" will result in epoch time.
func parseTimeRange(timeRange string, now time.Time) (time.Time, error) {
match, err := regexp.MatchString(defaultDateTimeRangeShortRE, timeRange)
	if !match { // fall back to checking the longer notation
match, err = regexp.MatchString(defaultDateTimeRangeLongRE, timeRange)
}
if err != nil {
return time.Time{}, err
}
re, _ := regexp.Compile(defaultDateTimeRangeNum)
idx := re.FindStringSubmatchIndex(timeRange)
if idx == nil {
return time.Time{}, fmt.Errorf("cannot parse timeRange %s", timeRange)
}
num, err := strconv.Atoi(timeRange[idx[0]:idx[1]])
if err != nil {
return time.Time{}, fmt.Errorf("cannot parse timeRange %s", timeRange)
}
if num >= 1e6 {
return time.Time{}, fmt.Errorf("invalid time-duation multiplier %d, allowed range is 0 < multiplier < 1000000", num)
}
dur, err := parseTimeDuration(timeRange[idx[1]:])
if err != nil {
return time.Time{}, fmt.Errorf("cannot parse timeRange %s", timeRange)
}
res := now.Add(time.Duration(-num) * dur) // using server's local timezone
epochTime := time.Unix(0, 0)
if res.Before(epochTime) {
res = epochTime
}
return res, nil
}
// parseTimeDuration parses the given time duration in either short or long convention
// and returns the time.Duration
// Valid values (long notation/short notation):
// - second/s
// - minute/m
// - hour/h
// - day/d
// - week/w
// - month/M
// - year/y
// NOTE: the input "duration" is case-sensitive
func parseTimeDuration(duration string) (dur time.Duration, err error) {
switch duration {
case "s", "second":
dur = time.Second
case "m", "minute":
dur = time.Minute
case "h", "hour":
dur = time.Hour
case "d", "day":
dur = day
case "w", "week":
dur = week
case "M", "month":
dur = month
case "y", "year":
dur = year
default:
err = fmt.Errorf("unknown time duration %s", duration)
}
return
}
func strToTaskListType(str string) tasklistpb.TaskListType {
if strings.ToLower(str) == "activity" {
return tasklistpb.TaskListType_Activity
}
return tasklistpb.TaskListType_Decision
}
func getCliIdentity() string {
hostName, err := os.Hostname()
if err != nil {
hostName = "UnKnown"
}
return fmt.Sprintf("tctl@%s", hostName)
}
func newContext(c *cli.Context) (context.Context, context.CancelFunc) {
return newContextWithTimeout(c, defaultContextTimeout)
}
func newContextForLongPoll(c *cli.Context) (context.Context, context.CancelFunc) {
return newContextWithTimeout(c, defaultContextTimeoutForLongPoll)
}
func newContextWithTimeout(c *cli.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if c.GlobalIsSet(FlagContextTimeout) {
timeout = time.Duration(c.GlobalInt(FlagContextTimeout)) * time.Second
}
return rpc.NewContextWithTimeoutAndCLIHeaders(timeout)
}
// process and validate input provided through cmd or file
func processJSONInput(c *cli.Context) string {
return processJSONInputHelper(c, jsonTypeInput)
}
// process and validate json
func processJSONInputHelper(c *cli.Context, jType jsonType) string {
var flagNameOfRawInput string
var flagNameOfInputFileName string
switch jType {
case jsonTypeInput:
flagNameOfRawInput = FlagInput
flagNameOfInputFileName = FlagInputFile
case jsonTypeMemo:
flagNameOfRawInput = FlagMemo
flagNameOfInputFileName = FlagMemoFile
default:
return ""
}
var input string
if c.IsSet(flagNameOfRawInput) {
input = c.String(flagNameOfRawInput)
} else if c.IsSet(flagNameOfInputFileName) {
inputFile := c.String(flagNameOfInputFileName)
// This method is purely used to parse input from the CLI. The input comes from a trusted user
// #nosec
data, err := ioutil.ReadFile(inputFile)
if err != nil {
ErrorAndExit("Error reading input file", err)
}
input = string(data)
}
if input != "" {
if err := validateJSONs(input); err != nil {
ErrorAndExit("Input is not valid JSON, or JSONs concatenated with spaces/newlines.", err)
}
}
return input
}
// validate whether str is a valid json or multi valid json concatenated with spaces/newlines
func validateJSONs(str string) error {
input := []byte(str)
dec := json.NewDecoder(bytes.NewReader(input))
for {
_, err := dec.Token()
if err == io.EOF {
return nil // End of input, valid JSON
}
if err != nil {
return err // Invalid input
}
}
}
// use parseBool to ensure all BOOL search attributes only be "true" or "false"
func parseBool(str string) (bool, error) {
switch str {
case "true":
return true, nil
case "false":
return false, nil
}
return false, fmt.Errorf("not parseable bool value: %s", str)
}
func trimSpace(strs []string) []string {
result := make([]string, len(strs))
for i, v := range strs {
result[i] = strings.TrimSpace(v)
}
return result
}
func parseArray(v string) (interface{}, error) {
if len(v) > 0 && v[0] == '[' && v[len(v)-1] == ']' {
parsedValues, err := fastjson.Parse(v)
if err != nil {
return nil, err
}
arr, err := parsedValues.Array()
if err != nil {
return nil, err
}
result := make([]interface{}, len(arr))
for i, item := range arr {
s := item.String()
		if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' { // remove the extra quotes added by JSON encoding
s = s[1 : len(s)-1]
if sTime, err := time.Parse(defaultDateTimeFormat, s); err == nil {
result[i] = sTime
continue
}
}
result[i] = s
}
return result, nil
}
return nil, errors.New("not array")
}
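// convertStringToRealType tries each parser in order of precedence: int64, bool,
// float64, datetime (defaultDateTimeFormat), JSON array, and finally falls back to
// returning the raw string unchanged.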
func convertStringToRealType(v string) interface{} {
var genVal interface{}
var err error
if genVal, err = strconv.ParseInt(v, 10, 64); err == nil {
} else if genVal, err = parseBool(v); err == nil {
} else if genVal, err = strconv.ParseFloat(v, 64); err == nil {
} else if genVal, err = time.Parse(defaultDateTimeFormat, v); err == nil {
} else if genVal, err = parseArray(v); err == nil {
} else {
genVal = v
}
return genVal
}
func truncate(str string) string {
if len(str) > maxOutputStringLength {
return str[:maxOutputStringLength]
}
return str
}
// this only works for ANSI terminal, which means remove existing lines won't work if users redirect to file
// ref: https://en.wikipedia.org/wiki/ANSI_escape_code
func removePrevious2LinesFromTerminal() {
fmt.Printf("\033[1A")
fmt.Printf("\033[2K")
fmt.Printf("\033[1A")
fmt.Printf("\033[2K")
}
func showNextPage() bool {
fmt.Printf("Press %s to show next page, press %s to quit: ",
color.GreenString("Enter"), color.RedString("any other key then Enter"))
var input string
_, _ = fmt.Scanln(&input)
return strings.Trim(input, " ") == ""
}
// prompt will show input msg, then waiting user input y/yes to continue
func prompt(msg string, autoConfirm bool) {
reader := bufio.NewReader(os.Stdin)
fmt.Print(msg, " ")
var text string
if autoConfirm {
text = "y"
fmt.Print("y")
} else {
text, _ = reader.ReadString('\n')
}
fmt.Println()
textLower := strings.ToLower(strings.TrimRight(text, "\n"))
if textLower != "y" && textLower != "yes" {
os.Exit(0)
}
}
| 1 | 9,525 | Is this getting used in other places? Can we switch all the places to use the new API you added? | temporalio-temporal | go |
@@ -483,17 +483,6 @@ int HTTP_OP::libcurl_exec(
curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYPEER, 1L);
//curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYPEER, FALSE);
- // if the above is nonzero, you need the following:
- //
-#ifndef _WIN32
- if (boinc_file_exists(CA_BUNDLE_FILENAME)) {
- // call this only if a local copy of ca-bundle.crt exists;
- // otherwise, let's hope that it exists in the default place
- //
- curl_easy_setopt(curlEasy, CURLOPT_CAINFO, CA_BUNDLE_FILENAME);
- }
-#endif
-
// set the user agent as this boinc client & version
//
curl_easy_setopt(curlEasy, CURLOPT_USERAGENT, g_user_agent_string); | 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
#include "cpp.h"
#ifdef _WIN32
#include "boinc_win.h"
#else
#include "config.h"
#include <cstring>
#include <sstream>
#include <algorithm>
#include <sys/stat.h>
#include <cerrno>
#include <unistd.h>
#include <fcntl.h>
#if HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#endif
#include "base64.h"
#include "error_numbers.h"
#include "filesys.h"
#include "str_util.h"
#include "str_replace.h"
#include "url.h"
#include "util.h"
#include "client_msgs.h"
#include "client_state.h"
#include "cs_proxy.h"
#include "file_names.h"
#include "log_flags.h"
#include "network.h"
#include "net_stats.h"
#include "project.h"
#include "http_curl.h"
using std::min;
using std::vector;
static CURLM* g_curlMulti = NULL;
static char g_user_agent_string[256] = {""};
static unsigned int g_trace_count = 0;
static bool got_expectation_failed = false;
// Whether we've got a 417 HTTP error.
// If we did, it's probably because we talked HTTP 1.1 to a 1.0 proxy;
// use 1.0 from now on.
static void get_user_agent_string() {
if (g_user_agent_string[0]) return;
snprintf(g_user_agent_string, sizeof(g_user_agent_string),
"BOINC client (%s %d.%d.%d)",
HOSTTYPE,
BOINC_MAJOR_VERSION, BOINC_MINOR_VERSION, BOINC_RELEASE
);
if (strlen(gstate.client_brand)) {
char buf[1024];
snprintf(buf, sizeof(buf), " (%s)", gstate.client_brand);
safe_strcat(g_user_agent_string, buf);
}
}
size_t libcurl_write(void *ptr, size_t size, size_t nmemb, HTTP_OP* phop) {
// take the stream param as a FILE* and write to disk
    // TODO: maybe assert stWrite == size*nmemb,
// add exception handling on phop members
//
size_t stWrite = fwrite(ptr, size, nmemb, phop->fileOut);
if (log_flags.http_xfer_debug) {
msg_printf(NULL, MSG_INFO,
"[http_xfer] [ID#%d] HTTP: wrote %d bytes", phop->trace_id, (int)stWrite
);
}
phop->bytes_xferred += (double)(stWrite);
phop->update_speed(); // this should update the transfer speed
daily_xfer_history.add(stWrite, false);
return stWrite;
}
size_t libcurl_read(void *ptr, size_t size, size_t nmemb, HTTP_OP* phop) {
// OK here's the deal -- phop points to the calling object,
// which has already pre-opened the file. we'll want to
// use pByte as a pointer for fseek calls into the file, and
// write out size*nmemb # of bytes to ptr
// take the stream param as a FILE* and write to disk
// if (pByte) delete [] pByte;
// pByte = new unsigned char[content_length];
    //    memset(pByte, 0x00, content_length); // may as well initialize it!
// note that fileIn was opened earlier,
// go to lSeek from the top and read from there
//
size_t stSend = size * nmemb;
int stRead = 0;
if (phop->req1 && !phop->bSentHeader) {
// need to send headers first, then data file
// so requests from 0 to strlen(req1)-1 are from memory,
// and from strlen(req1) to content_length are from the file
if (phop->lSeek < (long) strlen(phop->req1)) {
// need to read header, either just starting to read
// (i.e. this is the first time in this function for this phop)
// or the last read didn't ask for the entire header
stRead = (int)strlen(phop->req1) - phop->lSeek;
// how much of header left to read
// only memcpy if request isn't out of bounds
if (stRead < 0) {
stRead = 0;
} else {
memcpy(ptr, (void*)(phop->req1 + phop->lSeek), stRead);
}
phop->lSeek += (long) stRead; // increment lSeek to new position
// Don't count header in bytes transferred.
// Otherwise the GUI will show e.g. "400 out of 300 bytes xferred"
//phop->bytes_xferred += (double)(stRead);
daily_xfer_history.add(stRead, true);
// see if we're done with headers
if (phop->lSeek >= (long) strlen(phop->req1)) {
phop->bSentHeader = true;
phop->lSeek = 0;
}
return stRead;
} else {
// shouldn't happen
phop->bSentHeader = true;
phop->lSeek = 0;
}
}
if (phop->fileIn) {
long lFileSeek = phop->lSeek + (long) phop->file_offset;
fseek(phop->fileIn, lFileSeek, SEEK_SET);
if (!feof(phop->fileIn)) {
stRead = (int)fread(ptr, 1, stSend, phop->fileIn);
}
phop->lSeek += (long) stRead;
phop->bytes_xferred += (double)(stRead);
daily_xfer_history.add(stRead, true);
}
phop->update_speed();
return stRead;
}
curlioerr libcurl_ioctl(CURL*, curliocmd cmd, HTTP_OP* phop) {
// reset input stream to beginning - resends header
// and restarts data back to starting point
switch(cmd) {
case CURLIOCMD_RESTARTREAD:
phop->lSeek = 0;
phop->bytes_xferred = phop->file_offset;
phop->bSentHeader = false;
break;
default: // should never get here
return CURLIOE_UNKNOWNCMD;
}
return CURLIOE_OK;
}
void libcurl_logdebug(
HTTP_OP* phop, const char* desc, char *data
) {
if (!log_flags.http_debug) return;
char hdr[256];
char buf[2048], *p = buf;
sprintf(hdr, "[ID#%u] %s", phop->trace_id, desc);
strlcpy(buf, data, sizeof(buf));
p = strtok(buf, "\n");
while(p) {
msg_printf(phop->project, MSG_INFO,
"[http] %s %s\n", hdr, p
);
p = strtok(NULL, "\n");
}
}
int libcurl_debugfunction(
CURL*, curl_infotype type, char *data, size_t /*size*/, HTTP_OP* phop
) {
const char* desc = NULL;
switch (type) {
case CURLINFO_TEXT:
desc = "Info: ";
break;
case CURLINFO_HEADER_OUT:
desc = "Sent header to server:";
break;
case CURLINFO_HEADER_IN:
desc = "Received header from server:";
break;
default: /* in case a new one is introduced to shock us */
return 0;
}
libcurl_logdebug(phop, desc, data);
return 0;
}
void HTTP_OP::init(PROJECT* p) {
reset();
start_time = gstate.now;
start_bytes_xferred = 0;
project = p;
}
void HTTP_OP::reset() {
req1 = NULL;
req1_len = 0;
safe_strcpy(infile, "");
safe_strcpy(outfile, "");
safe_strcpy(error_msg, "");
CurlResult = CURLE_OK;
bTempOutfile = true;
want_download = false;
want_upload = false;
fileIn = NULL;
fileOut = NULL;
connect_error = 0;
response = 0;
start_time = 0;
bytes_xferred = 0;
start_bytes_xferred = 0;
bSentHeader = false;
project = 0;
close_socket();
}
HTTP_OP::HTTP_OP() {
safe_strcpy(m_url, "");
safe_strcpy(m_curl_user_credentials, "");
content_length = 0;
file_offset = 0;
safe_strcpy(request_header, "");
http_op_state = HTTP_STATE_IDLE;
http_op_type = HTTP_OP_NONE;
http_op_retval = 0;
trace_id = g_trace_count++;
pcurlList = NULL; // these have to be NULL, just in constructor
curlEasy = NULL;
pcurlFormStart = NULL;
pcurlFormEnd = NULL;
pByte = NULL;
lSeek = 0;
xfer_speed = 0;
is_background = false;
reset();
}
HTTP_OP::~HTTP_OP() {
close_socket();
close_file();
}
// Initialize HTTP GET operation;
// output goes to the given file, starting at given offset
//
int HTTP_OP::init_get(
PROJECT* p, const char* url, const char* out, bool del_old_file,
double off, double size
) {
if (del_old_file) {
unlink(out);
}
req1 = NULL; // not using req1, but init_post2 uses it
file_offset = off;
HTTP_OP::init(p);
// usually have an outfile on a get
if (off != 0) {
bytes_xferred = off;
start_bytes_xferred = off;
}
http_op_type = HTTP_OP_GET;
http_op_state = HTTP_STATE_CONNECTING;
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO, "[http] HTTP_OP::init_get(): %s", url);
}
return HTTP_OP::libcurl_exec(url, NULL, out, off, size, false);
}
// Initialize HTTP POST operation where
// the input is a file, and the output is a file,
// and both are read/written from the beginning (no resumption of partial ops)
// This is used for scheduler requests and account mgr RPCs.
//
int HTTP_OP::init_post(
PROJECT* p, const char* url, const char* in, const char* out
) {
int retval;
double size;
req1 = NULL; // not using req1, but init_post2 uses it
if (in) {
safe_strcpy(infile, in);
retval = file_size(infile, size);
if (retval) return retval; // this will return 0 or ERR_NOT_FOUND
content_length = (int)size;
}
HTTP_OP::init(p);
http_op_type = HTTP_OP_POST;
http_op_state = HTTP_STATE_CONNECTING;
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO, "[http] HTTP_OP::init_post(): %s", url);
}
return HTTP_OP::libcurl_exec(url, in, out, 0, 0, true);
}
// Initialize an HTTP POST operation,
// where the input is a memory string (r1) followed by an optional file (in)
// with optional offset,
// and the output goes to memory (also r1, limited by r1_len)
// This is used for file upload (both get_file_size and file_upload)
// and for trickle-ups.
//
int HTTP_OP::init_post2(
PROJECT* p, const char* url, char* r1, int r1_len, const char* in, double offset
) {
int retval;
double size;
init(p);
req1 = r1;
req1_len = r1_len;
content_length = 0;
if (in) {
safe_strcpy(infile, in);
file_offset = offset;
retval = file_size(infile, size);
if (retval) {
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO, "[http] HTTP::init_post2: couldn't get file size");
}
return retval; // this will be 0 or ERR_NOT_FOUND
}
content_length = (int)size - (int)offset;
}
content_length += (int)strlen(req1);
http_op_type = HTTP_OP_POST2;
http_op_state = HTTP_STATE_CONNECTING;
return HTTP_OP::libcurl_exec(url, in, NULL, offset, 0, true);
}
// is URL in proxy exception list?
//
bool HTTP_OP::no_proxy_for_url(const char* url) {
PARSED_URL purl, purl2;
char noproxy[256];
if (log_flags.proxy_debug) {
msg_printf(0, MSG_INFO, "[proxy] HTTP_OP::no_proxy_for_url(): %s", url);
}
parse_url(url, purl);
// tokenize the noproxy-entry and check for identical hosts
//
safe_strcpy(noproxy, working_proxy_info.noproxy_hosts);
char* token = strtok(noproxy, ",");
while (token != NULL) {
// extract the host from the no_proxy url
parse_url(token, purl2);
if (!strcmp(purl.host, purl2.host)) {
if (log_flags.proxy_debug) {
msg_printf(0, MSG_INFO, "[proxy] disabling proxy for %s", url);
}
return true;
}
token = strtok(NULL, ",");
}
if (log_flags.proxy_debug) {
msg_printf(0, MSG_INFO, "[proxy] returning false");
}
return false;
}
#ifndef _WIN32
static int set_cloexec(void*, curl_socket_t fd, curlsocktype purpose) {
if (purpose != CURLSOCKTYPE_IPCXN) return 0;
fcntl(fd, F_SETFD, FD_CLOEXEC);
return 0;
}
#endif
// the following will do an HTTP GET or POST using libcurl
//
int HTTP_OP::libcurl_exec(
const char* url, const char* in, const char* out, double offset,
#ifdef _WIN32
double size,
#else
double,
#endif
bool is_post
) {
CURLMcode curlMErr;
char buf[256];
static int outfile_seqno=0;
if (g_user_agent_string[0] == 0x00) {
get_user_agent_string();
}
if (in) {
safe_strcpy(infile, in);
}
if (out) {
bTempOutfile = false;
safe_strcpy(outfile, out);
} else {
// always want an outfile for the server response, delete when op done
bTempOutfile = true;
snprintf(outfile, sizeof(outfile), "http_temp_%d", outfile_seqno++);
}
curlEasy = curl_easy_init(); // get a curl_easy handle to use
if (!curlEasy) {
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO, "Couldn't create curlEasy handle");
}
return ERR_HTTP_TRANSIENT; // returns 0 (CURLM_OK) on successful handle creation
}
// the following seems to be a no-op
// curl_easy_setopt(curlEasy, CURLOPT_ERRORBUFFER, error_msg);
string_substitute(url, m_url, sizeof(m_url), " ", "%20");
curl_easy_setopt(curlEasy, CURLOPT_URL, m_url);
// This option determines whether curl verifies that the server
// claims to be who you want it to be.
// When negotiating an SSL connection,
// the server sends a certificate indicating its identity.
// When CURLOPT_SSL_VERIFYHOST is 2,
// that certificate must indicate that the server is the server
// to which you meant to connect, or the connection fails.
// Curl considers the server the intended one when the
// Common Name field or a Subject Alternate Name field in the certificate
// matches the host name in the URL to which you told Curl to connect.
// When the value is 1, the certificate must contain a Common Name field,
// but it doesn't matter what name it says.
// (This is not ordinarily a useful setting).
// When the value is 0, the connection succeeds
// regardless of the names in the certificate.
// The default, since 7.10, is 2.
// The checking this option controls is of the identity that
// the server claims. The server could be lying.
// To control lying, see CURLOPT_SSL_VERIFYPEER.
//
curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYHOST, 2L);
//curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYHOST, 0);
// the following sets "tough" certificate checking
// (i.e. whether self-signed is OK)
// if zero below, will accept self-signed certificates
// (cert not 3rd party trusted)
// if non-zero below, you need a valid 3rd party CA (i.e. Verisign, Thawte)
//
curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYPEER, 1L);
//curl_easy_setopt(curlEasy, CURLOPT_SSL_VERIFYPEER, FALSE);
// if the above is nonzero, you need the following:
//
#ifndef _WIN32
if (boinc_file_exists(CA_BUNDLE_FILENAME)) {
// call this only if a local copy of ca-bundle.crt exists;
// otherwise, let's hope that it exists in the default place
//
curl_easy_setopt(curlEasy, CURLOPT_CAINFO, CA_BUNDLE_FILENAME);
}
#endif
// set the user agent as this boinc client & version
//
curl_easy_setopt(curlEasy, CURLOPT_USERAGENT, g_user_agent_string);
// bypass any signal handlers that curl may want to install
//
curl_easy_setopt(curlEasy, CURLOPT_NOSIGNAL, 1L);
// bypass progress meter
//
curl_easy_setopt(curlEasy, CURLOPT_NOPROGRESS, 1L);
#ifndef _WIN32
// arrange for a function to get called between socket() and connect()
// so that we can mark the socket as close-on-exec
//
curl_easy_setopt(curlEasy, CURLOPT_SOCKOPTFUNCTION, set_cloexec);
#endif
// setup timeouts
//
curl_easy_setopt(curlEasy, CURLOPT_TIMEOUT, 0L);
curl_easy_setopt(curlEasy, CURLOPT_LOW_SPEED_LIMIT, cc_config.http_transfer_timeout_bps);
curl_easy_setopt(curlEasy, CURLOPT_LOW_SPEED_TIME, cc_config.http_transfer_timeout);
curl_easy_setopt(curlEasy, CURLOPT_CONNECTTIMEOUT, 120L);
// force curl to use HTTP/1.0 if config specifies it
// (curl uses 1.1 by default)
//
if (cc_config.http_1_0 || (cc_config.force_auth == "ntlm") || got_expectation_failed) {
curl_easy_setopt(curlEasy, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0);
}
curl_easy_setopt(curlEasy, CURLOPT_MAXREDIRS, 50L);
curl_easy_setopt(curlEasy, CURLOPT_AUTOREFERER, 1L);
curl_easy_setopt(curlEasy, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curlEasy, CURLOPT_POST301, 1L);
// if we tell Curl to accept any encoding (e.g. deflate)
// it seems to accept them all, which screws up projects that
// use gzip at the application level.
// So, detect this and don't accept any encoding in that case
//
// Per: http://curl.haxx.se/dev/readme-encoding.html
// NULL disables, empty string accepts all.
if (out) {
if (ends_with(out, ".gzt") || ends_with(out, ".gz") || ends_with(out, ".tgz")) {
curl_easy_setopt(curlEasy, CURLOPT_ENCODING, NULL);
} else {
curl_easy_setopt(curlEasy, CURLOPT_ENCODING, "");
}
} else {
curl_easy_setopt(curlEasy, CURLOPT_ENCODING, "");
}
// setup any proxy they may need
//
setup_proxy_session(no_proxy_for_url(url));
if (strlen(gstate.language)) {
snprintf(buf, sizeof(buf), "Accept-Language: %s", gstate.language);
pcurlList = curl_slist_append(pcurlList, buf);
}
// set the file offset for resumable downloads
//
if (!is_post && offset>0.0f) {
file_offset = offset;
snprintf(buf, sizeof(buf), "Range: bytes=%.0f-", offset);
pcurlList = curl_slist_append(pcurlList, buf);
}
// set up an output file for the reply
//
if (strlen(outfile)) {
if (file_offset > 0) {
fileOut = boinc_fopen(outfile, "ab+");
} else {
#ifdef _WIN32
// on Win, pre-allocate big files to avoid fragmentation
//
if (size > 1e6) {
boinc_allocate_file(outfile, size);
}
#endif
fileOut = boinc_fopen(outfile, "wb+");
}
if (!fileOut) {
msg_printf(NULL, MSG_INTERNAL_ERROR,
"Can't create HTTP response output file %s", outfile
);
http_op_retval = ERR_FOPEN;
http_op_state = HTTP_STATE_DONE;
return ERR_FOPEN;
}
// we can make the libcurl_write "fancier" in the future,
// for now it just fwrite's to the file request, which is sufficient
//
curl_easy_setopt(curlEasy, CURLOPT_WRITEFUNCTION, libcurl_write);
// note that in my lib_write I'm sending in a pointer
// to this instance of HTTP_OP
//
curl_easy_setopt(curlEasy, CURLOPT_WRITEDATA, this);
}
if (is_post) {
want_upload = true;
want_download = false;
if (infile && strlen(infile)>0) {
fileIn = boinc_fopen(infile, "rb");
if (!fileIn) {
msg_printf(NULL, MSG_INTERNAL_ERROR, "No HTTP input file %s", infile);
http_op_retval = ERR_FOPEN;
http_op_state = HTTP_STATE_DONE;
return ERR_FOPEN;
}
}
if (pcurlList) { // send custom headers if required
curl_easy_setopt(curlEasy, CURLOPT_HTTPHEADER, pcurlList);
}
// set the data file info to read for the PUT/POST
// note the use of this curl typedef for large filesizes
#if 0
// HTTP PUT method
curl_off_t fs = (curl_off_t) content_length;
curl_easy_setopt(curlEasy, CURLOPT_POSTFIELDS, NULL);
curl_easy_setopt(curlEasy, CURLOPT_INFILESIZE, content_length);
curl_easy_setopt(curlEasy, CURLOPT_READDATA, fileIn);
curl_easy_setopt(curlEasy, CURLOPT_INFILESIZE_LARGE, fs);
curl_easy_setopt(curlEasy, CURLOPT_PUT, 1L);
#endif
// HTTP POST method
// set the multipart form for the file --
// boinc just has the one section (file)
#if 0
// if we ever want to do POST as multipart forms someday
// (many seem to prefer it that way, i.e. libcurl)
//
pcurlFormStart = pcurlFormEnd = NULL;
curl_formadd(&pcurlFormStart, &pcurlFormEnd,
CURLFORM_FILECONTENT, infile,
CURLFORM_CONTENTSLENGTH, content_length,
CURLFORM_END
);
curl_formadd(&post, &last,
CURLFORM_COPYNAME, "logotype-image",
CURLFORM_FILECONTENT, "curl.png", CURLFORM_END
);
curl_easy_setopt(curlEasy, CURLOPT_HTTPPOST, pcurlFormStart);
#endif
curl_off_t fs = (curl_off_t) content_length;
pByte = NULL;
lSeek = 0; // initialize the vars we're going to use for byte transfers
// we can make the libcurl_read "fancier" in the future,
// for now it just fwrite's to the file request, which is sufficient
//
curl_easy_setopt(curlEasy, CURLOPT_POSTFIELDS, NULL);
curl_easy_setopt(curlEasy, CURLOPT_POSTFIELDSIZE_LARGE, fs);
curl_easy_setopt(curlEasy, CURLOPT_READFUNCTION, libcurl_read);
// in my lib_write I'm sending in a pointer to this instance of HTTP_OP
//
curl_easy_setopt(curlEasy, CURLOPT_READDATA, this);
// callback function to rewind input file
//
curl_easy_setopt(curlEasy, CURLOPT_IOCTLFUNCTION, libcurl_ioctl);
curl_easy_setopt(curlEasy, CURLOPT_IOCTLDATA, this);
curl_easy_setopt(curlEasy, CURLOPT_POST, 1L);
} else { // GET
want_upload = false;
want_download = true;
// now write the header, pcurlList gets freed in net_xfer_curl
//
if (pcurlList) { // send custom headers if required
curl_easy_setopt(curlEasy, CURLOPT_HTTPHEADER, pcurlList);
}
// setup the GET!
//
curl_easy_setopt(curlEasy, CURLOPT_HTTPGET, 1L);
}
#ifdef __APPLE__
// cURL 7.19.7 with c-ares 1.7.0 did not fall back to IPv4 when IPv6
// DNS lookup failed on Macs with certain default settings if connected
// to the Internet by an AT&T U-Verse 2-Wire Gateway. This work-around
// may not be needed any more for cURL 7.21.7, but keep it to be safe.
curl_easy_setopt(curlEasy, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
#endif
// turn on debug info if tracing enabled
//
if (log_flags.http_debug) {
curl_easy_setopt(curlEasy, CURLOPT_DEBUGFUNCTION, libcurl_debugfunction);
curl_easy_setopt(curlEasy, CURLOPT_DEBUGDATA, this );
curl_easy_setopt(curlEasy, CURLOPT_VERBOSE, 1L);
}
// last but not least, add this to the curl_multi
curlMErr = curl_multi_add_handle(g_curlMulti, curlEasy);
if (curlMErr != CURLM_OK && curlMErr != CURLM_CALL_MULTI_PERFORM) {
// bad error, couldn't attach easy curl handle
msg_printf(0, MSG_INTERNAL_ERROR,
"Couldn't add curlEasy handle to curlMulti"
);
return ERR_HTTP_TRANSIENT;
// returns 0 (CURLM_OK) on successful handle creation
}
return 0;
}
// Returns true if the HTTP operation is complete
//
bool HTTP_OP::http_op_done() {
return (http_op_state == HTTP_STATE_DONE);
}
HTTP_OP_SET::HTTP_OP_SET() {
bytes_up = 0;
bytes_down = 0;
}
// Adds an HTTP_OP to the set
//
void HTTP_OP_SET::insert(HTTP_OP* ho) {
http_ops.push_back(ho);
}
// Remove an HTTP_OP from the set
//
int HTTP_OP_SET::remove(HTTP_OP* p) {
vector<HTTP_OP*>::iterator iter;
iter = http_ops.begin();
while (iter != http_ops.end()) {
if (*iter == p) {
iter = http_ops.erase(iter);
return 0;
}
++iter;
}
return ERR_NOT_FOUND;
}
int HTTP_OP_SET::nops() {
return (int)http_ops.size();
}
// Curl self-explanatory setopt params for proxies:
// CURLOPT_HTTPPROXYTUNNEL
// CURLOPT_PROXYTYPE (pass in CURLPROXY_HTTP or CURLPROXY_SOCKS5)
// CURLOPT_PROXYPORT -- a long port #
// CURLOPT_PROXY - pass in char* of the proxy url
// CURLOPT_PROXYUSERPWD -- a char* in the format username:password
// CURLOPT_HTTPAUTH -- pass in one of CURLAUTH_BASIC, CURLAUTH_DIGEST,
// CURLAUTH_GSSNEGOTIATE, CURLAUTH_NTLM, CURLAUTH_ANY, CURLAUTH_ANYSAFE
// CURLOPT_PROXYAUTH -- "or" | the above bitmasks -- only basic, digest, ntlm work
void HTTP_OP::setup_proxy_session(bool no_proxy) {
// CMC Note: the string m_curl_user_credentials must remain in memory
// outside of this method (libcurl relies on it later when it makes
// the proxy connection), so it has been placed as a member data for HTTP_OP
//
safe_strcpy(m_curl_user_credentials, "");
if (no_proxy) {
curl_easy_setopt(curlEasy, CURLOPT_PROXY, "");
return;
}
pi = working_proxy_info;
if (pi.use_http_proxy) {
if (log_flags.proxy_debug) {
msg_printf(
0, MSG_INFO, "[proxy]: setting up proxy %s:%d",
pi.http_server_name, pi.http_server_port
);
}
// setup a basic http proxy
curl_easy_setopt(curlEasy, CURLOPT_PROXYTYPE, CURLPROXY_HTTP);
curl_easy_setopt(curlEasy, CURLOPT_PROXYPORT, (long) pi.http_server_port);
curl_easy_setopt(curlEasy, CURLOPT_PROXY, (char*) pi.http_server_name);
if (pi.use_http_auth) {
if (cc_config.force_auth == "basic") {
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_BASIC);
} else if (cc_config.force_auth == "digest") {
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_DIGEST);
} else if (cc_config.force_auth == "gss-negotiate") {
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_GSSNEGOTIATE);
} else if (cc_config.force_auth == "ntlm") {
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_NTLM);
} else {
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
}
snprintf(m_curl_user_credentials, sizeof(m_curl_user_credentials),
"%s:%s",
pi.http_user_name, pi.http_user_passwd
);
curl_easy_setopt(curlEasy, CURLOPT_PROXYUSERPWD, m_curl_user_credentials);
}
} else if (pi.use_socks_proxy) {
// CURL only supports SOCKS version 5
curl_easy_setopt(curlEasy, CURLOPT_PROXYTYPE,
pi.socks5_remote_dns?CURLPROXY_SOCKS5_HOSTNAME:CURLPROXY_SOCKS5
);
curl_easy_setopt(curlEasy, CURLOPT_PROXYPORT, (long) pi.socks_server_port);
curl_easy_setopt(curlEasy, CURLOPT_PROXY, (char*) pi.socks_server_name);
// libcurl uses blocking sockets with socks proxy, so limit timeout.
// - imlemented with local patch to libcurl
curl_easy_setopt(curlEasy, CURLOPT_CONNECTTIMEOUT, 20L);
if (
strlen(pi.socks5_user_passwd) || strlen(pi.socks5_user_name)
) {
snprintf(m_curl_user_credentials, sizeof(m_curl_user_credentials),
"%s:%s",
pi.socks5_user_name, pi.socks5_user_passwd
);
curl_easy_setopt(curlEasy, CURLOPT_PROXYUSERPWD, m_curl_user_credentials);
curl_easy_setopt(curlEasy, CURLOPT_PROXYAUTH, CURLAUTH_ANY & ~CURLAUTH_NTLM);
}
} else if (pi.have_autodetect_proxy_settings && strlen(pi.autodetect_server_name)) {
if (log_flags.proxy_debug) {
msg_printf(0, MSG_INFO,
"[proxy] HTTP_OP::setup_proxy_session(): setting up automatic proxy %s:%d",
pi.autodetect_server_name, pi.autodetect_port
);
}
switch(pi.autodetect_protocol) {
case URL_PROTOCOL_SOCKS:
curl_easy_setopt(curlEasy, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
break;
case URL_PROTOCOL_HTTP:
case URL_PROTOCOL_HTTPS:
default:
curl_easy_setopt(curlEasy, CURLOPT_PROXYTYPE, CURLPROXY_HTTP);
break;
}
curl_easy_setopt(curlEasy, CURLOPT_PROXYPORT, (long) pi.autodetect_port);
curl_easy_setopt(curlEasy, CURLOPT_PROXY, (char*) pi.autodetect_server_name);
}
}
// the file descriptor sets need to be global so libcurl has access always
//
fd_set read_fds, write_fds, error_fds;
// call these once at the start of the program and once at the end
//
int curl_init() {
curl_global_init(CURL_GLOBAL_ALL);
g_curlMulti = curl_multi_init();
return (int)(g_curlMulti == NULL);
}
int curl_cleanup() {
if (g_curlMulti) {
curl_multi_cleanup(g_curlMulti);
}
curl_global_cleanup();
return 0;
}
void HTTP_OP::close_socket() {
// this cleans up the curlEasy, and "spoofs" the old close_socket
//
if (pcurlList) {
curl_slist_free_all(pcurlList);
pcurlList = NULL;
}
if (curlEasy && pcurlFormStart) {
curl_formfree(pcurlFormStart);
curl_formfree(pcurlFormEnd);
pcurlFormStart = pcurlFormEnd = NULL;
}
if (curlEasy && g_curlMulti) { // release this handle
curl_multi_remove_handle(g_curlMulti, curlEasy);
curl_easy_cleanup(curlEasy);
curlEasy = NULL;
}
}
void HTTP_OP::close_file() {
if (fileIn) {
fclose(fileIn);
fileIn = NULL;
}
if (fileOut) {
fclose(fileOut);
fileOut = NULL;
}
if (pByte) { //free any read memory used
delete [] pByte;
pByte = NULL;
}
}
void HTTP_OP_SET::get_fdset(FDSET_GROUP& fg) {
curl_multi_fdset(
g_curlMulti, &fg.read_fds, &fg.write_fds, &fg.exc_fds, &fg.max_fd
);
}
// we have a message for this HTTP_OP.
// get the response code for this request
//
void HTTP_OP::handle_messages(CURLMsg *pcurlMsg) {
int retval;
curl_easy_getinfo(curlEasy,
CURLINFO_RESPONSE_CODE, &response
);
curl_easy_getinfo(curlEasy,
CURLINFO_OS_ERRNO, &connect_error
);
// update byte counts and transfer speed
//
if (want_download) {
// SIZE_DOWNLOAD is the byte count "on the wire"
// (possible with compression)
// TOTAL_TIME is the elapsed time of the download
// STARTTRANSFER_TIME is portion of elapsed time involved
// with setup (connection establishment etc.)
// SPEED_DOWNLOAD is bytes/sec based on uncompressed size
// (we don't use it)
//
double size_download, total_time, starttransfer_time;
curl_easy_getinfo(curlEasy, CURLINFO_SIZE_DOWNLOAD, &size_download);
curl_easy_getinfo(curlEasy, CURLINFO_TOTAL_TIME, &total_time);
curl_easy_getinfo(curlEasy,
CURLINFO_STARTTRANSFER_TIME, &starttransfer_time
);
double dt = total_time - starttransfer_time;
if (dt > 0) {
gstate.net_stats.down.update(size_download, dt);
}
}
if (want_upload) {
double size_upload, total_time, starttransfer_time;
curl_easy_getinfo(curlEasy, CURLINFO_SIZE_UPLOAD, &size_upload);
curl_easy_getinfo(curlEasy, CURLINFO_TOTAL_TIME, &total_time);
curl_easy_getinfo(curlEasy,
CURLINFO_STARTTRANSFER_TIME, &starttransfer_time
);
double dt = total_time - starttransfer_time;
if (dt > 0) {
gstate.net_stats.up.update(size_upload, dt);
}
}
// the op is done if curl_multi_msg_read gave us a msg for this http_op
//
http_op_state = HTTP_STATE_DONE;
CurlResult = pcurlMsg->data.result;
if (CurlResult == CURLE_OK) {
switch ((response/100)*100) {
case HTTP_STATUS_OK: // 200
http_op_retval = 0;
break;
case HTTP_STATUS_CONTINUE: // 100
return;
case HTTP_STATUS_INTERNAL_SERVER_ERROR: // 500
http_op_retval = ERR_HTTP_TRANSIENT;
safe_strcpy(error_msg, boincerror(response));
break;
default: // 400
if (response == HTTP_STATUS_EXPECTATION_FAILED) {
got_expectation_failed = true;
}
http_op_retval = ERR_HTTP_PERMANENT;
safe_strcpy(error_msg, boincerror(response));
break;
}
net_status.http_op_succeeded();
} else {
safe_strcpy(error_msg, curl_easy_strerror(CurlResult));
switch(CurlResult) {
case CURLE_COULDNT_RESOLVE_HOST:
reset_dns();
http_op_retval = ERR_GETHOSTBYNAME;
break;
case CURLE_COULDNT_CONNECT:
http_op_retval = ERR_CONNECT;
break;
default:
http_op_retval = ERR_HTTP_TRANSIENT;
}
// trigger a check for whether we're connected,
// but not if this is a background operation
//
if (!is_background) {
net_status.got_http_error();
}
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO,
"[http] HTTP error: %s", error_msg
);
}
}
if (!http_op_retval && http_op_type == HTTP_OP_POST2) {
// for a successfully completed request on a "post2" --
// read in the temp file into req1 memory
//
size_t dSize = ftell(fileOut);
retval = fseek(fileOut, 0, SEEK_SET);
if (retval) {
// flag as a bad response for a possible retry later
response = 1;
msg_printf(NULL, MSG_INTERNAL_ERROR,
"[http] can't rewind post output file %s",
outfile
);
} else {
strlcpy(req1, "", req1_len);
if (dSize >= (size_t)req1_len) {
dSize = req1_len-1;
}
size_t nread = fread(req1, 1, dSize, fileOut);
if (nread != dSize) {
if (log_flags.http_debug) {
msg_printf(project, MSG_INFO,
"[http] post output file read failed %d",
(int)nread
);
}
}
req1[nread] = 0;
}
}
// close files and "sockets" (i.e. libcurl handles)
//
close_file();
close_socket();
// finally remove the tmpfile if not explicitly set
//
if (bTempOutfile) {
boinc_delete_file(outfile);
}
}
void HTTP_OP_SET::got_select(FDSET_GROUP&, double timeout) {
int iNumMsg;
HTTP_OP* hop = NULL;
CURLMsg *pcurlMsg = NULL;
int iRunning = 0; // curl flags for max # of fds & # running queries
CURLMcode curlMErr;
// get the data waiting for transfer in or out
// use timeout value so that we don't hog CPU in this loop
//
while (1) {
curlMErr = curl_multi_perform(g_curlMulti, &iRunning);
if (curlMErr != CURLM_CALL_MULTI_PERFORM) break;
if (dtime() - gstate.now > timeout) break;
}
// read messages from curl that may have come in from the above loop
//
while (1) {
pcurlMsg = curl_multi_info_read(g_curlMulti, &iNumMsg);
if (!pcurlMsg) break;
// if we have a msg, then somebody finished
// can check also with pcurlMsg->msg == CURLMSG_DONE
//
hop = lookup_curl(pcurlMsg->easy_handle);
if (!hop) continue;
hop->handle_messages(pcurlMsg);
}
}
// Return the HTTP_OP object with given Curl object
//
HTTP_OP* HTTP_OP_SET::lookup_curl(CURL* pcurl) {
for (unsigned int i=0; i<http_ops.size(); i++) {
if (http_ops[i]->curlEasy == pcurl) {
return http_ops[i];
}
}
return 0;
}
// Update the transfer speed for this HTTP_OP
// called on every I/O
//
void HTTP_OP::update_speed() {
double delta_t = dtime() - start_time;
if (delta_t > 0) {
xfer_speed = (bytes_xferred-start_bytes_xferred) / delta_t;
}
}
void HTTP_OP::set_speed_limit(bool is_upload, double bytes_sec) {
#if LIBCURL_VERSION_NUM >= 0x070f05
CURLcode cc = CURLE_OK;
curl_off_t bs = (curl_off_t)bytes_sec;
if (is_upload) {
cc = curl_easy_setopt(curlEasy, CURLOPT_MAX_SEND_SPEED_LARGE, bs);
} else {
cc = curl_easy_setopt(curlEasy, CURLOPT_MAX_RECV_SPEED_LARGE, bs);
}
if (cc && log_flags.http_debug) {
msg_printf(project, MSG_INFO,
"[http] Curl error in set_speed_limit(): %s",
curl_easy_strerror(cc)
);
}
#endif
}
void HTTP_OP_SET::cleanup_temp_files() {
char filename[256];
DIRREF d = dir_open(".");
while (1) {
int retval = dir_scan(filename, d, sizeof(filename));
if (retval) break;
if (strstr(filename, "blc") != filename) continue;
if (!is_file(filename)) continue;
boinc_delete_file(filename);
}
dir_close(d);
}
| 1 | 16,159 | This needs to be checked with Linux. AFAIK, we have this file in our bin directory that is a link to the system file. | BOINC-boinc | php |
@@ -1,12 +1,12 @@
-define(['browser', 'dom', 'layoutManager', 'css!components/viewManager/viewContainer'], function (browser, dom, layoutManager) {
- 'use strict';
+import 'css!components/viewManager/viewContainer';
+/* eslint-disable indent */
function setControllerClass(view, options) {
if (options.controllerFactory) {
return Promise.resolve();
}
- var controllerUrl = view.getAttribute('data-controller');
+ let controllerUrl = view.getAttribute('data-controller');
if (controllerUrl) {
if (controllerUrl.indexOf('__plugin/') === 0) { | 1 | define(['browser', 'dom', 'layoutManager', 'css!components/viewManager/viewContainer'], function (browser, dom, layoutManager) {
'use strict';
function setControllerClass(view, options) {
if (options.controllerFactory) {
return Promise.resolve();
}
var controllerUrl = view.getAttribute('data-controller');
if (controllerUrl) {
if (controllerUrl.indexOf('__plugin/') === 0) {
controllerUrl = controllerUrl.substring('__plugin/'.length);
}
controllerUrl = Dashboard.getConfigurationResourceUrl(controllerUrl);
return getRequirePromise([controllerUrl]).then(function (ControllerFactory) {
options.controllerFactory = ControllerFactory;
});
}
return Promise.resolve();
}
function getRequirePromise(deps) {
return new Promise(function (resolve, reject) {
require(deps, resolve);
});
}
function loadView(options) {
if (!options.cancel) {
var selected = selectedPageIndex;
var previousAnimatable = selected === -1 ? null : allPages[selected];
var pageIndex = selected + 1;
if (pageIndex >= pageContainerCount) {
pageIndex = 0;
}
var isPluginpage = options.url.toLowerCase().indexOf('/configurationpage') !== -1;
var newViewInfo = normalizeNewView(options, isPluginpage);
var newView = newViewInfo.elem;
var modulesToLoad = [];
return new Promise(function (resolve) {
require(modulesToLoad, function () {
var currentPage = allPages[pageIndex];
if (currentPage) {
triggerDestroy(currentPage);
}
var view = newView;
if (typeof view == 'string') {
view = document.createElement('div');
view.innerHTML = newView;
}
view.classList.add('mainAnimatedPage');
if (currentPage) {
if (newViewInfo.hasScript && window.$) {
view = $(view).appendTo(mainAnimatedPages)[0];
mainAnimatedPages.removeChild(currentPage);
} else {
mainAnimatedPages.replaceChild(view, currentPage);
}
} else {
if (newViewInfo.hasScript && window.$) {
view = $(view).appendTo(mainAnimatedPages)[0];
} else {
mainAnimatedPages.appendChild(view);
}
}
if (options.type) {
view.setAttribute('data-type', options.type);
}
var properties = [];
if (options.fullscreen) {
properties.push('fullscreen');
}
if (properties.length) {
view.setAttribute('data-properties', properties.join(','));
}
allPages[pageIndex] = view;
setControllerClass(view, options).then(function () {
if (onBeforeChange) {
onBeforeChange(view, false, options);
}
beforeAnimate(allPages, pageIndex, selected);
selectedPageIndex = pageIndex;
currentUrls[pageIndex] = options.url;
if (!options.cancel && previousAnimatable) {
afterAnimate(allPages, pageIndex);
}
if (window.$) {
$.mobile = $.mobile || {};
$.mobile.activePage = view;
}
resolve(view);
});
});
});
}
}
function replaceAll(str, find, replace) {
return str.split(find).join(replace);
}
function parseHtml(html, hasScript) {
if (hasScript) {
html = replaceAll(html, '\x3c!--<script', '<script');
html = replaceAll(html, '<\/script>--\x3e', '<\/script>');
}
var wrapper = document.createElement('div');
wrapper.innerHTML = html;
return wrapper.querySelector('div[data-role="page"]');
}
function normalizeNewView(options, isPluginpage) {
var viewHtml = options.view;
if (viewHtml.indexOf('data-role="page"') === -1) {
return viewHtml;
}
var hasScript = viewHtml.indexOf('<script') !== -1;
var elem = parseHtml(viewHtml, hasScript);
if (hasScript) {
hasScript = elem.querySelector('script') != null;
}
var hasjQuery = false;
var hasjQuerySelect = false;
var hasjQueryChecked = false;
if (isPluginpage) {
hasjQuery = viewHtml.indexOf('jQuery') != -1 || viewHtml.indexOf('$(') != -1 || viewHtml.indexOf('$.') != -1;
hasjQueryChecked = viewHtml.indexOf('.checked(') != -1;
hasjQuerySelect = viewHtml.indexOf('.selectmenu(') != -1;
}
return {
elem: elem,
hasScript: hasScript,
hasjQuerySelect: hasjQuerySelect,
hasjQueryChecked: hasjQueryChecked,
hasjQuery: hasjQuery
};
}
function beforeAnimate(allPages, newPageIndex, oldPageIndex) {
for (var index = 0, length = allPages.length; index < length; index++) {
if (newPageIndex !== index && oldPageIndex !== index) {
allPages[index].classList.add('hide');
}
}
}
function afterAnimate(allPages, newPageIndex) {
for (var index = 0, length = allPages.length; index < length; index++) {
if (newPageIndex !== index) {
allPages[index].classList.add('hide');
}
}
}
function setOnBeforeChange(fn) {
onBeforeChange = fn;
}
function tryRestoreView(options) {
var url = options.url;
var index = currentUrls.indexOf(url);
if (index !== -1) {
var animatable = allPages[index];
var view = animatable;
if (view) {
if (options.cancel) {
return;
}
var selected = selectedPageIndex;
var previousAnimatable = selected === -1 ? null : allPages[selected];
return setControllerClass(view, options).then(function () {
if (onBeforeChange) {
onBeforeChange(view, true, options);
}
beforeAnimate(allPages, index, selected);
animatable.classList.remove('hide');
selectedPageIndex = index;
if (!options.cancel && previousAnimatable) {
afterAnimate(allPages, index);
}
if (window.$) {
$.mobile = $.mobile || {};
$.mobile.activePage = view;
}
return view;
});
}
}
return Promise.reject();
}
function triggerDestroy(view) {
view.dispatchEvent(new CustomEvent('viewdestroy', {}));
}
function reset() {
allPages = [];
currentUrls = [];
mainAnimatedPages.innerHTML = '';
selectedPageIndex = -1;
}
var onBeforeChange;
var mainAnimatedPages = document.querySelector('.mainAnimatedPages');
var allPages = [];
var currentUrls = [];
var pageContainerCount = 3;
var selectedPageIndex = -1;
reset();
mainAnimatedPages.classList.remove('hide');
return {
loadView: loadView,
tryRestoreView: tryRestoreView,
reset: reset,
setOnBeforeChange: setOnBeforeChange
};
});
| 1 | 17,088 | Shouldn't we import `default`? I can't get here to test. | jellyfin-jellyfin-web | js |
@@ -99,7 +99,7 @@ class UNIXServerClient(object):
self.unix_socket = os.path.join(self.temp_dir, 'luigid.sock')
def run_server(self):
- luigi.server.run(unix_socket=unix_socket)
+ luigi.server.run(unix_socket=self.unix_socket)
def scheduler(self):
url = ParseResult( | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import os
import multiprocessing
import random
import shutil
import signal
import time
import tempfile
from helpers import unittest, with_config, skipOnTravis
import luigi.rpc
import luigi.server
from luigi.scheduler import CentralPlannerScheduler
from luigi.six.moves.urllib.parse import urlencode, ParseResult
from tornado.testing import AsyncHTTPTestCase
from nose.plugins.attrib import attr
try:
from unittest import mock
except ImportError:
import mock
class ServerTestBase(AsyncHTTPTestCase):
def get_app(self):
return luigi.server.app(CentralPlannerScheduler())
def setUp(self):
super(ServerTestBase, self).setUp()
self._old_fetch = luigi.rpc.RemoteScheduler._fetch
def _fetch(obj, url, body, *args, **kwargs):
body = urlencode(body).encode('utf-8')
response = self.fetch(url, body=body, method='POST')
if response.code >= 400:
raise luigi.rpc.RPCError(
'Errror when connecting to remote scheduler'
)
return response.body.decode('utf-8')
luigi.rpc.RemoteScheduler._fetch = _fetch
def tearDown(self):
super(ServerTestBase, self).tearDown()
luigi.rpc.RemoteScheduler._fetch = self._old_fetch
class ServerTest(ServerTestBase):
def test_visualizer(self):
page = self.fetch('/').body
self.assertTrue(page.find(b'<title>') != -1)
def _test_404(self, path):
response = self.fetch(path)
self.assertEqual(response.code, 404)
def test_404(self):
self._test_404('/foo')
def test_api_404(self):
self._test_404('/api/foo')
class INETServerClient(object):
def __init__(self):
self.port = random.randint(1024, 9999)
def run_server(self):
luigi.server.run(api_port=self.port, address='127.0.0.1')
def scheduler(self):
return luigi.rpc.RemoteScheduler('http://localhost:' + str(self.port))
class UNIXServerClient(object):
def __init__(self):
self.tempdir = tempfile.mkdtemp()
self.unix_socket = os.path.join(self.temp_dir, 'luigid.sock')
def run_server(self):
luigi.server.run(unix_socket=unix_socket)
def scheduler(self):
url = ParseResult(
scheme='http+unix',
netloc=self.unix_socket,
path='',
params='',
query='',
fragment='',
).geturl()
return luigi.rpc.RemoteScheduler(url)
class ServerTestRun(unittest.TestCase):
"""Test to start and stop the server in a more "standard" way
"""
server_client_class = INETServerClient
def start_server(self):
self._process = multiprocessing.Process(
target=self.server_client.run_server
)
self._process.start()
time.sleep(0.1) # wait for server to start
self.sch = self.server_client.scheduler()
self.sch._wait = lambda: None
def stop_server(self):
self._process.terminate()
self._process.join(1)
if self._process.is_alive():
os.kill(self._process.pid, signal.SIGKILL)
def setUp(self):
self.server_client = self.server_client_class()
state_path = tempfile.mktemp(suffix=self.id())
self.addCleanup(functools.partial(os.unlink, state_path))
luigi.configuration.get_config().set('scheduler', 'state_path', state_path)
self.start_server()
def tearDown(self):
self.stop_server()
def test_ping(self):
self.sch.ping(worker='xyz')
def test_raw_ping(self):
self.sch._request('/api/ping', {'worker': 'xyz'})
def test_raw_ping_extended(self):
self.sch._request('/api/ping', {'worker': 'xyz', 'foo': 'bar'})
def test_404(self):
with self.assertRaises(luigi.rpc.RPCError):
self.sch._request('/api/fdsfds', {'dummy': 1})
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953884')
def test_save_state(self):
self.sch.add_task('X', 'B', deps=('A',))
self.sch.add_task('X', 'A')
self.assertEqual(self.sch.get_work('X')['task_id'], 'A')
self.stop_server()
self.start_server()
work = self.sch.get_work('X')['running_tasks'][0]
self.assertEqual(work['task_id'], 'A')
class URLLibServerTestRun(ServerTestRun):
@mock.patch.object(luigi.rpc, 'HAS_REQUESTS', False)
def start_server(self, *args, **kwargs):
super(URLLibServerTestRun, self).start_server(*args, **kwargs)
@attr('unix')
class UNIXServerTestRun(unittest.TestCase):
server_client_class = UNIXServerClient
def tearDown(self):
super(self, ServerTestRun).tearDown()
shutil.rmtree(self.server_client.tempdir)
if __name__ == '__main__':
unittest.main()
| 1 | 12,822 | !!!!!!!!!!!!! @graingert, does this mean that tests haven't been running??? | spotify-luigi | py |
@@ -484,11 +484,11 @@ namespace Microsoft.DotNet.Execute
public string FormatSetting(string option, string value, string type, string toolName)
{
string commandOption = null;
- if (type.Equals("passThrough"))
+ if (type != null && type.Equals("passThrough"))
{
commandOption = string.Format(" {0}", toolName.Equals("console") ? "" : value);
}
- else if (type.Equals(RunToolSettingValueTypeReservedKeyword)) { /* do nothing */ }
+ else if (type != null && type.Equals(RunToolSettingValueTypeReservedKeyword)) { /* do nothing */ }
else
{
Tool toolFormat; | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.IO;
using System.Linq;
using System.Collections.Generic;
using System.Text;
namespace Microsoft.DotNet.Execute
{
public class Setup
{
private const string WhatIfReservedKeyword = "WhatIf";
private const string DryRunReservedKeyword = "dry-run";
private const string RunQuietReservedKeyword = "RunQuiet";
private const string RunToolSettingValueTypeReservedKeyword = "runToolSetting";
public Dictionary<string, string> ToolSettings { get; set; }
public Dictionary<string, Setting> Settings { get; set; }
public Dictionary<string, Command> Commands { get; set; }
public Dictionary<string, Tool> Tools { get; set; }
public Dictionary<string, string> SettingParameters { get; set; }
public string Os { get; set; }
public string ConfigurationFilePath { get; set; }
public string ExtraParameters { get; set; }
private int ValidateSettings()
{
int returnCode = 0;
foreach (var key in Settings.Keys)
{
if (!IsReservedKeyword(key))
{
if (Settings[key].ValueType == null)
{
Console.Error.WriteLine("Setting '{0}' is missing the required ValueType property.", key);
returnCode = 1;
}
if (Settings[key].Values == null)
{
Console.Error.WriteLine("Setting '{0}' is missing the required Values property.", key);
returnCode = 1;
}
}
}
return returnCode;
}
private bool IsReservedKeyword(string keyword)
{
if (keyword.Equals(RunQuietReservedKeyword, StringComparison.OrdinalIgnoreCase)
|| keyword.Equals(WhatIfReservedKeyword, StringComparison.OrdinalIgnoreCase)
|| keyword.Equals(DryRunReservedKeyword, StringComparison.OrdinalIgnoreCase))
{
return true;
}
return false;
}
private string ParseSettingValue(string inputValue)
{
string value = string.Empty;
int length = inputValue.Length;
for (int i = 0; i < length; i++)
{
if (i != length - 1 && inputValue[i] == '$')
{
if (inputValue[i + 1] == '{')
{
int j;
string memberName = string.Empty;
for (j = i + 2; inputValue[j] != '}' && j < length; j++)
memberName += inputValue[j];
// The string is not of format ${}, just add the chars to the value.
if (j == length)
value += "${" + memberName;
else
value += SettingValueProvider.Get(memberName);
// Put i to j counter.
i = j;
}
else
{
// If next char is not { then add $ to the value.
value += inputValue[i];
}
}
else
{
value += inputValue[i];
}
}
return value;
}
private void ParseRunToolSettings(string commandSelectedByUser = null)
{
Setting tempSetting;
Command tempCommand;
foreach (var toolSetting in ToolSettings.Keys.Select(k => k.ToString()).ToArray())
{
string tempValue = null;
// Attempt to get run tool setting value from parameters
SettingParameters.TryGetValue(toolSetting, out tempValue);
ToolSettings[toolSetting] = tempValue;
// Attempt to get run tool setting value from command section
if (string.IsNullOrEmpty(ToolSettings[toolSetting]) && commandSelectedByUser != null)
{
if (Commands.TryGetValue(commandSelectedByUser, out tempCommand))
{
tempCommand.DefaultValues.Settings.TryGetValue(toolSetting, out tempValue);
ToolSettings[toolSetting] = tempValue;
}
}
// Attempt to get run tool setting value from settings section
if (string.IsNullOrEmpty(ToolSettings[toolSetting]))
{
Settings.TryGetValue(toolSetting, out tempSetting);
ToolSettings[toolSetting] = tempSetting.DefaultValue;
}
}
}
private string FindSettingValue(string valueToFind)
{
Setting value;
if (Settings.TryGetValue(valueToFind, out value))
{
return ParseSettingValue(value.DefaultValue);
}
return null;
}
private string FindSettingType(string valueToFind)
{
Setting value;
if (Settings.TryGetValue(valueToFind, out value))
{
return value.ValueType;
}
return null;
}
public int PrepareValues(string os, Dictionary<string, string> parameters, string configFile)
{
SettingParameters = new Dictionary<string, string>(parameters);
Os = os;
ConfigurationFilePath = configFile;
// Add defaults for run tool settings if they haven't been defined in config.json
SetRunToolSettingsDefaults();
// Validate Settings before parsing out ToolSettings
int returnCode = ValidateSettings();
ToolSettings = Settings.Where(s => s.Value.ValueType == null ||
s.Value.ValueType.Equals(RunToolSettingValueTypeReservedKeyword)
).ToDictionary(s => s.Key, s => string.Empty) ?? new Dictionary<string, string>();
// A dev may have overriden the default values for a tool setting, but not specified the ValueType
foreach (var key in ToolSettings.Keys)
{
Settings[key].ValueType = RunToolSettingValueTypeReservedKeyword;
}
// Parse run tool settings for any settings which do not apply to a Command, this allows us to have run tool settings
// which are outside the scope of a command.
ParseRunToolSettings();
return returnCode;
}
private void SetRunToolSettingsDefaults()
{
// If RunQuiet is already defined in config.json, don't override it
if (!Settings.ContainsKey(RunQuietReservedKeyword))
{
Setting runQuietSetting = new Setting()
{
Values = new List<string>() { "true", "false" },
ValueType = RunToolSettingValueTypeReservedKeyword,
Description = "Run tool specific setting. Set to True to only display output from the executing command.",
DefaultValue = "false"
};
Settings.Add(RunQuietReservedKeyword, runQuietSetting);
}
// If WhatIf is already defined in config.json, don't override it
if (!Settings.ContainsKey(WhatIfReservedKeyword))
{
Setting whatIfSetting = new Setting()
{
Values = new List<string>() { "true", "false" },
ValueType = RunToolSettingValueTypeReservedKeyword,
Description = "Run tool specific setting. Set to 'true' to only display commands chosen and not execute them",
// Currently, providing a default value for this setting will prevent it being overwritten by the command line.
// While it is not intuitive, the code as-is supports "<some command> -whatif" without requiring a boolean argument.
// Created https://github.com/dotnet/buildtools/issues/1230 to track
DefaultValue = string.Empty
};
Settings.Add(WhatIfReservedKeyword, whatIfSetting);
}
// Settings don't currently allow aliases, so until we have a real need to do so can just define two,
// so as to support both -dry-run and -WhatIf style usage.
if (!Settings.ContainsKey(DryRunReservedKeyword))
{
Setting dryRunSetting = new Setting()
{
Values = new List<string>() { "true", "false" },
ValueType = RunToolSettingValueTypeReservedKeyword,
Description = "Run tool specific setting. Set to 'true' to only display commands chosen and not execute them",
// As above: currently, providing a default value for this setting will prevent it being overwritten by the command line.
// While it is not intuitive, the code as-is supports "<some command> -dry-run" without requiring a boolean argument.
// Created https://github.com/dotnet/buildtools/issues/1230 to track
DefaultValue = string.Empty
};
Settings.Add(DryRunReservedKeyword, dryRunSetting);
}
}
public int ExecuteCommand(string commandSelectedByUser, List<string> parametersSelectedByUser)
{
ParseRunToolSettings(commandSelectedByUser);
string runQuietValue;
bool runQuiet = false;
if (ToolSettings.TryGetValue(RunQuietReservedKeyword, out runQuietValue))
{
runQuiet = runQuietValue.Equals("true", StringComparison.OrdinalIgnoreCase);
}
string whatIfValue;
bool whatIf = false;
if (ToolSettings.TryGetValue(WhatIfReservedKeyword, out whatIfValue))
{
if (string.IsNullOrEmpty(whatIfValue))
{
ToolSettings.TryGetValue(DryRunReservedKeyword, out whatIfValue);
}
whatIf = whatIfValue.Equals("true", StringComparison.OrdinalIgnoreCase);
}
CompleteCommand commandToRun = BuildCommand(commandSelectedByUser, parametersSelectedByUser);
if (commandToRun != null)
{
int result = 0;
if (whatIf)
{
PrintColorMessage(ConsoleColor.Yellow, "Showing command, would execute:");
PrintColorMessage(ConsoleColor.Yellow, $"\n\n{commandToRun.ToolCommand} {commandToRun.ParametersCommand}\n");
}
else
{
if (!runQuiet)
{
PrintColorMessage(ConsoleColor.DarkYellow, $"Running: {commandToRun.ToolCommand} {commandToRun.ParametersCommand}");
}
result = RunProcess.ExecuteProcess(commandToRun.ToolCommand, commandToRun.ParametersCommand);
if (!runQuiet)
{
if (result == 0)
{
PrintColorMessage(ConsoleColor.Green, "Command execution succeeded.");
}
else
{
PrintColorMessage(ConsoleColor.Red, "Command execution failed with exit code {0}.", result);
}
}
}
return result;
}
return 1;
}
private void PrintColorMessage(ConsoleColor color, string message, params object[] args)
{
Console.ForegroundColor = color;
Console.WriteLine(message, args);
Console.ResetColor();
}
private CompleteCommand BuildCommand(string commandSelectedByUser, List<string> parametersSelectedByUser, Dictionary<string, string> parameters = null)
{
Command commandToExecute;
if (!Commands.TryGetValue(commandSelectedByUser, out commandToExecute))
{
Console.Error.WriteLine("Error: The command {0} is not specified in the Json file.", commandSelectedByUser);
return null;
}
string commandTool = GetTool(commandToExecute, Os, ConfigurationFilePath, parametersSelectedByUser);
if (string.IsNullOrEmpty(commandTool))
{
return null;
}
if (parameters == null)
{
if (BuildRequiredValueSettingsForCommand(commandToExecute, parametersSelectedByUser, SettingParameters) &&
BuildDefaultValueSettingsForCommand(commandToExecute, SettingParameters) &&
ValidExtraParametersForCommand(ExtraParameters, SettingParameters))
{
string commandParameters = $"{BuildParametersForCommand(SettingParameters, SettingParameters["toolName"])} {ExtraParameters}";
CompleteCommand completeCommand = new CompleteCommand(commandTool, commandParameters);
return completeCommand;
}
return null;
}
else
{
string commandParameters = BuildParametersForCommand(parameters, SettingParameters["toolName"]);
CompleteCommand completeCommand = new CompleteCommand(commandTool, commandParameters);
return completeCommand;
}
}
private string BuildParametersForCommand(Dictionary<string, string> commandParameters, string toolName)
{
string commandSetting = string.Empty;
Tools[toolName].osSpecific[Os].TryGetValue("defaultParameters", out commandSetting);
foreach (KeyValuePair<string, string> parameters in commandParameters)
{
if (!parameters.Key.Equals("toolName") && !string.IsNullOrEmpty(parameters.Value))
{
string value = parameters.Value.Equals("default") ? FindSettingValue(parameters.Key) : ParseSettingValue(parameters.Value);
commandSetting += string.Format(" {0}", FormatSetting(parameters.Key, value, FindSettingType(parameters.Key), toolName));
}
}
return commandSetting;
}
private bool BuildRequiredValueSettingsForCommand(Command commandToExecute, List<string> requiredSettings, Dictionary<string, string> commandValues)
{
foreach (string reqSetting in requiredSettings)
{
foreach (KeyValuePair<string, string> sett in commandToExecute.Alias[reqSetting].Settings)
{
string value = sett.Value;
string currentValue;
if (commandValues.TryGetValue(sett.Key, out currentValue))
{
if (string.IsNullOrEmpty(currentValue) || currentValue.Equals("default"))
{
commandValues[sett.Key] = value;
}
else if (!value.Equals("default") && !value.Equals(currentValue))
{
Console.Error.WriteLine("Error: The value for setting {0} can't be overwriten.", sett.Key);
return false;
}
}
else if (!sett.Key.Equals("toolName"))
{
Console.Error.WriteLine("Error: The setting {0} is not specified in the Json file.", sett.Key);
return false;
}
}
}
return true;
}
private bool BuildDefaultValueSettingsForCommand(Command commandToExecute, Dictionary<string, string> commandValues)
{
foreach (KeyValuePair<string, string> optSetting in commandToExecute.DefaultValues.Settings)
{
string currentValue;
if (commandValues.TryGetValue(optSetting.Key, out currentValue))
{
if (string.IsNullOrEmpty(currentValue))
{
commandValues[optSetting.Key] = optSetting.Value;
}
}
else
{
Console.Error.WriteLine("Error: The setting {0} is not specified in the Json file.", optSetting.Key);
return false;
}
}
return true;
}
private bool ValidExtraParametersForCommand(string extraParameters, Dictionary<string, string> commandValues)
{
int namePos, valuePos;
string tempParam, name, value;
if (string.IsNullOrEmpty(extraParameters))
{
return true;
}
string[] extraA = extraParameters.Split(' ');
foreach (string param in extraA)
{
namePos = 0;
valuePos = param.Length;
tempParam = param;
namePos = param.IndexOf(":");
if (namePos != -1)
{
tempParam = param.Substring(namePos + 1);
}
valuePos = tempParam.IndexOf("=");
if (valuePos != -1)
{
name = tempParam.Substring(0, valuePos);
value = tempParam.Substring(valuePos + 1);
}
else
{
name = tempParam;
value = string.Empty;
}
string paramValue;
if (commandValues.TryGetValue(name, out paramValue) && !string.IsNullOrEmpty(paramValue) && !paramValue.Equals("default") && !value.Equals(paramValue))
{
Console.Error.WriteLine("Error: The value for setting {0} can't be overwriten.", name);
return false;
}
}
return true;
}
private string GetTool(Command commandToExecute, string os, string configPath, List<string> parametersSelectedByUser)
{
string toolname = commandToExecute.DefaultValues.ToolName;
string project = GetProject(commandToExecute, parametersSelectedByUser);
Tool toolProperties = null;
if (Tools.TryGetValue(toolname, out toolProperties))
{
SettingParameters["toolName"] = toolname;
string value = string.Empty;
if (toolProperties.osSpecific[os].TryGetValue("path", out value) && !string.IsNullOrEmpty(value))
{
return Path.GetFullPath(Path.Combine(configPath, value));
}
else if (toolProperties.osSpecific[os].TryGetValue("filesExtension", out value) && !string.IsNullOrEmpty(value))
{
string extension = value;
return Path.GetFullPath(Path.Combine(configPath, string.Format("{0}.{1}", project, extension)));
}
else
{
Console.Error.WriteLine("Error: The process {0} has empty values for path and filesExtension properties. It is mandatory that one of the two has a value.", toolname);
return string.Empty;
}
}
Console.Error.WriteLine("Error: The process {0} is not specified in the Json file.", toolname);
return string.Empty;
}
private string GetProject(Command commandToExecute, List<string> parametersSelectedByUser)
{
string project = string.Empty;
if (parametersSelectedByUser != null)
{
if (parametersSelectedByUser.Count(p => commandToExecute.Alias[p].Settings.TryGetValue("Project", out project)) > 1)
{
Console.Error.WriteLine("Error: There can only be one project execution per command.");
return string.Empty;
}
}
if (string.IsNullOrEmpty(project))
{
project = commandToExecute.DefaultValues.Project;
}
return project;
}
public string FormatSetting(string option, string value, string type, string toolName)
{
string commandOption = null;
if (type.Equals("passThrough"))
{
commandOption = string.Format(" {0}", toolName.Equals("console") ? "" : value);
}
else if (type.Equals(RunToolSettingValueTypeReservedKeyword)) { /* do nothing */ }
else
{
Tool toolFormat;
if (Tools.TryGetValue(toolName, out toolFormat) && !string.IsNullOrEmpty(type))
{
if (toolFormat.ValueTypes.TryGetValue(type, out commandOption))
{
commandOption = commandOption.Replace("{name}", option).Replace("{value}", value);
}
else
{
Console.Error.WriteLine("The type \"{0}\" is not defined as a Value Type of the tool \"{1}\". Parameter ignored", type, toolName);
return null;
}
}
}
return commandOption;
}
public string GetHelpCommand(string commandName, string alias = null)
{
Command commandToPrint;
if (Commands.TryGetValue(commandName, out commandToPrint))
{
StringBuilder sb = new StringBuilder();
Dictionary<string, string> commandParametersToPrint = new Dictionary<string, string>();
List<string> aliasList = null;
sb.AppendLine().Append("Settings: ").AppendLine();
if (!string.IsNullOrEmpty(alias))
{
sb.Append(GetHelpAlias(commandToPrint.Alias[alias].Settings, commandParametersToPrint));
aliasList = new List<string>(alias.Split(' '));
}
sb.Append(GetHelpAlias(commandToPrint.DefaultValues.Settings, commandParametersToPrint));
CompleteCommand completeCommand = BuildCommand(commandName, aliasList, commandParametersToPrint);
sb.AppendLine().Append("It will run: ").AppendLine();
sb.Append(string.Format("{0} {1}", completeCommand.ToolCommand, completeCommand.ParametersCommand));
return sb.ToString();
}
return null;
}
private string GetHelpAlias(Dictionary<string, string> settings, Dictionary<string, string> commandParametersToPrint)
{
StringBuilder sb = new StringBuilder();
foreach (KeyValuePair<string, string> setting in settings)
{
string value = setting.Value.Equals("default") ? FindSettingValue(setting.Key) : setting.Value;
sb.Append(string.Format(" {0} = {2}", setting.Key, FindSettingType(setting.Key), value)).AppendLine();
commandParametersToPrint[setting.Key] = string.IsNullOrEmpty(value) ? "True" : value;
}
return sb.ToString();
}
private class CompleteCommand
{
public string ToolCommand { get { return _toolCommand; } }
public string ParametersCommand { get { return _parametersCommand; } }
internal CompleteCommand(string tool, string parameters)
{
_toolCommand = tool;
_parametersCommand = parameters;
}
private string _toolCommand;
private string _parametersCommand;
}
}
public class AliasPerCommand
{
public string Description { get; set; }
public Dictionary<string, string> Settings { get; set; }
}
public class DefaultValuesPerCommand
{
public string Project { get; set; }
public string ToolName { get; set; }
public string DefaultAlias { get; set; }
public Dictionary<string, string> Settings { get; set; }
}
public class Command
{
public Dictionary<string, AliasPerCommand> Alias { get; set; }
public DefaultValuesPerCommand DefaultValues { get; set; }
}
public class Tool
{
public Dictionary<string, Dictionary<string, string>> osSpecific { get; set; }
public Dictionary<string, string> ValueTypes { get; set; }
}
public class Setting
{
public string Description { get; set; }
public string ValueType { get; set; }
public List<string> Values { get; set; }
public string DefaultValue { get; set; }
}
}
| 1 | 12,006 | I've already tried the null-coalescing operator here and got a strange error, so I did this the old-fashioned way. | dotnet-buildtools | .cs |
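To make the trade-off in the comment above concrete, here is a minimal, self-contained C# sketch comparing the explicit `!= null` guard used in this record's patch with the null-coalescing form the reviewer says they tried, plus two other common null-safe spellings. The class and method names are invented for illustration and are not part of the buildtools code; only the "passThrough" value comes from the record.

using System;

internal static class NullGuardSketch
{
    // Four equivalent ways to test a possibly-null string against "passThrough".
    internal static bool IsPassThrough(string type)
    {
        // Explicit guard, as used in the patch above.
        bool explicitCheck = type != null && type.Equals("passThrough");

        // Null-coalescing operator (the form the reviewer mentions trying).
        bool nullCoalescing = (type ?? string.Empty).Equals("passThrough");

        // Null-conditional operator: type?.Equals(...) yields bool?, so compare to true.
        bool nullConditional = type?.Equals("passThrough") == true;

        // Static string.Equals tolerates null operands.
        bool staticEquals = string.Equals(type, "passThrough");

        // All four agree for any input, so returning their conjunction
        // is the same as returning any one of them.
        return explicitCheck && nullCoalescing && nullConditional && staticEquals;
    }

    private static void Main()
    {
        Console.WriteLine(IsPassThrough("passThrough")); // True
        Console.WriteLine(IsPassThrough("console"));     // False
        Console.WriteLine(IsPassThrough(null));          // False
    }
}

Which spelling compiles cleanly can depend on the C# language version the project targets, which may be why the explicit check was the safer choice here.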
@@ -1,4 +1,11 @@
var parent = axe.commons.dom.getComposedParent(node);
-return (['UL', 'OL'].includes(parent.nodeName.toUpperCase()) ||
- (parent.getAttribute('role') || '').toLowerCase() === 'list');
-
+
+var parentRole = (parent.getAttribute('role') || '').toLowerCase();
+
+var isListRole = parentRole === 'list';
+
+return (
+ (['UL', 'OL'].includes(parent.nodeName.toUpperCase()) &&
+ (!parentRole || isListRole)) ||
+ isListRole
+); | 1 | var parent = axe.commons.dom.getComposedParent(node);
return (['UL', 'OL'].includes(parent.nodeName.toUpperCase()) ||
(parent.getAttribute('role') || '').toLowerCase() === 'list');
| 1 | 11,592 | This doesn't follow our spacing convention. It's also a little hard to read. Can you reformat? | dequelabs-axe-core | js |
@@ -4,10 +4,10 @@
package stack
import (
- "github.com/aws/amazon-ecs-cli-v2/internal/pkg/template"
"github.com/aws/aws-sdk-go/service/cloudformation"
+ "github.com/aws/copilot-cli/internal/pkg/template"
- "github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy"
+ "github.com/aws/copilot-cli/internal/pkg/deploy"
)
const pipelineCfnTemplatePath = "cicd/pipeline_cfn.yml" | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stack
import (
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/template"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy"
)
const pipelineCfnTemplatePath = "cicd/pipeline_cfn.yml"
type pipelineStackConfig struct {
*deploy.CreatePipelineInput
parser template.Parser
}
func NewPipelineStackConfig(in *deploy.CreatePipelineInput) *pipelineStackConfig {
return &pipelineStackConfig{
CreatePipelineInput: in,
parser: template.New(),
}
}
func (p *pipelineStackConfig) StackName() string {
return p.Name
}
func (p *pipelineStackConfig) Template() (string, error) {
content, err := p.parser.Parse(pipelineCfnTemplatePath, p, template.WithFuncs(cfTemplateFunctions))
if err != nil {
return "", err
}
return content.String(), nil
}
func (p *pipelineStackConfig) Parameters() ([]*cloudformation.Parameter, error) {
return nil, nil
}
func (p *pipelineStackConfig) Tags() []*cloudformation.Tag {
return mergeAndFlattenTags(p.AdditionalTags, map[string]string{
AppTagKey: p.AppName,
})
}
| 1 | 13,857 | Shouldn't deploy come before template? EDIT: I see that in other files, we add a separate line and put deploy at the end. What is the reason for this? | aws-copilot-cli | go |
@@ -60,7 +60,7 @@ public class Program
// turn off the above default. i.e any
// instrument which does not match any views
// gets dropped.
- // .AddView(instrumentName: "*", new DropAggregationConfig())
+ // .AddView(instrumentName: "*", new MetricStreamConfiguration() { Aggregation = Aggregation.Drop })
.AddConsoleExporter()
.Build();
| 1 | // <copyright file="Program.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics.Metrics;
using OpenTelemetry;
using OpenTelemetry.Metrics;
public class Program
{
private static readonly Meter Meter1 = new Meter("CompanyA.ProductA.Library1", "1.0");
private static readonly Meter Meter2 = new Meter("CompanyA.ProductB.Library2", "1.0");
public static void Main(string[] args)
{
using var meterProvider = Sdk.CreateMeterProviderBuilder()
.AddSource(Meter1.Name)
.AddSource(Meter2.Name)
// Rename an instrument to new name.
.AddView(instrumentName: "MyCounter", name: "MyCounterRenamed")
// Change Histogram bounds
.AddView(instrumentName: "MyHistogram", new HistogramConfiguration() { BucketBounds = new double[] { 10, 20 }, Aggregation = Aggregation.LastValue })
// For the instrument "MyCounterCustomTags", aggregate with only the keys "tag1", "tag2".
.AddView(instrumentName: "MyCounterCustomTags", new MetricStreamConfiguration() { TagKeys = new string[] { "tag1", "tag2" } })
// Drop the instrument "MyCounterDrop".
.AddView(instrumentName: "MyCounterDrop", new MetricStreamConfiguration() { Aggregation = Aggregation.Drop })
// Advanced selection criteria and config via Func<Instrument, AggregationConfig>
.AddView((instrument) =>
{
if (instrument.Meter.Name.Equals("CompanyA.ProductB.Library2") &&
instrument.GetType().Name.Contains("Histogram"))
{
return new HistogramConfiguration() { BucketBounds = new double[] { 10, 20 } };
}
return null;
})
// An instrument which does not match any views
// gets processed with default behavior. (SDK default)
// Uncommenting the following line will
            // turn off the above default. i.e. any
// instrument which does not match any views
// gets dropped.
// .AddView(instrumentName: "*", new DropAggregationConfig())
.AddConsoleExporter()
.Build();
var random = new Random();
var counter = Meter1.CreateCounter<long>("MyCounter");
for (int i = 0; i < 20000; i++)
{
counter.Add(1, new("tag1", "value1"), new("tag2", "value2"));
}
var histogram = Meter1.CreateHistogram<long>("MyHistogram");
for (int i = 0; i < 20000; i++)
{
histogram.Record(random.Next(1, 1000), new("tag1", "value1"), new("tag2", "value2"));
}
var counterCustomTags = Meter1.CreateCounter<long>("MyCounterCustomTags");
for (int i = 0; i < 20000; i++)
{
counterCustomTags.Add(1, new("tag1", "value1"), new("tag2", "value2"), new("tag3", "value4"));
}
var counterDrop = Meter1.CreateCounter<long>("MyCounterDrop");
for (int i = 0; i < 20000; i++)
{
counterDrop.Add(1, new("tag1", "value1"), new("tag2", "value2"));
}
var histogram2 = Meter2.CreateHistogram<long>("MyHistogram2");
for (int i = 0; i < 20000; i++)
{
histogram2.Record(random.Next(1, 1000), new("tag1", "value1"), new("tag2", "value2"));
}
}
}
| 1 | 21,702 | Consider making a constant (e.g. `MetricStreamConfiguration.Drop`). | open-telemetry-opentelemetry-dotnet | .cs |
@@ -18,6 +18,7 @@ try:
except ImportError:
from rdkit.piddle import piddle
import ClusterUtils
+from rdkit.six.moves import xrange
import numpy
| 1 | # $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""Cluster tree visualization using Sping
"""
try:
from rdkit.sping import pid
piddle = pid
except ImportError:
from rdkit.piddle import piddle
import ClusterUtils
import numpy
class VisOpts(object):
""" stores visualization options for cluster viewing
**Instance variables**
- x/yOffset: amount by which the drawing is offset from the edges of the canvas
- lineColor: default color for drawing the cluster tree
- lineWidth: the width of the lines used to draw the tree
"""
xOffset = 20
yOffset = 20
lineColor = piddle.Color(0, 0, 0)
hideColor = piddle.Color(.8, .8, .8)
terminalColors = [piddle.Color(1, 0, 0), piddle.Color(0, 0, 1), piddle.Color(1, 1, 0),
piddle.Color(0, .5, .5), piddle.Color(0, .8, 0), piddle.Color(.5, .5, .5),
piddle.Color(.8, .3, .3), piddle.Color(.3, .3, .8), piddle.Color(.8, .8, .3),
piddle.Color(.3, .8, .8)]
lineWidth = 2
hideWidth = 1.1
nodeRad = 15
nodeColor = piddle.Color(1., .4, .4)
highlightColor = piddle.Color(1., 1., .4)
highlightRad = 10
def _scaleMetric(val, power=2, min=1e-4):
val = float(val)
nval = pow(val, power)
if nval < min:
return 0.0
else:
return numpy.log(nval / min)
class ClusterRenderer(object):
def __init__(self, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1,
stopAtCentroids=0, logScale=0, tooClose=-1):
self.canvas = canvas
self.size = size
self.ptColors = ptColors
self.lineWidth = lineWidth
self.showIndices = showIndices
self.showNodes = showNodes
self.stopAtCentroids = stopAtCentroids
self.logScale = logScale
self.tooClose = tooClose
def _AssignPointLocations(self, cluster, terminalOffset=4):
self.pts = cluster.GetPoints()
self.nPts = len(self.pts)
self.xSpace = float(self.size[0] - 2 * VisOpts.xOffset) / float(self.nPts - 1)
ySize = self.size[1]
for i in xrange(self.nPts):
pt = self.pts[i]
if self.logScale > 0:
v = _scaleMetric(pt.GetMetric(), self.logScale)
else:
v = float(pt.GetMetric())
pt._drawPos = (VisOpts.xOffset + i * self.xSpace,
ySize - (v * self.ySpace + VisOpts.yOffset) + terminalOffset)
def _AssignClusterLocations(self, cluster):
# first get the search order (top down)
toDo = [cluster]
examine = cluster.GetChildren()[:]
while len(examine):
node = examine.pop(0)
children = node.GetChildren()
if len(children):
toDo.append(node)
for child in children:
if not child.IsTerminal():
examine.append(child)
# and reverse it (to run from bottom up)
toDo.reverse()
for node in toDo:
if self.logScale > 0:
v = _scaleMetric(node.GetMetric(), self.logScale)
else:
v = float(node.GetMetric())
# average our children's x positions
childLocs = [x._drawPos[0] for x in node.GetChildren()]
if len(childLocs):
xp = sum(childLocs) / float(len(childLocs))
yp = self.size[1] - (v * self.ySpace + VisOpts.yOffset)
node._drawPos = (xp, yp)
def _DrawToLimit(self, cluster):
"""
we assume that _drawPos settings have been done already
"""
if self.lineWidth is None:
lineWidth = VisOpts.lineWidth
else:
lineWidth = self.lineWidth
examine = [cluster]
while len(examine):
node = examine.pop(0)
xp, yp = node._drawPos
children = node.GetChildren()
if abs(children[1]._drawPos[0] - children[0]._drawPos[0]) > self.tooClose:
# draw the horizontal line connecting things
drawColor = VisOpts.lineColor
self.canvas.drawLine(children[0]._drawPos[0], yp, children[-1]._drawPos[0], yp, drawColor,
lineWidth)
# and draw the lines down to the children
for child in children:
if self.ptColors and child.GetData() is not None:
drawColor = self.ptColors[child.GetData()]
else:
drawColor = VisOpts.lineColor
cxp, cyp = child._drawPos
self.canvas.drawLine(cxp, yp, cxp, cyp, drawColor, lineWidth)
if not child.IsTerminal():
examine.append(child)
else:
if self.showIndices and not self.stopAtCentroids:
try:
txt = str(child.GetName())
except Exception:
txt = str(child.GetIndex())
self.canvas.drawString(txt, cxp - self.canvas.stringWidth(txt) / 2, cyp)
else:
# draw a "hidden" line to the bottom
self.canvas.drawLine(xp, yp, xp, self.size[1] - VisOpts.yOffset, VisOpts.hideColor,
lineWidth)
def DrawTree(self, cluster, minHeight=2.0):
if self.logScale > 0:
v = _scaleMetric(cluster.GetMetric(), self.logScale)
else:
v = float(cluster.GetMetric())
if v <= 0:
v = minHeight
self.ySpace = float(self.size[1] - 2 * VisOpts.yOffset) / v
self._AssignPointLocations(cluster)
self._AssignClusterLocations(cluster)
if not self.stopAtCentroids:
self._DrawToLimit(cluster)
else:
raise NotImplementedError('stopAtCentroids drawing not yet implemented')
def DrawClusterTree(cluster, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1,
stopAtCentroids=0, logScale=0, tooClose=-1):
""" handles the work of drawing a cluster tree on a Sping canvas
**Arguments**
- cluster: the cluster tree to be drawn
- canvas: the Sping canvas on which to draw
- size: the size of _canvas_
- ptColors: if this is specified, the _colors_ will be used to color
the terminal nodes of the cluster tree. (color == _pid.Color_)
- lineWidth: if specified, it will be used for the widths of the lines
used to draw the tree
**Notes**
- _Canvas_ is neither _save_d nor _flush_ed at the end of this
- if _ptColors_ is the wrong length for the number of possible terminal
node types, this will throw an IndexError
- terminal node types are determined using their _GetData()_ methods
"""
renderer = ClusterRenderer(canvas, size, ptColors, lineWidth, showIndices, showNodes,
stopAtCentroids, logScale, tooClose)
renderer.DrawTree(cluster)
def _DrawClusterTree(cluster, canvas, size, ptColors=[], lineWidth=None, showIndices=0, showNodes=1,
stopAtCentroids=0, logScale=0, tooClose=-1):
""" handles the work of drawing a cluster tree on a Sping canvas
**Arguments**
- cluster: the cluster tree to be drawn
- canvas: the Sping canvas on which to draw
- size: the size of _canvas_
- ptColors: if this is specified, the _colors_ will be used to color
the terminal nodes of the cluster tree. (color == _pid.Color_)
- lineWidth: if specified, it will be used for the widths of the lines
used to draw the tree
**Notes**
- _Canvas_ is neither _save_d nor _flush_ed at the end of this
- if _ptColors_ is the wrong length for the number of possible terminal
node types, this will throw an IndexError
- terminal node types are determined using their _GetData()_ methods
"""
if lineWidth is None:
lineWidth = VisOpts.lineWidth
pts = cluster.GetPoints()
nPts = len(pts)
if nPts <= 1:
return
xSpace = float(size[0] - 2 * VisOpts.xOffset) / float(nPts - 1)
if logScale > 0:
v = _scaleMetric(cluster.GetMetric(), logScale)
else:
v = float(cluster.GetMetric())
ySpace = float(size[1] - 2 * VisOpts.yOffset) / v
  if not stopAtCentroids:
    allNodes = ClusterUtils.GetNodeList(cluster)
  else:
    allNodes = ClusterUtils.GetNodesDownToCentroids(cluster)
  for i in xrange(nPts):
    pt = pts[i]
    if logScale > 0:
      v = _scaleMetric(pt.GetMetric(), logScale)
    else:
      v = float(pt.GetMetric())
    pt._drawPos = (VisOpts.xOffset + i * xSpace, size[1] - (v * ySpace + VisOpts.yOffset))
    # allNodes must be built before this loop so terminal points can be removed from it
    if not stopAtCentroids or not hasattr(pt, '_isCentroid'):
      allNodes.remove(pt)
while len(allNodes):
node = allNodes.pop(0)
children = node.GetChildren()
if len(children):
if logScale > 0:
v = _scaleMetric(node.GetMetric(), logScale)
else:
v = float(node.GetMetric())
yp = size[1] - (v * ySpace + VisOpts.yOffset)
childLocs = [x._drawPos[0] for x in children]
xp = sum(childLocs) / float(len(childLocs))
node._drawPos = (xp, yp)
if not stopAtCentroids or node._aboveCentroid > 0:
for child in children:
if ptColors != [] and child.GetData() is not None:
drawColor = ptColors[child.GetData()]
else:
drawColor = VisOpts.lineColor
if showNodes and hasattr(child, '_isCentroid'):
canvas.drawLine(child._drawPos[0], child._drawPos[1] - VisOpts.nodeRad / 2,
child._drawPos[0], node._drawPos[1], drawColor, lineWidth)
else:
canvas.drawLine(child._drawPos[0], child._drawPos[1], child._drawPos[0],
node._drawPos[1], drawColor, lineWidth)
canvas.drawLine(children[0]._drawPos[0], node._drawPos[1], children[-1]._drawPos[0],
node._drawPos[1], VisOpts.lineColor, lineWidth)
else:
for child in children:
drawColor = VisOpts.hideColor
canvas.drawLine(child._drawPos[0], child._drawPos[1], child._drawPos[0], node._drawPos[1],
drawColor, VisOpts.hideWidth)
canvas.drawLine(children[0]._drawPos[0], node._drawPos[1], children[-1]._drawPos[0],
node._drawPos[1], VisOpts.hideColor, VisOpts.hideWidth)
if showIndices and (not stopAtCentroids or node._aboveCentroid >= 0):
txt = str(node.GetIndex())
if hasattr(node, '_isCentroid'):
txtColor = piddle.Color(1, .2, .2)
else:
txtColor = piddle.Color(0, 0, 0)
canvas.drawString(txt, node._drawPos[0] - canvas.stringWidth(txt) / 2,
node._drawPos[1] + canvas.fontHeight() / 4, color=txtColor)
if showNodes and hasattr(node, '_isCentroid'):
rad = VisOpts.nodeRad
canvas.drawEllipse(node._drawPos[0] - rad / 2, node._drawPos[1] - rad / 2,
node._drawPos[0] + rad / 2, node._drawPos[1] + rad / 2, piddle.transparent,
fillColor=VisOpts.nodeColor)
txt = str(node._clustID)
canvas.drawString(txt, node._drawPos[0] - canvas.stringWidth(txt) / 2,
node._drawPos[1] + canvas.fontHeight() / 4, color=piddle.Color(0, 0, 0))
if showIndices and not stopAtCentroids:
for pt in pts:
txt = str(pt.GetIndex())
canvas.drawString(
str(pt.GetIndex()), pt._drawPos[0] - canvas.stringWidth(txt) / 2, pt._drawPos[1])
def ClusterToPDF(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0,
stopAtCentroids=0, logScale=0):
""" handles the work of drawing a cluster tree to an PDF file
**Arguments**
- cluster: the cluster tree to be drawn
- fileName: the name of the file to be created
- size: the size of output canvas
- ptColors: if this is specified, the _colors_ will be used to color
the terminal nodes of the cluster tree. (color == _pid.Color_)
- lineWidth: if specified, it will be used for the widths of the lines
used to draw the tree
**Notes**
- if _ptColors_ is the wrong length for the number of possible terminal
node types, this will throw an IndexError
- terminal node types are determined using their _GetData()_ methods
"""
try:
from rdkit.sping.PDF import pidPDF
except ImportError:
from rdkit.piddle import piddlePDF
pidPDF = piddlePDF
canvas = pidPDF.PDFCanvas(size, fileName)
if lineWidth is None:
lineWidth = VisOpts.lineWidth
DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth,
showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale)
if fileName:
canvas.save()
return canvas
def ClusterToSVG(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0,
stopAtCentroids=0, logScale=0):
""" handles the work of drawing a cluster tree to an SVG file
**Arguments**
- cluster: the cluster tree to be drawn
- fileName: the name of the file to be created
- size: the size of output canvas
- ptColors: if this is specified, the _colors_ will be used to color
the terminal nodes of the cluster tree. (color == _pid.Color_)
- lineWidth: if specified, it will be used for the widths of the lines
used to draw the tree
**Notes**
- if _ptColors_ is the wrong length for the number of possible terminal
node types, this will throw an IndexError
- terminal node types are determined using their _GetData()_ methods
"""
try:
from rdkit.sping.SVG import pidSVG
except ImportError:
from rdkit.piddle.piddleSVG import piddleSVG
pidSVG = piddleSVG
canvas = pidSVG.SVGCanvas(size, fileName)
if lineWidth is None:
lineWidth = VisOpts.lineWidth
DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth,
showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale)
if fileName:
canvas.save()
return canvas
def ClusterToImg(cluster, fileName, size=(300, 300), ptColors=[], lineWidth=None, showIndices=0,
stopAtCentroids=0, logScale=0):
""" handles the work of drawing a cluster tree to an image file
**Arguments**
- cluster: the cluster tree to be drawn
- fileName: the name of the file to be created
- size: the size of output canvas
- ptColors: if this is specified, the _colors_ will be used to color
the terminal nodes of the cluster tree. (color == _pid.Color_)
- lineWidth: if specified, it will be used for the widths of the lines
used to draw the tree
**Notes**
- The extension on _fileName_ determines the type of image file created.
All formats supported by PIL can be used.
- if _ptColors_ is the wrong length for the number of possible terminal
node types, this will throw an IndexError
- terminal node types are determined using their _GetData()_ methods
"""
try:
from rdkit.sping.PIL import pidPIL
except ImportError:
from rdkit.piddle import piddlePIL
pidPIL = piddlePIL
canvas = pidPIL.PILCanvas(size, fileName)
if lineWidth is None:
lineWidth = VisOpts.lineWidth
DrawClusterTree(cluster, canvas, size, ptColors=ptColors, lineWidth=lineWidth,
showIndices=showIndices, stopAtCentroids=stopAtCentroids, logScale=logScale)
if fileName:
canvas.save()
return canvas
| 1 | 15,995 | same question: why not just switch this to range too? | rdkit-rdkit | cpp |
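A minimal sketch of what the reviewer is asking about: the same point-placement loop written with the builtin range instead of xrange. range exists in both Python 2 and 3 (in Python 2 it builds a list rather than a lazy iterator, which is negligible for the point counts drawn here), so the six.moves import would not be needed for this loop. The function and parameter names below are illustrative, not part of the RDKit source.

# Hypothetical sketch, not the actual RDKit patch.
def assign_point_positions(pts, x_offset, x_space, y_size, y_space, y_offset):
  # Same layout logic as _AssignPointLocations, but iterating with range.
  for i in range(len(pts)):
    pt = pts[i]
    v = float(pt.GetMetric())
    pt._drawPos = (x_offset + i * x_space, y_size - (v * y_space + y_offset))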
@@ -111,10 +111,10 @@ Variable IO::InquireVariable(const std::string &name)
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::InquireVariable");
- const std::string type(m_IO->InquireVariableType(name));
+ const Type type(m_IO->InquireVariableType(name));
core::VariableBase *variable = nullptr;
- if (type == "unknown")
+ if (type == Type::None)
{
}
#define declare_template_instantiation(T) \ | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* py11IO.cpp
*
* Created on: Mar 14, 2017
* Author: William F Godoy [email protected]
*/
#include "py11IO.h"
#include "adios2/common/ADIOSMacros.h"
#include "adios2/helper/adiosFunctions.h" //GetType<T>
#include "py11types.h"
namespace adios2
{
namespace py11
{
IO::IO(core::IO *io) : m_IO(io) {}
IO::operator bool() const noexcept { return (m_IO == nullptr) ? false : true; }
bool IO::InConfigFile() const
{
helper::CheckForNullptr(m_IO, "in call to IO::InConfigFile");
return m_IO->InConfigFile();
}
void IO::SetEngine(const std::string type)
{
helper::CheckForNullptr(m_IO, "in call to IO::SetEngine");
m_IO->SetEngine(type);
}
void IO::SetParameter(const std::string key, const std::string value)
{
helper::CheckForNullptr(m_IO, "in call to IO::SetParameter");
m_IO->SetParameter(key, value);
}
void IO::SetParameters(const Params ¶meters)
{
helper::CheckForNullptr(m_IO, "in call to IO::SetParameters");
m_IO->SetParameters(parameters);
}
Params IO::Parameters() const
{
helper::CheckForNullptr(m_IO, "in call to IO::Parameters");
return m_IO->GetParameters();
}
size_t IO::AddTransport(const std::string type, const Params ¶meters)
{
helper::CheckForNullptr(m_IO, "in call to IO::AddTransport");
return m_IO->AddTransport(type, parameters);
}
void IO::SetTransportParameter(const size_t transportIndex,
const std::string key, const std::string value)
{
helper::CheckForNullptr(m_IO, "in call to IO::SetTransportParameter");
m_IO->SetTransportParameter(transportIndex, key, value);
}
Variable IO::DefineVariable(const std::string &name)
{
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::DefineVariable");
return Variable(&m_IO->DefineVariable<std::string>(name));
}
Variable IO::DefineVariable(const std::string &name,
const pybind11::array &array, const Dims &shape,
const Dims &start, const Dims &count,
const bool isConstantDims)
{
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::DefineVariable");
core::VariableBase *variable = nullptr;
if (false)
{
}
#define declare_type(T) \
else if (pybind11::isinstance< \
pybind11::array_t<T, pybind11::array::c_style>>(array)) \
{ \
variable = &m_IO->DefineVariable<T>(name, shape, start, count, \
isConstantDims); \
}
ADIOS2_FOREACH_NUMPY_TYPE_1ARG(declare_type)
#undef declare_type
else
{
throw std::invalid_argument("ERROR: variable " + name +
" can't be defined, either type is not "
"supported or is not memory "
"contiguous, in call to DefineVariable\n");
}
return Variable(variable);
}
Variable IO::InquireVariable(const std::string &name)
{
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::InquireVariable");
const std::string type(m_IO->InquireVariableType(name));
core::VariableBase *variable = nullptr;
if (type == "unknown")
{
}
#define declare_template_instantiation(T) \
else if (type == helper::GetType<T>()) \
{ \
variable = m_IO->InquireVariable<T>(name); \
}
ADIOS2_FOREACH_PYTHON_TYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
return Variable(variable);
}
Attribute IO::DefineAttribute(const std::string &name,
const pybind11::array &array,
const std::string &variableName,
const std::string separator)
{
helper::CheckForNullptr(m_IO, "for attribute " + name +
", in call to IO::DefineAttribute");
core::AttributeBase *attribute = nullptr;
if (false)
{
}
#define declare_type(T) \
else if (pybind11::isinstance< \
pybind11::array_t<T, pybind11::array::c_style>>(array)) \
{ \
const T *data = reinterpret_cast<const T *>(array.data()); \
const size_t size = static_cast<size_t>(array.size()); \
attribute = &m_IO->DefineAttribute<T>(name, data, size, variableName, \
separator); \
}
ADIOS2_FOREACH_NUMPY_ATTRIBUTE_TYPE_1ARG(declare_type)
#undef declare_type
else
{
throw std::invalid_argument("ERROR: attribute " + name +
" can't be defined, either type is not "
"supported or is not memory "
"contiguous, in call to DefineAttribute\n");
}
return Attribute(attribute);
}
Attribute IO::DefineAttribute(const std::string &name,
const std::string &stringValue,
const std::string &variableName,
const std::string separator)
{
helper::CheckForNullptr(m_IO, "for attribute " + name +
", in call to IO::DefineAttribute");
return Attribute(&m_IO->DefineAttribute<std::string>(
name, stringValue, variableName, separator));
}
Attribute IO::DefineAttribute(const std::string &name,
const std::vector<std::string> &strings,
const std::string &variableName,
const std::string separator)
{
helper::CheckForNullptr(m_IO, "for attribute " + name +
", in call to IO::DefineAttribute");
return Attribute(&m_IO->DefineAttribute<std::string>(
name, strings.data(), strings.size(), variableName, separator));
}
Attribute IO::InquireAttribute(const std::string &name)
{
helper::CheckForNullptr(m_IO, "for attribute " + name +
", in call to IO::InquireAttribute");
core::AttributeBase *attribute = nullptr;
const std::string type(m_IO->InquireAttributeType(name));
if (type == "unknown")
{
}
#define declare_template_instantiation(T) \
else if (type == helper::GetType<T>()) \
{ \
attribute = m_IO->InquireAttribute<T>(name); \
}
ADIOS2_FOREACH_ATTRIBUTE_STDTYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
return Attribute(attribute);
}
bool IO::RemoveVariable(const std::string &name)
{
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::RemoveVariable");
return m_IO->RemoveVariable(name);
}
void IO::RemoveAllVariables()
{
helper::CheckForNullptr(m_IO, ", in call to IO::RemoveAllVariables");
m_IO->RemoveAllVariables();
}
bool IO::RemoveAttribute(const std::string &name)
{
helper::CheckForNullptr(m_IO, "for variable " + name +
", in call to IO::RemoveAttribute");
return m_IO->RemoveAttribute(name);
}
void IO::RemoveAllAttributes()
{
helper::CheckForNullptr(m_IO, ", in call to IO::RemoveAllAttributes");
m_IO->RemoveAllAttributes();
}
Engine IO::Open(const std::string &name, const int mode)
{
helper::CheckForNullptr(m_IO,
"for engine " + name + ", in call to IO::Open");
return Engine(&m_IO->Open(name, static_cast<adios2::Mode>(mode)));
}
void IO::FlushAll()
{
helper::CheckForNullptr(m_IO, "in call to IO::FlushAll");
m_IO->FlushAll();
}
std::map<std::string, Params> IO::AvailableVariables()
{
helper::CheckForNullptr(m_IO, "in call to IO::AvailableVariables");
return m_IO->GetAvailableVariables();
}
std::map<std::string, Params> IO::AvailableAttributes()
{
helper::CheckForNullptr(m_IO, "in call to IO::AvailableAttributes");
return m_IO->GetAvailableAttributes();
}
std::string IO::VariableType(const std::string &name) const
{
helper::CheckForNullptr(m_IO, "for variable " + name +
" in call to IO::VariableType");
return m_IO->InquireVariableType(name);
}
std::string IO::AttributeType(const std::string &name) const
{
helper::CheckForNullptr(m_IO, "for attribute " + name +
" in call to IO::AttributeType");
return m_IO->InquireAttributeType(name);
}
std::string IO::EngineType() const
{
helper::CheckForNullptr(m_IO, "in call to IO::EngineType");
return m_IO->m_EngineType;
}
} // end namespace py11
} // end namespace adios2
| 1 | 14,271 | @chuckatkins most places used an empty string for "no type", but a few used `"unknown"`. I've converted both cases to `Type::None`. Do you know why there was a distinction before? | ornladios-ADIOS2 | cpp |
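For context, a rough sketch of how this inquiry path is reached from Python through the py11 bindings above. This is an illustrative usage example, not taken from the patch; in particular, the idea that a Variable returned for an unknown name (the Type::None branch, where the wrapped pointer stays null) evaluates as falsy is an assumption here, not something stated in this file.

# Hypothetical usage sketch (Python side of the bindings above).
import adios2

adios = adios2.ADIOS()
io = adios.DeclareIO("reader")
# InquireVariableType reports "no type" for an unknown name, so no template
# branch is taken and the returned wrapper holds a null VariableBase pointer.
var = io.InquireVariable("does-not-exist")
if not var:
    print("variable not defined in this IO")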
@@ -408,7 +408,7 @@ void CUDATreeLearner::copyDenseFeature() {
// looking for dword_features_ non-sparse feature-groups
if (!train_data_->IsMultiGroup(i)) {
dense_feature_group_map_.push_back(i);
- auto sizes_in_byte = train_data_->FeatureGroupSizesInByte(i);
+ auto sizes_in_byte = std::min(train_data_->FeatureGroupSizesInByte(i), static_cast<size_t>(num_data_));
void* tmp_data = train_data_->FeatureGroupData(i);
Log::Debug("Started copying dense features from CPU to GPU - 2");
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(&device_features[copied_feature * num_data_], tmp_data, sizes_in_byte, cudaMemcpyHostToDevice, stream_[device_id])); | 1 | /*!
* Copyright (c) 2020 IBM Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA
#include "cuda_tree_learner.h"
#include <LightGBM/bin.h>
#include <LightGBM/network.h>
#include <LightGBM/cuda/cuda_utils.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/common.h>
#include <pthread.h>
#include <algorithm>
#include <cinttypes>
#include <vector>
#include "../io/dense_bin.hpp"
namespace LightGBM {
#define cudaMemcpy_DEBUG 0 // 1: DEBUG cudaMemcpy
#define ResetTrainingData_DEBUG 0 // 1: Debug ResetTrainingData
#define CUDA_DEBUG 0
static void *launch_cuda_histogram(void *thread_data) {
ThreadData td = *(reinterpret_cast<ThreadData*>(thread_data));
int device_id = td.device_id;
CUDASUCCESS_OR_FATAL(cudaSetDevice(device_id));
// launch cuda kernel
cuda_histogram(td.histogram_size,
td.leaf_num_data, td.num_data, td.use_all_features,
td.is_constant_hessian, td.num_workgroups, td.stream,
td.device_features,
td.device_feature_masks,
td.num_data,
td.device_data_indices,
td.leaf_num_data,
td.device_gradients,
td.device_hessians, td.hessians_const,
td.device_subhistograms, td.sync_counters,
td.device_histogram_outputs,
td.exp_workgroups_per_feature);
CUDASUCCESS_OR_FATAL(cudaGetLastError());
return NULL;
}
CUDATreeLearner::CUDATreeLearner(const Config* config)
:SerialTreeLearner(config) {
use_bagging_ = false;
nthreads_ = 0;
if (config->gpu_use_dp && USE_DP_FLOAT) {
Log::Info("LightGBM using CUDA trainer with DP float!!");
} else {
Log::Info("LightGBM using CUDA trainer with SP float!!");
}
}
CUDATreeLearner::~CUDATreeLearner() {
}
void CUDATreeLearner::Init(const Dataset* train_data, bool is_constant_hessian) {
// initialize SerialTreeLearner
SerialTreeLearner::Init(train_data, is_constant_hessian);
// some additional variables needed for GPU trainer
num_feature_groups_ = train_data_->num_feature_groups();
// Initialize GPU buffers and kernels: get device info
InitGPU(config_->num_gpu);
}
// some functions used for debugging the GPU histogram construction
#if CUDA_DEBUG > 0
void PrintHistograms(hist_t* h, size_t size) {
double total_hess = 0;
for (size_t i = 0; i < size; ++i) {
printf("%03lu=%9.3g,%9.3g\t", i, GET_GRAD(h, i), GET_HESS(h, i));
if ((i & 3) == 3)
printf("\n");
total_hess += GET_HESS(h, i);
}
printf("\nSum hessians: %9.3g\n", total_hess);
}
union Float_t {
int64_t i;
double f;
static int64_t ulp_diff(Float_t a, Float_t b) {
return abs(a.i - b.i);
}
};
int CompareHistograms(hist_t* h1, hist_t* h2, size_t size, int feature_id, int dp_flag, int const_flag) {
int i;
int retval = 0;
printf("Comparing Histograms, feature_id = %d, size = %d\n", feature_id, static_cast<int>(size));
if (dp_flag) { // double precision
double af, bf;
int64_t ai, bi;
for (i = 0; i < static_cast<int>(size); ++i) {
af = GET_GRAD(h1, i);
bf = GET_GRAD(h2, i);
if ((((std::fabs(af - bf))/af) >= 1e-6) && ((std::fabs(af - bf)) >= 1e-6)) {
printf("i = %5d, h1.grad %13.6lf, h2.grad %13.6lf\n", i, af, bf);
++retval;
}
if (const_flag) {
ai = GET_HESS((reinterpret_cast<int64_t *>(h1)), i);
bi = GET_HESS((reinterpret_cast<int64_t *>(h2)), i);
if (ai != bi) {
printf("i = %5d, h1.hess %" PRId64 ", h2.hess %" PRId64 "\n", i, ai, bi);
++retval;
}
} else {
af = GET_HESS(h1, i);
bf = GET_HESS(h2, i);
if (((std::fabs(af - bf))/af) >= 1e-6) {
printf("i = %5d, h1.hess %13.6lf, h2.hess %13.6lf\n", i, af, bf);
++retval;
}
}
}
} else { // single precision
float af, bf;
int ai, bi;
for (i = 0; i < static_cast<int>(size); ++i) {
af = GET_GRAD(h1, i);
bf = GET_GRAD(h2, i);
if ((((std::fabs(af - bf))/af) >= 1e-6) && ((std::fabs(af - bf)) >= 1e-6)) {
printf("i = %5d, h1.grad %13.6f, h2.grad %13.6f\n", i, af, bf);
++retval;
}
if (const_flag) {
ai = GET_HESS(h1, i);
bi = GET_HESS(h2, i);
if (ai != bi) {
printf("i = %5d, h1.hess %d, h2.hess %d\n", i, ai, bi);
++retval;
}
} else {
af = GET_HESS(h1, i);
bf = GET_HESS(h2, i);
if (((std::fabs(af - bf))/af) >= 1e-5) {
printf("i = %5d, h1.hess %13.6f, h2.hess %13.6f\n", i, af, bf);
++retval;
}
}
}
}
printf("DONE Comparing Histograms...\n");
return retval;
}
#endif
int CUDATreeLearner::GetNumWorkgroupsPerFeature(data_size_t leaf_num_data) {
// we roughly want 256 workgroups per device, and we have num_dense_feature4_ feature tuples.
// also guarantee that there are at least 2K examples per workgroup
double x = 256.0 / num_dense_feature_groups_;
int exp_workgroups_per_feature = static_cast<int>(ceil(log2(x)));
double t = leaf_num_data / 1024.0;
Log::Debug("We can have at most %d workgroups per feature4 for efficiency reasons\n"
"Best workgroup size per feature for full utilization is %d\n", static_cast<int>(ceil(t)), (1 << exp_workgroups_per_feature));
exp_workgroups_per_feature = std::min(exp_workgroups_per_feature, static_cast<int>(ceil(log(static_cast<double>(t))/log(2.0))));
if (exp_workgroups_per_feature < 0)
exp_workgroups_per_feature = 0;
if (exp_workgroups_per_feature > kMaxLogWorkgroupsPerFeature)
exp_workgroups_per_feature = kMaxLogWorkgroupsPerFeature;
return exp_workgroups_per_feature;
}
void CUDATreeLearner::GPUHistogram(data_size_t leaf_num_data, bool use_all_features) {
// we have already copied ordered gradients, ordered hessians and indices to GPU
// decide the best number of workgroups working on one feature4 tuple
// set work group size based on feature size
// each 2^exp_workgroups_per_feature workgroups work on a feature4 tuple
int exp_workgroups_per_feature = GetNumWorkgroupsPerFeature(leaf_num_data);
std::vector<int> num_gpu_workgroups;
ThreadData *thread_data = reinterpret_cast<ThreadData*>(_mm_malloc(sizeof(ThreadData) * num_gpu_, 16));
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
int num_gpu_feature_groups = num_gpu_feature_groups_[device_id];
int num_workgroups = (1 << exp_workgroups_per_feature) * num_gpu_feature_groups;
num_gpu_workgroups.push_back(num_workgroups);
if (num_workgroups > preallocd_max_num_wg_[device_id]) {
preallocd_max_num_wg_.at(device_id) = num_workgroups;
CUDASUCCESS_OR_FATAL(cudaFree(device_subhistograms_[device_id]));
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_subhistograms_[device_id]), static_cast<size_t>(num_workgroups * dword_features_ * device_bin_size_ * (3 * hist_bin_entry_sz_ / 2))));
}
// set thread_data
SetThreadData(thread_data, device_id, histogram_size_, leaf_num_data, use_all_features,
num_workgroups, exp_workgroups_per_feature);
}
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
if (pthread_create(cpu_threads_[device_id], NULL, launch_cuda_histogram, reinterpret_cast<void *>(&thread_data[device_id]))) {
Log::Fatal("Error in creating threads.");
}
}
/* Wait for the threads to finish */
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
if (pthread_join(*(cpu_threads_[device_id]), NULL)) {
Log::Fatal("Error in joining threads.");
}
}
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
// copy the results asynchronously. Size depends on if double precision is used
size_t output_size = num_gpu_feature_groups_[device_id] * dword_features_ * device_bin_size_ * hist_bin_entry_sz_;
size_t host_output_offset = offset_gpu_feature_groups_[device_id] * dword_features_ * device_bin_size_ * hist_bin_entry_sz_;
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(reinterpret_cast<char*>(host_histogram_outputs_) + host_output_offset, device_histogram_outputs_[device_id], output_size, cudaMemcpyDeviceToHost, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(histograms_wait_obj_[device_id], stream_[device_id]));
}
}
template <typename HistType>
void CUDATreeLearner::WaitAndGetHistograms(FeatureHistogram* leaf_histogram_array) {
HistType* hist_outputs = reinterpret_cast<HistType*>(host_histogram_outputs_);
#pragma omp parallel for schedule(static, num_gpu_)
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
// when the output is ready, the computation is done
CUDASUCCESS_OR_FATAL(cudaEventSynchronize(histograms_wait_obj_[device_id]));
}
HistType* histograms = reinterpret_cast<HistType*>(leaf_histogram_array[0].RawData() - kHistOffset);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_dense_feature_groups_; ++i) {
if (!feature_masks_[i]) {
continue;
}
int dense_group_index = dense_feature_group_map_[i];
auto old_histogram_array = histograms + train_data_->GroupBinBoundary(dense_group_index) * 2;
int bin_size = train_data_->FeatureGroupNumBin(dense_group_index);
for (int j = 0; j < bin_size; ++j) {
GET_GRAD(old_histogram_array, j) = GET_GRAD(hist_outputs, i * device_bin_size_+ j);
GET_HESS(old_histogram_array, j) = GET_HESS(hist_outputs, i * device_bin_size_+ j);
}
}
}
void CUDATreeLearner::CountDenseFeatureGroups() {
num_dense_feature_groups_ = 0;
for (int i = 0; i < num_feature_groups_; ++i) {
if (!train_data_->IsMultiGroup(i)) {
num_dense_feature_groups_++;
}
}
if (!num_dense_feature_groups_) {
Log::Warning("GPU acceleration is disabled because no non-trival dense features can be found");
}
}
void CUDATreeLearner::prevAllocateGPUMemory() {
// how many feature-group tuples we have
// leave some safe margin for prefetching
// 256 work-items per workgroup. Each work-item prefetches one tuple for that feature
allocated_num_data_ = std::max(num_data_ + 256 * (1 << kMaxLogWorkgroupsPerFeature), allocated_num_data_);
// clear sparse/dense maps
dense_feature_group_map_.clear();
sparse_feature_group_map_.clear();
// do nothing it there is no dense feature
if (!num_dense_feature_groups_) {
return;
}
// calculate number of feature groups per gpu
num_gpu_feature_groups_.resize(num_gpu_);
offset_gpu_feature_groups_.resize(num_gpu_);
int num_features_per_gpu = num_dense_feature_groups_ / num_gpu_;
int remain_features = num_dense_feature_groups_ - num_features_per_gpu * num_gpu_;
int offset = 0;
for (int i = 0; i < num_gpu_; ++i) {
offset_gpu_feature_groups_.at(i) = offset;
num_gpu_feature_groups_.at(i) = (i < remain_features) ? num_features_per_gpu + 1 : num_features_per_gpu;
offset += num_gpu_feature_groups_.at(i);
}
feature_masks_.resize(num_dense_feature_groups_);
Log::Debug("Resized feature masks");
ptr_pinned_feature_masks_ = feature_masks_.data();
Log::Debug("Memset pinned_feature_masks_");
memset(ptr_pinned_feature_masks_, 0, num_dense_feature_groups_);
// histogram bin entry size depends on the precision (single/double)
hist_bin_entry_sz_ = 2 * (config_->gpu_use_dp ? sizeof(hist_t) : sizeof(gpu_hist_t)); // two elements in this "size"
CUDASUCCESS_OR_FATAL(cudaHostAlloc(reinterpret_cast<void **>(&host_histogram_outputs_), static_cast<size_t>(num_dense_feature_groups_ * device_bin_size_ * hist_bin_entry_sz_), cudaHostAllocPortable));
nthreads_ = std::min(omp_get_max_threads(), num_dense_feature_groups_ / dword_features_);
nthreads_ = std::max(nthreads_, 1);
}
// allocate GPU memory for each GPU
void CUDATreeLearner::AllocateGPUMemory() {
#pragma omp parallel for schedule(static, num_gpu_)
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
// do nothing it there is no gpu feature
int num_gpu_feature_groups = num_gpu_feature_groups_[device_id];
if (num_gpu_feature_groups) {
CUDASUCCESS_OR_FATAL(cudaSetDevice(device_id));
// allocate memory for all features
if (device_features_[device_id] != NULL) {
CUDASUCCESS_OR_FATAL(cudaFree(device_features_[device_id]));
}
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_features_[device_id]), static_cast<size_t>(num_gpu_feature_groups * num_data_ * sizeof(uint8_t))));
Log::Debug("Allocated device_features_ addr=%p sz=%lu", device_features_[device_id], num_gpu_feature_groups * num_data_);
// allocate space for gradients and hessians on device
// we will copy gradients and hessians in after ordered_gradients_ and ordered_hessians_ are constructed
if (device_gradients_[device_id] != NULL) {
CUDASUCCESS_OR_FATAL(cudaFree(device_gradients_[device_id]));
}
if (device_hessians_[device_id] != NULL) {
CUDASUCCESS_OR_FATAL(cudaFree(device_hessians_[device_id]));
}
if (device_feature_masks_[device_id] != NULL) {
CUDASUCCESS_OR_FATAL(cudaFree(device_feature_masks_[device_id]));
}
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_gradients_[device_id]), static_cast<size_t>(allocated_num_data_ * sizeof(score_t))));
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_hessians_[device_id]), static_cast<size_t>(allocated_num_data_ * sizeof(score_t))));
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_feature_masks_[device_id]), static_cast<size_t>(num_gpu_feature_groups)));
// copy indices to the device
if (device_data_indices_[device_id] != NULL) {
CUDASUCCESS_OR_FATAL(cudaFree(device_data_indices_[device_id]));
}
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_data_indices_[device_id]), static_cast<size_t>(allocated_num_data_ * sizeof(data_size_t))));
CUDASUCCESS_OR_FATAL(cudaMemsetAsync(device_data_indices_[device_id], 0, allocated_num_data_ * sizeof(data_size_t), stream_[device_id]));
Log::Debug("Memset device_data_indices_");
// create output buffer, each feature has a histogram with device_bin_size_ bins,
// each work group generates a sub-histogram of dword_features_ features.
if (!device_subhistograms_[device_id]) {
// only initialize once here, as this will not need to change when ResetTrainingData() is called
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_subhistograms_[device_id]), static_cast<size_t>(preallocd_max_num_wg_[device_id] * dword_features_ * device_bin_size_ * (3 * hist_bin_entry_sz_ / 2))));
Log::Debug("created device_subhistograms_: %p", device_subhistograms_[device_id]);
}
// create atomic counters for inter-group coordination
CUDASUCCESS_OR_FATAL(cudaFree(sync_counters_[device_id]));
CUDASUCCESS_OR_FATAL(cudaMalloc(&(sync_counters_[device_id]), static_cast<size_t>(num_gpu_feature_groups * sizeof(int))));
CUDASUCCESS_OR_FATAL(cudaMemsetAsync(sync_counters_[device_id], 0, num_gpu_feature_groups * sizeof(int), stream_[device_id]));
// The output buffer is allocated to host directly, to overlap compute and data transfer
CUDASUCCESS_OR_FATAL(cudaFree(device_histogram_outputs_[device_id]));
CUDASUCCESS_OR_FATAL(cudaMalloc(&(device_histogram_outputs_[device_id]), static_cast<size_t>(num_gpu_feature_groups * device_bin_size_ * hist_bin_entry_sz_)));
}
}
}
void CUDATreeLearner::ResetGPUMemory() {
// clear sparse/dense maps
dense_feature_group_map_.clear();
sparse_feature_group_map_.clear();
}
void CUDATreeLearner::copyDenseFeature() {
if (num_feature_groups_ == 0) {
LGBM_config_::current_learner = use_cpu_learner;
return;
}
Log::Debug("Started copying dense features from CPU to GPU");
// find the dense feature-groups and group then into Feature4 data structure (several feature-groups packed into 4 bytes)
size_t copied_feature = 0;
// set device info
int device_id = 0;
uint8_t* device_features = device_features_[device_id];
CUDASUCCESS_OR_FATAL(cudaSetDevice(device_id));
Log::Debug("Started copying dense features from CPU to GPU - 1");
for (int i = 0; i < num_feature_groups_; ++i) {
// looking for dword_features_ non-sparse feature-groups
if (!train_data_->IsMultiGroup(i)) {
dense_feature_group_map_.push_back(i);
auto sizes_in_byte = train_data_->FeatureGroupSizesInByte(i);
void* tmp_data = train_data_->FeatureGroupData(i);
Log::Debug("Started copying dense features from CPU to GPU - 2");
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(&device_features[copied_feature * num_data_], tmp_data, sizes_in_byte, cudaMemcpyHostToDevice, stream_[device_id]));
Log::Debug("Started copying dense features from CPU to GPU - 3");
copied_feature++;
// reset device info
if (copied_feature == static_cast<size_t>(num_gpu_feature_groups_[device_id])) {
CUDASUCCESS_OR_FATAL(cudaEventRecord(features_future_[device_id], stream_[device_id]));
device_id += 1;
copied_feature = 0;
if (device_id < num_gpu_) {
device_features = device_features_[device_id];
CUDASUCCESS_OR_FATAL(cudaSetDevice(device_id));
}
}
} else {
sparse_feature_group_map_.push_back(i);
}
}
}
// InitGPU w/ num_gpu
void CUDATreeLearner::InitGPU(int num_gpu) {
// Get the max bin size, used for selecting best GPU kernel
max_num_bin_ = 0;
#if CUDA_DEBUG >= 1
printf("bin_size: ");
#endif
for (int i = 0; i < num_feature_groups_; ++i) {
if (train_data_->IsMultiGroup(i)) {
continue;
}
#if CUDA_DEBUG >= 1
printf("%d, ", train_data_->FeatureGroupNumBin(i));
#endif
max_num_bin_ = std::max(max_num_bin_, train_data_->FeatureGroupNumBin(i));
}
#if CUDA_DEBUG >= 1
printf("\n");
#endif
if (max_num_bin_ <= 16) {
device_bin_size_ = 16;
histogram_size_ = 16;
dword_features_ = 1;
} else if (max_num_bin_ <= 64) {
device_bin_size_ = 64;
histogram_size_ = 64;
dword_features_ = 1;
} else if (max_num_bin_ <= 256) {
Log::Debug("device_bin_size_ = 256");
device_bin_size_ = 256;
histogram_size_ = 256;
dword_features_ = 1;
} else {
Log::Fatal("bin size %d cannot run on GPU", max_num_bin_);
}
if (max_num_bin_ == 65) {
Log::Warning("Setting max_bin to 63 is sugguested for best performance");
}
if (max_num_bin_ == 17) {
Log::Warning("Setting max_bin to 15 is sugguested for best performance");
}
// get num_dense_feature_groups_
CountDenseFeatureGroups();
if (num_gpu > num_dense_feature_groups_) num_gpu = num_dense_feature_groups_;
// initialize GPU
int gpu_count;
CUDASUCCESS_OR_FATAL(cudaGetDeviceCount(&gpu_count));
num_gpu_ = (gpu_count < num_gpu) ? gpu_count : num_gpu;
// set cpu threads
cpu_threads_ = reinterpret_cast<pthread_t **>(_mm_malloc(sizeof(pthread_t *)*num_gpu_, 16));
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
cpu_threads_[device_id] = reinterpret_cast<pthread_t *>(_mm_malloc(sizeof(pthread_t), 16));
}
// resize device memory pointers
device_features_.resize(num_gpu_);
device_gradients_.resize(num_gpu_);
device_hessians_.resize(num_gpu_);
device_feature_masks_.resize(num_gpu_);
device_data_indices_.resize(num_gpu_);
sync_counters_.resize(num_gpu_);
device_subhistograms_.resize(num_gpu_);
device_histogram_outputs_.resize(num_gpu_);
// create stream & events to handle multiple GPUs
preallocd_max_num_wg_.resize(num_gpu_, 1024);
stream_.resize(num_gpu_);
hessians_future_.resize(num_gpu_);
gradients_future_.resize(num_gpu_);
indices_future_.resize(num_gpu_);
features_future_.resize(num_gpu_);
kernel_start_.resize(num_gpu_);
kernel_wait_obj_.resize(num_gpu_);
histograms_wait_obj_.resize(num_gpu_);
for (int i = 0; i < num_gpu_; ++i) {
CUDASUCCESS_OR_FATAL(cudaSetDevice(i));
CUDASUCCESS_OR_FATAL(cudaStreamCreate(&(stream_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(hessians_future_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(gradients_future_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(indices_future_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(features_future_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(kernel_start_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(kernel_wait_obj_[i])));
CUDASUCCESS_OR_FATAL(cudaEventCreate(&(histograms_wait_obj_[i])));
}
allocated_num_data_ = 0;
prevAllocateGPUMemory();
AllocateGPUMemory();
copyDenseFeature();
}
Tree* CUDATreeLearner::Train(const score_t* gradients, const score_t *hessians) {
Tree *ret = SerialTreeLearner::Train(gradients, hessians);
return ret;
}
void CUDATreeLearner::ResetTrainingDataInner(const Dataset* train_data, bool is_constant_hessian, bool reset_multi_val_bin) {
// check data size
data_size_t old_allocated_num_data = allocated_num_data_;
SerialTreeLearner::ResetTrainingDataInner(train_data, is_constant_hessian, reset_multi_val_bin);
#if ResetTrainingData_DEBUG == 1
serial_time = std::chrono::steady_clock::now() - start_serial_time;
#endif
num_feature_groups_ = train_data_->num_feature_groups();
// GPU memory has to been reallocated because data may have been changed
#if ResetTrainingData_DEBUG == 1
auto start_alloc_gpu_time = std::chrono::steady_clock::now();
#endif
// AllocateGPUMemory only when the number of data increased
int old_num_feature_groups = num_dense_feature_groups_;
CountDenseFeatureGroups();
if ((old_allocated_num_data < (num_data_ + 256 * (1 << kMaxLogWorkgroupsPerFeature))) || (old_num_feature_groups < num_dense_feature_groups_)) {
prevAllocateGPUMemory();
AllocateGPUMemory();
} else {
ResetGPUMemory();
}
copyDenseFeature();
#if ResetTrainingData_DEBUG == 1
alloc_gpu_time = std::chrono::steady_clock::now() - start_alloc_gpu_time;
#endif
// setup GPU kernel arguments after we allocating all the buffers
#if ResetTrainingData_DEBUG == 1
auto start_set_arg_time = std::chrono::steady_clock::now();
#endif
#if ResetTrainingData_DEBUG == 1
set_arg_time = std::chrono::steady_clock::now() - start_set_arg_time;
reset_training_data_time = std::chrono::steady_clock::now() - start_reset_training_data_time;
Log::Info("reset_training_data_time: %f secs.", reset_training_data_time.count() * 1e-3);
Log::Info("serial_time: %f secs.", serial_time.count() * 1e-3);
Log::Info("alloc_gpu_time: %f secs.", alloc_gpu_time.count() * 1e-3);
Log::Info("set_arg_time: %f secs.", set_arg_time.count() * 1e-3);
#endif
}
void CUDATreeLearner::BeforeTrain() {
#if cudaMemcpy_DEBUG == 1
std::chrono::duration<double, std::milli> device_hessians_time = std::chrono::milliseconds(0);
std::chrono::duration<double, std::milli> device_gradients_time = std::chrono::milliseconds(0);
#endif
SerialTreeLearner::BeforeTrain();
#if CUDA_DEBUG >= 2
printf("CUDATreeLearner::BeforeTrain() Copying initial full gradients and hessians to device\n");
#endif
// Copy initial full hessians and gradients to GPU.
// We start copying as early as possible, instead of at ConstructHistogram().
if ((hessians_ != NULL) && (gradients_ != NULL)) {
if (!use_bagging_ && num_dense_feature_groups_) {
Log::Debug("CudaTreeLearner::BeforeTrain() No baggings, dense_feature_groups_=%d", num_dense_feature_groups_);
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
if (!(share_state_->is_constant_hessian)) {
Log::Debug("CUDATreeLearner::BeforeTrain(): Starting hessians_ -> device_hessians_");
#if cudaMemcpy_DEBUG == 1
auto start_device_hessians_time = std::chrono::steady_clock::now();
#endif
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_hessians_[device_id], hessians_, num_data_*sizeof(score_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(hessians_future_[device_id], stream_[device_id]));
#if cudaMemcpy_DEBUG == 1
device_hessians_time = std::chrono::steady_clock::now() - start_device_hessians_time;
#endif
Log::Debug("queued copy of device_hessians_");
}
#if cudaMemcpy_DEBUG == 1
auto start_device_gradients_time = std::chrono::steady_clock::now();
#endif
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_gradients_[device_id], gradients_, num_data_ * sizeof(score_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(gradients_future_[device_id], stream_[device_id]));
#if cudaMemcpy_DEBUG == 1
device_gradients_time = std::chrono::steady_clock::now() - start_device_gradients_time;
#endif
Log::Debug("CUDATreeLearner::BeforeTrain: issued gradients_ -> device_gradients_");
}
}
}
// use bagging
if ((hessians_ != NULL) && (gradients_ != NULL)) {
if (data_partition_->leaf_count(0) != num_data_ && num_dense_feature_groups_) {
// On GPU, we start copying indices, gradients and hessians now, instead at ConstructHistogram()
// copy used gradients and hessians to ordered buffer
const data_size_t* indices = data_partition_->indices();
data_size_t cnt = data_partition_->leaf_count(0);
// transfer the indices to GPU
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_data_indices_[device_id], indices, cnt * sizeof(*indices), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(indices_future_[device_id], stream_[device_id]));
if (!(share_state_->is_constant_hessian)) {
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_hessians_[device_id], const_cast<void*>(reinterpret_cast<const void*>(&(hessians_[0]))), num_data_ * sizeof(score_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(hessians_future_[device_id], stream_[device_id]));
}
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_gradients_[device_id], const_cast<void*>(reinterpret_cast<const void*>(&(gradients_[0]))), num_data_ * sizeof(score_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(gradients_future_[device_id], stream_[device_id]));
}
}
}
}
bool CUDATreeLearner::BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf) {
int smaller_leaf;
data_size_t num_data_in_left_child = GetGlobalDataCountInLeaf(left_leaf);
data_size_t num_data_in_right_child = GetGlobalDataCountInLeaf(right_leaf);
// only have root
if (right_leaf < 0) {
smaller_leaf = -1;
} else if (num_data_in_left_child < num_data_in_right_child) {
smaller_leaf = left_leaf;
} else {
smaller_leaf = right_leaf;
}
// Copy indices, gradients and hessians as early as possible
if (smaller_leaf >= 0 && num_dense_feature_groups_) {
// only need to initialize for smaller leaf
// Get leaf boundary
const data_size_t* indices = data_partition_->indices();
data_size_t begin = data_partition_->leaf_begin(smaller_leaf);
data_size_t end = begin + data_partition_->leaf_count(smaller_leaf);
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_data_indices_[device_id], &indices[begin], (end-begin) * sizeof(data_size_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(indices_future_[device_id], stream_[device_id]));
}
}
const bool ret = SerialTreeLearner::BeforeFindBestSplit(tree, left_leaf, right_leaf);
return ret;
}
bool CUDATreeLearner::ConstructGPUHistogramsAsync(
const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data) {
if (num_data <= 0) {
return false;
}
// do nothing if no features can be processed on GPU
if (!num_dense_feature_groups_) {
Log::Debug("no dense feature groups, returning");
return false;
}
// copy data indices if it is not null
if (data_indices != nullptr && num_data != num_data_) {
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_data_indices_[device_id], data_indices, num_data * sizeof(data_size_t), cudaMemcpyHostToDevice, stream_[device_id]));
CUDASUCCESS_OR_FATAL(cudaEventRecord(indices_future_[device_id], stream_[device_id]));
}
}
// converted indices in is_feature_used to feature-group indices
std::vector<int8_t> is_feature_group_used(num_feature_groups_, 0);
#pragma omp parallel for schedule(static, 1024) if (num_features_ >= 2048)
for (int i = 0; i < num_features_; ++i) {
if (is_feature_used[i]) {
int feature_group = train_data_->Feature2Group(i);
is_feature_group_used[feature_group] = (train_data_->FeatureGroupNumBin(feature_group) <= 16) ? 2 : 1;
}
}
// construct the feature masks for dense feature-groups
int used_dense_feature_groups = 0;
#pragma omp parallel for schedule(static, 1024) reduction(+:used_dense_feature_groups) if (num_dense_feature_groups_ >= 2048)
for (int i = 0; i < num_dense_feature_groups_; ++i) {
if (is_feature_group_used[dense_feature_group_map_[i]]) {
feature_masks_[i] = is_feature_group_used[dense_feature_group_map_[i]];
++used_dense_feature_groups;
} else {
feature_masks_[i] = 0;
}
}
bool use_all_features = ((used_dense_feature_groups == num_dense_feature_groups_) && (data_indices != nullptr));
// if no feature group is used, just return and do not use GPU
if (used_dense_feature_groups == 0) {
return false;
}
// if not all feature groups are used, we need to transfer the feature mask to GPU
// otherwise, we will use a specialized GPU kernel with all feature groups enabled
// We now copy even if all features are used.
#pragma omp parallel for schedule(static, num_gpu_)
for (int device_id = 0; device_id < num_gpu_; ++device_id) {
int offset = offset_gpu_feature_groups_[device_id];
CUDASUCCESS_OR_FATAL(cudaMemcpyAsync(device_feature_masks_[device_id], ptr_pinned_feature_masks_ + offset, num_gpu_feature_groups_[device_id] , cudaMemcpyHostToDevice, stream_[device_id]));
}
// All data have been prepared, now run the GPU kernel
GPUHistogram(num_data, use_all_features);
return true;
}
void CUDATreeLearner::ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract) {
std::vector<int8_t> is_sparse_feature_used(num_features_, 0);
std::vector<int8_t> is_dense_feature_used(num_features_, 0);
int num_dense_features = 0, num_sparse_features = 0;
#pragma omp parallel for schedule(static)
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (!is_feature_used[feature_index]) continue;
if (train_data_->IsMultiGroup(train_data_->Feature2Group(feature_index))) {
is_sparse_feature_used[feature_index] = 1;
num_sparse_features++;
} else {
is_dense_feature_used[feature_index] = 1;
num_dense_features++;
}
}
// construct smaller leaf
hist_t* ptr_smaller_leaf_hist_data = smaller_leaf_histogram_array_[0].RawData() - kHistOffset;
// Check workgroups per feature4 tuple..
int exp_workgroups_per_feature = GetNumWorkgroupsPerFeature(smaller_leaf_splits_->num_data_in_leaf());
// if the workgroup per feature is 1 (2^0), return as the work is too small for a GPU
if (exp_workgroups_per_feature == 0) {
return SerialTreeLearner::ConstructHistograms(is_feature_used, use_subtract);
}
  // ConstructGPUHistogramsAsync will return true if there are available feature groups dispatched to GPU
bool is_gpu_used = ConstructGPUHistogramsAsync(is_feature_used,
nullptr, smaller_leaf_splits_->num_data_in_leaf());
// then construct sparse features on CPU
// We set data_indices to null to avoid rebuilding ordered gradients/hessians
if (num_sparse_features > 0) {
train_data_->ConstructHistograms(is_sparse_feature_used,
smaller_leaf_splits_->data_indices(), smaller_leaf_splits_->num_data_in_leaf(),
gradients_, hessians_,
ordered_gradients_.data(), ordered_hessians_.data(),
share_state_.get(),
ptr_smaller_leaf_hist_data);
}
// wait for GPU to finish, only if GPU is actually used
if (is_gpu_used) {
if (config_->gpu_use_dp) {
// use double precision
WaitAndGetHistograms<hist_t>(smaller_leaf_histogram_array_);
} else {
// use single precision
WaitAndGetHistograms<gpu_hist_t>(smaller_leaf_histogram_array_);
}
}
  // Compare GPU histogram with CPU histogram, useful for debugging GPU code problems
// #define CUDA_DEBUG_COMPARE
#ifdef CUDA_DEBUG_COMPARE
printf("Start Comparing_Histogram between GPU and CPU, num_dense_feature_groups_ = %d\n", num_dense_feature_groups_);
bool compare = true;
for (int i = 0; i < num_dense_feature_groups_; ++i) {
if (!feature_masks_[i])
continue;
int dense_feature_group_index = dense_feature_group_map_[i];
size_t size = train_data_->FeatureGroupNumBin(dense_feature_group_index);
hist_t* ptr_smaller_leaf_hist_data = smaller_leaf_histogram_array_[0].RawData() - kHistOffset;
hist_t* current_histogram = ptr_smaller_leaf_hist_data + train_data_->GroupBinBoundary(dense_feature_group_index) * 2;
hist_t* gpu_histogram = new hist_t[size * 2];
data_size_t num_data = smaller_leaf_splits_->num_data_in_leaf();
printf("Comparing histogram for feature %d, num_data %d, num_data_ = %d, %lu bins\n", dense_feature_group_index, num_data, num_data_, size);
std::copy(current_histogram, current_histogram + size * 2, gpu_histogram);
std::memset(current_histogram, 0, size * sizeof(hist_t) * 2);
if (train_data_->FeatureGroupBin(dense_feature_group_index) == nullptr) {
continue;
}
if (num_data == num_data_) {
if (share_state_->is_constant_hessian) {
printf("ConstructHistogram(): num_data == num_data_ is_constant_hessian\n");
train_data_->FeatureGroupBin(dense_feature_group_index)->ConstructHistogram(
0,
num_data,
gradients_,
current_histogram);
} else {
printf("ConstructHistogram(): num_data == num_data_\n");
train_data_->FeatureGroupBin(dense_feature_group_index)->ConstructHistogram(
0,
num_data,
gradients_, hessians_,
current_histogram);
}
} else {
if (share_state_->is_constant_hessian) {
printf("ConstructHistogram(): is_constant_hessian\n");
train_data_->FeatureGroupBin(dense_feature_group_index)->ConstructHistogram(
smaller_leaf_splits_->data_indices(),
0,
num_data,
gradients_,
current_histogram);
} else {
printf("ConstructHistogram(): 4, num_data = %d, num_data_ = %d\n", num_data, num_data_);
train_data_->FeatureGroupBin(dense_feature_group_index)->ConstructHistogram(
smaller_leaf_splits_->data_indices(),
0,
num_data,
gradients_, hessians_,
current_histogram);
}
}
int retval;
if ((num_data != num_data_) && compare) {
retval = CompareHistograms(gpu_histogram, current_histogram, size, dense_feature_group_index, config_->gpu_use_dp, share_state_->is_constant_hessian);
printf("CompareHistograms reports %d errors\n", retval);
compare = false;
}
retval = CompareHistograms(gpu_histogram, current_histogram, size, dense_feature_group_index, config_->gpu_use_dp, share_state_->is_constant_hessian);
if (num_data == num_data_) {
printf("CompareHistograms reports %d errors\n", retval);
} else {
printf("CompareHistograms reports %d errors\n", retval);
}
std::copy(gpu_histogram, gpu_histogram + size * 2, current_histogram);
delete [] gpu_histogram;
}
printf("End Comparing Histogram between GPU and CPU\n");
fflush(stderr);
fflush(stdout);
#endif
if (larger_leaf_histogram_array_ != nullptr && !use_subtract) {
// construct larger leaf
hist_t* ptr_larger_leaf_hist_data = larger_leaf_histogram_array_[0].RawData() - kHistOffset;
is_gpu_used = ConstructGPUHistogramsAsync(is_feature_used,
larger_leaf_splits_->data_indices(), larger_leaf_splits_->num_data_in_leaf());
// then construct sparse features on CPU
// We set data_indices to null to avoid rebuilding ordered gradients/hessians
if (num_sparse_features > 0) {
train_data_->ConstructHistograms(is_sparse_feature_used,
larger_leaf_splits_->data_indices(), larger_leaf_splits_->num_data_in_leaf(),
gradients_, hessians_,
ordered_gradients_.data(), ordered_hessians_.data(),
share_state_.get(),
ptr_larger_leaf_hist_data);
}
// wait for GPU to finish, only if GPU is actually used
if (is_gpu_used) {
if (config_->gpu_use_dp) {
// use double precision
WaitAndGetHistograms<hist_t>(larger_leaf_histogram_array_);
} else {
// use single precision
WaitAndGetHistograms<gpu_hist_t>(larger_leaf_histogram_array_);
}
}
}
}
void CUDATreeLearner::FindBestSplits(const Tree* tree) {
SerialTreeLearner::FindBestSplits(tree);
#if CUDA_DEBUG >= 3
for (int feature_index = 0; feature_index < num_features_; ++feature_index) {
if (!col_sampler_.is_feature_used_bytree()[feature_index]) continue;
if (parent_leaf_histogram_array_ != nullptr
&& !parent_leaf_histogram_array_[feature_index].is_splittable()) {
smaller_leaf_histogram_array_[feature_index].set_is_splittable(false);
continue;
}
size_t bin_size = train_data_->FeatureNumBin(feature_index) + 1;
printf("CUDATreeLearner::FindBestSplits() Feature %d bin_size=%zd smaller leaf:\n", feature_index, bin_size);
PrintHistograms(smaller_leaf_histogram_array_[feature_index].RawData() - kHistOffset, bin_size);
if (larger_leaf_splits_ == nullptr || larger_leaf_splits_->leaf_index() < 0) { continue; }
printf("CUDATreeLearner::FindBestSplits() Feature %d bin_size=%zd larger leaf:\n", feature_index, bin_size);
PrintHistograms(larger_leaf_histogram_array_[feature_index].RawData() - kHistOffset, bin_size);
}
#endif
}
void CUDATreeLearner::Split(Tree* tree, int best_Leaf, int* left_leaf, int* right_leaf) {
const SplitInfo& best_split_info = best_split_per_leaf_[best_Leaf];
#if CUDA_DEBUG >= 2
printf("Splitting leaf %d with feature %d thresh %d gain %f stat %f %f %f %f\n", best_Leaf, best_split_info.feature, best_split_info.threshold, best_split_info.gain, best_split_info.left_sum_gradient, best_split_info.right_sum_gradient, best_split_info.left_sum_hessian, best_split_info.right_sum_hessian);
#endif
SerialTreeLearner::Split(tree, best_Leaf, left_leaf, right_leaf);
if (Network::num_machines() == 1) {
// do some sanity check for the GPU algorithm
if (best_split_info.left_count < best_split_info.right_count) {
if ((best_split_info.left_count != smaller_leaf_splits_->num_data_in_leaf()) ||
          (best_split_info.right_count != larger_leaf_splits_->num_data_in_leaf())) {
Log::Fatal("Bug in GPU histogram! split %d: %d, smaller_leaf: %d, larger_leaf: %d\n", best_split_info.left_count, best_split_info.right_count, smaller_leaf_splits_->num_data_in_leaf(), larger_leaf_splits_->num_data_in_leaf());
}
} else {
if ((best_split_info.left_count != larger_leaf_splits_->num_data_in_leaf()) ||
          (best_split_info.right_count != smaller_leaf_splits_->num_data_in_leaf())) {
Log::Fatal("Bug in GPU histogram! split %d: %d, smaller_leaf: %d, larger_leaf: %d\n", best_split_info.left_count, best_split_info.right_count, smaller_leaf_splits_->num_data_in_leaf(), larger_leaf_splits_->num_data_in_leaf());
}
}
}
}
} // namespace LightGBM
#undef cudaMemcpy_DEBUG
#endif // USE_CUDA
| 1 | 27,816 | should we assert for the type for `FeatureGroupData` ? I think it should be 1-Byte type. | microsoft-LightGBM | cpp |
@@ -51,10 +51,10 @@ class TestCube(ComparisonTestCase):
def test_dimension_values_vdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.dimension_values('unknown', flat=False),
- np.flipud(np.array([[ 0, 4, 8],
- [ 1, 5, 9],
- [ 2, 6, 10],
- [ 3, 7, 11]], dtype=np.int32).T))
+ np.array([[ 0, 4, 8],
+ [ 1, 5, 9],
+ [ 2, 6, 10],
+ [ 3, 7, 11]], dtype=np.int32).T)
def test_range_kdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude']) | 1 | import numpy as np
import unittest
try:
from iris.tests.stock import lat_lon_cube
except ImportError:
raise unittest.SkipTest("Could not import iris, skipping iris interface "
"tests.")
from holoviews.core.data import Dataset
from holoviews.core.data.iris import coord_to_dimension
from holoviews.element.comparison import ComparisonTestCase
class TestCube(ComparisonTestCase):
def setUp(self):
self.cube = lat_lon_cube()
self.epsilon = 0.01
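        # Small tolerance added to slice upper bounds so the select tests below treat them as inclusive.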
def test_dim_to_coord(self):
dim = coord_to_dimension(self.cube.coords()[0])
self.assertEqual(dim.name, 'latitude')
self.assertEqual(dim.unit, 'degrees')
def test_initialize_cube(self):
cube = Dataset(self.cube)
self.assertEqual(cube.dimensions(label=True),
['longitude', 'latitude', 'unknown'])
def test_initialize_cube_with_kdims(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.dimensions('key', True),
['longitude', 'latitude'])
def test_initialize_cube_with_vdims(self):
cube = Dataset(self.cube, vdims=['Quantity'])
self.assertEqual(cube.dimensions('value', True),
['Quantity'])
def test_dimension_values_kdim_expanded(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.dimension_values('longitude'),
np.array([-1, -1, -1, 0, 0, 0,
1, 1, 1, 2, 2, 2], dtype=np.int32))
def test_dimension_values_kdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.dimension_values('longitude', expanded=False),
np.array([-1, 0, 1, 2], dtype=np.int32))
def test_dimension_values_vdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.dimension_values('unknown', flat=False),
np.flipud(np.array([[ 0, 4, 8],
[ 1, 5, 9],
[ 2, 6, 10],
[ 3, 7, 11]], dtype=np.int32).T))
def test_range_kdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.range('longitude'), (-1, 2))
def test_range_vdim(self):
cube = Dataset(self.cube, kdims=['longitude', 'latitude'])
self.assertEqual(cube.range('unknown'), (0, 11))
def test_select_index(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude=0).data.data,
np.array([[1, 5, 9]], dtype=np.int32))
def test_select_slice(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude=(0, 1+self.epsilon)).data.data,
np.array([[1, 2], [5, 6], [9, 10]], dtype=np.int32))
def test_select_set(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude={0, 1}).data.data,
np.array([[1, 2], [5, 6], [9, 10]], dtype=np.int32))
def test_select_multi_index(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude=0, latitude=0), 5)
def test_select_multi_slice1(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude=(0, 1+self.epsilon),
latitude=(0, 1+self.epsilon)).data.data,
np.array([[5, 6], [9, 10]], dtype=np.int32))
def test_select_multi_slice2(self):
cube = Dataset(self.cube)
self.assertEqual(cube.select(longitude={0, 2},
latitude={0, 2}).data.data,
np.array([[5, 7]], dtype=np.int32))
def test_getitem_index(self):
cube = Dataset(self.cube)
self.assertEqual(cube[0].data.data,
np.array([[1, 5, 9]], dtype=np.int32))
def test_getitem_scalar(self):
cube = Dataset(self.cube)
self.assertEqual(cube[0, 0], 5)
| 1 | 15,261 | As long as you are sure this is definitely correct now... :-) | holoviz-holoviews | py |
@@ -166,7 +166,7 @@ public class ActionsFragment extends SubscriberFragment implements View.OnClickL
tempBasal.setVisibility(View.GONE);
tempBasalCancel.setVisibility(View.VISIBLE);
final TemporaryBasal activeTemp = MainApp.getConfigBuilder().getTempBasalFromHistory(System.currentTimeMillis());
- tempBasalCancel.setText(MainApp.instance().getString(R.string.cancel) + "\n" + activeTemp.toStringShort());
+ tempBasalCancel.setText(MainApp.instance().getString(R.string.cancel) + " " + activeTemp.toStringShort());
} else {
tempBasal.setVisibility(View.VISIBLE);
tempBasalCancel.setVisibility(View.GONE); | 1 | package info.nightscout.androidaps.plugins.Actions;
import android.app.Activity;
import android.os.Bundle;
import android.os.Handler;
import android.os.HandlerThread;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import com.crashlytics.android.Crashlytics;
import com.crashlytics.android.answers.Answers;
import com.crashlytics.android.answers.CustomEvent;
import com.squareup.otto.Subscribe;
import info.nightscout.androidaps.Config;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.db.ExtendedBolus;
import info.nightscout.androidaps.db.TemporaryBasal;
import info.nightscout.androidaps.events.EventExtendedBolusChange;
import info.nightscout.androidaps.events.EventInitializationChanged;
import info.nightscout.androidaps.events.EventRefreshOverview;
import info.nightscout.androidaps.events.EventTempBasalChange;
import info.nightscout.androidaps.interfaces.PumpInterface;
import info.nightscout.androidaps.plugins.Actions.dialogs.FillDialog;
import info.nightscout.androidaps.plugins.Actions.dialogs.NewExtendedBolusDialog;
import info.nightscout.androidaps.plugins.Actions.dialogs.NewTempBasalDialog;
import info.nightscout.androidaps.plugins.Careportal.CareportalFragment;
import info.nightscout.androidaps.plugins.Careportal.Dialogs.NewNSTreatmentDialog;
import info.nightscout.androidaps.plugins.Careportal.OptionsToShow;
import info.nightscout.androidaps.plugins.Common.SubscriberFragment;
/**
* A simple {@link Fragment} subclass.
*/
public class ActionsFragment extends SubscriberFragment implements View.OnClickListener {
static ActionsPlugin actionsPlugin = new ActionsPlugin();
static public ActionsPlugin getPlugin() {
return actionsPlugin;
}
Button profileSwitch;
Button tempTarget;
Button extendedBolus;
Button extendedBolusCancel;
Button tempBasal;
Button tempBasalCancel;
Button fill;
private static Handler sHandler;
private static HandlerThread sHandlerThread;
public ActionsFragment() {
super();
if (sHandlerThread == null) {
sHandlerThread = new HandlerThread(ActionsFragment.class.getSimpleName());
sHandlerThread.start();
sHandler = new Handler(sHandlerThread.getLooper());
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
try {
View view = inflater.inflate(R.layout.actions_fragment, container, false);
profileSwitch = (Button) view.findViewById(R.id.actions_profileswitch);
tempTarget = (Button) view.findViewById(R.id.actions_temptarget);
extendedBolus = (Button) view.findViewById(R.id.actions_extendedbolus);
extendedBolusCancel = (Button) view.findViewById(R.id.actions_extendedbolus_cancel);
tempBasal = (Button) view.findViewById(R.id.actions_settempbasal);
tempBasalCancel = (Button) view.findViewById(R.id.actions_canceltempbasal);
fill = (Button) view.findViewById(R.id.actions_fill);
profileSwitch.setOnClickListener(this);
tempTarget.setOnClickListener(this);
extendedBolus.setOnClickListener(this);
extendedBolusCancel.setOnClickListener(this);
tempBasal.setOnClickListener(this);
tempBasalCancel.setOnClickListener(this);
fill.setOnClickListener(this);
updateGUI();
return view;
} catch (Exception e) {
Crashlytics.logException(e);
}
return null;
}
@Subscribe
public void onStatusEvent(final EventInitializationChanged ev) {
updateGUI();
}
@Subscribe
public void onStatusEvent(final EventRefreshOverview ev) {
updateGUI();
}
@Subscribe
public void onStatusEvent(final EventExtendedBolusChange ev) {
updateGUI();
}
@Subscribe
public void onStatusEvent(final EventTempBasalChange ev) {
updateGUI();
}
@Override
protected void updateGUI() {
Activity activity = getActivity();
if (activity != null)
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
if (MainApp.getConfigBuilder().getActiveProfileInterface().getProfile() == null) {
tempTarget.setVisibility(View.GONE);
profileSwitch.setVisibility(View.GONE);
extendedBolus.setVisibility(View.GONE);
extendedBolusCancel.setVisibility(View.GONE);
tempBasal.setVisibility(View.GONE);
tempBasalCancel.setVisibility(View.GONE);
fill.setVisibility(View.GONE);
return;
}
boolean allowProfileSwitch = MainApp.getConfigBuilder().getActiveProfileInterface().getProfile().getProfileList().size() > 1;
if (!MainApp.getConfigBuilder().getPumpDescription().isSetBasalProfileCapable || !MainApp.getConfigBuilder().isInitialized() || MainApp.getConfigBuilder().isSuspended() || !allowProfileSwitch)
profileSwitch.setVisibility(View.GONE);
else
profileSwitch.setVisibility(View.VISIBLE);
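                    // Extended bolus buttons are shown only when the pump supports them, is initialized, is not suspended, and temp basals are not being faked with extended boluses.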
if (!MainApp.getConfigBuilder().getPumpDescription().isExtendedBolusCapable || !MainApp.getConfigBuilder().isInitialized() || MainApp.getConfigBuilder().isSuspended() || MainApp.getConfigBuilder().isFakingTempsByExtendedBoluses()) {
extendedBolus.setVisibility(View.GONE);
extendedBolusCancel.setVisibility(View.GONE);
} else {
if (MainApp.getConfigBuilder().isInHistoryExtendedBoluslInProgress()) {
extendedBolus.setVisibility(View.GONE);
extendedBolusCancel.setVisibility(View.VISIBLE);
ExtendedBolus running = MainApp.getConfigBuilder().getExtendedBolusFromHistory(System.currentTimeMillis());
extendedBolusCancel.setText(MainApp.instance().getString(R.string.cancel) + " " + running.toString());
} else {
extendedBolus.setVisibility(View.VISIBLE);
extendedBolusCancel.setVisibility(View.GONE);
}
}
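                    // Likewise, toggle between the set and cancel temp basal buttons depending on whether a temporary basal is currently running.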
if (!MainApp.getConfigBuilder().getPumpDescription().isTempBasalCapable || !MainApp.getConfigBuilder().isInitialized() || MainApp.getConfigBuilder().isSuspended()) {
tempBasal.setVisibility(View.GONE);
tempBasalCancel.setVisibility(View.GONE);
} else {
if (MainApp.getConfigBuilder().isTempBasalInProgress()) {
tempBasal.setVisibility(View.GONE);
tempBasalCancel.setVisibility(View.VISIBLE);
final TemporaryBasal activeTemp = MainApp.getConfigBuilder().getTempBasalFromHistory(System.currentTimeMillis());
tempBasalCancel.setText(MainApp.instance().getString(R.string.cancel) + "\n" + activeTemp.toStringShort());
} else {
tempBasal.setVisibility(View.VISIBLE);
tempBasalCancel.setVisibility(View.GONE);
}
}
if (!MainApp.getConfigBuilder().getPumpDescription().isRefillingCapable || !MainApp.getConfigBuilder().isInitialized() || MainApp.getConfigBuilder().isSuspended())
fill.setVisibility(View.GONE);
else
fill.setVisibility(View.VISIBLE);
if (!Config.APS)
tempTarget.setVisibility(View.GONE);
else
tempTarget.setVisibility(View.VISIBLE);
}
});
}
@Override
public void onClick(View view) {
FragmentManager manager = getFragmentManager();
final PumpInterface pump = MainApp.getConfigBuilder();
switch (view.getId()) {
case R.id.actions_profileswitch:
NewNSTreatmentDialog newDialog = new NewNSTreatmentDialog();
final OptionsToShow profileswitch = CareportalFragment.PROFILESWITCH;
profileswitch.executeProfileSwitch = true;
newDialog.setOptions(profileswitch, R.string.careportal_profileswitch);
newDialog.show(manager, "NewNSTreatmentDialog");
break;
case R.id.actions_temptarget:
NewNSTreatmentDialog newTTDialog = new NewNSTreatmentDialog();
final OptionsToShow temptarget = CareportalFragment.TEMPTARGET;
temptarget.executeTempTarget = true;
newTTDialog.setOptions(temptarget, R.string.careportal_temporarytarget);
newTTDialog.show(manager, "NewNSTreatmentDialog");
break;
case R.id.actions_extendedbolus:
NewExtendedBolusDialog newExtendedDialog = new NewExtendedBolusDialog();
newExtendedDialog.show(manager, "NewExtendedDialog");
break;
case R.id.actions_extendedbolus_cancel:
if (MainApp.getConfigBuilder().isInHistoryExtendedBoluslInProgress()) {
sHandler.post(new Runnable() {
@Override
public void run() {
pump.cancelExtendedBolus();
Answers.getInstance().logCustom(new CustomEvent("CancelExtended"));
}
});
}
break;
case R.id.actions_canceltempbasal:
if (MainApp.getConfigBuilder().isTempBasalInProgress()) {
sHandler.post(new Runnable() {
@Override
public void run() {
pump.cancelTempBasal(true);
Answers.getInstance().logCustom(new CustomEvent("CancelTemp"));
}
});
}
break;
case R.id.actions_settempbasal:
NewTempBasalDialog newTempDialog = new NewTempBasalDialog();
newTempDialog.show(manager, "NewTempDialog");
break;
case R.id.actions_fill:
FillDialog fillDialog = new FillDialog();
fillDialog.show(manager, "FillDialog");
break;
}
}
}
| 1 | 29,679 | ... so that all action buttons have the same height :-) | MilosKozak-AndroidAPS | java |
@@ -21,6 +21,12 @@
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/dds/log/Colors.hpp>
#include <fastrtps/xmlparser/XMLProfileManager.h>
+#include <fastdds/dds/domain/DomainParticipantFactory.hpp>
+#include <fastdds/dds/domain/DomainParticipant.hpp>
+#include <fastdds/dds/publisher/DataWriterListener.hpp>
+#include <fastdds/dds/subscriber/qos/DataReaderQos.hpp>
+#include <fastdds/dds/subscriber/DataReader.hpp>
+#include <fastdds/dds/publisher/DataWriter.hpp>
#include <numeric>
#include <cmath> | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
 * @file LatencyTestPublisher.cpp
*
*/
#include "LatencyTestPublisher.hpp"
#include <fastdds/dds/log/Log.hpp>
#include <fastdds/dds/log/Colors.hpp>
#include <fastrtps/xmlparser/XMLProfileManager.h>
#include <numeric>
#include <cmath>
#include <fstream>
#include <inttypes.h>
#define TIME_LIMIT_US 10000
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using namespace eprosima::fastrtps::types;
using namespace std;
LatencyTestPublisher::LatencyTestPublisher()
: participant_(nullptr)
, data_publisher_(nullptr)
, command_publisher_(nullptr)
, data_subscriber_(nullptr)
, command_subscriber_(nullptr)
, overhead_time_(0.0)
, discovery_count_(0)
, command_msg_count_(0)
, data_msg_count_(0)
, received_count_(0)
, test_status_(0)
, subscribers_(0)
, samples_(0)
, latency_type_in_(nullptr)
, latency_type_out_(nullptr)
, dynamic_data_type_in_(nullptr)
, dynamic_data_type_out_(nullptr)
, data_pub_listener_(nullptr)
, data_sub_listener_(nullptr)
, command_pub_listener_(nullptr)
, command_sub_listener_(nullptr)
{
forced_domain_ = -1;
data_pub_listener_.latency_publisher_ = this;
data_sub_listener_.latency_publisher_ = this;
command_pub_listener_.latency_publisher_ = this;
command_sub_listener_.latency_publisher_ = this;
export_prefix_ = "";
raw_data_file_ = "";
}
LatencyTestPublisher::~LatencyTestPublisher()
{
Domain::removeParticipant(participant_);
}
bool LatencyTestPublisher::init(
int subscribers,
int samples,
bool reliable,
uint32_t pid,
bool hostname,
bool export_csv,
const std::string& export_prefix,
std::string raw_data_file,
const PropertyPolicy& part_property_policy,
const PropertyPolicy& property_policy,
const std::string& xml_config_file,
bool dynamic_data,
int forced_domain,
LatencyDataSizes& latency_data_sizes)
{
// Initialize state
xml_config_file_ = xml_config_file;
samples_ = samples;
subscribers_ = subscribers;
export_csv_ = export_csv;
export_prefix_ = export_prefix;
reliable_ = reliable;
dynamic_data_ = dynamic_data;
forced_domain_ = forced_domain;
raw_data_file_ = raw_data_file;
data_size_pub_ = latency_data_sizes.sample_sizes();
// Init dynamic data
if (dynamic_data_)
{
// Create basic builders
DynamicTypeBuilder_ptr struct_type_builder(DynamicTypeBuilderFactory::get_instance()->create_struct_builder());
// Add members to the struct.
struct_type_builder->add_member(0, "seqnum", DynamicTypeBuilderFactory::get_instance()->create_uint32_type());
struct_type_builder->add_member(1, "data",
DynamicTypeBuilderFactory::get_instance()->create_sequence_builder(
DynamicTypeBuilderFactory::get_instance()->create_byte_type(), data_size_pub_.back()
));
struct_type_builder->set_name("LatencyType");
dynamic_type_ = struct_type_builder->build();
dynamic_pub_sub_type_.SetDynamicType(dynamic_type_);
}
// Init output files
output_files_.push_back(std::make_shared<std::stringstream>());
output_files_.push_back(std::make_shared<std::stringstream>());
uint32_t data_index = DATA_BASE_INDEX;
for (std::vector<uint32_t>::iterator it = data_size_pub_.begin(); it != data_size_pub_.end(); ++it)
{
// Reliability
std::string str_reliable = "besteffort";
if (reliable_)
{
str_reliable = "reliable";
}
// Summary files
*output_files_[MINIMUM_INDEX] << "\"" << samples_ << " samples of " << *it + 4 << " bytes (us)\"";
*output_files_[AVERAGE_INDEX] << "\"" << samples_ << " samples of " << *it + 4 << " bytes (us)\"";
if (it != data_size_pub_.end() - 1)
{
*output_files_[MINIMUM_INDEX] << ",";
*output_files_[AVERAGE_INDEX] << ",";
}
output_files_.push_back(std::make_shared<std::stringstream>());
*output_files_[data_index] << "\"Minimum of " << samples_ << " samples (" << str_reliable << ")\",";
*output_files_[data_index] << "\"Average of " << samples_ << " samples (" << str_reliable << ")\"" << std::endl;
data_index++;
}
*output_files_[MINIMUM_INDEX] << std::endl;
*output_files_[AVERAGE_INDEX] << std::endl;
/* Create RTPSParticipant */
std::string participant_profile_name = "pub_participant_profile";
ParticipantAttributes participant_attributes;
// Default domain
participant_attributes.domainId = pid % 230;
// Default participant name
participant_attributes.rtps.setName("latency_test_publisher");
// Load XML configuration
if (xml_config_file_.length() > 0)
{
if (eprosima::fastrtps::xmlparser::XMLP_ret::XML_OK !=
eprosima::fastrtps::xmlparser::XMLProfileManager::fillParticipantAttributes(
participant_profile_name, participant_attributes))
{
return false;
}
}
// Apply user's force domain
if (forced_domain_ >= 0)
{
participant_attributes.domainId = forced_domain_;
}
// If the user has specified a participant property policy with command line arguments, it overrides whatever the
// XML configures.
if (PropertyPolicyHelper::length(part_property_policy) > 0)
{
participant_attributes.rtps.properties = part_property_policy;
}
// Create the participant
participant_ = Domain::createParticipant(participant_attributes);
if (participant_ == nullptr)
{
return false;
}
// Register the data type
if (dynamic_data_)
{
Domain::registerType(participant_, &dynamic_pub_sub_type_);
}
else
{
Domain::registerType(participant_, (TopicDataType*)&latency_data_type_);
}
// Register the command type
Domain::registerType(participant_, (TopicDataType*)&latency_command_type_);
/* Create Data Publisher */
std::string profile_name = "pub_publisher_profile";
PublisherAttributes publisher_data_attributes;
publisher_data_attributes.topic.topicDataType = "LatencyType";
publisher_data_attributes.topic.topicKind = NO_KEY;
std::ostringstream data_pub_topic_name;
data_pub_topic_name << "LatencyTest_";
if (hostname)
{
data_pub_topic_name << asio::ip::host_name() << "_";
}
data_pub_topic_name << pid << "_PUB2SUB";
publisher_data_attributes.topic.topicName = data_pub_topic_name.str();
publisher_data_attributes.times.heartbeatPeriod.seconds = 0;
publisher_data_attributes.times.heartbeatPeriod.nanosec = 100000000;
if (!reliable)
{
publisher_data_attributes.qos.m_reliability.kind = eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS;
}
publisher_data_attributes.properties = property_policy;
publisher_data_attributes.historyMemoryPolicy = eprosima::fastrtps::rtps::PREALLOCATED_WITH_REALLOC_MEMORY_MODE;
if (xml_config_file_.length() > 0)
{
data_publisher_ =
Domain::createPublisher(participant_, profile_name, (PublisherListener*)&this->data_pub_listener_);
}
else
{
data_publisher_ =
Domain::createPublisher(participant_, publisher_data_attributes,
(PublisherListener*)&this->data_pub_listener_);
}
if (data_publisher_ == nullptr)
{
return false;
}
/* Create Data Echo Subscriber */
profile_name = "pub_subscriber_profile";
SubscriberAttributes subscriber_data_attributes;
subscriber_data_attributes.topic.topicDataType = "LatencyType";
subscriber_data_attributes.topic.topicKind = NO_KEY;
std::ostringstream data_sub_topic_name;
data_sub_topic_name << "LatencyTest_";
if (hostname)
{
data_sub_topic_name << asio::ip::host_name() << "_";
}
data_sub_topic_name << pid << "_SUB2PUB";
subscriber_data_attributes.topic.topicName = data_sub_topic_name.str();
if (reliable)
{
subscriber_data_attributes.qos.m_reliability.kind = eprosima::fastrtps::RELIABLE_RELIABILITY_QOS;
}
subscriber_data_attributes.properties = property_policy;
subscriber_data_attributes.historyMemoryPolicy = eprosima::fastrtps::rtps::PREALLOCATED_WITH_REALLOC_MEMORY_MODE;
if (xml_config_file_.length() > 0)
{
data_subscriber_ = Domain::createSubscriber(participant_, profile_name, &this->data_sub_listener_);
}
else
{
data_subscriber_ = Domain::createSubscriber(participant_, subscriber_data_attributes,
&this->data_sub_listener_);
}
if (data_subscriber_ == nullptr)
{
return false;
}
/* Create Command Publisher */
PublisherAttributes publisher_command_attributes;
publisher_command_attributes.topic.topicDataType = "TestCommandType";
publisher_command_attributes.topic.topicKind = NO_KEY;
std::ostringstream command_pub_topic_name;
command_pub_topic_name << "LatencyTest_Command_";
if (hostname)
{
command_pub_topic_name << asio::ip::host_name() << "_";
}
command_pub_topic_name << pid << "_PUB2SUB";
publisher_command_attributes.topic.topicName = command_pub_topic_name.str();
publisher_command_attributes.topic.historyQos.kind = eprosima::fastrtps::KEEP_ALL_HISTORY_QOS;
publisher_command_attributes.qos.m_durability.kind = eprosima::fastrtps::TRANSIENT_LOCAL_DURABILITY_QOS;
publisher_command_attributes.qos.m_reliability.kind = eprosima::fastrtps::RELIABLE_RELIABILITY_QOS;
publisher_command_attributes.qos.m_publishMode.kind = eprosima::fastrtps::SYNCHRONOUS_PUBLISH_MODE;
command_publisher_ = Domain::createPublisher(participant_, publisher_command_attributes,
&this->command_pub_listener_);
if (command_publisher_ == nullptr)
{
return false;
}
/* Create Command Subscriber */
SubscriberAttributes subscriber_command_attributes;
subscriber_command_attributes.topic.topicDataType = "TestCommandType";
subscriber_command_attributes.topic.topicKind = NO_KEY;
std::ostringstream command_sub_topic_name;
command_sub_topic_name << "LatencyTest_Command_";
if (hostname)
{
command_sub_topic_name << asio::ip::host_name() << "_";
}
command_sub_topic_name << pid << "_SUB2PUB";
subscriber_command_attributes.topic.topicName = command_sub_topic_name.str();
subscriber_command_attributes.topic.historyQos.kind = eprosima::fastrtps::KEEP_ALL_HISTORY_QOS;
subscriber_command_attributes.qos.m_reliability.kind = eprosima::fastrtps::RELIABLE_RELIABILITY_QOS;
subscriber_command_attributes.qos.m_durability.kind = eprosima::fastrtps::TRANSIENT_LOCAL_DURABILITY_QOS;
publisher_command_attributes.qos.m_publishMode.kind = eprosima::fastrtps::SYNCHRONOUS_PUBLISH_MODE;
command_subscriber_ = Domain::createSubscriber(participant_, subscriber_command_attributes,
&this->command_sub_listener_);
if (command_subscriber_ == nullptr)
{
return false;
}
/* Calculate Overhead */
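    // Estimate the cost of a single clock read by timing 1000 consecutive readings; this overhead is later subtracted from every latency sample.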
start_time_ = std::chrono::steady_clock::now();
for (int i = 0; i < 1000; ++i)
{
end_time_ = std::chrono::steady_clock::now();
}
overhead_time_ = std::chrono::duration<double, std::micro>(end_time_ - start_time_) / 1001;
cout << "Overhead " << overhead_time_.count() << " ns" << endl;
/* Create the raw_data_file and add the header */
if (raw_data_file_ != "")
{
raw_sample_count_ = 0;
std::ofstream data_file;
data_file.open(raw_data_file_);
data_file << "Sample,Payload [Bytes],Latency [us]" << std::endl;
}
return true;
}
void LatencyTestPublisher::DataPubListener::onPublicationMatched(
Publisher* /*pub*/,
MatchingInfo& info)
{
std::unique_lock<std::mutex> lock(latency_publisher_->mutex_);
if (info.status == MATCHED_MATCHING)
{
std::cout << C_MAGENTA << "Data Pub Matched" << C_DEF << std::endl;
matched_++;
if (matched_ > latency_publisher_->subscribers_)
{
std::cout << "More matched subscribers than expected" << std::endl;
latency_publisher_->test_status_ = -1;
}
++latency_publisher_->discovery_count_;
}
else
{
--latency_publisher_->discovery_count_;
}
lock.unlock();
latency_publisher_->discovery_cv_.notify_one();
}
void LatencyTestPublisher::DataSubListener::onSubscriptionMatched(
Subscriber* /*sub*/,
MatchingInfo& info)
{
std::unique_lock<std::mutex> lock(latency_publisher_->mutex_);
if (info.status == MATCHED_MATCHING)
{
std::cout << C_MAGENTA << "Data Sub Matched" << C_DEF << std::endl;
matched_++;
if (matched_ > latency_publisher_->subscribers_)
{
std::cout << "More matched subscribers than expected" << std::endl;
latency_publisher_->test_status_ = -1;
}
++latency_publisher_->discovery_count_;
}
else
{
--latency_publisher_->discovery_count_;
}
lock.unlock();
latency_publisher_->discovery_cv_.notify_one();
}
void LatencyTestPublisher::CommandPubListener::onPublicationMatched(
Publisher* /*pub*/,
MatchingInfo& info)
{
std::unique_lock<std::mutex> lock(latency_publisher_->mutex_);
if (info.status == MATCHED_MATCHING)
{
std::cout << C_MAGENTA << "Command Pub Matched" << C_DEF << std::endl;
matched_++;
if (matched_ > latency_publisher_->subscribers_)
{
std::cout << "More matched subscribers than expected" << std::endl;
latency_publisher_->test_status_ = -1;
}
++latency_publisher_->discovery_count_;
}
else
{
--latency_publisher_->discovery_count_;
}
lock.unlock();
latency_publisher_->discovery_cv_.notify_one();
}
void LatencyTestPublisher::CommandSubListener::onSubscriptionMatched(
Subscriber* /*sub*/,
MatchingInfo& info)
{
std::unique_lock<std::mutex> lock(latency_publisher_->mutex_);
if (info.status == MATCHED_MATCHING)
{
std::cout << C_MAGENTA << "Command Sub Matched" << C_DEF << std::endl;
matched_++;
if (matched_ > latency_publisher_->subscribers_)
{
std::cout << "More matched subscribers than expected" << std::endl;
latency_publisher_->test_status_ = -1;
}
++latency_publisher_->discovery_count_;
}
else
{
--latency_publisher_->discovery_count_;
}
lock.unlock();
latency_publisher_->discovery_cv_.notify_one();
}
void LatencyTestPublisher::CommandSubListener::onNewDataMessage(
Subscriber* subscriber)
{
TestCommandType command;
SampleInfo_t info;
if (subscriber->takeNextData((void*)&command, &info))
{
if (info.sampleKind == ALIVE)
{
if (command.m_command == BEGIN)
{
latency_publisher_->mutex_.lock();
++latency_publisher_->command_msg_count_;
latency_publisher_->mutex_.unlock();
latency_publisher_->command_msg_cv_.notify_one();
}
}
}
else
{
std::cout << "Problem reading" << std::endl;
}
}
void LatencyTestPublisher::DataSubListener::onNewDataMessage(
Subscriber* subscriber)
{
if (latency_publisher_->dynamic_data_)
{
subscriber->takeNextData((void*)latency_publisher_->dynamic_data_type_in_, &latency_publisher_->sampleinfo_);
if (latency_publisher_->dynamic_data_type_in_->get_uint32_value(0) ==
latency_publisher_->dynamic_data_type_out_->get_uint32_value(0))
{
// Factor of 2 below is to calculate the roundtrip divided by two. Note that the overhead does not
// need to be halved, as we access the clock twice per round trip
latency_publisher_->end_time_ = std::chrono::steady_clock::now();
latency_publisher_->times_.push_back(std::chrono::duration<double, std::micro>(
latency_publisher_->end_time_ - latency_publisher_->start_time_) / 2. -
latency_publisher_->overhead_time_);
latency_publisher_->received_count_++;
// Reset seqnum from out data
latency_publisher_->dynamic_data_type_out_->set_uint32_value(0, 0);
latency_publisher_->mutex_.lock();
++latency_publisher_->data_msg_count_;
if (latency_publisher_->data_msg_count_ >= latency_publisher_->subscribers_)
{
latency_publisher_->data_msg_cv_.notify_one();
}
latency_publisher_->mutex_.unlock();
}
}
else
{
subscriber->takeNextData((void*)latency_publisher_->latency_type_in_, &latency_publisher_->sampleinfo_);
if (latency_publisher_->latency_type_in_->seqnum == latency_publisher_->latency_type_out_->seqnum)
{
latency_publisher_->end_time_ = std::chrono::steady_clock::now();
latency_publisher_->times_.push_back(std::chrono::duration<double, std::micro>(
latency_publisher_->end_time_ - latency_publisher_->start_time_) / 2. -
latency_publisher_->overhead_time_);
latency_publisher_->received_count_++;
latency_publisher_->mutex_.lock();
++latency_publisher_->data_msg_count_;
if (latency_publisher_->data_msg_count_ >= latency_publisher_->subscribers_)
{
// Reset seqnum from out data
latency_publisher_->latency_type_out_->seqnum = 0;
latency_publisher_->data_msg_cv_.notify_one();
}
latency_publisher_->mutex_.unlock();
}
}
}
void LatencyTestPublisher::run()
{
    // WAIT FOR THE DISCOVERY PROCESS TO FINISH:
// EACH SUBSCRIBER NEEDS 4 Matchings (2 publishers and 2 subscribers)
std::unique_lock<std::mutex> disc_lock(mutex_);
while (discovery_count_ != (subscribers_ * 4))
{
discovery_cv_.wait(disc_lock);
}
disc_lock.unlock();
std::cout << C_B_MAGENTA << "Pub: DISCOVERY COMPLETE " << C_DEF << std::endl;
for (std::vector<uint32_t>::iterator payload = data_size_pub_.begin(); payload != data_size_pub_.end(); ++payload)
{
if (!this->test(*payload))
{
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(100));
if (payload != data_size_pub_.end() - 1)
{
*output_files_[MINIMUM_INDEX] << ",";
*output_files_[AVERAGE_INDEX] << ",";
}
}
std::cout << "Pub: REMOVING PUBLISHER" << std::endl;
Domain::removePublisher(this->command_publisher_);
std::cout << "Pub: REMOVING SUBSCRIBER" << std::endl;
Domain::removeSubscriber(command_subscriber_);
std::string str_reliable = "besteffort";
if (reliable_)
{
str_reliable = "reliable";
}
// Print a summary table with the measurements
printf("Printing round-trip times in us, statistics for %d samples\n", samples_);
printf(" Bytes, Samples, stdev, mean, min, 50%%, 90%%, 99%%, 99.99%%, max\n");
printf("--------,--------,--------,--------,--------,--------,--------,--------,--------,--------,\n");
for (uint16_t i = 0; i < stats_.size(); i++)
{
print_stats(DATA_BASE_INDEX + i, stats_[i]);
if (export_csv_)
{
export_csv("_" + std::to_string(stats_[i].bytes_) + "_", str_reliable, *output_files_[i + 2]);
}
}
if (export_csv_)
{
export_csv("_minimum_", str_reliable, *output_files_[MINIMUM_INDEX]);
export_csv("_average_", str_reliable, *output_files_[AVERAGE_INDEX]);
}
}
void LatencyTestPublisher::export_csv(
const std::string& data_name,
const std::string& str_reliable,
const std::stringstream& data_stream)
{
std::ofstream out_file;
std::string prefix = export_prefix_;
if (prefix.length() == 0)
{
prefix = "perf_LatencyTest";
}
out_file.open(prefix + data_name + str_reliable + ".csv");
out_file << data_stream.str();
out_file.close();
}
bool LatencyTestPublisher::test(
uint32_t datasize)
{
test_status_ = 0;
received_count_ = 0;
if (dynamic_data_)
{
dynamic_data_type_in_ = DynamicDataFactory::get_instance()->create_data(dynamic_type_);
dynamic_data_type_out_ = DynamicDataFactory::get_instance()->create_data(dynamic_type_);
MemberId id_in;
MemberId id_out;
DynamicData* data_in = dynamic_data_type_in_->loan_value(dynamic_data_type_in_->get_member_id_at_index(1));
DynamicData* data_out = dynamic_data_type_out_->loan_value(
dynamic_data_type_out_->get_member_id_at_index(1));
for (uint32_t i = 0; i < datasize; ++i)
{
data_in->insert_sequence_data(id_in);
data_in->set_byte_value(0, id_in);
data_out->insert_sequence_data(id_out);
data_out->set_byte_value(0, id_out);
}
dynamic_data_type_in_->return_loaned_value(data_in);
dynamic_data_type_out_->return_loaned_value(data_out);
}
else
{
latency_type_in_ = new LatencyType(datasize);
latency_type_out_ = new LatencyType(datasize);
}
times_.clear();
TestCommandType command;
command.m_command = READY;
command_publisher_->write(&command);
std::unique_lock<std::mutex> lock(mutex_);
command_msg_cv_.wait(lock, [&]()
{
return command_msg_count_ >= subscribers_;
});
command_msg_count_ = 0;
lock.unlock();
    // The first measurement is usually not representative, so we take one more and then drop the first one.
for (unsigned int count = 1; count <= samples_ + 1; ++count)
{
if (dynamic_data_)
{
dynamic_data_type_in_->set_uint32_value(0, 0);
dynamic_data_type_out_->set_uint32_value(count, 0);
start_time_ = std::chrono::steady_clock::now();
data_publisher_->write((void*)dynamic_data_type_out_);
}
else
{
latency_type_in_->seqnum = 0;
latency_type_out_->seqnum = count;
start_time_ = std::chrono::steady_clock::now();
data_publisher_->write((void*)latency_type_out_);
}
lock.lock();
data_msg_cv_.wait_for(lock, std::chrono::milliseconds(4), [&]()
{
return data_msg_count_ >= subscribers_;
});
data_msg_count_ = 0;
lock.unlock();
}
command.m_command = STOP;
command_publisher_->write(&command);
if (test_status_ != 0)
{
std::cout << "Error in test " << std::endl;
return false;
}
// TEST FINISHED:
size_t removed = 0;
data_publisher_->removeAllChange(&removed);
// Drop the first measurement, as it's usually not representative
times_.erase(times_.begin());
// Log all data to CSV file if specified
if (raw_data_file_ != "")
{
export_raw_data(datasize + 4);
}
analyze_times(datasize);
if (dynamic_data_)
{
DynamicDataFactory::get_instance()->delete_data(dynamic_data_type_in_);
DynamicDataFactory::get_instance()->delete_data(dynamic_data_type_out_);
}
else
{
delete(latency_type_in_);
delete(latency_type_out_);
}
return true;
}
void LatencyTestPublisher::analyze_times(
uint32_t datasize)
{
// Collect statistics
TimeStats stats;
stats.bytes_ = datasize + 4;
stats.received_ = received_count_ - 1; // Because we are not counting the first one.
stats.minimum_ = *std::min_element(times_.begin(), times_.end());
stats.maximum_ = *std::max_element(times_.begin(), times_.end());
stats.mean_ = std::accumulate(times_.begin(), times_.end(),
std::chrono::duration<double, std::micro>(0)).count() / times_.size();
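    // Accumulate squared deviations from the mean to compute the standard deviation.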
double aux_stdev = 0;
for (std::vector<std::chrono::duration<double, std::micro> >::iterator tit = times_.begin(); tit != times_.end();
++tit)
{
aux_stdev += pow(((*tit).count() - stats.mean_), 2);
}
aux_stdev = sqrt(aux_stdev / times_.size());
stats.stdev_ = aux_stdev;
/* Percentiles */
std::sort(times_.begin(), times_.end());
size_t elem = 0;
elem = static_cast<size_t>(times_.size() * 0.5);
if (elem > 0 && elem <= times_.size())
{
stats.percentile_50_ = times_.at(--elem).count();
}
else
{
stats.percentile_50_ = NAN;
}
elem = static_cast<size_t>(times_.size() * 0.9);
if (elem > 0 && elem <= times_.size())
{
stats.percentile_90_ = times_.at(--elem).count();
}
else
{
stats.percentile_90_ = NAN;
}
elem = static_cast<size_t>(times_.size() * 0.99);
if (elem > 0 && elem <= times_.size())
{
stats.percentile_99_ = times_.at(--elem).count();
}
else
{
stats.percentile_99_ = NAN;
}
elem = static_cast<size_t>(times_.size() * 0.9999);
if (elem > 0 && elem <= times_.size())
{
stats.percentile_9999_ = times_.at(--elem).count();
}
else
{
stats.percentile_9999_ = NAN;
}
stats_.push_back(stats);
}
void LatencyTestPublisher::print_stats(
uint32_t data_index,
TimeStats& stats)
{
*output_files_[MINIMUM_INDEX] << "\"" << stats.minimum_.count() << "\"";
*output_files_[AVERAGE_INDEX] << "\"" << stats.mean_ << "\"";
*output_files_[data_index] << "\"" << stats.minimum_.count() << "\",\"" << stats.mean_ << "\"" << std::endl;
#ifdef _WIN32
printf("%8I64u,%8u,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f \n",
stats.bytes_, stats.received_, stats.stdev_, stats.mean_, stats.minimum_.count(), stats.percentile_50_,
stats.percentile_90_, stats.percentile_99_, stats.percentile_9999_, stats.maximum_.count());
#else
printf("%8" PRIu64 ",%8u,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f,%8.3f \n",
stats.bytes_, stats.received_, stats.stdev_, stats.mean_, stats.minimum_.count(), stats.percentile_50_,
stats.percentile_90_, stats.percentile_99_, stats.percentile_9999_, stats.maximum_.count());
#endif // ifdef _WIN32
}
void LatencyTestPublisher::export_raw_data(
uint32_t datasize)
{
std::ofstream data_file;
data_file.open(raw_data_file_, std::fstream::app);
for (std::vector<std::chrono::duration<double, std::micro> >::iterator tit = times_.begin(); tit != times_.end();
++tit)
{
data_file << ++raw_sample_count_ << "," << datasize << "," << (*tit).count() << std::endl;
}
data_file.close();
}
| 1 | 21,435 | Use correct, alpha sorted, include order. Correct order means: 1. Header corresponding to this source (i.e. `"LatencyTestPublisher.hpp"`) 2. C system headers 3. C++ system headers 4. Alpha-sorted external libraries headers 5. Alpha-sorted public headers from this project 6. Alpha-sorted private headers | eProsima-Fast-DDS | cpp |
@@ -58,6 +58,10 @@ type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
+// RequestFactory allows clients to configure their outgoing http requests. If not set,
+// a default implementation will use the HostPort to make a POST request with the request body.
+type RequestFactory func(*transport.Request) (*http.Request, error)
+
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) portion of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
opentracinglog "github.com/opentracing/opentracing-go/log"
"go.uber.org/yarpc"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/introspection"
intyarpcerrors "go.uber.org/yarpc/internal/yarpcerrors"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) portion of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// AddHeader specifies that an HTTP outbound should always include the given
// header in outgoing requests.
//
// httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN"))
//
// Note that headers starting with "Rpc-" are reserved by YARPC. This function
// will panic if the header starts with "Rpc-".
func AddHeader(key, value string) OutboundOption {
if strings.HasPrefix(strings.ToLower(key), "rpc-") {
panic(fmt.Errorf(
"invalid header name %q: "+
`headers starting with "Rpc-" are reserved by YARPC`, key))
}
return func(o *Outbound) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Add(key, value)
}
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: lifecycle.NewOnce(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
bothResponseError: true,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound that sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
// Headers to add to all outgoing requests.
headers http.Header
once *lifecycle.Once
// should only be false in testing
bothResponseError bool
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Chooser returns the outbound's peer chooser.
func (o *Outbound) Chooser() peer.Chooser {
return o.chooser
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes a HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http unary outbound was nil")
}
return o.call(ctx, treq)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http oneway outbound was nil")
}
_, err := o.call(ctx, treq)
if err != nil {
return nil, err
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
start := time.Now()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "missing context deadline")
}
ttl := deadline.Sub(start)
hreq, err := o.createRequest(treq)
if err != nil {
return nil, err
}
hreq.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, hreq, span, err := o.withOpentracingSpan(ctx, hreq, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
hreq = o.withCoreHeaders(hreq, treq, ttl)
hreq = hreq.WithContext(ctx)
response, err := o.roundTrip(hreq, treq, start)
if err != nil {
span.SetTag("error", true)
span.LogFields(opentracinglog.String("event", err.Error()))
return nil, err
}
span.SetTag("http.status_code", response.StatusCode)
	// Validate that the service name in the response matches the request; return a yarpcerrors.CodeInternal error if it does not
if match, resSvcName := checkServiceMatch(treq.Service, response.Header); !match {
return nil, transport.UpdateSpanWithErr(span,
yarpcerrors.InternalErrorf("service name sent from the request "+
"does not match the service name received in the response, sent %q, got: %q", treq.Service, resSvcName))
}
tres := &transport.Response{
Headers: applicationHeaders.FromHTTPHeaders(response.Header, transport.NewHeaders()),
Body: response.Body,
ApplicationError: response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus,
}
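	// When both sides advertise support for returning an application error body together with a YARPC error, surface the response alongside the decoded error for non-2xx statuses.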
bothResponseError := response.Header.Get(BothResponseErrorHeader) == AcceptTrue
if bothResponseError && o.bothResponseError {
if response.StatusCode >= 300 {
return tres, getYARPCErrorFromResponse(response, true)
}
return tres, nil
}
if response.StatusCode >= 200 && response.StatusCode < 300 {
return tres, nil
}
return nil, getYARPCErrorFromResponse(response, false)
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*httpPeer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*httpPeer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*httpPeer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
tags := opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
}
for k, v := range yarpc.OpentracingTags {
tags[k] = v
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
tags,
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
// Add default headers to all requests.
for k, vs := range o.headers {
for _, v := range vs {
req.Header.Add(k, v)
}
}
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
if o.bothResponseError {
req.Header.Set(AcceptsBothResponseErrorHeader, AcceptTrue)
}
return req
}
func getYARPCErrorFromResponse(response *http.Response, bothResponseError bool) error {
var contents string
if bothResponseError {
contents = response.Header.Get(ErrorMessageHeader)
} else {
contentsBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
contents = string(contentsBytes)
if err := response.Body.Close(); err != nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
}
// use the status code if we can't get a code from the headers
code := statusCodeToBestCode(response.StatusCode)
if errorCodeText := response.Header.Get(ErrorCodeHeader); errorCodeText != "" {
var errorCode yarpcerrors.Code
// TODO: what to do with error?
if err := errorCode.UnmarshalText([]byte(errorCodeText)); err == nil {
code = errorCode
}
}
return intyarpcerrors.NewWithNamef(
code,
response.Header.Get(ErrorNameHeader),
strings.TrimSuffix(contents, "\n"),
)
}
// Only does verification if there is a response header
func checkServiceMatch(reqSvcName string, resHeaders http.Header) (bool, string) {
serviceName := resHeaders.Get(ServiceHeader)
return serviceName == "" || serviceName == reqSvcName, serviceName
}
// RoundTrip implements the http.RoundTripper interface, making a YARPC HTTP outbound suitable as a
// Transport when constructing an HTTP Client. An HTTP client is suitable only for relative paths to
// a single outbound service. The HTTP outbound overrides the host:port portion of the URL of the
// provided request.
//
// Sample usage:
//
// client := http.Client{Transport: outbound}
//
// Thereafter use the Golang standard library HTTP to send requests with this client.
//
// ctx, cancel := context.WithTimeout(context.Background(), time.Second)
// defer cancel()
// req, err := http.NewRequest("GET", "http://example.com/", nil /* body */)
// req = req.WithContext(ctx)
// res, err := client.Do(req)
//
// All requests must have a deadline on the context.
// The peer chooser for raw HTTP requests will receive a YARPC transport.Request with no body.
//
// OpenTracing information must be added manually, before this call, to support context propagation.
func (o *Outbound) RoundTrip(hreq *http.Request) (*http.Response, error) {
return o.roundTrip(hreq, nil /* treq */, time.Now())
}
func (o *Outbound) roundTrip(hreq *http.Request, treq *transport.Request, start time.Time) (*http.Response, error) {
ctx := hreq.Context()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(
yarpcerrors.CodeInvalidArgument,
"missing context deadline")
}
ttl := deadline.Sub(start)
// When sending requests through the RoundTrip method, we construct the
// transport request from the HTTP headers as if it were an inbound
// request.
// The API for setting transport metadata for an outbound request when
	// using the go stdlib HTTP client is to use headers following the YARPC HTTP
// transport header conventions.
if treq == nil {
treq = &transport.Request{
Caller: hreq.Header.Get(CallerHeader),
Service: hreq.Header.Get(ServiceHeader),
Encoding: transport.Encoding(hreq.Header.Get(EncodingHeader)),
Procedure: hreq.Header.Get(ProcedureHeader),
ShardKey: hreq.Header.Get(ShardKeyHeader),
RoutingKey: hreq.Header.Get(RoutingKeyHeader),
RoutingDelegate: hreq.Header.Get(RoutingDelegateHeader),
Headers: applicationHeaders.FromHTTPHeaders(hreq.Header, transport.Headers{}),
}
}
if err := o.once.WaitUntilRunning(ctx); err != nil {
return nil, intyarpcerrors.AnnotateWithInfo(
yarpcerrors.FromError(err),
"error waiting for HTTP outbound to start for service: %s",
treq.Service)
}
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
hres, err := o.doWithPeer(ctx, hreq, treq, start, ttl, p)
// Call the onFinish method before returning (with the error from call with peer)
onFinish(err)
return hres, err
}
func (o *Outbound) doWithPeer(
ctx context.Context,
hreq *http.Request,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *httpPeer,
) (*http.Response, error) {
hreq.URL.Host = p.HostPort()
response, err := o.transport.client.Do(hreq.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
if err == context.DeadlineExceeded {
// Note that the connection experienced a timeout, which may
// indicate that the connection is half-open: the destination
// died without sending a TCP FIN packet.
p.OnSuspect()
end := time.Now()
return nil, yarpcerrors.Newf(
yarpcerrors.CodeDeadlineExceeded,
"client timeout for procedure %q of service %q after %v",
treq.Procedure, treq.Service, end.Sub(start))
}
// Note that the connection may have been lost so the peer connection
// maintenance loop resumes probing for availability.
p.OnDisconnected()
return nil, yarpcerrors.Newf(yarpcerrors.CodeUnknown, "unknown error from http client: %s", err.Error())
}
return response, nil
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 17,408 | I hate this name, open to suggestions. | yarpc-yarpc-go | go |
@@ -199,7 +199,13 @@ func (c *Client) locallyCacheResults(target *core.BuildTarget, digest *pb.Digest
}
data, _ := proto.Marshal(ar)
metadata.RemoteAction = data
- c.state.Cache.Store(target, c.localCacheKey(digest), nil)
+ // TODO(jpoole): Similar to retrieveTargetMetadataFromCache, it would be cleaner if we could store
+ // into the cache from an arbitrary reader.
+ if err := build.StoreTargetMetadata(target, metadata); err != nil {
+ log.Warning("%s", err)
+ return
+ }
+ c.state.Cache.Store(target, c.localCacheKey(digest), []string{target.TargetBuildMetadataFileName()})
}
// retrieveLocalResults retrieves locally cached results for a target if possible. | 1 | package remote
import (
"bytes"
"context"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
treesdk "github.com/bazelbuild/remote-apis-sdks/go/pkg/tree"
"io/ioutil"
"os"
"path"
"sort"
"strings"
"time"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/bazelbuild/remote-apis/build/bazel/semver"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
rpcstatus "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding/gzip"
"google.golang.org/grpc/status"
"github.com/thought-machine/please/src/build"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
// xattrName is the name we use to record attributes on files.
const xattrName = "user.plz_hash_remote"
// sum calculates a checksum for a byte slice.
func (c *Client) sum(b []byte) []byte {
h := c.state.PathHasher.NewHash()
h.Write(b)
return h.Sum(nil)
}
// targetOutputs returns the outputs for a previously executed target.
// If it has not been executed this returns nil.
func (c *Client) targetOutputs(label core.BuildLabel) *pb.Directory {
c.outputMutex.RLock()
defer c.outputMutex.RUnlock()
return c.outputs[label]
}
// setOutputs sets the outputs for a previously executed target.
func (c *Client) setOutputs(target *core.BuildTarget, ar *pb.ActionResult) error {
o := &pb.Directory{
Files: make([]*pb.FileNode, len(ar.OutputFiles)),
Directories: make([]*pb.DirectoryNode, 0, len(ar.OutputDirectories)),
Symlinks: make([]*pb.SymlinkNode, len(ar.OutputFileSymlinks)+len(ar.OutputDirectorySymlinks)),
}
// N.B. At this point the various things we stick into this Directory proto can be in
// subdirectories. This is not how a Directory proto is meant to work but it makes things
// a lot easier for us to handle (since it is impossible to merge two DirectoryNode protos
// without downloading their respective Directory protos). Later on we sort this out in
// uploadInputDir.
for i, f := range ar.OutputFiles {
o.Files[i] = &pb.FileNode{
Name: f.Path,
Digest: f.Digest,
IsExecutable: f.IsExecutable,
}
}
for _, d := range ar.OutputDirectories {
tree := &pb.Tree{}
if err := c.client.ReadProto(context.Background(), digest.NewFromProtoUnvalidated(d.TreeDigest), tree); err != nil {
return wrap(err, "Downloading tree digest for %s [%s]", d.Path, d.TreeDigest.Hash)
}
if outDir := maybeGetOutDir(d.Path, target.OutputDirectories); outDir != "" {
files, dirs, err := getOutputsForOutDir(target, outDir, tree)
if err != nil {
return err
}
o.Directories = append(o.Directories, dirs...)
o.Files = append(o.Files, files...)
} else {
o.Directories = append(o.Directories, &pb.DirectoryNode{
Name: d.Path,
Digest: c.digestMessage(tree.Root),
})
}
}
for i, s := range append(ar.OutputFileSymlinks, ar.OutputDirectorySymlinks...) {
o.Symlinks[i] = &pb.SymlinkNode{
Name: s.Path,
Target: s.Target,
}
}
c.outputMutex.Lock()
defer c.outputMutex.Unlock()
c.outputs[target.Label] = o
return nil
}
func getOutputsForOutDir(target *core.BuildTarget, outDir core.OutputDirectory, tree *pb.Tree) ([]*pb.FileNode, []*pb.DirectoryNode, error) {
files := make([]*pb.FileNode, 0, len(tree.Root.Files))
dirs := make([]*pb.DirectoryNode, 0, len(tree.Root.Directories))
if outDir.ShouldAddFiles() {
outs, err := treesdk.FlattenTree(tree, "")
if err != nil {
return nil, nil, err
}
for _, o := range outs {
if o.IsEmptyDirectory {
continue
}
target.AddOutput(o.Path)
}
}
for _, out := range tree.Root.Files {
if !outDir.ShouldAddFiles() {
target.AddOutput(out.Name)
}
files = append(files, out)
}
for _, out := range tree.Root.Directories {
if !outDir.ShouldAddFiles() {
target.AddOutput(out.Name)
}
dirs = append(dirs, out)
}
return files, dirs, nil
}
// maybeGetOutDir will get the output directory based on the directory provided. If there's no matching directory, this
// will return an empty string indicating that the action output was not an output directory.
func maybeGetOutDir(dir string, outDirs []core.OutputDirectory) core.OutputDirectory {
for _, outDir := range outDirs {
if dir == outDir.Dir() {
return outDir
}
}
return ""
}
// digestMessage calculates the digest of a proto message as described in the
// Digest message's comments.
func (c *Client) digestMessage(msg proto.Message) *pb.Digest {
digest, _ := c.digestMessageContents(msg)
return digest
}
// digestMessageContents is like digestMessage but returns the serialised contents as well.
func (c *Client) digestMessageContents(msg proto.Message) (*pb.Digest, []byte) {
b := mustMarshal(msg)
return c.digestBlob(b), b
}
// digestBlob digests a byteslice and returns the proto for it.
func (c *Client) digestBlob(b []byte) *pb.Digest {
sum := c.sum(b)
return &pb.Digest{
Hash: hex.EncodeToString(sum[:]),
SizeBytes: int64(len(b)),
}
}
// wrapActionErr wraps an error with information about the action related to it.
func (c *Client) wrapActionErr(err error, actionDigest *pb.Digest) error {
if err == nil || c.state.Config.Remote.DisplayURL == "" {
return err
}
return wrap(err, "Action URL: %s/action/%s/%s/%d/\n", c.state.Config.Remote.DisplayURL, c.state.Config.Remote.Instance, actionDigest.Hash, actionDigest.SizeBytes)
}
// actionURL returns a URL to the browser for a remote action, if the display URL is configured.
// If prefix is true then it is surrounded by "(action: %s)".
func (c *Client) actionURL(digest *pb.Digest, prefix bool) string {
if c.state.Config.Remote.DisplayURL == "" {
return ""
}
s := fmt.Sprintf("%s/action/%s/%s/%d/", c.state.Config.Remote.DisplayURL, c.state.Config.Remote.Instance, digest.Hash, digest.SizeBytes)
if prefix {
s = " (action: " + s + ")"
}
return s
}
// locallyCacheResults stores the actionresult for an action in the local (usually dir) cache.
func (c *Client) locallyCacheResults(target *core.BuildTarget, digest *pb.Digest, metadata *core.BuildMetadata, ar *pb.ActionResult) {
if c.state.Cache == nil {
return
}
data, _ := proto.Marshal(ar)
metadata.RemoteAction = data
c.state.Cache.Store(target, c.localCacheKey(digest), nil)
}
// retrieveLocalResults retrieves locally cached results for a target if possible.
// Note that this does not handle any file data, only the actionresult metadata.
func (c *Client) retrieveLocalResults(target *core.BuildTarget, digest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult) {
if c.state.Cache != nil {
if metadata := retrieveTargetMetadataFromCache(c, target, digest); metadata != nil && len(metadata.RemoteAction) > 0 {
ar := &pb.ActionResult{}
if err := proto.Unmarshal(metadata.RemoteAction, ar); err == nil {
if err := c.setOutputs(target, ar); err == nil {
return metadata, ar
}
}
}
}
return nil, nil
}
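// retrieveTargetMetadataFromCache fetches the locally cached build metadata file
// for a target and loads it, returning nil if it is not cached or cannot be read.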
func retrieveTargetMetadataFromCache(c *Client, target *core.BuildTarget, digest *pb.Digest) *core.BuildMetadata {
if c.state.Cache.Retrieve(target, c.localCacheKey(digest), []string{target.TargetBuildMetadataFileName()}) {
// TODO(jpoole): Retrieving the metadata file from the cache loads it into the target's output directory. This feels like a
// leaky abstraction. A cleaner solution might be to enable the caches to load files into a writer. We could then
// load metadata without having to save it to disk first.
md, err := build.LoadTargetMetadata(target)
if err != nil {
log.Warningf("failed to retrieve metadata from cache for target %v: %v", target.Label, err)
return nil
}
return md
}
return nil
}
// localCacheKey returns the key we use in the local cache for a target.
// This is not the same as the digest hash since it includes the instance name (allowing them to be stored separately)
func (c *Client) localCacheKey(digest *pb.Digest) []byte {
key, _ := hex.DecodeString(digest.Hash)
instance := c.state.Config.Remote.Instance
if len(instance) > len(key) {
instance = instance[:len(key)] // truncate so the XOR below stays within the key's length
}
for i := 0; i < len(instance); i++ {
key[i] ^= instance[i]
}
return key
}
// outputsExist returns true if the outputs for this target exist and are up to date.
func (c *Client) outputsExist(target *core.BuildTarget, digest *pb.Digest) bool {
hash, _ := hex.DecodeString(digest.Hash)
for _, out := range target.FullOutputs() {
if !bytes.Equal(hash, fs.ReadAttr(out, xattrName, c.state.XattrsSupported)) {
return false
}
}
return true
}
// recordAttrs sets the xattrs on output files which we will use in outputsExist in future runs.
func (c *Client) recordAttrs(target *core.BuildTarget, digest *pb.Digest) {
hash, _ := hex.DecodeString(digest.Hash)
for _, out := range target.FullOutputs() {
fs.RecordAttr(out, hash, xattrName, c.state.XattrsSupported)
}
}
// mustMarshal encodes a message to a binary string.
func mustMarshal(msg proto.Message) []byte {
b, err := proto.Marshal(msg)
if err != nil {
// Not really sure if there is a valid possibility to bring us here (given that
// the messages in question have no required fields) so assume it won't happen :)
log.Fatalf("Failed to marshal message: %s", err)
}
return b
}
// lessThan returns true if the given semver instance is less than another one.
func lessThan(a, b *semver.SemVer) bool {
if a.Major < b.Major {
return true
} else if a.Major > b.Major {
return false
} else if a.Minor < b.Minor {
return true
} else if a.Minor > b.Minor {
return false
} else if a.Patch < b.Patch {
return true
} else if a.Patch > b.Patch {
return false
}
return a.Prerelease < b.Prerelease
}
// printVer pretty-prints a semver message.
// The default stringing of them is so bad as to be completely unreadable.
func printVer(v *semver.SemVer) string {
msg := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.Prerelease != "" {
msg += "-" + v.Prerelease
}
return msg
}
// toTime converts a protobuf timestamp into a time.Time.
// It's like the ptypes one but we ignore errors (we don't generally care that much)
func toTime(ts *timestamp.Timestamp) time.Time {
t, _ := ptypes.Timestamp(ts)
return t
}
// IsNotFound returns true if a given error is a "not found" error (which may be treated
// differently, for example if trying to retrieve artifacts that may not be there).
func IsNotFound(err error) bool {
return status.Code(err) == codes.NotFound
}
// hasChild returns true if a Directory has a child directory by the given name.
func hasChild(dir *pb.Directory, child string) bool {
for _, d := range dir.Directories {
if d.Name == child {
return true
}
}
return false
}
// convertError converts a single google.rpc.Status message into a Go error
func convertError(err *rpcstatus.Status) error {
if err.Code == int32(codes.OK) {
return nil
}
msg := fmt.Errorf("%s", err.Message)
for _, detail := range err.Details {
msg = fmt.Errorf("%s %s", msg, detail.Value)
}
return msg
}
// wrap wraps a grpc error in an additional description, but retains its code.
func wrap(err error, msg string, args ...interface{}) error {
s, ok := status.FromError(err)
if !ok {
return fmt.Errorf(fmt.Sprintf(msg, args...) + ": " + err.Error())
}
return status.Errorf(s.Code(), fmt.Sprintf(msg, args...)+": "+s.Message())
}
// timeout returns either a build or test timeout from a target.
func timeout(target *core.BuildTarget, test bool) time.Duration {
if test {
return target.TestTimeout
}
return target.BuildTimeout
}
// outputs returns the outputs of a target, split arbitrarily and inaccurately
// into files and directories.
// After some discussion we are hoping that servers are permissive about this if
// we get it wrong; we prefer to make an effort though as a minor nicety.
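// For example (illustrative): an output named "report.txt" is classed as a file,
// while one named "generated_srcs" (no extension, not ending in "file", on a
// non-binary target) is classed as a directory.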
func outputs(target *core.BuildTarget) (files, dirs []string) {
outs := target.Outputs()
files = make([]string, 0, len(outs))
for _, out := range outs {
out = target.GetTmpOutput(out)
if !strings.ContainsRune(path.Base(out), '.') && !strings.HasSuffix(out, "file") && !target.IsBinary {
dirs = append(dirs, out)
} else {
files = append(files, out)
}
}
for _, out := range target.OutputDirectories {
dirs = append(dirs, out.Dir())
}
return files, dirs
}
// A dirBuilder is for helping build up a tree of Directory protos.
//
// This is pretty awkward; we need to recursively build a whole set of directories
// which does not match up to how we represent it (which is a series of files, with
// no corresponding directories, that are not usefully ordered for this purpose).
// We also need to handle the case of existing targets where we already know the
// directory structure but may not have the files physically on disk.
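//
// Illustrative usage (an editor's sketch, not taken from this repo; assumes a
// *Client c and example paths):
//
//   b := newDirBuilder(c)
//   b.Dir("plz-out/gen/pkg")       // ensure the directory and its parents exist
//   root := b.Root(nil)            // digest all directories; pass a channel to upload them too
//   tree := b.Tree("plz-out/gen")  // Tree proto rooted at a subdirectory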
type dirBuilder struct {
c *Client
root *pb.Directory
dirs map[string]*pb.Directory
}
func newDirBuilder(c *Client) *dirBuilder {
root := &pb.Directory{}
return &dirBuilder{
dirs: map[string]*pb.Directory{
".": root, // Ensure the root is in there
"": root, // Some things might try to name it this way
},
root: root,
c: c,
}
}
// Dir ensures the given directory exists, and constructs any necessary parents.
func (b *dirBuilder) Dir(name string) *pb.Directory {
return b.dir(name, "")
}
func (b *dirBuilder) dir(dir, child string) *pb.Directory {
if dir == "." || dir == "/" {
return b.root
}
dir = strings.TrimSuffix(dir, "/")
d, present := b.dirs[dir]
if !present {
d = &pb.Directory{}
b.dirs[dir] = d
dir, base := path.Split(dir)
b.dir(dir, base)
}
// TODO(peterebden): The linear scan in hasChild is a bit suboptimal, we should
// really use the dirs map to determine this.
if child != "" && !hasChild(d, child) {
d.Directories = append(d.Directories, &pb.DirectoryNode{Name: child})
}
return d
}
// Root returns the root directory, calculates the digests of all others and uploads them
// if the given channel is not nil.
func (b *dirBuilder) Root(ch chan<- *chunker.Chunker) *pb.Directory {
b.dfs(".", ch)
return b.root
}
// Node returns either the file or directory corresponding to the given path (or nil for both if not found)
func (b *dirBuilder) Node(name string) (*pb.DirectoryNode, *pb.FileNode) {
dir := b.Dir(path.Dir(name))
base := path.Base(name)
for _, d := range dir.Directories {
if d.Name == base {
return d, nil
}
}
for _, f := range dir.Files {
if f.Name == base {
return nil, f
}
}
return nil, nil
}
// Tree returns the tree rooted at a given directory name.
// It does not calculate digests or upload, so call Root beforehand if that is needed.
func (b *dirBuilder) Tree(root string) *pb.Tree {
d := b.dir(root, "")
tree := &pb.Tree{Root: d}
b.tree(tree, root, d)
return tree
}
func (b *dirBuilder) tree(tree *pb.Tree, root string, dir *pb.Directory) {
tree.Children = append(tree.Children, dir)
for _, d := range dir.Directories {
name := path.Join(root, d.Name)
b.tree(tree, name, b.dirs[name])
}
}
func (b *dirBuilder) dfs(name string, ch chan<- *chunker.Chunker) *pb.Digest {
dir := b.dirs[name]
for _, d := range dir.Directories {
if d.Digest == nil { // It's not nil if we're reusing outputs from an earlier call.
d.Digest = b.dfs(path.Join(name, d.Name), ch)
}
}
// The protocol requires that these are sorted into lexicographic order. Not all servers
// necessarily care, but some do, and we should be compliant.
sort.Slice(dir.Files, func(i, j int) bool { return dir.Files[i].Name < dir.Files[j].Name })
sort.Slice(dir.Directories, func(i, j int) bool { return dir.Directories[i].Name < dir.Directories[j].Name })
sort.Slice(dir.Symlinks, func(i, j int) bool { return dir.Symlinks[i].Name < dir.Symlinks[j].Name })
chomk, _ := chunker.NewFromProto(dir, int(chunker.DefaultChunkSize))
if ch != nil {
ch <- chomk
}
return chomk.Digest().ToProto()
}
// convertPlatform converts the platform entries from the config into a Platform proto.
func convertPlatform(config *core.Configuration) *pb.Platform {
platform := &pb.Platform{}
for _, p := range config.Remote.Platform {
if parts := strings.SplitN(p, "=", 2); len(parts) == 2 {
platform.Properties = append(platform.Properties, &pb.Platform_Property{
Name: parts[0],
Value: parts[1],
})
} else {
log.Warning("Invalid config setting in remote.platform %s; will ignore", p)
}
}
return platform
}
// removeOutputs removes all outputs for a target.
func removeOutputs(target *core.BuildTarget) error {
outDir := target.OutDir()
for _, out := range target.Outputs() {
if err := os.RemoveAll(path.Join(outDir, out)); err != nil {
return fmt.Errorf("Failed to remove output for %s: %s", target, err)
}
}
return nil
}
// subresourceIntegrity returns a string corresponding to a target's hashes in the Subresource Integrity format.
func subresourceIntegrity(target *core.BuildTarget) string {
ret := make([]string, len(target.Hashes))
for i, h := range target.Hashes {
ret[i] = reencodeSRI(target, h)
}
return strings.Join(ret, " ")
}
// reencodeSRI re-encodes a hash from the hex format we use to base64-encoded.
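// For example (illustrative), a hash configured as "sha256:<64 hex characters>" becomes
// "sha256-" followed by the standard base64 encoding of the same 32 raw bytes.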
func reencodeSRI(target *core.BuildTarget, h string) string {
if idx := strings.LastIndexByte(h, ':'); idx != -1 {
h = h[idx+1:]
}
// TODO(peterebden): we should validate at parse time that these are sensible.
b, _ := hex.DecodeString(h)
h = base64.StdEncoding.EncodeToString(b)
if len(b) == sha256.Size {
return "sha256-" + h
} else if len(b) == sha1.Size {
return "sha1-" + h
}
log.Warning("Hash string of unknown type on %s: %s", target, h)
return h
}
// updateHashFilename updates an output filename for a hash_filegroup.
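// For example (illustrative), "report.txt" combined with a digest becomes
// "report-<url-safe base64 of the digest bytes>.txt".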
func updateHashFilename(name string, digest *pb.Digest) string {
ext := path.Ext(name)
before := name[:len(name)-len(ext)]
b, _ := hex.DecodeString(digest.Hash)
return before + "-" + base64.RawURLEncoding.EncodeToString(b) + ext
}
// dialOpts returns a set of dial options to apply based on the config.
func (c *Client) dialOpts() ([]grpc.DialOption, error) {
// Set an arbitrarily large (400MB) max message size so it isn't a limitation.
callOpts := []grpc.CallOption{grpc.MaxCallRecvMsgSize(419430400)}
if c.state.Config.Remote.Gzip {
callOpts = append(callOpts, grpc.UseCompressor(gzip.Name))
}
opts := []grpc.DialOption{
grpc.WithStatsHandler(c.stats),
grpc.WithDefaultCallOptions(callOpts...),
}
if c.state.Config.Remote.TokenFile == "" {
return opts, nil
}
token, err := ioutil.ReadFile(c.state.Config.Remote.TokenFile)
if err != nil {
return opts, fmt.Errorf("Failed to load token from file: %s", err)
}
return append(opts, grpc.WithPerRPCCredentials(preSharedToken(string(token)))), nil
}
// preSharedToken returns a gRPC credential provider for a pre-shared token.
func preSharedToken(token string) tokenCredProvider {
return tokenCredProvider{
"authorization": "Bearer " + strings.TrimSpace(token),
}
}
type tokenCredProvider map[string]string
func (cred tokenCredProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return cred, nil
}
func (cred tokenCredProvider) RequireTransportSecurity() bool {
return false // Allow these to be provided over an insecure channel; this facilitates e.g. service meshes like Istio.
}
| 1 | 9,078 | Does this log line work? We need Warningf or just warning without the format string. | thought-machine-please | go |
@@ -62,6 +62,7 @@ func New(checkpointer export.Checkpointer, exporter export.Exporter, opts ...Opt
impl := sdk.NewAccumulator(
checkpointer,
sdk.WithResource(c.Resource),
+ sdk.WithMetricsProcessors(c.MetricsProcessors...),
)
return &Controller{
provider: registry.NewMeterProvider(impl), | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push // import "go.opentelemetry.io/otel/sdk/metric/controller/push"
import (
"context"
"sync"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/global"
"go.opentelemetry.io/otel/registry"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
)
// DefaultPushPeriod is the default time interval between pushes.
const DefaultPushPeriod = 10 * time.Second
// Controller organizes a periodic push of metric data.
type Controller struct {
lock sync.Mutex
accumulator *sdk.Accumulator
provider *registry.MeterProvider
checkpointer export.Checkpointer
exporter export.Exporter
wg sync.WaitGroup
ch chan struct{}
period time.Duration
timeout time.Duration
clock controllerTime.Clock
ticker controllerTime.Ticker
}
// New constructs a Controller, an implementation of MeterProvider, using the
// provided checkpointer, exporter, and options to configure an SDK with
// periodic collection.
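//
// Illustrative usage (an editor's sketch, not taken from this repo; assumes
// checkpointer and exporter values satisfying export.Checkpointer and
// export.Exporter, and a WithPeriod option that sets Config.Period):
//
//   pusher := New(checkpointer, exporter, WithPeriod(5*time.Second))
//   pusher.Start()
//   defer pusher.Stop()
//   meter := pusher.MeterProvider().Meter("example")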
func New(checkpointer export.Checkpointer, exporter export.Exporter, opts ...Option) *Controller {
c := &Config{
Period: DefaultPushPeriod,
}
for _, opt := range opts {
opt.Apply(c)
}
if c.Timeout == 0 {
c.Timeout = c.Period
}
impl := sdk.NewAccumulator(
checkpointer,
sdk.WithResource(c.Resource),
)
return &Controller{
provider: registry.NewMeterProvider(impl),
accumulator: impl,
checkpointer: checkpointer,
exporter: exporter,
ch: make(chan struct{}),
period: c.Period,
timeout: c.Timeout,
clock: controllerTime.RealClock{},
}
}
// SetClock supports setting a mock clock for testing. This must be
// called before Start().
func (c *Controller) SetClock(clock controllerTime.Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = clock
}
// MeterProvider returns a MeterProvider instance for this controller.
func (c *Controller) MeterProvider() otel.MeterProvider {
return c.provider
}
// Start begins a ticker that periodically collects and exports
// metrics with the configured interval.
func (c *Controller) Start() {
c.lock.Lock()
defer c.lock.Unlock()
if c.ticker != nil {
return
}
c.ticker = c.clock.Ticker(c.period)
c.wg.Add(1)
go c.run(c.ch)
}
// Stop waits for the background goroutine to return and then collects
// and exports metrics one last time before returning.
func (c *Controller) Stop() {
c.lock.Lock()
defer c.lock.Unlock()
if c.ch == nil {
return
}
close(c.ch)
c.ch = nil
c.wg.Wait()
c.ticker.Stop()
c.tick()
}
func (c *Controller) run(ch chan struct{}) {
for {
select {
case <-ch:
c.wg.Done()
return
case <-c.ticker.C():
c.tick()
}
}
}
func (c *Controller) tick() {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
ckpt := c.checkpointer.CheckpointSet()
ckpt.Lock()
defer ckpt.Unlock()
c.checkpointer.StartCollection()
c.accumulator.Collect(ctx)
if err := c.checkpointer.FinishCollection(); err != nil {
global.Handle(err)
}
if err := c.exporter.Export(ctx, ckpt); err != nil {
global.Handle(err)
}
}
| 1 | 13,580 | The pull controller would need similar updates. | open-telemetry-opentelemetry-go | go |
@@ -1,5 +1,5 @@
/**
- * Copyright 2015-2017 The OpenZipkin Authors
+ * Copyright 2015-2018 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at | 1 | /**
* Copyright 2015-2017 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.server.brave;
import brave.Tracer;
import brave.sampler.BoundarySampler;
import brave.sampler.Sampler;
import com.github.kristofa.brave.Brave;
import com.github.kristofa.brave.InheritableServerClientAndLocalSpanState;
import com.github.kristofa.brave.ServerClientAndLocalSpanState;
import com.github.kristofa.brave.TracerAdapter;
import java.io.IOException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Lazy;
import org.springframework.context.annotation.Scope;
import zipkin.Codec;
import zipkin.Endpoint;
import zipkin.Span;
import zipkin.collector.CollectorMetrics;
import zipkin.internal.Nullable;
import zipkin.reporter.AsyncReporter;
import zipkin.reporter.Callback;
import zipkin.reporter.Encoding;
import zipkin.reporter.Reporter;
import zipkin.reporter.ReporterMetrics;
import zipkin.reporter.Sender;
import zipkin.server.ConditionalOnSelfTracing;
import zipkin.storage.StorageComponent;
import static zipkin.internal.Util.propagateIfFatal;
@Configuration
@ConditionalOnSelfTracing
@Import(ApiTracerConfiguration.class)
public class BraveConfiguration {
/** This gets the LAN IP without trying to look up its name. */
// http://stackoverflow.com/questions/8765578/get-local-ip-address-without-connecting-to-the-internet
@Bean
@Scope Endpoint local(@Value("${server.port:9411}") int port) {
Endpoint.Builder builder = Endpoint.builder()
.serviceName("zipkin-server")
.port(port == -1 ? 0 : port);
try {
InetAddress address = Collections.list(NetworkInterface.getNetworkInterfaces()).stream()
.flatMap(i -> Collections.list(i.getInetAddresses()).stream())
.filter(InetAddress::isSiteLocalAddress)
.findAny().get();
builder.parseIp(address);
} catch (Exception ignored) {
}
return builder.build();
}
// Note: there's a chicken or egg problem here. TracedStorageComponent wraps StorageComponent with
// Brave. During initialization, if we eagerly reference StorageComponent from within Brave,
// BraveTracedStorageComponentEnhancer won't be able to process it. TL;DR; if you take out Lazy
// here, self-tracing will not affect the storage component, which reduces its effectiveness.
@Bean Reporter<Span> reporter(@Lazy StorageComponent storage,
@Value("${zipkin.self-tracing.flush-interval:1}") int flushInterval,
CollectorMetrics metrics) {
return AsyncReporter.builder(new LocalSender(storage))
.messageTimeout(flushInterval, TimeUnit.SECONDS)
.metrics(new ReporterMetricsAdapter(metrics.forTransport("local"))).build();
}
@Bean ServerClientAndLocalSpanState braveState(@Qualifier("local") Endpoint local) {
com.twitter.zipkin.gen.Endpoint braveEndpoint = com.twitter.zipkin.gen.Endpoint.builder()
.ipv4(local.ipv4)
.ipv6(local.ipv6)
.port(local.port)
.serviceName(local.serviceName)
.build();
return new InheritableServerClientAndLocalSpanState(braveEndpoint);
}
@Bean Tracer braveTracer(Reporter<Span> reporter, @Qualifier("local") Endpoint local,
@Value("${zipkin.self-tracing.sample-rate:1.0}") float rate) {
return Tracer.newBuilder()
.localEndpoint(local)
.sampler(rate < 0.01 ? BoundarySampler.create(rate) : Sampler.create(rate))
.reporter(reporter)
.build();
}
@Bean Brave brave(Tracer braveTracer, ServerClientAndLocalSpanState braveState) {
return TracerAdapter.newBrave(braveTracer, braveState);
}
/**
* Defined locally as StorageComponent is a lazy proxy, and we need to avoid eagerly calling it.
*/
static final class LocalSender implements Sender {
private final StorageComponent delegate;
LocalSender(StorageComponent delegate) {
this.delegate = delegate;
}
@Override public Encoding encoding() {
return Encoding.THRIFT;
}
@Override public int messageMaxBytes() {
return 5 * 1024 * 1024; // arbitrary
}
@Override public int messageSizeInBytes(List<byte[]> list) {
return Encoding.THRIFT.listSizeInBytes(list);
}
@Override public void sendSpans(List<byte[]> encodedSpans, Callback callback) {
try {
List<Span> spans = new ArrayList<>(encodedSpans.size());
for (byte[] encodedSpan : encodedSpans) {
spans.add(Codec.THRIFT.readSpan(encodedSpan));
}
delegate.asyncSpanConsumer().accept(spans, new CallbackAdapter(callback));
} catch (Throwable t) {
propagateIfFatal(t);
callback.onError(t);
}
}
@Override public CheckResult check() {
return CheckResult.OK;
}
@Override public void close() throws IOException {
}
}
static final class CallbackAdapter implements zipkin.storage.Callback<Void> {
final Callback delegate;
CallbackAdapter(Callback delegate) {
this.delegate = delegate;
}
@Override public void onSuccess(@Nullable Void aVoid) {
delegate.onComplete();
}
@Override public void onError(Throwable throwable) {
delegate.onError(throwable);
}
}
static final class ReporterMetricsAdapter implements ReporterMetrics {
final CollectorMetrics delegate;
ReporterMetricsAdapter(CollectorMetrics delegate) {
this.delegate = delegate;
}
@Override public void incrementMessages() {
delegate.incrementMessages();
}
@Override public void incrementMessagesDropped(Throwable throwable) {
delegate.incrementMessagesDropped();
}
@Override public void incrementSpans(int i) {
delegate.incrementSpans(i);
}
@Override public void incrementSpanBytes(int i) {
delegate.incrementBytes(i);
}
@Override public void incrementMessageBytes(int i) {
}
@Override public void incrementSpansDropped(int i) {
delegate.incrementMessagesDropped();
}
@Override public void updateQueuedSpans(int i) {
}
@Override public void updateQueuedBytes(int i) {
}
}
}
| 1 | 13,202 | ignore this.. just didn't want to litter with a commit update to satisfy license plugin | openzipkin-zipkin | java |
@@ -807,8 +807,8 @@ module Beaker
end
if host['is_cygwin'].nil? or host['is_cygwin'] == true
- dest = "/cygdrive/c/Windows/Temp/#{host['dist']}.msi"
- on host, "curl -O #{dest} #{link}"
+ dest = "#{host['dist']}.msi"
+ on host, "curl -O #{link}"
#Because the msi installer doesn't add Puppet to the environment path
#Add both potential paths for simplicity | 1 | require 'pathname'
module Beaker
module DSL
#
# This module contains methods to help with cloning repositories, extracting git info,
# ordering Puppet packages, and installing ruby projects that
# contain an `install.rb` script.
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *options* that provides an options hash, see {Beaker::Options::OptionsHash}
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
module InstallUtils
# The default install path
SourcePath = "/opt/puppet-git-repos"
# A regex to know if the uri passed is pointing to a git repo
GitURI = %r{^(git|https?|file)://|^git@|^gitmirror@}
# Github's ssh signature for cloning via ssh
GitHubSig = 'github.com,207.97.227.239 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=='
# The directories in the module directory that will not be scp-ed to the test system when using `copy_module_to`
PUPPET_MODULE_INSTALL_IGNORE = ['.bundle', '.git', '.idea', '.vagrant', '.vendor', 'vendor', 'acceptance', 'bundle', 'spec', 'tests', 'log']
# @param [String] uri A uri in the format of <git uri>#<revision>
# the `git://`, `http://`, `https://`, and ssh
# (if cloning as the remote git user) protocols
# are valid for <git uri>
#
# @example Usage
# project = extract_repo_info_from '[email protected]:puppetlabs/SuperSecretSauce#what_is_justin_doing'
#
# puts project[:name]
# #=> 'SuperSecretSauce'
#
# puts project[:rev]
# #=> 'what_is_justin_doing'
#
# @return [Hash{Symbol=>String}] Returns a hash containing the project
# name, repository path, and revision
# (defaults to HEAD)
#
# @api dsl
def extract_repo_info_from uri
project = {}
repo, rev = uri.split('#', 2)
project[:name] = Pathname.new(repo).basename('.git').to_s
project[:path] = repo
project[:rev] = rev || 'HEAD'
return project
end
# Takes an array of package info hashes (like that returned from
# {#extract_repo_info_from}) and sorts the `puppet`, `facter`, `hiera`
# packages so that puppet's dependencies will be installed first.
#
# @!visibility private
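#
# @example (illustrative) puppet's dependencies are moved to the front
#   order_packages([{:name => 'puppet'}, {:name => 'facter'}, {:name => 'beaker'}])
#   #=> [{:name => 'facter'}, {:name => 'puppet'}, {:name => 'beaker'}]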
def order_packages packages_array
puppet = packages_array.select {|e| e[:name] == 'puppet' }
puppet_depends_on = packages_array.select do |e|
e[:name] == 'hiera' or e[:name] == 'facter'
end
depends_on_puppet = (packages_array - puppet) - puppet_depends_on
[puppet_depends_on, puppet, depends_on_puppet].flatten
end
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
# @param [String] path The path on the remote [host] to the repository
# @param [Hash{Symbol=>String}] repository A hash representing repo
# info like that emitted by
# {#extract_repo_info_from}
#
# @example Getting multiple project versions
# versions = [puppet_repo, facter_repo, hiera_repo].inject({}) do |vers, repo_info|
# vers.merge(find_git_repo_versions(host, '/opt/git-puppet-repos', repo_info) )
# end
# @return [Hash] Executes git describe on [host] and returns a Hash
# with the key of [repository[:name]] and value of
# the output from git describe.
#
# @note This requires the helper methods:
# * {Beaker::DSL::Structure#step}
# * {Beaker::DSL::Helpers#on}
#
# @api dsl
def find_git_repo_versions host, path, repository
version = {}
step "Grab version for #{repository[:name]}" do
on host, "cd #{path}/#{repository[:name]} && " +
"git describe || true" do
version[repository[:name]] = stdout.chomp
end
end
version
end
# Clones the given git repository onto the host at the given path, checks out
# the requested revision and, if the repository provides an install.rb, runs it
# to install the project.
#
# @see #find_git_repo_versions
def install_from_git host, path, repository
name = repository[:name]
repo = repository[:path]
rev = repository[:rev]
depth = repository[:depth]
depth_branch = repository[:depth_branch]
target = "#{path}/#{name}"
if (depth_branch.nil?)
depth_branch = rev
end
clone_cmd = "git clone #{repo} #{target}"
if (depth)
clone_cmd = "git clone --branch #{depth_branch} --depth #{depth} #{repo} #{target}"
end
step "Clone #{repo} if needed" do
on host, "test -d #{path} || mkdir -p #{path}"
on host, "test -d #{target} || #{clone_cmd}"
end
step "Update #{name} and check out revision #{rev}" do
commands = ["cd #{target}",
"remote rm origin",
"remote add origin #{repo}",
"fetch origin +refs/pull/*:refs/remotes/origin/pr/* +refs/heads/*:refs/remotes/origin/*",
"clean -fdx",
"checkout -f #{rev}"]
on host, commands.join(" && git ")
end
step "Install #{name} on the system" do
# The solaris ruby IPS package has bindir set to /usr/ruby/1.8/bin.
# However, this is not the path to which we want to deliver our
# binaries. So if we are using solaris, we have to pass the bin and
# sbin directories to the install.rb
install_opts = ''
install_opts = '--bindir=/usr/bin --sbindir=/usr/sbin' if
host['platform'].include? 'solaris'
on host, "cd #{target} && " +
"if [ -f install.rb ]; then " +
"ruby ./install.rb #{install_opts}; " +
"else true; fi"
end
end
#Create the PE install command string based upon the host and options settings
# @param [Host] host The host that PE is to be installed on
# For UNIX machines using the full PE installer, the host object must have the 'pe_installer' field set correctly.
# @param [Hash{Symbol=>String}] opts The options
# @option opts [String] :pe_ver_win Default PE version to install or upgrade to on Windows hosts
# (Otherwise uses individual Windows hosts pe_ver)
# @option opts [String] :pe_ver Default PE version to install or upgrade to
# (Otherwise uses individual hosts pe_ver)
# @option opts [Boolean] :pe_debug (false) Should we run the installer in debug mode?
# @example
# on host, "#{installer_cmd(host, opts)} -a #{host['working_dir']}/answers"
# @api private
def installer_cmd(host, opts)
version = host['pe_ver'] || opts[:pe_ver]
if host['platform'] =~ /windows/
log_file = "#{File.basename(host['working_dir'])}.log"
pe_debug = host[:pe_debug] || opts[:pe_debug] ? " && cat #{log_file}" : ''
"cd #{host['working_dir']} && cmd /C 'start /w msiexec.exe /qn /L*V #{log_file} /i #{host['dist']}.msi PUPPET_MASTER_SERVER=#{master} PUPPET_AGENT_CERTNAME=#{host}'#{pe_debug}"
# Frictionless install didn't exist pre-3.2.0, so in that case we fall
# through and do a regular install.
elsif host['roles'].include? 'frictionless' and ! version_is_less(version, '3.2.0')
# PE 3.4 introduced the ability to pass in config options to the bash script in the form
# of <section>:<key>=<value>
frictionless_install_opts = []
if host.has_key?('frictionless_options') and ! version_is_less(version, '3.4.0')
# since we have options to pass in, we need to tell the bash script
host['frictionless_options'].each do |section, settings|
settings.each do |key, value|
frictionless_install_opts << "#{section}:#{key}=#{value}"
end
end
end
pe_debug = host[:pe_debug] || opts[:pe_debug] ? ' -x' : ''
"cd #{host['working_dir']} && curl --tlsv1 -kO https://#{master}:8140/packages/#{version}/install.bash && bash#{pe_debug} install.bash #{frictionless_install_opts.join(' ')}".strip
elsif host['platform'] =~ /osx/
version = host['pe_ver'] || opts[:pe_ver]
pe_debug = host[:pe_debug] || opts[:pe_debug] ? ' -verboseR' : ''
"cd #{host['working_dir']} && hdiutil attach #{host['dist']}.dmg && installer#{pe_debug} -pkg /Volumes/puppet-enterprise-#{version}/puppet-enterprise-installer-#{version}.pkg -target /"
elsif host['platform'] =~ /eos/
commands = ['enable', "extension puppet-enterprise-#{version}-#{host['platform']}.swix"]
command = commands.join("\n")
"Cli -c '#{command}'"
else
pe_debug = host[:pe_debug] || opts[:pe_debug] ? ' -D' : ''
"cd #{host['working_dir']}/#{host['dist']} && ./#{host['pe_installer']}#{pe_debug} -a #{host['working_dir']}/answers"
end
end
#Create the Higgs install command string based upon the host and options settings. Installation command will be run as a
#background process. The output of the command will be stored in the provided host['higgs_file'].
# @param [Host] host The host that Higgs is to be installed on
# The host object must have the 'working_dir', 'dist' and 'pe_installer' field set correctly.
# @api private
def higgs_installer_cmd host
"cd #{host['working_dir']}/#{host['dist']} ; nohup ./#{host['pe_installer']} <<<Y > #{host['higgs_file']} 2>&1 &"
end
#Determine is a given URL is accessible
#@param [String] link The URL to examine
#@return [Boolean] true if the URL has a '200' HTTP response code, false otherwise
#@example
# extension = link_exists?("#{URL}.tar.gz") ? ".tar.gz" : ".tar"
# @api private
def link_exists?(link)
require "net/http"
require "net/https"
require "open-uri"
url = URI.parse(link)
http = Net::HTTP.new(url.host, url.port)
http.use_ssl = (url.scheme == 'https')
http.start do |http|
return http.head(url.request_uri).code == "200"
end
end
# Fetch file_name from the given base_url into dst_dir.
#
# @param [String] base_url The base url from which to recursively download
# files.
# @param [String] file_name The trailing name component of both the source url
# and the destination file.
# @param [String] dst_dir The local destination directory.
#
# @return [String] dst The name of the newly-created file.
#
# @!visibility private
def fetch_http_file(base_url, file_name, dst_dir)
FileUtils.makedirs(dst_dir)
src = "#{base_url}/#{file_name}"
dst = File.join(dst_dir, file_name)
if File.exists?(dst)
logger.notify "Already fetched #{dst}"
else
logger.notify "Fetching: #{src}"
logger.notify " and saving to #{dst}"
open(src) do |remote|
File.open(dst, "w") do |file|
FileUtils.copy_stream(remote, file)
end
end
end
return dst
end
# Recursively fetch the contents of the given http url, ignoring
# `index.html` and `*.gif` files.
#
# @param [String] url The base http url from which to recursively download
# files.
# @param [String] dst_dir The local destination directory.
#
# @return [String] dst The name of the newly-created subdirectory of
# dst_dir.
#
# @!visibility private
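#
# @example (illustrative) fetch_http_dir("http://example.com/archives/2015/build/", "/tmp")
#   downloads the remote directory into "/tmp/build"; wget's -nH and --cut-dirs
#   strip the host name and the leading "archives/2015" path components.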
def fetch_http_dir(url, dst_dir)
logger.notify "fetch_http_dir (url: #{url}, dst_dir #{dst_dir})"
if url[-1, 1] !~ /\//
url += '/'
end
url = URI.parse(url)
chunks = url.path.split('/')
dst = File.join(dst_dir, chunks.last)
#determine directory structure to cut
#only want to keep the last directory, thus cut total number of dirs - 2 (hostname + last dir name)
cut = chunks.length - 2
wget_command = "wget -nv -P #{dst_dir} --reject \"index.html*\",\"*.gif\" --cut-dirs=#{cut} -np -nH --no-check-certificate -r #{url}"
logger.notify "Fetching remote directory: #{url}"
logger.notify " and saving to #{dst}"
logger.notify " using command: #{wget_command}"
#in ruby 1.9+ we can upgrade this to popen3 to gain access to the subprocess pid
result = `#{wget_command} 2>&1`
result.each_line do |line|
logger.debug(line)
end
if $?.to_i != 0
raise "Failed to fetch_remote_dir '#{url}' (exit code #{$?}"
end
dst
end
#Determine the PE package to download/upload on a mac host, download/upload that package onto the host.
# Assumed file name format: puppet-enterprise-3.3.0-rc1-559-g97f0833-osx-10.9-x86_64.dmg.
# @param [Host] host The mac host to download/upload and unpack PE onto
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @api private
def fetch_puppet_on_mac(host, opts)
path = host['pe_dir'] || opts[:pe_dir]
local = File.directory?(path)
filename = "#{host['dist']}"
extension = ".dmg"
if local
if not File.exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
scp_to host, "#{path}/#{filename}#{extension}", "#{host['working_dir']}/#{filename}#{extension}"
else
if not link_exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
on host, "cd #{host['working_dir']}; curl -O #{path}/#{filename}#{extension}"
end
end
#Determine the PE package to download/upload on a windows host, download/upload that package onto the host.
#Assumed file name format: puppet-enterprise-3.3.0-rc1-559-g97f0833.msi
# @param [Host] host The windows host to download/upload and unpack PE onto
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @option opts [String] :pe_ver_win Default PE version to install or upgrade to
# (Otherwise uses individual hosts pe_ver)
# @api private
def fetch_puppet_on_windows(host, opts)
path = host['pe_dir'] || opts[:pe_dir]
local = File.directory?(path)
version = host['pe_ver'] || opts[:pe_ver_win]
filename = "#{host['dist']}"
extension = ".msi"
if local
if not File.exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
scp_to host, "#{path}/#{filename}#{extension}", "#{host['working_dir']}/#{filename}#{extension}"
else
if not link_exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
on host, "cd #{host['working_dir']}; curl -O #{path}/#{filename}#{extension}"
end
end
#Determine the PE package to download/upload on a unix style host, download/upload that package onto the host
#and unpack it.
# @param [Host] host The unix style host to download/upload and unpack PE onto
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @api private
def fetch_puppet_on_unix(host, opts)
path = host['pe_dir'] || opts[:pe_dir]
local = File.directory?(path)
filename = "#{host['dist']}"
if local
extension = File.exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
if not File.exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
scp_to host, "#{path}/#{filename}#{extension}", "#{host['working_dir']}/#{filename}#{extension}"
if extension =~ /gz/
on host, "cd #{host['working_dir']}; gunzip #{filename}#{extension}"
end
if extension =~ /tar/
on host, "cd #{host['working_dir']}; tar -xvf #{filename}.tar"
end
else
if host['platform'] =~ /eos/
extension = '.swix'
else
extension = link_exists?("#{path}/#{filename}.tar.gz") ? ".tar.gz" : ".tar"
end
if not link_exists?("#{path}/#{filename}#{extension}")
raise "attempting installation on #{host}, #{path}/#{filename}#{extension} does not exist"
end
if host['platform'] =~ /eos/
commands = ['enable', "copy #{path}/#{filename}#{extension} extension:"]
command = commands.join("\n")
on host, "Cli -c '#{command}'"
else
unpack = 'tar -xvf -'
unpack = extension =~ /gz/ ? 'gunzip | ' + unpack : unpack
on host, "cd #{host['working_dir']}; curl #{path}/#{filename}#{extension} | #{unpack}"
end
end
end
#Determine the PE package to download/upload per-host, download/upload that package onto the host
#and unpack it.
# @param [Array<Host>] hosts The hosts to download/upload and unpack PE onto
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @option opts [String] :pe_ver Default PE version to install or upgrade to
# (Otherwise uses individual hosts pe_ver)
# @option opts [String] :pe_ver_win Default PE version to install or upgrade to on Windows hosts
# (Otherwise uses individual Windows hosts pe_ver)
# @api private
def fetch_puppet(hosts, opts)
hosts.each do |host|
# We install Puppet from the master for frictionless installs, so we don't need to *fetch* anything
next if host['roles'].include?('frictionless') && (! version_is_less(opts[:pe_ver] || host['pe_ver'], '3.2.0'))
if host['platform'] =~ /windows/
fetch_puppet_on_windows(host, opts)
elsif host['platform'] =~ /osx/
fetch_puppet_on_mac(host, opts)
else
fetch_puppet_on_unix(host, opts)
end
end
end
#Classify the master so that it can deploy frictionless packages for a given host.
# @param [Host] host The host to install packages for
# @api private
def deploy_frictionless_to_master(host)
klass = host['platform'].gsub(/-/, '_').gsub(/\./,'')
klass = "pe_repo::platform::#{klass}"
on dashboard, "cd /opt/puppet/share/puppet-dashboard && /opt/puppet/bin/bundle exec /opt/puppet/bin/rake nodeclass:add[#{klass},skip]"
on dashboard, "cd /opt/puppet/share/puppet-dashboard && /opt/puppet/bin/bundle exec /opt/puppet/bin/rake node:add[#{master},,,skip]"
on dashboard, "cd /opt/puppet/share/puppet-dashboard && /opt/puppet/bin/bundle exec /opt/puppet/bin/rake node:addclass[#{master},#{klass}]"
on master, puppet("agent -t"), :acceptable_exit_codes => [0,2]
end
#Perform a Puppet Enterprise upgrade or install
# @param [Array<Host>] hosts The hosts to install or upgrade PE on
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @option opts [String] :pe_ver Default PE version to install or upgrade to
# (Otherwise uses individual hosts pe_ver)
# @option opts [String] :pe_ver_win Default PE version to install or upgrade to on Windows hosts
# (Otherwise uses individual Windows hosts pe_ver)
# @option opts [Symbol] :type (:install) One of :upgrade or :install
# @option opts [Hash<String>] :answers Pre-set answers based upon ENV vars and defaults
# (See {Beaker::Options::Presets.env_vars})
#
# @example
# do_install(hosts, {:type => :upgrade, :pe_dir => path, :pe_ver => version, :pe_ver_win => version_win})
#
# @api private
#
def do_install hosts, opts = {}
masterless = (defined? options) ? options[:masterless] : false
opts[:masterless] = masterless # has to pass masterless down for answer generation awareness
opts[:type] = opts[:type] || :install
unless masterless
pre30database = version_is_less(opts[:pe_ver] || database['pe_ver'], '3.0')
pre30master = version_is_less(opts[:pe_ver] || master['pe_ver'], '3.0')
unless version_is_less(opts[:pe_ver] || master['pe_ver'], '3.4')
master['puppetservice'] = 'pe-puppetserver'
end
end
# Set PE distribution for all the hosts, create working dir
use_all_tar = ENV['PE_USE_ALL_TAR'] == 'true'
hosts.each do |host|
host['pe_installer'] ||= 'puppet-enterprise-installer'
if host['platform'] !~ /windows|osx/
platform = use_all_tar ? 'all' : host['platform']
version = host['pe_ver'] || opts[:pe_ver]
host['dist'] = "puppet-enterprise-#{version}-#{platform}"
elsif host['platform'] =~ /osx/
version = host['pe_ver'] || opts[:pe_ver]
host['dist'] = "puppet-enterprise-#{version}-#{host['platform']}"
elsif host['platform'] =~ /windows/
version = host[:pe_ver] || opts['pe_ver_win']
#only install 64bit builds if
# - we are on pe version 3.4+
# - we do not have install_32 set on host
# - we do not have install_32 set globally
if !(version_is_less(version, '3.4')) and host.is_x86_64? and not host['install_32'] and not opts['install_32']
host['dist'] = "puppet-enterprise-#{version}-x64"
else
host['dist'] = "puppet-enterprise-#{version}"
end
end
host['working_dir'] = host.tmpdir(Time.new.strftime("%Y-%m-%d_%H.%M.%S"))
end
fetch_puppet(hosts, opts)
install_hosts = hosts.dup
unless masterless
# If we're installing a database version less than 3.0, ignore the database host
install_hosts.delete(database) if pre30database and database != master and database != dashboard
end
install_hosts.each do |host|
if host['platform'] =~ /windows/
on host, installer_cmd(host, opts)
else
# We only need answers if we're using the classic installer
version = host['pe_ver'] || opts[:pe_ver]
if host['roles'].include?('frictionless') && (! version_is_less(version, '3.2.0'))
# If We're *not* running the classic installer, we want
# to make sure the master has packages for us.
deploy_frictionless_to_master(host)
on host, installer_cmd(host, opts)
elsif host['platform'] =~ /osx|eos/
# If we're not frictionless, we need to run the OSX special-case
on host, installer_cmd(host, opts)
#set the certname and master
on host, puppet("config set server #{master}")
on host, puppet("config set certname #{host}")
#run once to request cert
acceptable_codes = host['platform'] =~ /osx/ ? [1] : [0, 1]
on host, puppet_agent('-t'), :acceptable_exit_codes => acceptable_codes
else
answers = Beaker::Answers.create(opts[:pe_ver] || host['pe_ver'], hosts, opts)
create_remote_file host, "#{host['working_dir']}/answers", answers.answer_string(host)
on host, installer_cmd(host, opts)
end
end
# On each agent, we ensure the certificate is signed then shut down the agent
sign_certificate_for(host) unless masterless
stop_agent_on(host)
end
unless masterless
# Wait for PuppetDB to be totally up and running (post 3.0 version of pe only)
sleep_until_puppetdb_started(database) unless pre30database
# Run the agent once to ensure everything is in the dashboard
install_hosts.each do |host|
on host, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
# Workaround for PE-1105 when deploying 3.0.0
# The installer did not respect our database host answers in 3.0.0,
# and would cause puppetdb to be bounced by the agent run. By sleeping
# again here, we ensure that if that bounce happens during an upgrade
# test we won't fail early in the install process.
if host['pe_ver'] == '3.0.0' and host == database
sleep_until_puppetdb_started(database)
end
end
install_hosts.each do |host|
wait_for_host_in_dashboard(host)
end
if pre30master
task = 'nodegroup:add_all_nodes group=default'
else
task = 'defaultgroup:ensure_default_group'
end
on dashboard, "/opt/puppet/bin/rake -sf /opt/puppet/share/puppet-dashboard/Rakefile #{task} RAILS_ENV=production"
# Now that all hosts are in the dashboard, run puppet one more
# time to configure mcollective
on install_hosts, puppet_agent('-t'), :acceptable_exit_codes => [0,2]
end
end
#Perform a Puppet Enterprise Higgs install up until web browser interaction is required, runs on linux hosts only.
# @param [Host] host The host to install higgs on
# @param [Hash{Symbol=>Symbol, String}] opts The options
# @option opts [String] :pe_dir Default directory or URL to pull PE package from
# (Otherwise uses individual hosts pe_dir)
# @option opts [String] :pe_ver Default PE version to install
# (Otherwise uses individual hosts pe_ver)
# @raise [StandardError] When installation times out
#
# @example
# do_higgs_install(master, {:pe_dir => path, :pe_ver => version})
#
# @api private
#
def do_higgs_install host, opts
use_all_tar = ENV['PE_USE_ALL_TAR'] == 'true'
platform = use_all_tar ? 'all' : host['platform']
version = host['pe_ver'] || opts[:pe_ver]
host['dist'] = "puppet-enterprise-#{version}-#{platform}"
use_all_tar = ENV['PE_USE_ALL_TAR'] == 'true'
host['pe_installer'] ||= 'puppet-enterprise-installer'
host['working_dir'] = host.tmpdir(Time.new.strftime("%Y-%m-%d_%H.%M.%S"))
fetch_puppet([host], opts)
host['higgs_file'] = "higgs_#{File.basename(host['working_dir'])}.log"
on host, higgs_installer_cmd(host), opts
#wait for output to host['higgs_file']
#we're all done when we find this line in the PE installation log
higgs_re = /Please\s+go\s+to\s+https:\/\/.*\s+in\s+your\s+browser\s+to\s+continue\s+installation/m
res = Result.new(host, 'tmp cmd')
tries = 10
attempts = 0
prev_sleep = 0
cur_sleep = 1
while (res.stdout !~ higgs_re) and (attempts < tries)
res = on host, "cd #{host['working_dir']}/#{host['dist']} && cat #{host['higgs_file']}", :acceptable_exit_codes => (0..255)
attempts += 1
sleep( cur_sleep )
prev_sleep = cur_sleep
cur_sleep = cur_sleep + prev_sleep
end
if attempts >= tries
raise "Failed to kick off PE (Higgs) web installation"
end
end
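  # Illustrative note (not in the original source): the polling loop above
  # doubles its sleep between attempts (1, 2, 4, 8, ... seconds), so with
  # tries = 10 it waits roughly 17 minutes in total before raising the
  # "Failed to kick off PE (Higgs) web installation" error.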
#Sort array of hosts so that it has the correct order for PE installation based upon each host's role
# @example
# h = sorted_hosts
#
# @note Order for installation should be
# First : master
# Second: database host (if not same as master)
# Third: dashboard (if not same as master or database)
# Fourth: everything else
#
# @!visibility private
def sorted_hosts
special_nodes = []
[master, database, dashboard].uniq.each do |host|
special_nodes << host if host != nil
end
real_agents = agents - special_nodes
special_nodes + real_agents
end
#Install FOSS based upon host configuration and options
# @example will install puppet 3.6.1 from native puppetlabs provided packages wherever possible and will fail over to gem installation when impossible
# install_puppet({
# :version => '3.6.1',
# :facter_version => '2.0.1',
# :hiera_version => '1.3.3',
# :default_action => 'gem_install',
#
# })
#
#
  # @example Will install latest packages on Enterprise Linux and Debian based distros and fail hard on all other platforms.
# install_puppet()
#
# @note This will attempt to add a repository for apt.puppetlabs.com on
# Debian, Ubuntu, or Cumulus machines, or yum.puppetlabs.com on EL or Fedora
# machines, then install the package 'puppet'.
# @param [Hash{Symbol=>String}] opts
# @option opts [String] :version Version of puppet to download
  # @option opts [String] :mac_download_url Url to download dmg pattern of %url%/(puppet|hiera|facter)-%version%.dmg
  # @option opts [String] :win_download_url Url to download msi pattern of %url%/puppet-%version%.msi
#
# @api dsl
# @return nil
# @raise [StandardError] When encountering an unsupported platform by default, or if gem cannot be found when default_action => 'gem_install'
# @raise [FailTest] When error occurs during the actual installation process
def install_puppet(opts = {})
default_download_url = 'http://downloads.puppetlabs.com'
opts = {:win_download_url => "#{default_download_url}/windows",
:mac_download_url => "#{default_download_url}/mac"}.merge(opts)
hosts.each do |host|
if host['platform'] =~ /el-(5|6|7)/
relver = $1
install_puppet_from_rpm host, opts.merge(:release => relver, :family => 'el')
elsif host['platform'] =~ /fedora-(\d+)/
relver = $1
install_puppet_from_rpm host, opts.merge(:release => relver, :family => 'fedora')
elsif host['platform'] =~ /(ubuntu|debian|cumulus)/
install_puppet_from_deb host, opts
elsif host['platform'] =~ /windows/
relver = opts[:version]
install_puppet_from_msi host, opts
elsif host['platform'] =~ /osx/
install_puppet_from_dmg host, opts
else
if opts[:default_action] == 'gem_install'
install_puppet_from_gem host, opts
else
raise "install_puppet() called for unsupported platform '#{host['platform']}' on '#{host.name}'"
end
end
# Certain install paths may not create the config dirs/files needed
on host, "mkdir -p #{host['puppetpath']}"
on host, "echo '' >> #{host['hieraconf']}"
end
nil
end
  # Configure a host entry on the given host
# @example: will add a host entry for forge.puppetlabs.com
# add_system32_hosts_entry(host, { :ip => '23.251.154.122', :name => 'forge.puppetlabs.com' })
#
# @api dsl
# @return nil
def add_system32_hosts_entry(host, opts = {})
if host['platform'] =~ /windows/
hosts_file = "C:\\Windows\\System32\\Drivers\\etc\\hosts"
host_entry = "#{opts['ip']}`t`t#{opts['name']}"
on host, powershell("\$text = \\\"#{host_entry}\\\"; Add-Content -path '#{hosts_file}' -value \$text")
else
raise "nothing to do for #{host.name} on #{host['platform']}"
end
end
# Installs Puppet and dependencies using rpm
#
# @param [Host] host The host to install packages on
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of Puppet to install, if nil installs latest version
# @option opts [String] :facter_version The version of Facter to install, if nil installs latest version
# @option opts [String] :hiera_version The version of Hiera to install, if nil installs latest version
# @option opts [String] :default_action What to do if we don't know how to install native packages on host.
# Valid value is 'gem_install' or nil. If nil raises an exception when
# on an unsupported platform. When 'gem_install' attempts to install
# Puppet via gem.
# @option opts [String] :release The major release of the OS
# @option opts [String] :family The OS family (one of 'el' or 'fedora')
#
# @return nil
# @api private
def install_puppet_from_rpm( host, opts )
release_package_string = "http://yum.puppetlabs.com/puppetlabs-release-#{opts[:family]}-#{opts[:release]}.noarch.rpm"
on host, "rpm -q --quiet puppetlabs-release || rpm -ivh #{release_package_string}"
if opts[:facter_version]
on host, "yum install -y facter-#{opts[:facter_version]}"
end
if opts[:hiera_version]
on host, "yum install -y hiera-#{opts[:hiera_version]}"
end
puppet_pkg = opts[:version] ? "puppet-#{opts[:version]}" : 'puppet'
on host, "yum install -y #{puppet_pkg}"
end
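  # Illustrative example (hypothetical host and version): for an EL 7 host,
  # install_puppet dispatches here roughly as
  #   install_puppet_from_rpm(host, :version => '3.8.7', :release => '7', :family => 'el')
  # which installs http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
  # (if the release package is missing) and then runs `yum install -y puppet-3.8.7`.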
# Installs Puppet and dependencies from deb
#
# @param [Host] host The host to install packages on
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of Puppet to install, if nil installs latest version
# @option opts [String] :facter_version The version of Facter to install, if nil installs latest version
# @option opts [String] :hiera_version The version of Hiera to install, if nil installs latest version
#
# @return nil
# @api private
def install_puppet_from_deb( host, opts )
if ! host.check_for_package 'lsb-release'
host.install_package('lsb-release')
end
if ! host.check_for_command 'curl'
on host, 'apt-get install -y curl'
end
on host, 'curl -O http://apt.puppetlabs.com/puppetlabs-release-$(lsb_release -c -s).deb'
on host, 'dpkg -i puppetlabs-release-$(lsb_release -c -s).deb'
on host, 'apt-get update'
if opts[:facter_version]
on host, "apt-get install -y facter=#{opts[:facter_version]}-1puppetlabs1"
end
if opts[:hiera_version]
on host, "apt-get install -y hiera=#{opts[:hiera_version]}-1puppetlabs1"
end
if opts[:version]
on host, "apt-get install -y puppet-common=#{opts[:version]}-1puppetlabs1"
on host, "apt-get install -y puppet=#{opts[:version]}-1puppetlabs1"
else
on host, 'apt-get install -y puppet'
end
end
# Installs Puppet and dependencies from msi
#
# @param [Host] host The host to install packages on
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of Puppet to install, required
# @option opts [String] :win_download_url The url to download puppet from
def install_puppet_from_msi( host, opts )
#only install 64bit builds if
# - we are on puppet version 3.7+
# - we do not have install_32 set on host
# - we do not have install_32 set globally
version = opts[:version]
if !(version_is_less(version, '3.7')) and host.is_x86_64? and not host['install_32'] and not opts['install_32']
host['dist'] = "puppet-#{version}-x64"
else
host['dist'] = "puppet-#{version}"
end
link = "#{opts[:win_download_url]}/#{host['dist']}.msi"
if not link_exists?( link )
raise "Puppet #{version} at #{link} does not exist!"
end
if host['is_cygwin'].nil? or host['is_cygwin'] == true
dest = "/cygdrive/c/Windows/Temp/#{host['dist']}.msi"
on host, "curl -O #{dest} #{link}"
#Because the msi installer doesn't add Puppet to the environment path
#Add both potential paths for simplicity
#NOTE - this is unnecessary if the host has been correctly identified as 'foss' during set up
puppetbin_path = "\"/cygdrive/c/Program Files (x86)/Puppet Labs/Puppet/bin\":\"/cygdrive/c/Program Files/Puppet Labs/Puppet/bin\""
on host, %Q{ echo 'export PATH=$PATH:#{puppetbin_path}' > /etc/bash.bashrc }
else
dest = "C:\\Windows\\Temp\\#{host['dist']}.msi"
on host, "set PATH=\"%PATH%;#{host['puppetbindir']}\""
on host, "setx PATH \"%PATH%;#{host['puppetbindir']}\""
on host, powershell("$webclient = New-Object System.Net.WebClient; $webclient.DownloadFile('#{link}','#{dest}')")
on host, "if not exist #{host['distmoduledir']} (md #{host['distmoduledir']})"
end
on host, "msiexec /qn /i #{dest}"
end
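  # Illustrative example (hypothetical version): calling
  #   install_puppet_from_msi(host, :version => '3.8.0', :win_download_url => 'http://downloads.puppetlabs.com/windows')
  # on a 64-bit host with no install_32 override sets host['dist'] to
  # 'puppet-3.8.0-x64', downloads .../windows/puppet-3.8.0-x64.msi and installs
  # it with `msiexec /qn /i`.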
# Installs Puppet and dependencies from dmg
#
# @param [Host] host The host to install packages on
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of Puppet to install, required
# @option opts [String] :facter_version The version of Facter to install, required
# @option opts [String] :hiera_version The version of Hiera to install, required
  # @option opts [String] :mac_download_url Url to download dmg pattern of %url%/(puppet|hiera|facter)-%version%.dmg
#
# @return nil
# @api private
def install_puppet_from_dmg( host, opts )
puppet_ver = opts[:version]
facter_ver = opts[:facter_version]
hiera_ver = opts[:hiera_version]
if [puppet_ver, facter_ver, hiera_ver].include?(nil)
raise "You need to specify versions for OSX host\n eg. install_puppet({:version => '3.6.2',:facter_version => '2.1.0',:hiera_version => '1.3.4',})"
end
on host, "curl -O #{opts[:mac_download_url]}/puppet-#{puppet_ver}.dmg"
on host, "curl -O #{opts[:mac_download_url]}/facter-#{facter_ver}.dmg"
on host, "curl -O #{opts[:mac_download_url]}/hiera-#{hiera_ver}.dmg"
on host, "hdiutil attach puppet-#{puppet_ver}.dmg"
on host, "hdiutil attach facter-#{facter_ver}.dmg"
on host, "hdiutil attach hiera-#{hiera_ver}.dmg"
on host, "installer -pkg /Volumes/puppet-#{puppet_ver}/puppet-#{puppet_ver}.pkg -target /"
on host, "installer -pkg /Volumes/facter-#{facter_ver}/facter-#{facter_ver}.pkg -target /"
on host, "installer -pkg /Volumes/hiera-#{hiera_ver}/hiera-#{hiera_ver}.pkg -target /"
end
# Installs Puppet and dependencies from gem
#
# @param [Host] host The host to install packages on
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of Puppet to install, if nil installs latest
# @option opts [String] :facter_version The version of Facter to install, if nil installs latest
# @option opts [String] :hiera_version The version of Hiera to install, if nil installs latest
#
# @return nil
# @raise [StandardError] if gem does not exist on target host
# @api private
def install_puppet_from_gem( host, opts )
# There are a lot of special things to do for Solaris and Solaris 10.
# This is easier than checking host['platform'] every time.
is_solaris10 = host['platform'] =~ /solaris-10/
is_solaris = host['platform'] =~ /solaris/
# Hosts may be provisioned with csw but pkgutil won't be in the
# PATH by default to avoid changing the behavior for Puppet's tests
if is_solaris10
on host, 'ln -s /opt/csw/bin/pkgutil /usr/bin/pkgutil'
end
# Solaris doesn't necessarily have this, but gem needs it
if is_solaris
on host, 'mkdir -p /var/lib'
end
unless host.check_for_command( 'gem' )
gempkg = case host['platform']
when /solaris-11/ then 'ruby-18'
when /ubuntu-14/ then 'ruby'
when /solaris-10|ubuntu|debian|el-|cumulus/ then 'rubygems'
else
raise "install_puppet() called with default_action " +
"'gem_install' but program `gem' is " +
"not installed on #{host.name}"
end
host.install_package gempkg
end
# Link 'gem' to /usr/bin instead of adding /opt/csw/bin to PATH.
if is_solaris10
on host, 'ln -s /opt/csw/bin/gem /usr/bin/gem'
end
if host['platform'] =~ /debian|ubuntu|solaris|cumulus/
gem_env = YAML.load( on( host, 'gem environment' ).stdout )
gem_paths_array = gem_env['RubyGems Environment'].find {|h| h['GEM PATHS'] != nil }['GEM PATHS']
path_with_gem = 'export PATH=' + gem_paths_array.join(':') + ':${PATH}'
on host, "echo '#{path_with_gem}' >> ~/.bashrc"
end
if opts[:facter_version]
on host, "gem install facter -v#{opts[:facter_version]} --no-ri --no-rdoc"
end
if opts[:hiera_version]
on host, "gem install hiera -v#{opts[:hiera_version]} --no-ri --no-rdoc"
end
ver_cmd = opts[:version] ? "-v#{opts[:version]}" : ''
on host, "gem install puppet #{ver_cmd} --no-ri --no-rdoc"
# Similar to the treatment of 'gem' above.
# This avoids adding /opt/csw/bin to PATH.
if is_solaris
gem_env = YAML.load( on( host, 'gem environment' ).stdout )
# This is the section we want - this has the dir where gem executables go.
env_sect = 'EXECUTABLE DIRECTORY'
# Get the directory where 'gem' installs executables.
# On Solaris 10 this is usually /opt/csw/bin
gem_exec_dir = gem_env['RubyGems Environment'].find {|h| h[env_sect] != nil }[env_sect]
on host, "ln -s #{gem_exec_dir}/hiera /usr/bin/hiera"
on host, "ln -s #{gem_exec_dir}/facter /usr/bin/facter"
on host, "ln -s #{gem_exec_dir}/puppet /usr/bin/puppet"
end
end
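  # Illustrative note (hypothetical `gem environment` output): the YAML parsed
  # above looks roughly like
  #   RubyGems Environment:
  #     - EXECUTABLE DIRECTORY: /opt/csw/bin
  #     - GEM PATHS:
  #       - /opt/csw/lib/ruby/gems/1.8
  # which is why the code scans the 'RubyGems Environment' list for the
  # 'GEM PATHS' and 'EXECUTABLE DIRECTORY' entries.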
#Install PE based upon host configuration and options
# @example
# install_pe
#
# @note Either pe_ver and pe_dir should be set in the ENV or each host should have pe_ver and pe_dir set individually.
# Install file names are assumed to be of the format puppet-enterprise-VERSION-PLATFORM.(tar)|(tar.gz)
# for Unix like systems and puppet-enterprise-VERSION.msi for Windows systems.
#
# @api dsl
def install_pe
#process the version files if necessary
hosts.each do |host|
host['pe_dir'] ||= options[:pe_dir]
if host['platform'] =~ /windows/
host['pe_ver'] = host['pe_ver'] || options['pe_ver'] ||
Beaker::Options::PEVersionScraper.load_pe_version(host[:pe_dir] || options[:pe_dir], options[:pe_version_file_win])
else
host['pe_ver'] = host['pe_ver'] || options['pe_ver'] ||
Beaker::Options::PEVersionScraper.load_pe_version(host[:pe_dir] || options[:pe_dir], options[:pe_version_file])
end
end
#send in the global options hash
do_install sorted_hosts, options
end
#Upgrade PE based upon host configuration and options
# @param [String] path A path (either local directory or a URL to a listing of PE builds).
# Will contain a LATEST file indicating the latest build to install.
# This is ignored if a pe_upgrade_ver and pe_upgrade_dir are specified
# in the host configuration file.
# @example
# upgrade_pe("http://neptune.puppetlabs.lan/3.0/ci-ready/")
#
# @note Install file names are assumed to be of the format puppet-enterprise-VERSION-PLATFORM.(tar)|(tar.gz)
# for Unix like systems and puppet-enterprise-VERSION.msi for Windows systems.
# @api dsl
def upgrade_pe path=nil
hosts.each do |host|
host['pe_dir'] = host['pe_upgrade_dir'] || path
if host['platform'] =~ /windows/
host['pe_ver'] = host['pe_upgrade_ver'] || options['pe_upgrade_ver'] ||
Options::PEVersionScraper.load_pe_version(host['pe_dir'], options[:pe_version_file_win])
else
host['pe_ver'] = host['pe_upgrade_ver'] || options['pe_upgrade_ver'] ||
Options::PEVersionScraper.load_pe_version(host['pe_dir'], options[:pe_version_file])
end
if version_is_less(host['pe_ver'], '3.0')
host['pe_installer'] ||= 'puppet-enterprise-upgrader'
end
end
#send in the global options hash
do_install(sorted_hosts, options.merge({:type => :upgrade}))
options['upgrade'] = true
end
# Install official puppetlabs release repository configuration on host.
#
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
#
# @note This method only works on redhat-like and debian-like hosts.
#
def install_puppetlabs_release_repo ( host )
variant, version, arch, codename = host['platform'].to_array
case variant
when /^(fedora|el|centos)$/
variant = (($1 == 'centos') ? 'el' : $1)
rpm = options[:release_yum_repo_url] +
"/puppetlabs-release-%s-%s.noarch.rpm" % [variant, version]
on host, "rpm -ivh #{rpm}"
when /^(debian|ubuntu|cumulus)$/
deb = URI.join(options[:release_apt_repo_url], "puppetlabs-release-%s.deb" % codename)
on host, "wget -O /tmp/puppet.deb #{deb}"
on host, "dpkg -i --force-all /tmp/puppet.deb"
on host, "apt-get update"
else
raise "No repository installation step for #{variant} yet..."
end
end
# Install development repository on the given host. This method pushes all
# repository information including package files for the specified
# package_name to the host and modifies the repository configuration file
# to point at the new repository. This is particularly useful for
# installing development packages on hosts that can't access the builds
# server.
#
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
# @param [String] package_name The name of the package whose repository is
# being installed.
# @param [String] build_version A string identifying the output of a
# packaging job for use in looking up
# repository directory information
# @param [String] repo_configs_dir A local directory where repository files will be
# stored as an intermediate step before
# pushing them to the given host.
#
# @note This method only works on redhat-like and debian-like hosts.
#
def install_puppetlabs_dev_repo ( host, package_name, build_version,
repo_configs_dir = 'tmp/repo_configs' )
variant, version, arch, codename = host['platform'].to_array
platform_configs_dir = File.join(repo_configs_dir, variant)
# some of the uses of dev_builds_url below can't include protocol info,
    # plus this opens up the possibility of switching the behavior on the provided
# url type
_, protocol, hostname = options[:dev_builds_url].partition /.*:\/\//
dev_builds_url = protocol + hostname
on host, "mkdir -p /root/#{package_name}"
case variant
when /^(fedora|el|centos)$/
variant = (($1 == 'centos') ? 'el' : $1)
fedora_prefix = ((variant == 'fedora') ? 'f' : '')
if host.is_pe?
pattern = "pl-%s-%s-repos-pe-%s-%s%s-%s.repo"
else
pattern = "pl-%s-%s-%s-%s%s-%s.repo"
end
repo_filename = pattern % [
package_name,
build_version,
variant,
fedora_prefix,
version,
arch
]
repo = fetch_http_file( "%s/%s/%s/repo_configs/rpm/" %
[ dev_builds_url, package_name, build_version ],
repo_filename,
platform_configs_dir)
link = "%s/%s/%s/repos/%s/%s%s/products/%s/" %
[ dev_builds_url, package_name, build_version, variant,
fedora_prefix, version, arch ]
if not link_exists?( link )
link = "%s/%s/%s/repos/%s/%s%s/devel/%s/" %
[ dev_builds_url, package_name, build_version, variant,
fedora_prefix, version, arch ]
end
if not link_exists?( link )
raise "Unable to reach a repo directory at #{link}"
end
repo_dir = fetch_http_dir( link, platform_configs_dir )
config_dir = '/etc/yum.repos.d/'
scp_to host, repo, config_dir
scp_to host, repo_dir, "/root/#{package_name}"
search = "baseurl\\s*=\\s*http:\\/\\/#{hostname}.*$"
replace = "baseurl=file:\\/\\/\\/root\\/#{package_name}\\/#{arch}"
sed_command = "sed -i 's/#{search}/#{replace}/'"
find_and_sed = "find #{config_dir} -name \"*.repo\" -exec #{sed_command} {} \\;"
on host, find_and_sed
when /^(debian|ubuntu|cumulus)$/
list = fetch_http_file( "%s/%s/%s/repo_configs/deb/" %
[ dev_builds_url, package_name, build_version ],
"pl-%s-%s-%s.list" %
[ package_name, build_version, codename ],
platform_configs_dir )
repo_dir = fetch_http_dir( "%s/%s/%s/repos/apt/%s" %
[ dev_builds_url, package_name,
build_version, codename ],
platform_configs_dir )
config_dir = '/etc/apt/sources.list.d'
scp_to host, list, config_dir
scp_to host, repo_dir, "/root/#{package_name}"
search = "deb\\s\\+http:\\/\\/#{hostname}.*$"
replace = "deb file:\\/\\/\\/root\\/#{package_name}\\/#{codename} #{codename} main"
sed_command = "sed -i 's/#{search}/#{replace}/'"
find_and_sed = "find #{config_dir} -name \"*.list\" -exec #{sed_command} {} \\;"
on host, find_and_sed
on host, "apt-get update"
else
raise "No repository installation step for #{variant} yet..."
end
end
# Installs packages from the local development repository on the given host
#
# @param [Host] host An object implementing {Beaker::Hosts}'s
# interface.
# @param [Regexp] package_name The name of the package whose repository is
# being installed.
#
# @note This method only works on redhat-like and debian-like hosts.
# @note This method is paired to be run directly after {#install_puppetlabs_dev_repo}
#
def install_packages_from_local_dev_repo( host, package_name )
if host['platform'] =~ /debian|ubuntu|cumulus/
find_filename = '*.deb'
find_command = 'dpkg -i'
elsif host['platform'] =~ /fedora|el|centos/
find_filename = '*.rpm'
find_command = 'rpm -ivh'
else
raise "No repository installation step for #{host['platform']} yet..."
end
find_command = "find /root/#{package_name} -type f -name '#{find_filename}' -exec #{find_command} {} \\;"
on host, find_command
end
# Install development repo of the puppet-agent on the given host
#
# @param [Host] host An object implementing {Beaker::Hosts}'s interface
# @param [Hash{Symbol=>String}] opts An options hash
# @option opts [String] :version The version of puppet-agent to install
# @option opts [String] :copy_base_local Directory where puppet-agent artifact
# will be stored locally
# (default: 'tmp/repo_configs')
# @option opts [String] :copy_dir_external Directory where puppet-agent
# artifact will be pushed to on the external machine
# (default: '/root')
# @return nil
def install_puppetagent_dev_repo( host, opts )
opts[:copy_base_local] ||= File.join('tmp', 'repo_configs')
opts[:copy_dir_external] ||= File.join('/', 'root')
variant, version, arch, codename = host['platform'].to_array
release_path = "#{options[:dev_builds_url]}/puppet-agent/#{opts[:version]}/artifacts/"
copy_dir_local = File.join(opts[:copy_base_local], variant)
onhost_copy_base = opts[:copy_dir_external]
case variant
when /^(fedora|el|centos)$/
release_path << "el/#{version}/products/#{arch}"
release_file = "puppet-agent-#{opts[:version]}-1.#{arch}.rpm"
when /^(debian|ubuntu|cumulus)$/
release_path << "deb/#{codename}"
release_file = "puppet-agent_#{opts[:version]}-1_#{arch}.deb"
else
raise "No repository installation step for #{variant} yet..."
end
onhost_copied_file = File.join(onhost_copy_base, release_file)
fetch_http_file( release_path, release_file, copy_dir_local)
scp_to host, File.join(copy_dir_local, release_file), onhost_copy_base
case variant
when /^(fedora|el|centos)$/
on host, "rpm -ivh #{onhost_copied_file}"
when /^(debian|ubuntu|cumulus)$/
on host, "dpkg -i --force-all #{onhost_copied_file}"
on host, "apt-get update"
end
end
#Install Higgs up till the point where you need to continue installation in a web browser, defaults to execution
#on the master node.
#@param [Host] higgs_host The host to install Higgs on (supported on linux platform only)
# @example
# install_higgs
#
# @note Either pe_ver and pe_dir should be set in the ENV or each host should have pe_ver and pe_dir set individually.
# Install file names are assumed to be of the format puppet-enterprise-VERSION-PLATFORM.(tar)|(tar.gz).
#
# @api dsl
def install_higgs( higgs_host = master )
#process the version files if necessary
master['pe_dir'] ||= options[:pe_dir]
master['pe_ver'] = master['pe_ver'] || options['pe_ver'] ||
Beaker::Options::PEVersionScraper.load_pe_version(master[:pe_dir] || options[:pe_dir], options[:pe_version_file])
if higgs_host['platform'] =~ /osx|windows/
raise "Attempting higgs installation on host #{higgs_host.name} with unsupported platform #{higgs_host['platform']}"
end
#send in the global options hash
do_higgs_install higgs_host, options
end
  # Install the desired module on the given host using either the PMT or a
# staging forge
#
# @see install_dev_puppet_module
def install_dev_puppet_module_on( host, opts )
if options[:forge_host]
with_forge_stubbed_on( host ) do
install_puppet_module_via_pmt_on( host, opts )
end
else
copy_module_to( host, opts )
end
end
alias :puppet_module_install_on :install_dev_puppet_module_on
# Install the desired module on all hosts using either the PMT or a
# staging forge
#
# Passes options through to either `install_puppet_module_via_pmt_on`
# or `copy_module_to`
#
# @param opts [Hash]
#
# @example Installing a module from the local directory
# install_dev_puppet_module( :source => './', :module_name => 'concat' )
#
# @example Installing a module from a staging forge
# options[:forge_host] = 'my-forge-api.example.com'
# install_dev_puppet_module( :source => './', :module_name => 'concat' )
#
# @see install_puppet_module_via_pmt
# @see copy_module_to
def install_dev_puppet_module( opts )
block_on( hosts ) {|h| install_dev_puppet_module_on( h, opts ) }
end
alias :puppet_module_install :install_dev_puppet_module
# Install the desired module with the PMT on a given host
#
# @param opts [Hash]
# @option opts [String] :module_name The short name of the module to be installed
# @option opts [String] :version The version of the module to be installed
def install_puppet_module_via_pmt_on( host, opts = {} )
block_on host do |h|
version_info = opts[:version] ? "-v #{opts[:version]}" : ""
if opts[:source]
author_name, module_name = parse_for_modulename( opts[:source] )
modname = "#{author_name}-#{module_name}"
else
modname = opts[:module_name]
end
puppet_opts = {}
if host[:default_module_install_opts].respond_to? :merge
puppet_opts = host[:default_module_install_opts].merge( puppet_opts )
end
on h, puppet("module install #{modname} #{version_info}", puppet_opts)
end
end
# Install the desired module with the PMT on all known hosts
# @see #install_puppet_module_via_pmt_on
def install_puppet_module_via_pmt( opts = {} )
install_puppet_module_via_pmt_on(hosts, opts)
end
# Install local module for acceptance testing
# should be used as a presuite to ensure local module is copied to the hosts you want, particularly masters
# @api dsl
# @param [Host, Array<Host>, String, Symbol] one_or_more_hosts
# One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @option opts [String] :source ('./')
# The current directory where the module sits, otherwise will try
  # and walk the tree to figure it out
# @option opts [String] :module_name (nil)
# Name which the module should be installed under, please do not include author,
# if none is provided it will attempt to parse the metadata.json and then the Modulefile to determine
# the name of the module
# @option opts [String] :target_module_path (host['distmoduledir']/modules)
# Location where the module should be installed, will default
# to host['distmoduledir']/modules
# @option opts [Array] :ignore_list
  # @raise [ArgumentError] if no host is provided, or if module_name is not provided and cannot be found in the Modulefile
#
def copy_module_to(one_or_more_hosts, opts = {})
block_on one_or_more_hosts do |host|
opts = {:source => './',
:target_module_path => host['distmoduledir'],
:ignore_list => PUPPET_MODULE_INSTALL_IGNORE}.merge(opts)
ignore_list = build_ignore_list(opts)
target_module_dir = on( host, "echo #{opts[:target_module_path]}" ).stdout.chomp
source = File.expand_path( opts[:source] )
if opts.has_key?(:module_name)
module_name = opts[:module_name]
else
_, module_name = parse_for_modulename( source )
end
scp_to host, source, File.join(target_module_dir, module_name), {:ignore => ignore_list}
end
end
alias :copy_root_module_to :copy_module_to
# Install a package on a host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *install command*.
def install_package host, package_name, package_version = nil
host.install_package package_name, '', package_version
end
# Check to see if a package is installed on a remote host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to check for.
#
# @return [Boolean] true/false if the package is found
def check_for_package host, package_name
host.check_for_package package_name
end
# Upgrade a package on a host. The package must already be installed
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *upgrade command*.
def upgrade_package host, package_name
host.upgrade_package package_name
end
#Recursive method for finding the module root
  # Assumes that a Modulefile or metadata.json exists
# @param [String] possible_module_directory
# will look for Modulefile and if none found go up one level and try again until root is reached
#
# @return [String,nil]
def parse_for_moduleroot(possible_module_directory)
if File.exists?("#{possible_module_directory}/Modulefile") || File.exists?("#{possible_module_directory}/metadata.json")
possible_module_directory
elsif possible_module_directory === '/'
logger.error "At root, can't parse for another directory"
nil
else
logger.debug "No Modulefile or metadata.json found at #{possible_module_directory}, moving up"
parse_for_moduleroot File.expand_path(File.join(possible_module_directory,'..'))
end
end
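  # Illustrative example (hypothetical paths): if /tmp/mymod contains a
  # metadata.json, parse_for_moduleroot('/tmp/mymod/manifests') finds nothing in
  # the manifests directory, recurses to '/tmp/mymod' and returns that path.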
#Parse root directory of a module for module name
# Searches for metadata.json and then if none found, Modulefile and parses for the Name attribute
# @param [String] root_module_dir
# @return [String] module name
def parse_for_modulename(root_module_dir)
author_name, module_name = nil, nil
if File.exists?("#{root_module_dir}/metadata.json")
logger.debug "Attempting to parse Modulename from metadata.json"
module_json = JSON.parse(File.read "#{root_module_dir}/metadata.json")
if(module_json.has_key?('name'))
author_name, module_name = get_module_name(module_json['name'])
end
end
if !module_name && File.exists?("#{root_module_dir}/Modulefile")
logger.debug "Attempting to parse Modulename from Modulefile"
if /^name\s+'?(\w+-\w+)'?\s*$/i.match(File.read("#{root_module_dir}/Modulefile"))
author_name, module_name = get_module_name(Regexp.last_match[1])
end
end
if !module_name && !author_name
logger.debug "Unable to determine name, returning null"
end
return author_name, module_name
end
  #Parse modulename from the pattern 'Author-ModuleName'
#
# @param [String] author_module_name <Author>-<ModuleName> pattern
#
# @return [String,nil]
#
def get_module_name(author_module_name)
split_name = split_author_modulename(author_module_name)
if split_name
return split_name[:author], split_name[:module]
end
end
#Split the Author-Name into a hash
# @param [String] author_module_attr
#
# @return [Hash<Symbol,String>,nil] :author and :module symbols will be returned
#
def split_author_modulename(author_module_attr)
result = /(\w+)-(\w+)/.match(author_module_attr)
if result
{:author => result[1], :module => result[2]}
else
nil
end
end
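  # Illustrative example (hypothetical input), following the regex above:
  #   split_author_modulename('puppetlabs-stdlib')
  #   #=> {:author => 'puppetlabs', :module => 'stdlib'}
  # so get_module_name('puppetlabs-stdlib') returns ['puppetlabs', 'stdlib'].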
# Build an array list of files/directories to ignore when pushing to remote host
  # Automatically adds '..' and '.' to the array. If no opts[:ignore_list] is provided
# it will use the static variable PUPPET_MODULE_INSTALL_IGNORE
#
# @param opts [Hash]
# @option opts [Array] :ignore_list A list of files/directories to ignore
def build_ignore_list(opts = {})
ignore_list = opts[:ignore_list] || PUPPET_MODULE_INSTALL_IGNORE
if !ignore_list.kind_of?(Array) || ignore_list.nil?
raise ArgumentError "Ignore list must be an Array"
end
ignore_list << '.' unless ignore_list.include? '.'
ignore_list << '..' unless ignore_list.include? '..'
ignore_list
end
end
end
end
| 1 | 8,833 | Is this definition needed since it doesn't look like it's being used anywhere? | voxpupuli-beaker | rb |
@@ -35,6 +35,11 @@ module Mongoid
class << self
delegate :discriminator_key, to: ::Mongoid
end
+ end
+
+ unless fields.has_key?(self.discriminator_key) || descendants.length == 0
+ default_proc = lambda { self.class.name }
+ field(self.discriminator_key, default: default_proc, type: String)
end
end
end | 1 | # frozen_string_literal: true
# encoding: utf-8
module Mongoid
# Provides behavior around traversing the document graph.
#
# @since 4.0.0
module Traversable
extend ActiveSupport::Concern
def _parent
@__parent ||= nil
end
def _parent=(p)
@__parent = p
end
# Module used for prepending to the discriminator_key= function
#
# @api private
module DiscriminatorKeyAssignment
def discriminator_key=(value)
if hereditary?
raise Errors::InvalidDiscriminatorKeyTarget.new(self, self.superclass)
end
if value
super
else
# When discriminator key is set to nil, replace the class's definition
# of the discriminator key reader (provided by class_attribute earlier)
# and re-delegate to Mongoid.
class << self
delegate :discriminator_key, to: ::Mongoid
end
end
end
end
included do
class_attribute :discriminator_key, instance_accessor: false
class << self
delegate :discriminator_key, to: ::Mongoid
prepend DiscriminatorKeyAssignment
end
end
# Get all child +Documents+ to this +Document+, going n levels deep if
# necessary. This is used when calling update persistence operations from
# the root document, where changes in the entire tree need to be
# determined. Note that persistence from the embedded documents will
# always be preferred, since they are optimized calls... This operation
# can get expensive in domains with large hierarchies.
#
# @example Get all the document's children.
# person._children
#
# @return [ Array<Document> ] All child documents in the hierarchy.
def _children
@__children ||= collect_children
end
# Collect all the children of this document.
#
# @example Collect all the children.
# document.collect_children
#
# @return [ Array<Document> ] The children.
#
# @since 2.4.0
def collect_children
children = []
embedded_relations.each_pair do |name, association|
without_autobuild do
child = send(name)
Array.wrap(child).each do |doc|
children.push(doc)
children.concat(doc._children)
end if child
end
end
children
end
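    # Illustrative example (hypothetical model): if a Band embeds_many :albums
    # and embeds_one :label, band.collect_children returns the label document
    # and every album document, each followed by its own embedded children.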
# Marks all children as being persisted.
#
# @example Flag all the children.
# document.flag_children_persisted
#
# @return [ Array<Document> ] The flagged children.
#
# @since 3.0.7
def flag_children_persisted
_children.each do |child|
child.new_record = false
end
end
# Determines if the document is a subclass of another document.
#
# @example Check if the document is a subclass
# Square.new.hereditary?
#
# @return [ true, false ] True if hereditary, false if not.
def hereditary?
self.class.hereditary?
end
# Sets up a child/parent association. This is used for newly created
# objects so they can be properly added to the graph.
#
# @example Set the parent document.
# document.parentize(parent)
#
# @param [ Document ] document The parent document.
#
# @return [ Document ] The parent document.
def parentize(document)
self._parent = document
end
# Remove a child document from this parent. If an embeds one then set to
# nil, otherwise remove from the embeds many.
#
# This is called from the +RemoveEmbedded+ persistence command.
#
# @example Remove the child.
# document.remove_child(child)
#
# @param [ Document ] child The child (embedded) document to remove.
#
# @since 2.0.0.beta.1
def remove_child(child)
name = child.association_name
if child.embedded_one?
remove_ivar(name)
else
relation = send(name)
relation.send(:delete_one, child)
end
end
# After children are persisted we can call this to move all their changes
# and flag them as persisted in one call.
#
# @example Reset the children.
# document.reset_persisted_children
#
# @return [ Array<Document> ] The children.
#
# @since 2.1.0
def reset_persisted_children
_children.each do |child|
child.move_changes
child.new_record = false
end
_reset_memoized_children!
end
# Resets the memoized children on the object. Called internally when an
# embedded array changes size.
#
# @api semiprivate
#
# @example Reset the memoized children.
# document._reset_memoized_children!
#
# @return [ nil ] nil.
#
# @since 5.0.0
def _reset_memoized_children!
_parent._reset_memoized_children! if _parent
@__children = nil
end
# Return the root document in the object graph. If the current document
# is the root object in the graph it will return self.
#
# @example Get the root document in the hierarchy.
# document._root
#
# @return [ Document ] The root document in the hierarchy.
def _root
object = self
while (object._parent) do object = object._parent; end
object
end
# Is this document the root document of the hierarchy?
#
# @example Is the document the root?
# document._root?
#
# @return [ true, false ] If the document is the root.
#
# @since 3.1.0
def _root?
_parent ? false : true
end
module ClassMethods
# Determines if the document is a subclass of another document.
#
# @example Check if the document is a subclass.
# Square.hereditary?
#
# @return [ true, false ] True if hereditary, false if not.
def hereditary?
!!(Mongoid::Document > superclass)
end
# When inheriting, we want to copy the fields from the parent class and
# set the on the child to start, mimicking the behavior of the old
# class_inheritable_accessor that was deprecated in Rails edge.
#
# @example Inherit from this class.
# Person.inherited(Doctor)
#
# @param [ Class ] subclass The inheriting class.
#
# @since 2.0.0.rc.6
def inherited(subclass)
super
@_type = nil
subclass.aliased_fields = aliased_fields.dup
subclass.localized_fields = localized_fields.dup
subclass.fields = fields.dup
subclass.pre_processed_defaults = pre_processed_defaults.dup
subclass.post_processed_defaults = post_processed_defaults.dup
subclass._declared_scopes = Hash.new { |hash,key| self._declared_scopes[key] }
      # We only need the _type field if inheritance is in play, but we need to
      # add it to the root class as well for backwards compatibility.
unless fields.has_key?("_type")
default_proc = lambda { self.class.name }
field(:_type, default: default_proc, type: String)
end
end
end
end
end
| 1 | 12,656 | Can this condition be reworded using `if` please? | mongodb-mongoid | rb |
@@ -135,13 +135,15 @@ Status FetchEdgesExecutor::setupEdgeKeysFromRef() {
const InterimResult *inputs;
if (sentence_->ref()->isInputExpr()) {
inputs = inputs_.get();
- if (inputs == nullptr) {
+ if (inputs == nullptr
+ || (inputs != nullptr && !inputs->hasData())) {
// we have empty imputs from pipe.
return Status::OK();
}
} else {
inputs = ectx()->variableHolder()->get(varname_);
- if (inputs == nullptr) {
+ if (inputs == nullptr
+ || (inputs != nullptr && !inputs->hasData())) {
return Status::Error("Variable `%s' not defined", varname_.c_str());
}
} | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "graph/FetchEdgesExecutor.h"
namespace nebula {
namespace graph {
FetchEdgesExecutor::FetchEdgesExecutor(Sentence *sentence, ExecutionContext *ectx)
: FetchExecutor(ectx) {
sentence_ = static_cast<FetchEdgesSentence*>(sentence);
}
Status FetchEdgesExecutor::prepare() {
return Status::OK();
}
Status FetchEdgesExecutor::prepareClauses() {
DCHECK_NOTNULL(sentence_);
Status status = Status::OK();
do {
status = checkIfGraphSpaceChosen();
if (!status.ok()) {
break;
}
expCtx_ = std::make_unique<ExpressionContext>();
spaceId_ = ectx()->rctx()->session()->space();
yieldClause_ = sentence_->yieldClause();
labelName_ = sentence_->edge();
auto result = ectx()->schemaManager()->toEdgeType(spaceId_, *labelName_);
if (!result.ok()) {
status = result.status();
break;
}
edgeType_ = result.value();
labelSchema_ = ectx()->schemaManager()->getEdgeSchema(spaceId_, edgeType_);
if (labelSchema_ == nullptr) {
LOG(ERROR) << *labelName_ << " edge schema not exist.";
status = Status::Error("%s edge schema not exist.", labelName_->c_str());
break;
}
status = prepareEdgeKeys();
if (!status.ok()) {
break;
}
status = prepareYield();
if (!status.ok()) {
break;
}
// Save the type
auto iter = colTypes_.begin();
for (auto i = 0u; i < colNames_.size(); i++) {
auto type = labelSchema_->getFieldType(colNames_[i]);
if (type == CommonConstants::kInvalidValueType()) {
iter++;
continue;
}
*iter = type.type;
iter++;
}
} while (false);
return status;
}
Status FetchEdgesExecutor::prepareEdgeKeys() {
Status status = Status::OK();
if (sentence_->isRef()) {
auto *edgeKeyRef = sentence_->ref();
srcid_ = edgeKeyRef->srcid();
DCHECK_NOTNULL(srcid_);
dstid_ = edgeKeyRef->dstid();
DCHECK_NOTNULL(dstid_);
rank_ = edgeKeyRef->rank();
auto ret = edgeKeyRef->varname();
if (!ret.ok()) {
status = std::move(ret).status();
}
varname_ = ret.value();
}
return status;
}
void FetchEdgesExecutor::execute() {
FLOG_INFO("Executing FetchEdges: %s", sentence_->toString().c_str());
auto status = prepareClauses();
if (!status.ok()) {
DCHECK(onError_);
onError_(std::move(status));
return;
}
status = setupEdgeKeys();
if (!status.ok()) {
DCHECK(onError_);
onError_(std::move(status));
return;
}
if (edgeKeys_.empty()) {
onEmptyInputs();
return;
}
fetchEdges();
}
Status FetchEdgesExecutor::setupEdgeKeys() {
Status status = Status::OK();
hash_ = [] (const storage::cpp2::EdgeKey &key) -> size_t {
return std::hash<VertexID>()(key.src)
^ std::hash<VertexID>()(key.dst)
^ std::hash<EdgeRanking>()(key.ranking);
};
if (sentence_->isRef()) {
status = setupEdgeKeysFromRef();
} else {
status = setupEdgeKeysFromExpr();
}
VLOG(3) << "EdgeKey length: " << edgeKeys_.size();
return status;
}
Status FetchEdgesExecutor::setupEdgeKeysFromRef() {
const InterimResult *inputs;
if (sentence_->ref()->isInputExpr()) {
inputs = inputs_.get();
if (inputs == nullptr) {
            // we have empty inputs from the pipe.
return Status::OK();
}
} else {
inputs = ectx()->variableHolder()->get(varname_);
if (inputs == nullptr) {
return Status::Error("Variable `%s' not defined", varname_.c_str());
}
}
auto ret = inputs->getVIDs(*srcid_);
if (!ret.ok()) {
return ret.status();
}
auto srcVids = std::move(ret).value();
ret = inputs->getVIDs(*dstid_);
if (!ret.ok()) {
return ret.status();
}
auto dstVids = std::move(ret).value();
std::vector<EdgeRanking> ranks;
if (rank_ != nullptr) {
ret = inputs->getVIDs(*rank_);
if (!ret.ok()) {
return ret.status();
}
ranks = std::move(ret).value();
}
std::unique_ptr<EdgeKeyHashSet> uniq;
if (distinct_) {
uniq = std::make_unique<EdgeKeyHashSet>(256, hash_);
}
for (decltype(srcVids.size()) index = 0u; index < srcVids.size(); ++index) {
storage::cpp2::EdgeKey key;
key.set_src(srcVids[index]);
key.set_edge_type(edgeType_);
key.set_dst(dstVids[index]);
key.set_ranking(rank_ == nullptr ? 0 : ranks[index]);
if (distinct_) {
auto result = uniq->emplace(key);
if (result.second) {
edgeKeys_.emplace_back(std::move(key));
}
} else {
edgeKeys_.emplace_back(std::move(key));
}
}
return Status::OK();
}
Status FetchEdgesExecutor::setupEdgeKeysFromExpr() {
Status status = Status::OK();
std::unique_ptr<EdgeKeyHashSet> uniq;
if (distinct_) {
uniq = std::make_unique<EdgeKeyHashSet>(256, hash_);
}
auto edgeKeyExprs = sentence_->keys()->keys();
for (auto *keyExpr : edgeKeyExprs) {
auto *srcExpr = keyExpr->srcid();
auto *dstExpr = keyExpr->dstid();
auto rank = keyExpr->rank();
status = srcExpr->prepare();
if (!status.ok()) {
break;
}
status = dstExpr->prepare();
if (!status.ok()) {
break;
}
auto value = srcExpr->eval();
if (!value.ok()) {
return value.status();
}
auto srcid = value.value();
value = dstExpr->eval();
if (!value.ok()) {
return value.status();
}
auto dstid = value.value();
if (!Expression::isInt(srcid) || !Expression::isInt(dstid)) {
status = Status::Error("ID should be of type integer.");
break;
}
storage::cpp2::EdgeKey key;
key.set_src(Expression::asInt(srcid));
key.set_edge_type(edgeType_);
key.set_dst(Expression::asInt(dstid));
key.set_ranking(rank);
if (distinct_) {
auto ret = uniq->emplace(key);
if (ret.second) {
edgeKeys_.emplace_back(std::move(key));
}
} else {
edgeKeys_.emplace_back(std::move(key));
}
}
return status;
}
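// Illustrative example (hypothetical statement): for
//   FETCH PROP ON like 1->2@0, 1->3@5
// setupEdgeKeysFromExpr() builds the EdgeKeys {src:1, dst:2, rank:0} and
// {src:1, dst:3, rank:5}, each tagged with the edge type resolved for "like"
// in prepareClauses().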
void FetchEdgesExecutor::fetchEdges() {
std::vector<storage::cpp2::PropDef> props;
auto status = getPropNames(props);
if (!status.ok()) {
DCHECK(onError_);
onError_(status);
return;
}
if (props.empty()) {
DCHECK(onError_);
onError_(Status::Error("No props declared."));
return;
}
auto future = ectx()->storage()->getEdgeProps(spaceId_, edgeKeys_, std::move(props));
auto *runner = ectx()->rctx()->runner();
auto cb = [this] (RpcResponse &&result) mutable {
auto completeness = result.completeness();
if (completeness == 0) {
DCHECK(onError_);
onError_(Status::Error("Get props failed"));
return;
} else if (completeness != 100) {
LOG(INFO) << "Get edges partially failed: " << completeness << "%";
for (auto &error : result.failedParts()) {
LOG(ERROR) << "part: " << error.first
<< "error code: " << static_cast<int>(error.second);
}
}
processResult(std::move(result));
return;
};
auto error = [this] (auto &&e) {
LOG(ERROR) << "Exception caught: " << e.what();
onError_(Status::Error("Internal error"));
};
std::move(future).via(runner).thenValue(cb).thenError(error);
}
Status FetchEdgesExecutor::getPropNames(std::vector<storage::cpp2::PropDef> &props) {
for (auto &prop : expCtx_->aliasProps()) {
storage::cpp2::PropDef pd;
pd.owner = storage::cpp2::PropOwner::EDGE;
pd.name = prop.second;
auto status = ectx()->schemaManager()->toEdgeType(spaceId_, prop.first);
if (!status.ok()) {
return Status::Error("No schema found for '%s'", prop.first.c_str());
}
auto edgeType = status.value();
pd.id.set_edge_type(edgeType);
props.emplace_back(std::move(pd));
}
return Status::OK();
}
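// Illustrative example (hypothetical statement): for
//   FETCH PROP ON like 1->2 YIELD like.likeness
// the expression context holds the alias property ("like", "likeness"), so
// getPropNames() emits a single EDGE PropDef named "likeness" carrying the
// edge type looked up for "like".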
void FetchEdgesExecutor::processResult(RpcResponse &&result) {
auto all = result.responses();
std::shared_ptr<SchemaWriter> outputSchema;
std::unique_ptr<RowSetWriter> rsWriter;
auto uniqResult = std::make_unique<std::unordered_set<std::string>>();
for (auto &resp : all) {
if (!resp.__isset.schema || !resp.__isset.data
|| resp.get_schema() == nullptr || resp.get_data() == nullptr
|| resp.data.empty()) {
continue;
}
auto eschema = std::make_shared<ResultSchemaProvider>(*(resp.get_schema()));
RowSetReader rsReader(eschema, *(resp.get_data()));
auto iter = rsReader.begin();
if (outputSchema == nullptr) {
outputSchema = std::make_shared<SchemaWriter>();
auto status = getOutputSchema(eschema.get(), &*iter, outputSchema.get());
if (!status.ok()) {
LOG(ERROR) << "Get getOutputSchema failed" << status;
DCHECK(onError_);
onError_(std::move(status));
return;
}
rsWriter = std::make_unique<RowSetWriter>(outputSchema);
}
while (iter) {
VLOG(3) << "collect.";
auto collector = std::make_unique<Collector>(eschema.get());
auto writer = std::make_unique<RowWriter>(outputSchema);
auto &getters = expCtx_->getters();
getters.getAliasProp = [&](const std::string &,
const std::string &prop) -> OptVariantType {
return collector->getProp(prop, &*iter);
};
for (auto *column : yields_) {
auto *expr = column->expr();
auto value = expr->eval();
if (!value.ok()) {
onError_(value.status());
return;
}
collector->collect(value.value(), writer.get());
}
// TODO Consider float/double, and need to reduce mem copy.
std::string encode = writer->encode();
if (distinct_) {
auto ret = uniqResult->emplace(encode);
if (ret.second) {
rsWriter->addRow(std::move(encode));
}
} else {
rsWriter->addRow(std::move(encode));
}
++iter;
} // while `iter'
} // for `resp'
finishExecution(std::move(rsWriter));
}
} // namespace graph
} // namespace nebula
| 1 | 21,447 | after ||, no need inputs != nullptr | vesoft-inc-nebula | cpp |
@@ -159,7 +159,14 @@ func (s *Source) Owner() (string, error) {
// is deloying to and the containerized applications that will be deployed.
type PipelineStage struct {
*AssociatedEnvironment
- LocalApplications []string
+ LocalApplications []AppInStage
+}
+
+// AppInStage represents configurations for an app in a pipeline stage
+type AppInStage struct {
+ Name string
+ // the path to the integration test buildspec for this action
+ IntegTestBuildspecPath string
}
// AppTemplatePath returns the full path to the application CFN template | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package deploy holds the structures to deploy infrastructure resources.
// This file defines pipeline deployment resources.
package deploy
import (
"errors"
"fmt"
"regexp"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
)
// NOTE: this is duplicated from validate.go
var githubRepoExp = regexp.MustCompile(`(https:\/\/github\.com\/|)(?P<owner>.+)\/(?P<repo>.+)`)
const (
fmtInvalidGitHubRepo = "unable to locate the repository from the properties: %+v"
)
// CreatePipelineInput represents the fields required to deploy a pipeline.
type CreatePipelineInput struct {
// Name of the project this pipeline belongs to
ProjectName string
// Name of the pipeline
Name string
// The source code provider for this pipeline
Source *Source
// The stages of the pipeline. The order of stages in this list
// will be the order we deploy to
Stages []PipelineStage
// A list of artifact buckets and corresponding KMS keys that will
// be used in this pipeline.
ArtifactBuckets []ArtifactBucket
}
// ArtifactBucket represents an S3 bucket used by the CodePipeline to store
// intermediate artifacts produced by the pipeline.
type ArtifactBucket struct {
// The name of the S3 bucket.
BucketName string
// The ARN of the KMS key used to en/decrypt artifacts stored in this bucket.
KeyArn string
}
// Region parses out the region from the ARN of the KMS key associated with
// the artifact bucket.
func (a *ArtifactBucket) Region() (string, error) {
// We assume the bucket and the key are in the same AWS region.
parsedArn, err := arn.Parse(a.KeyArn)
if err != nil {
return "", fmt.Errorf("failed to parse region out of key ARN: %s, error: %w",
a.BucketName, err)
}
return parsedArn.Region, nil
}
// Source defines the source of the artifacts to be built and deployed.
type Source struct {
// The name of the source code provider. For example, "GitHub"
ProviderName string
// Contains provider-specific configurations, such as:
// "repository": "aws/amazon-ecs-cli-v2"
// "githubPersonalAccessTokenSecretId": "heyyo"
Properties map[string]interface{}
}
// GitHubPersonalAccessTokenSecretID returns the ID of the secret in the
// Secrets manager, which stores the GitHub Personal Access token if the
// provider is "GitHub". Otherwise, it returns an error.
func (s *Source) GitHubPersonalAccessTokenSecretID() (string, error) {
// TODO type check if properties are GitHubProperties?
secretID, exists := s.Properties[manifest.GithubSecretIdKeyName]
if !exists {
return "", errors.New("the GitHub token secretID is not configured")
}
id, ok := secretID.(string)
if !ok {
return "", fmt.Errorf("unable to locate the GitHub token secretID from %v", secretID)
}
if s.ProviderName != manifest.GithubProviderName {
return "", fmt.Errorf("failed attempt to retrieve GitHub token from a non-GitHub provider")
}
return id, nil
}
type ownerAndRepo struct {
owner string
repo string
}
func (s *Source) parseOwnerAndRepo() (*ownerAndRepo, error) {
if s.ProviderName != manifest.GithubProviderName {
return nil, fmt.Errorf("invalid provider: %s", s.ProviderName)
}
ownerAndRepoI, exists := s.Properties["repository"]
if !exists {
return nil, fmt.Errorf("unable to locate the repository from the properties: %+v", s.Properties)
}
ownerAndRepoStr, ok := ownerAndRepoI.(string)
if !ok {
return nil, fmt.Errorf(fmtInvalidGitHubRepo, ownerAndRepoI)
}
match := githubRepoExp.FindStringSubmatch(ownerAndRepoStr)
if len(match) == 0 {
return nil, fmt.Errorf(fmtInvalidGitHubRepo, ownerAndRepoStr)
}
matches := make(map[string]string)
for i, name := range githubRepoExp.SubexpNames() {
if i != 0 && name != "" {
matches[name] = match[i]
}
}
return &ownerAndRepo{
owner: matches["owner"],
repo: matches["repo"],
}, nil
}
// Repository returns the repository portion. For example,
// given "aws/amazon-ecs-cli-v2", this function returns "amazon-ecs-cli-v2"
func (s *Source) Repository() (string, error) {
oAndR, err := s.parseOwnerAndRepo()
if err != nil {
return "", err
}
return oAndR.repo, nil
}
// Owner returns the repository owner portion. For example,
// given "aws/amazon-ecs-cli-v2", this function returns "aws"
func (s *Source) Owner() (string, error) {
oAndR, err := s.parseOwnerAndRepo()
if err != nil {
return "", err
}
return oAndR.owner, nil
}
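// Illustrative sketch (hypothetical values, not part of the original file): a
// GitHub source could be configured as
//
//	src := &Source{
//		ProviderName: "GitHub",
//		Properties: map[string]interface{}{
//			"repository":                        "https://github.com/aws/amazon-ecs-cli-v2",
//			"githubPersonalAccessTokenSecretId": "github-token-secret",
//		},
//	}
//
// for which Owner() returns "aws" and Repository() returns "amazon-ecs-cli-v2".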
// PipelineStage represents configuration for each deployment stage
// of a workspace. A stage consists of the Archer Environment the pipeline
// is deploying to and the containerized applications that will be deployed.
type PipelineStage struct {
*AssociatedEnvironment
LocalApplications []string
}
// AppTemplatePath returns the full path to the application CFN template
// built during the build stage.
func (s *PipelineStage) AppTemplatePath(appName string) string {
return fmt.Sprintf(archer.AppCfnTemplateNameFormat, appName)
}
// AppTemplateConfigurationPath returns the full path to the application CFN
// template configuration file built during the build stage.
func (s *PipelineStage) AppTemplateConfigurationPath(appName string) string {
return fmt.Sprintf(archer.AppCfnTemplateConfigurationNameFormat,
appName, s.Name,
)
}
// AssociatedEnvironment defines the necessary information a pipeline stage
// needs for an Archer Environment.
type AssociatedEnvironment struct {
// Name of the environment, must be unique within a project.
// This is also the name of the pipeline stage.
Name string
// The region this environment is stored in.
Region string
// AccountID of the account this environment is stored in.
AccountID string
// Whether or not this environment is a production environment.
Prod bool
}
| 1 | 11,389 | Is it just preference or on purpose that using slice of structs instead of slice of pointers? | aws-copilot-cli | go |
@@ -90,7 +90,7 @@ export function diff(parentDom, newVNode, oldVNode, context, isSvg, excessDomChi
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
- newVNode._dom = oldDom!=null ? oldDom!==oldVNode._dom ? oldDom : oldVNode._dom : null;
+ newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) newVNode._children[tmp]._parent = newVNode; | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { Fragment } from '../create-element';
import { diffChildren, toChildArray } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {Element | Text} oldDom The current attached DOM
 * element that any new DOM elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} isHydrating Whether or not we are in hydration
*/
export function diff(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, force, oldDom, isHydrating) {
let tmp, newType = newVNode.type;
// When passing through createElement it assigns the object
	// constructor as undefined. This is to prevent JSON-injection.
if (newVNode.constructor !== undefined) return null;
if (tmp = options._diff) tmp(newVNode);
try {
outer: if (typeof newType==='function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp ? (provider ? provider.props.value : tmp._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
}
else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState==null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps!=null) {
assign(c._nextState==c.state ? (c._nextState = assign({}, c._nextState)) : c._nextState, newType.getDerivedStateFromProps(newProps, c._nextState));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newProps, cctx);
}
if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newProps, c._nextState, cctx)===false) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldDom!=null ? oldDom!==oldVNode._dom ? oldDom : oldVNode._dom : null;
newVNode._children = oldVNode._children;
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) newVNode._children[tmp]._parent = newVNode;
}
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
}
oldProps = c.props;
oldState = c.state;
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if (tmp = options._render) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment = tmp != null && tmp.type == Fragment && tmp.key == null;
newVNode._children = toChildArray(isTopLevelFragment ? tmp.props.children : tmp);
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, oldDom, isHydrating);
c.base = newVNode._dom;
while (tmp=c._renderCallbacks.pop()) {
if (c._nextState) { c.state = c._nextState; }
tmp.call(c);
}
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, snapshot);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
}
else {
newVNode._dom = diffElementNodes(oldVNode._dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating);
}
if (tmp = options.diffed) tmp(newVNode);
}
catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
options._catchError(e, c._vnode);
}
}
if (options._commit) options._commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
if (newVNode.type===null) {
return document.createTextNode(newProps);
}
dom = isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type===null) {
if (excessDomChildren!=null) excessDomChildren[excessDomChildren.indexOf(dom)] = null;
if (oldProps !== newProps) {
dom.data = newProps;
}
}
else if (newVNode!==oldVNode) {
if (excessDomChildren!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not change between re-renders
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
newVNode._children = newVNode.props.children;
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, EMPTY_OBJ, isHydrating);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (('value' in newProps) && newProps.value!==undefined && newProps.value !== dom.value) dom.value = newProps.value==null ? '' : newProps.value;
if (('checked' in newProps) && newProps.checked!==undefined && newProps.checked !== dom.checked) dom.checked = newProps.checked;
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
export function applyRef(ref, value, vnode) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
options._catchError(e, vnode);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom)!=null;
}
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom!=null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').VNode} vnode The vnode that threw
* the error that was caught (except for unmounting when this parameter
* is the highest parent that was being unmounted)
* @param {import('../internal').VNode} oldVNode The oldVNode of the vnode
* that threw, if this VNode threw while diffing
*/
(options)._catchError = function (error, vnode, oldVNode) {
/** @type {import('../internal').Component} */
let component;
for (; vnode = vnode._parent;) {
if ((component = vnode._component) && !component._processingException) {
try {
if (component.constructor && component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._pendingError = component);
}
catch (e) {
error = e;
}
}
}
throw error;
};
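For context on the _catchError walk documented just above: it climbs _parent links and stops at the first ancestor component that defines getDerivedStateFromError or componentDidCatch. Below is a minimal sketch of such an error boundary against Preact's public class API; the component name and fallback text are illustrative only, not part of this file.

import { Component } from 'preact';

class ErrorBoundary extends Component {
	// Invoked via the _catchError walk when a descendant throws while diffing.
	static getDerivedStateFromError(error) {
		return { error };
	}
	render(props, state) {
		return state.error ? 'Something went wrong.' : props.children;
	}
}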
| 1 | 14,293 | This line always confused me anyway. Good catch! | preactjs-preact | js |
@@ -130,6 +130,6 @@ func (j journalBlockServer) IsUnflushed(ctx context.Context, tlfID tlf.ID,
}
func (j journalBlockServer) Shutdown() {
- j.jServer.shutdown()
+ j.jServer.shutdown(context.Background())
j.BlockServer.Shutdown()
} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type journalBlockServer struct {
jServer *JournalServer
BlockServer
enableAddBlockReference bool
}
var _ BlockServer = journalBlockServer{}
func (j journalBlockServer) Get(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context) (
data []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) {
if tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {
defer func() {
err = translateToBlockServerError(err)
}()
data, serverHalf, err := tlfJournal.getBlockData(id)
switch errors.Cause(err).(type) {
case nil:
return data, serverHalf, nil
case blockNonExistentError:
break
case errTLFJournalDisabled:
break
default:
return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
}
}
return j.BlockServer.Get(ctx, tlfID, id, context)
}
func (j journalBlockServer) Put(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, context kbfsblock.Context,
buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) {
if tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {
defer func() {
err = translateToBlockServerError(err)
}()
err := tlfJournal.putBlockData(ctx, id, context, buf, serverHalf)
switch errors.Cause(err).(type) {
case nil:
return nil
case errTLFJournalDisabled:
break
default:
return err
}
}
return j.BlockServer.Put(ctx, tlfID, id, context, buf, serverHalf)
}
func (j journalBlockServer) AddBlockReference(
ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
context kbfsblock.Context) (err error) {
if tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {
if !j.enableAddBlockReference {
// TODO: Temporarily return an error until KBFS-1149 is
// fixed. This is needed despite
// journalBlockCache.CheckForBlockPtr, since
// CheckForBlockPtr may be called before journaling is
// turned on for a TLF.
return kbfsblock.BServerErrorBlockNonExistent{}
}
defer func() {
err = translateToBlockServerError(err)
}()
err := tlfJournal.addBlockReference(ctx, id, context)
switch errors.Cause(err).(type) {
case nil:
return nil
case errTLFJournalDisabled:
break
default:
return err
}
}
return j.BlockServer.AddBlockReference(ctx, tlfID, id, context)
}
func (j journalBlockServer) RemoveBlockReferences(
ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) (
liveCounts map[kbfsblock.ID]int, err error) {
// Deletes always go straight to the server, since they slow down
// the journal and already only happen in the background anyway.
// Note that this means delete operations must be issued after the
// corresponding MD that unreferenced the block was flushed from
// the journal.
return j.BlockServer.RemoveBlockReferences(ctx, tlfID, contexts)
}
func (j journalBlockServer) ArchiveBlockReferences(
ctx context.Context, tlfID tlf.ID,
contexts kbfsblock.ContextMap) (err error) {
// Archives always go straight to the server, since they slow down
// the journal and already only happen in the background anyway.
// Note that this means delete operations must be issued after the
// corresponding MD that unreferenced the block was flushed from
// the journal.
return j.BlockServer.ArchiveBlockReferences(ctx, tlfID, contexts)
}
func (j journalBlockServer) IsUnflushed(ctx context.Context, tlfID tlf.ID,
id kbfsblock.ID) (isLocal bool, err error) {
if tlfJournal, ok := j.jServer.getTLFJournal(tlfID); ok {
defer func() {
err = translateToBlockServerError(err)
}()
return tlfJournal.isBlockUnflushed(id)
}
return j.BlockServer.IsUnflushed(ctx, tlfID, id)
}
func (j journalBlockServer) Shutdown() {
j.jServer.shutdown()
j.BlockServer.Shutdown()
}
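The patch at the top of this entry threads context.Background() into jServer.shutdown, and the review below asks whether BlockServer.Shutdown() should take a ctx as well. A rough sketch of what that could look like, illustrative only and not the actual KBFS interface:

// Hypothetical context-aware shutdown path for journalBlockServer.
func (j journalBlockServer) ShutdownWithContext(ctx context.Context) {
	j.jServer.shutdown(ctx)
	// If BlockServer.Shutdown were extended to accept a context, this call
	// would become j.BlockServer.Shutdown(ctx).
	j.BlockServer.Shutdown()
}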
| 1 | 15,649 | Should we add `ctx` to `BlockServer.Shutdown()` for this purpose? Would be nice, but I don't care too much. | keybase-kbfs | go |
@@ -42,7 +42,7 @@ bool DeadlineQosPolicy::addToCDRMessage(CDRMessage_t* msg)
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,period.seconds);
- valid &= CDRMessage::addUInt32(msg,period.fraction);
+ valid &= CDRMessage::addUInt32(msg,period.nanosec);
return valid;
}
| 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file ParameterTypes.cpp
*
*/
#include <fastrtps/qos/QosPolicies.h>
#include <fastrtps/rtps/messages/CDRMessage.h>
#include <fastrtps/log/Log.h>
#include <fastcdr/Cdr.h>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
bool DurabilityQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
return valid;
}
bool DeadlineQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,period.seconds);
valid &= CDRMessage::addUInt32(msg,period.fraction);
return valid;
}
bool LatencyBudgetQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,duration.seconds);
valid &= CDRMessage::addUInt32(msg,duration.fraction);
return valid;
}
bool LivelinessQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addInt32(msg,lease_duration.seconds);
valid &= CDRMessage::addUInt32(msg,lease_duration.fraction);
return valid;
}
bool OwnershipQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
return valid;
}
bool ReliabilityQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addInt32(msg,max_blocking_time.seconds);
valid &= CDRMessage::addUInt32(msg,max_blocking_time.fraction);
return valid;
}
bool DestinationOrderQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
return valid;
}
bool TimeBasedFilterQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,minimum_separation.seconds);
valid &= CDRMessage::addUInt32(msg,minimum_separation.fraction);
return valid;
}
bool PresentationQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, PARAMETER_PRESENTATION_LENGTH);//this->length);
valid &= CDRMessage::addOctet(msg,access_scope);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,(octet)coherent_access);
valid &= CDRMessage::addOctet(msg,(octet)ordered_access);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
return valid;
}
bool PartitionQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
//Obtain Length:
this->length = 0;
this->length += 4;
uint16_t rest;
for(std::vector<std::string>::iterator it = names.begin();it!=names.end();++it)
{
this->length +=4;
this->length += (uint16_t)it->size()+1;
rest = ((uint16_t)it->size() +1 ) % 4;
this->length += rest != 0 ? 4 - rest : 0;
}
valid &= CDRMessage::addUInt16(msg, this->length);
valid &= CDRMessage::addUInt32(msg,(uint32_t)this->names.size());
for(std::vector<std::string>::iterator it = names.begin();it!=names.end();++it)
valid &= CDRMessage::addString(msg,*it);
//valid &= CDRMessage::addOctetVector(msg,&name);
return valid;
}
bool UserDataQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
uint32_t align = (4 - (msg->pos + 6 + dataVec.size()) % 4) & 3; //align
this->length = (uint16_t)(4 + this->dataVec.size() + align);
valid &= CDRMessage::addUInt16(msg, this->length);
valid &= CDRMessage::addUInt32(msg, (uint32_t)this->dataVec.size());
valid &= CDRMessage::addData(msg,this->dataVec.data(),(uint32_t)this->dataVec.size());
for(uint32_t count = 0; count < align; ++count)
{
valid &= CDRMessage::addOctet(msg, 0);
}
return valid;
}
bool TopicDataQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctetVector(msg,&value);
return valid;
}
bool GroupDataQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctetVector(msg,&value);
return valid;
}
bool HistoryQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addOctet(msg,kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addInt32(msg,depth);
return valid;
}
bool DurabilityServiceQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,service_cleanup_delay.seconds);
valid &= CDRMessage::addUInt32(msg,service_cleanup_delay.fraction);
valid &= CDRMessage::addOctet(msg,history_kind);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addOctet(msg,0);
valid &= CDRMessage::addInt32(msg,history_depth);
valid &= CDRMessage::addInt32(msg,max_samples);
valid &= CDRMessage::addInt32(msg,max_instances);
valid &= CDRMessage::addInt32(msg,max_samples_per_instance);
return valid;
}
bool LifespanQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,duration.seconds);
valid &= CDRMessage::addUInt32(msg,duration.fraction);
return valid;
}
bool OwnershipStrengthQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addUInt32(msg,value);
return valid;
}
bool ResourceLimitsQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addInt32(msg,max_samples);
valid &= CDRMessage::addInt32(msg,max_instances);
valid &= CDRMessage::addInt32(msg,max_samples_per_instance);
return valid;
}
bool TransportPriorityQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);//this->length);
valid &= CDRMessage::addUInt32(msg, value);
return valid;
}
bool DataRepresentationQosPolicy::addToCDRMessage(CDRMessage_t* msg) {
bool valid = CDRMessage::addUInt32(msg, (uint32_t)m_value.size());
for (std::vector<DataRepresentationId_t>::iterator it = m_value.begin(); it != m_value.end(); ++it)
valid &= CDRMessage::addUInt16(msg, *it);
return valid;
}
bool TypeConsistencyEnforcementQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt32(msg, this->m_kind);
valid &= CDRMessage::addOctet(msg, (octet)m_ignore_sequence_bounds);
valid &= CDRMessage::addOctet(msg, (octet)m_ignore_string_bounds);
valid &= CDRMessage::addOctet(msg, (octet)m_ignore_member_names);
valid &= CDRMessage::addOctet(msg, (octet)m_prevent_type_widening);
valid &= CDRMessage::addOctet(msg, (octet)m_force_type_validation);
return valid;
}
bool DisablePositiveACKsQosPolicy::addToCDRMessage(CDRMessage_t* msg)
{
bool valid = CDRMessage::addUInt16(msg, this->Pid);
valid &= CDRMessage::addUInt16(msg, this->length);
valid &= CDRMessage::addOctet(msg, (octet)enabled);
return valid;
}
bool TypeIdV1::addToCDRMessage(CDRMessage_t* msg)
{
size_t size = TypeIdentifier::getCdrSerializedSize(m_type_identifier) + 4;
SerializedPayload_t payload(static_cast<uint32_t>(size));
eprosima::fastcdr::FastBuffer fastbuffer((char*) payload.data, payload.max_size);
eprosima::fastcdr::Cdr ser(fastbuffer, eprosima::fastcdr::Cdr::DEFAULT_ENDIAN,
eprosima::fastcdr::Cdr::DDS_CDR); // Object that serializes the data.
payload.encapsulation = ser.endianness() == eprosima::fastcdr::Cdr::BIG_ENDIANNESS ? CDR_BE : CDR_LE;
ser.serialize_encapsulation();
m_type_identifier.serialize(ser);
payload.length = (uint32_t)ser.getSerializedDataLength(); //Get the serialized length
bool valid = CDRMessage::addUInt16(msg, this->Pid);
this->length = static_cast<uint16_t>(payload.length);
valid &= CDRMessage::addUInt16(msg, this->length);
return valid & CDRMessage::addData(msg, payload.data, payload.length);
}
bool TypeIdV1::readFromCDRMessage(CDRMessage_t* msg, uint32_t size)
{
SerializedPayload_t payload(size);
eprosima::fastcdr::FastBuffer fastbuffer((char*)payload.data, size);
CDRMessage::readData(msg, payload.data, size); // Object that manages the raw buffer.
eprosima::fastcdr::Cdr deser(fastbuffer, eprosima::fastcdr::Cdr::DEFAULT_ENDIAN,
eprosima::fastcdr::Cdr::DDS_CDR); // Object that deserializes the data.
// Deserialize encapsulation.
deser.read_encapsulation();
payload.encapsulation = deser.endianness() == eprosima::fastcdr::Cdr::BIG_ENDIANNESS ? CDR_BE : CDR_LE;
try
{
m_type_identifier.deserialize(deser);
}
catch(eprosima::fastcdr::exception::NotEnoughMemoryException& /*exception*/)
{
return false;
}
return true;
}
bool TypeObjectV1::addToCDRMessage(CDRMessage_t* msg)
{
size_t size = TypeObject::getCdrSerializedSize(m_type_object) + 4;
SerializedPayload_t payload(static_cast<uint32_t>(size));
eprosima::fastcdr::FastBuffer fastbuffer((char*) payload.data, payload.max_size);
eprosima::fastcdr::Cdr ser(fastbuffer, eprosima::fastcdr::Cdr::DEFAULT_ENDIAN,
eprosima::fastcdr::Cdr::DDS_CDR); // Object that serializes the data.
payload.encapsulation = ser.endianness() == eprosima::fastcdr::Cdr::BIG_ENDIANNESS ? CDR_BE : CDR_LE;
ser.serialize_encapsulation();
m_type_object.serialize(ser);
payload.length = (uint32_t)ser.getSerializedDataLength(); //Get the serialized length
bool valid = CDRMessage::addUInt16(msg, this->Pid);
this->length = static_cast<uint16_t>(payload.length);
valid &= CDRMessage::addUInt16(msg, this->length);
return valid & CDRMessage::addData(msg, payload.data, payload.length);
}
bool TypeObjectV1::readFromCDRMessage(CDRMessage_t* msg, uint32_t size)
{
SerializedPayload_t payload(size);
eprosima::fastcdr::FastBuffer fastbuffer((char*)payload.data, size);
CDRMessage::readData(msg, payload.data, size); // Object that manages the raw buffer.
eprosima::fastcdr::Cdr deser(fastbuffer, eprosima::fastcdr::Cdr::DEFAULT_ENDIAN,
eprosima::fastcdr::Cdr::DDS_CDR); // Object that deserializes the data.
// Deserialize encapsulation.
deser.read_encapsulation();
payload.encapsulation = deser.endianness() == eprosima::fastcdr::Cdr::BIG_ENDIANNESS ? CDR_BE : CDR_LE;
try
{
m_type_object.deserialize(deser);
}
catch(eprosima::fastcdr::exception::NotEnoughMemoryException& /*exception*/)
{
return false;
}
return true;
}
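The review below notes that Duration_t is serialized at the RTPS level using fractions (an NTP-style field in units of 1/2^32 of a second), so a raw nanosecond count cannot be written directly as the patch above does. A conversion sketch, assuming a 32-bit nanosec field; the helper name is illustrative:

#include <cstdint>

// fraction = nanosec * 2^32 / 10^9, computed in 64 bits to avoid overflow.
static inline uint32_t nanosec_to_fraction(uint32_t nanosec)
{
    return static_cast<uint32_t>((static_cast<uint64_t>(nanosec) << 32) / 1000000000ull);
}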
| 1 | 14,289 | Duration_t at RTPS level must be serialized using fractions. | eProsima-Fast-DDS | cpp |
@@ -28,6 +28,14 @@ import (
"go.uber.org/yarpc/yarpcerrors"
)
+var msgInboundDispatcherNotRunning = "peer for service %q is not running"
+
+// NotRunningInboundError builds a YARPC error with code
+// yarpcerrors.CodeUnavailable when the dispatcher is not running.
+func NotRunningInboundError(service string) error {
+ return yarpcerrors.UnavailableErrorf(msgInboundDispatcherNotRunning, service)
+}
+
// RequestBodyDecodeError builds a YARPC error with code
// yarpcerrors.CodeInvalidArgument that represents a failure to decode
// the request body. | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package errors
import (
"fmt"
"strings"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/yarpcerrors"
)
// RequestBodyDecodeError builds a YARPC error with code
// yarpcerrors.CodeInvalidArgument that represents a failure to decode
// the request body.
func RequestBodyDecodeError(req *transport.Request, err error) error {
return newServerEncodingError(req, nil, false /*isResponse*/, false /*isHeader*/, err)
}
// ResponseBodyEncodeError builds a YARPC error with code
// yarpcerrors.CodeInvalidArgument that represents a failure to encode
// the response body.
func ResponseBodyEncodeError(req *transport.Request, err error) error {
return newServerEncodingError(req, nil, true /*isResponse*/, false /*isHeader*/, err)
}
// RequestHeadersDecodeError builds a YARPC error with code
// yarpcerrors.CodeInvalidArgument that represents a failure to
// decode the request headers.
func RequestHeadersDecodeError(req *transport.Request, err error) error {
return newServerEncodingError(req, nil, false /*isResponse*/, true /*isHeader*/, err)
}
// ResponseHeadersEncodeError builds a YARPC error with code
// yarpcerrors.CodeInvalidArgument that represents a failure to
// encode the response headers.
func ResponseHeadersEncodeError(req *transport.Request, err error) error {
return newServerEncodingError(req, nil, true /*isResponse*/, true /*isHeader*/, err)
}
// ExpectEncodings verifies that the given request has one of the given
// encodings, otherwise it returns a YARPC error with code
// yarpcerrors.CodeInvalidArgument.
func ExpectEncodings(req *transport.Request, want ...transport.Encoding) error {
got := req.Encoding
for _, w := range want {
if w == got {
return nil
}
}
return newServerEncodingError(req, want, false /*isResponse*/, false /*isHeader*/, newEncodingMismatchError(want, got))
}
func newServerEncodingError(req *transport.Request, encodings []transport.Encoding, isResponse bool, isHeader bool, err error) error {
if len(encodings) == 0 {
encodings = []transport.Encoding{req.Encoding}
}
parts := []string{"failed to"}
if isResponse {
switch len(encodings) {
case 1:
parts = append(parts, fmt.Sprintf("encode %q response", string(encodings[0])))
default:
parts = append(parts, fmt.Sprintf("encode %v response", encodings))
}
} else {
switch len(encodings) {
case 1:
parts = append(parts, fmt.Sprintf("decode %q request", string(encodings[0])))
default:
parts = append(parts, fmt.Sprintf("decode %v request", encodings))
}
}
if isHeader {
parts = append(parts, "headers")
} else {
parts = append(parts, "body")
}
parts = append(parts,
fmt.Sprintf("for procedure %q of service %q from caller %q: %v",
req.Procedure, req.Service, req.Caller, err))
return yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, strings.Join(parts, " "))
}
func newEncodingMismatchError(want []transport.Encoding, got transport.Encoding) error {
switch len(want) {
case 1:
return fmt.Errorf("expected encoding %q but got %q", want[0], got)
default:
return fmt.Errorf("expected one of encodings %v but got %q", want, got)
}
}
| 1 | 15,874 | "peer" has its own meaning within YARPC with its own class of objects. This should probably be "dispatcher" too? | yarpc-yarpc-go | go |
@@ -172,9 +172,18 @@ class ClangSA(analyzer_base.SourceAnalyzer):
'-Xclang', checker_name])
if config.ctu_dir and not self.__disable_ctu:
+ # ctu-clang5 compatibility
analyzer_cmd.extend(['-Xclang', '-analyzer-config',
'-Xclang',
- 'xtu-dir=' + self.get_xtu_dir()])
+ 'xtu-dir=' + self.get_ctu_dir()])
+ # ctu-clang6 compatibility (5.0 and 6.0 options work together)
+ analyzer_cmd.extend(['-Xclang', '-analyzer-config',
+ '-Xclang',
+ 'experimental-enable-naive-ctu-analysis'
+ '=true',
+ '-Xclang', '-analyzer-config',
+ '-Xclang',
+ 'ctu-dir=' + self.get_ctu_dir()])
if config.ctu_has_analyzer_display_ctu_progress:
analyzer_cmd.extend(['-Xclang',
'-analyzer-display-ctu-progress']) | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
import os
import re
import shlex
import subprocess
from libcodechecker.analyze.analyzers import analyzer_base
from libcodechecker.analyze.analyzers import ctu_triple_arch
from libcodechecker.analyze import analyzer_env
from libcodechecker.logger import get_logger
from libcodechecker.util import get_binary_in_path
from libcodechecker.analyze.analyzer_env import\
extend_analyzer_cmd_with_resource_dir
LOG = get_logger('analyzer')
def parse_checkers(clangsa_output):
"""
Parse clang static analyzer checkers list output.
Return a list of (checker name, description) tuples.
"""
# Checker name and description in one line.
pattern = re.compile(
r'^\s\s(?P<checker_name>\S*)\s*(?P<description>.*)')
checkers_list = []
checker_name = None
for line in clangsa_output.splitlines():
if re.match(r'^CHECKERS:', line) or line == '':
continue
elif checker_name and not re.match(r'^\s\s\S', line):
# Collect description for the checker name.
checkers_list.append((checker_name, line.strip()))
checker_name = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
# Only checker name is in the line.
checker_name = line.strip()
else:
# Checker name and description is in one line.
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
checkers_list.append((current['checker_name'],
current['description']))
return checkers_list
class ClangSA(analyzer_base.SourceAnalyzer):
"""
Constructs clang static analyzer commands.
"""
def __init__(self, config_handler, buildaction):
self.__disable_ctu = False
self.__checker_configs = []
super(ClangSA, self).__init__(config_handler, buildaction)
def is_ctu_available(self):
"""
Check if ctu is available for the analyzer.
If the ctu_dir is set in the config, the analyzer is capable to
run ctu analysis.
"""
config = self.config_handler
if config.ctu_dir:
return True
return False
def is_ctu_enabled(self):
"""
Check if ctu is enabled for the analyzer.
"""
return not self.__disable_ctu
def disable_ctu(self):
"""
Disable ctu even if ctu is available.
By default it is enabled if available.
"""
self.__disable_ctu = True
def enable_ctu(self):
self.__disable_ctu = False
def add_checker_config(self, checker_cfg):
"""
Add configuration options to specific checkers.
checker_cfg should be a list of arguments in case of
Clang Static Analyzer like this:
['-Xclang', '-analyzer-config', '-Xclang', 'checker_option=some_value']
"""
self.__checker_configs.append(checker_cfg)
@staticmethod
def get_analyzer_checkers(config_handler, env):
"""
Return the list of the supported checkers.
"""
analyzer_binary = config_handler.analyzer_binary
command = [analyzer_binary, "-cc1"]
for plugin in config_handler.analyzer_plugins:
command.extend(["-load", plugin])
command.append("-analyzer-checker-help")
try:
command = shlex.split(' '.join(command))
result = subprocess.check_output(command,
env=env)
return parse_checkers(result)
except (subprocess.CalledProcessError, OSError):
return {}
def construct_analyzer_cmd(self, result_handler):
"""
Called by the analyzer method.
Construct the analyzer command.
"""
try:
# Get an output file from the result handler.
analyzer_output_file = result_handler.analyzer_result_file
# Get the checkers list from the config_handler.
# Checker order matters.
config = self.config_handler
analyzer_cmd = [config.analyzer_binary]
# Do not warn about the unused gcc/g++ arguments.
analyzer_cmd.append('-Qunused-arguments')
analyzer_cmd.append('--analyze')
# Turn off clang hardcoded checkers list.
analyzer_cmd.append('--analyzer-no-default-checks')
for plugin in config.analyzer_plugins:
analyzer_cmd.extend(["-Xclang", "-plugin",
"-Xclang", "checkercfg",
"-Xclang", "-load",
"-Xclang", plugin])
analyzer_mode = 'plist-multi-file'
analyzer_cmd.extend(['-Xclang',
'-analyzer-opt-analyze-headers',
'-Xclang',
'-analyzer-output=' + analyzer_mode])
analyzer_cmd.extend(['-o', analyzer_output_file])
# Checker configuration arguments needs to be set before
# the checkers.
if self.__checker_configs:
for cfg in self.__checker_configs:
analyzer_cmd.extend(cfg)
# Config handler stores which checkers are enabled or disabled.
for checker_name, value in config.checks().items():
enabled, _ = value
if enabled:
analyzer_cmd.extend(['-Xclang',
'-analyzer-checker=' + checker_name])
else:
analyzer_cmd.extend(['-Xclang',
'-analyzer-disable-checker',
'-Xclang', checker_name])
if config.ctu_dir and not self.__disable_ctu:
analyzer_cmd.extend(['-Xclang', '-analyzer-config',
'-Xclang',
'xtu-dir=' + self.get_xtu_dir()])
if config.ctu_has_analyzer_display_ctu_progress:
analyzer_cmd.extend(['-Xclang',
'-analyzer-display-ctu-progress'])
if config.ctu_in_memory:
analyzer_cmd.extend(['-Xclang', '-analyzer-config',
'-Xclang',
'xtu-reparse=' +
os.path.abspath(config.log_file[0])])
# Set language.
analyzer_cmd.extend(['-x', self.buildaction.lang])
if self.buildaction.target != "":
analyzer_cmd.append("--target=" + self.buildaction.target)
analyzer_cmd.append(config.analyzer_extra_arguments)
analyzer_cmd.extend(self.buildaction.analyzer_options)
extend_analyzer_cmd_with_resource_dir(analyzer_cmd,
config.compiler_resource_dir)
analyzer_cmd.extend(self.buildaction.compiler_includes)
analyzer_cmd.append(self.source_file)
return analyzer_cmd
except Exception as ex:
LOG.error(ex)
return []
def get_xtu_dir(self):
"""
Returns the path of the xtu directory (containing the triple).
"""
config = self.config_handler
env = analyzer_env.get_check_env(config.path_env_extra,
config.ld_lib_path_extra)
triple_arch = ctu_triple_arch.get_triple_arch(self.buildaction,
self.source_file,
config, env)
xtu_dir = os.path.join(config.ctu_dir, triple_arch)
return xtu_dir
def get_analyzer_mentioned_files(self, output):
"""
Parse ClangSA's output to generate a list of files that were mentioned
in the standard output or standard error.
"""
if not output:
return set()
regex_for_ctu_ast_load = re.compile(
"ANALYZE \(CTU loaded AST for source file\): (.*)")
paths = []
xtu_ast_dir = os.path.join(self.get_xtu_dir(), "ast")
for line in output.splitlines():
match = re.match(regex_for_ctu_ast_load, line)
if match:
path = match.group(1)
if xtu_ast_dir in path:
paths.append(path[len(xtu_ast_dir):])
return set(paths)
@classmethod
def resolve_missing_binary(cls, configured_binary, env):
"""
In case of the configured binary for the analyzer is not found in the
PATH, this method is used to find a callable binary.
"""
LOG.debug(configured_binary + " not found in path for ClangSA!")
if os.path.isabs(configured_binary):
# Do not autoresolve if the path is an absolute path as there
# is nothing we could auto-resolve that way.
return False
# clang, clang-5.0, clang++, clang++-5.1, ...
clang = get_binary_in_path(['clang', 'clang++'],
r'^clang(\+\+)?(-\d+(\.\d+){0,2})?$',
env)
if clang:
LOG.debug("Using '" + clang + "' for ClangSA!")
return clang
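On the review note below about the line break before '=true' in the patch: one way to keep each option string intact is to collect the CTU flags in a dedicated helper. This is a sketch only; the helper name is not part of the project:

def extend_with_ctu_options(analyzer_cmd, ctu_dir):
    """Append the clang 5.0 and 6.0 style CTU options without splitting
    any option string across lines."""
    analyzer_cmd.extend([
        '-Xclang', '-analyzer-config',
        '-Xclang', 'xtu-dir=' + ctu_dir,
        '-Xclang', '-analyzer-config',
        '-Xclang', 'experimental-enable-naive-ctu-analysis=true',
        '-Xclang', '-analyzer-config',
        '-Xclang', 'ctu-dir=' + ctu_dir,
    ])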
| 1 | 9,420 | I do not like this line break before `=true`. Maybe starting the list in the next line would help to reduce the indent? This way we could keep this string together. | Ericsson-codechecker | c |
@@ -34,18 +34,10 @@ import (
// the default value in Default var.
func init() {
- flag.StringVar(&_overwritePath, "config-path", "", "Config path")
- flag.StringVar(&_secretPath, "secret-path", "", "Secret path")
- flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path")
flag.Var(&_plugins, "plugin", "Plugin of the node")
}
var (
- // overwritePath is the path to the config file which overwrite default values
- _overwritePath string
- // secretPath is the path to the config file store secret values
- _secretPath string
- _subChainPath string
_plugins strs
_evmNetworkID uint32
loadChainID sync.Once | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"crypto/ecdsa"
"flag"
"math/big"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/iotexproject/go-p2p"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/iotex-election/committee"
"github.com/pkg/errors"
uconfig "go.uber.org/config"
"go.uber.org/zap"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/unit"
)
// IMPORTANT: to define a config, add a field or a new config type to the existing config types. In addition, provide
// the default value in Default var.
func init() {
flag.StringVar(&_overwritePath, "config-path", "", "Config path")
flag.StringVar(&_secretPath, "secret-path", "", "Secret path")
flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path")
flag.Var(&_plugins, "plugin", "Plugin of the node")
}
var (
// overwritePath is the path to the config file which overwrite default values
_overwritePath string
// secretPath is the path to the config file store secret values
_secretPath string
_subChainPath string
_plugins strs
_evmNetworkID uint32
loadChainID sync.Once
)
const (
// RollDPoSScheme means randomized delegated proof of stake
RollDPoSScheme = "ROLLDPOS"
// StandaloneScheme means that the node creates a block periodically regardless of others (if there is any)
StandaloneScheme = "STANDALONE"
// NOOPScheme means that the node does not create any blocks
NOOPScheme = "NOOP"
)
const (
// GatewayPlugin is the plugin of accepting user API requests and serving blockchain data to users
GatewayPlugin = iota
)
type strs []string
func (ss *strs) String() string {
return strings.Join(*ss, ",")
}
func (ss *strs) Set(str string) error {
*ss = append(*ss, str)
return nil
}
// Dardanelles consensus config
const (
SigP256k1 = "secp256k1"
SigP256sm2 = "p256sm2"
)
var (
// Default is the default config
Default = Config{
Plugins: make(map[int]interface{}),
SubLogs: make(map[string]log.GlobalConfig),
Network: Network{
Host: "0.0.0.0",
Port: 4689,
ExternalHost: "",
ExternalPort: 4689,
BootstrapNodes: []string{},
MasterKey: "",
RateLimit: p2p.DefaultRatelimitConfig,
ReconnectInterval: 300 * time.Second,
EnableRateLimit: true,
PrivateNetworkPSK: "",
},
Chain: Chain{
ChainDBPath: "/var/data/chain.db",
TrieDBPath: "/var/data/trie.db",
IndexDBPath: "/var/data/index.db",
BloomfilterIndexDBPath: "/var/data/bloomfilter.index.db",
CandidateIndexDBPath: "/var/data/candidate.index.db",
StakingIndexDBPath: "/var/data/staking.index.db",
ID: 1,
EVMNetworkID: 4689,
Address: "",
ProducerPrivKey: generateRandomKey(SigP256k1),
SignatureScheme: []string{SigP256k1},
EmptyGenesis: false,
GravityChainDB: db.Config{DbPath: "/var/data/poll.db", NumRetries: 10},
Committee: committee.Config{
GravityChainAPIs: []string{},
},
EnableTrielessStateDB: true,
EnableStateDBCaching: false,
EnableArchiveMode: false,
EnableAsyncIndexWrite: true,
EnableSystemLogIndexer: false,
EnableStakingProtocol: true,
EnableStakingIndexer: false,
CompressBlock: false,
AllowedBlockGasResidue: 10000,
MaxCacheSize: 0,
PollInitialCandidatesInterval: 10 * time.Second,
StateDBCacheSize: 1000,
WorkingSetCacheSize: 20,
},
ActPool: ActPool{
MaxNumActsPerPool: 32000,
MaxGasLimitPerPool: 320000000,
MaxNumActsPerAcct: 2000,
ActionExpiry: 10 * time.Minute,
MinGasPriceStr: big.NewInt(unit.Qev).String(),
BlackList: []string{},
},
Consensus: Consensus{
Scheme: StandaloneScheme,
RollDPoS: RollDPoS{
FSM: ConsensusTiming{
UnmatchedEventTTL: 3 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
AcceptBlockTTL: 4 * time.Second,
AcceptProposalEndorsementTTL: 2 * time.Second,
AcceptLockEndorsementTTL: 2 * time.Second,
CommitTTL: 2 * time.Second,
EventChanSize: 10000,
},
ToleratedOvertime: 2 * time.Second,
Delay: 5 * time.Second,
ConsensusDBPath: "/var/data/consensus.db",
},
},
DardanellesUpgrade: DardanellesUpgrade{
UnmatchedEventTTL: 2 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
AcceptBlockTTL: 2 * time.Second,
AcceptProposalEndorsementTTL: time.Second,
AcceptLockEndorsementTTL: time.Second,
CommitTTL: time.Second,
BlockInterval: 5 * time.Second,
Delay: 2 * time.Second,
},
BlockSync: BlockSync{
Interval: 30 * time.Second,
ProcessSyncRequestTTL: 10 * time.Second,
BufferSize: 200,
IntervalSize: 20,
MaxRepeat: 3,
RepeatDecayStep: 1,
},
Dispatcher: Dispatcher{
ActionChanSize: 1000,
BlockChanSize: 1000,
BlockSyncChanSize: 400,
},
API: API{
UseRDS: false,
Port: 14014,
TpsWindow: 10,
GasStation: GasStation{
SuggestBlockWindow: 20,
DefaultGas: uint64(unit.Qev),
Percentile: 60,
},
RangeQueryLimit: 1000,
},
System: System{
Active: true,
HeartbeatInterval: 10 * time.Second,
HTTPStatsPort: 8080,
HTTPAdminPort: 9009,
StartSubChainInterval: 10 * time.Second,
SystemLogDBPath: "/var/data/systemlog.db",
},
DB: db.Config{
NumRetries: 3,
MaxCacheSize: 64,
BlockStoreBatchSize: 16,
V2BlocksToSplitDB: 1000000,
Compressor: "Snappy",
CompressLegacy: false,
SplitDBSizeMB: 0,
SplitDBHeight: 900000,
HistoryStateRetention: 2000,
},
Indexer: Indexer{
RangeBloomFilterNumElements: 100000,
RangeBloomFilterSize: 1200000,
RangeBloomFilterNumHash: 8,
},
Genesis: genesis.Default,
}
// ErrInvalidCfg indicates the invalid config value
ErrInvalidCfg = errors.New("invalid config value")
// Validates is the collection config validation functions
Validates = []Validate{
ValidateRollDPoS,
ValidateArchiveMode,
ValidateDispatcher,
ValidateAPI,
ValidateActPool,
ValidateForkHeights,
}
)
// Network is the config struct for network package
type (
Network struct {
Host string `yaml:"host"`
Port int `yaml:"port"`
ExternalHost string `yaml:"externalHost"`
ExternalPort int `yaml:"externalPort"`
BootstrapNodes []string `yaml:"bootstrapNodes"`
MasterKey string `yaml:"masterKey"` // master key will be PrivateKey if not set.
// RelayType is the type of P2P network relay. By default, the value is empty, meaning disabled. Two relay types
// are supported: active, nat.
RelayType string `yaml:"relayType"`
ReconnectInterval time.Duration `yaml:"reconnectInterval"`
RateLimit p2p.RateLimitConfig `yaml:"rateLimit"`
EnableRateLimit bool `yaml:"enableRateLimit"`
PrivateNetworkPSK string `yaml:"privateNetworkPSK"`
}
// Chain is the config struct for blockchain package
Chain struct {
ChainDBPath string `yaml:"chainDBPath"`
TrieDBPath string `yaml:"trieDBPath"`
IndexDBPath string `yaml:"indexDBPath"`
BloomfilterIndexDBPath string `yaml:"bloomfilterIndexDBPath"`
CandidateIndexDBPath string `yaml:"candidateIndexDBPath"`
StakingIndexDBPath string `yaml:"stakingIndexDBPath"`
ID uint32 `yaml:"id"`
EVMNetworkID uint32 `yaml:"evmNetworkID"`
Address string `yaml:"address"`
ProducerPrivKey string `yaml:"producerPrivKey"`
SignatureScheme []string `yaml:"signatureScheme"`
EmptyGenesis bool `yaml:"emptyGenesis"`
GravityChainDB db.Config `yaml:"gravityChainDB"`
Committee committee.Config `yaml:"committee"`
EnableTrielessStateDB bool `yaml:"enableTrielessStateDB"`
// EnableStateDBCaching enables cachedStateDBOption
EnableStateDBCaching bool `yaml:"enableStateDBCaching"`
// EnableArchiveMode is only meaningful when EnableTrielessStateDB is false
EnableArchiveMode bool `yaml:"enableArchiveMode"`
// EnableAsyncIndexWrite enables writing the block actions' and receipts' index asynchronously
EnableAsyncIndexWrite bool `yaml:"enableAsyncIndexWrite"`
// deprecated
EnableSystemLogIndexer bool `yaml:"enableSystemLog"`
// EnableStakingProtocol enables staking protocol
EnableStakingProtocol bool `yaml:"enableStakingProtocol"`
// EnableStakingIndexer enables staking indexer
EnableStakingIndexer bool `yaml:"enableStakingIndexer"`
// deprecated by DB.CompressBlock
CompressBlock bool `yaml:"compressBlock"`
// AllowedBlockGasResidue is the amount of gas remained when block producer could stop processing more actions
AllowedBlockGasResidue uint64 `yaml:"allowedBlockGasResidue"`
// MaxCacheSize is the max number of blocks that will be put into an LRU cache. 0 means disabled
MaxCacheSize int `yaml:"maxCacheSize"`
// PollInitialCandidatesInterval is the config for committee init db
PollInitialCandidatesInterval time.Duration `yaml:"pollInitialCandidatesInterval"`
// StateDBCacheSize is the max size of statedb LRU cache
StateDBCacheSize int `yaml:"stateDBCacheSize"`
// WorkingSetCacheSize is the max size of workingset cache in state factory
WorkingSetCacheSize uint64 `yaml:"workingSetCacheSize"`
}
// Consensus is the config struct for consensus package
Consensus struct {
// There are three schemes that are supported
Scheme string `yaml:"scheme"`
RollDPoS RollDPoS `yaml:"rollDPoS"`
}
// BlockSync is the config struct for the BlockSync
BlockSync struct {
Interval time.Duration `yaml:"interval"` // update duration
ProcessSyncRequestTTL time.Duration `yaml:"processSyncRequestTTL"`
BufferSize uint64 `yaml:"bufferSize"`
IntervalSize uint64 `yaml:"intervalSize"`
// MaxRepeat is the maximal number of repeat of a block sync request
MaxRepeat int `yaml:"maxRepeat"`
// RepeatDecayStep is the step for repeat number decreasing by 1
RepeatDecayStep int `yaml:"repeatDecayStep"`
}
// DardanellesUpgrade is the config for dardanelles upgrade
DardanellesUpgrade struct {
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
AcceptBlockTTL time.Duration `yaml:"acceptBlockTTL"`
AcceptProposalEndorsementTTL time.Duration `yaml:"acceptProposalEndorsementTTL"`
AcceptLockEndorsementTTL time.Duration `yaml:"acceptLockEndorsementTTL"`
CommitTTL time.Duration `yaml:"commitTTL"`
BlockInterval time.Duration `yaml:"blockInterval"`
Delay time.Duration `yaml:"delay"`
}
// RollDPoS is the config struct for RollDPoS consensus package
RollDPoS struct {
FSM ConsensusTiming `yaml:"fsm"`
ToleratedOvertime time.Duration `yaml:"toleratedOvertime"`
Delay time.Duration `yaml:"delay"`
ConsensusDBPath string `yaml:"consensusDBPath"`
}
// ConsensusTiming defines a set of time durations used in fsm and event queue size
ConsensusTiming struct {
EventChanSize uint `yaml:"eventChanSize"`
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
AcceptBlockTTL time.Duration `yaml:"acceptBlockTTL"`
AcceptProposalEndorsementTTL time.Duration `yaml:"acceptProposalEndorsementTTL"`
AcceptLockEndorsementTTL time.Duration `yaml:"acceptLockEndorsementTTL"`
CommitTTL time.Duration `yaml:"commitTTL"`
}
// Dispatcher is the dispatcher config
Dispatcher struct {
ActionChanSize uint `yaml:"actionChanSize"`
BlockChanSize uint `yaml:"blockChanSize"`
BlockSyncChanSize uint `yaml:"blockSyncChanSize"`
// TODO: explorer dependency deleted at #1085, need to revive by migrating to api
}
// API is the api service config
API struct {
UseRDS bool `yaml:"useRDS"`
Port int `yaml:"port"`
TpsWindow int `yaml:"tpsWindow"`
GasStation GasStation `yaml:"gasStation"`
RangeQueryLimit uint64 `yaml:"rangeQueryLimit"`
}
// GasStation is the gas station config
GasStation struct {
SuggestBlockWindow int `yaml:"suggestBlockWindow"`
DefaultGas uint64 `yaml:"defaultGas"`
Percentile int `yaml:"Percentile"`
}
// System is the system config
System struct {
// Active is the status of the node. True means active and false means stand-by
Active bool `yaml:"active"`
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
// HTTPProfilingPort is the port number to access golang performance profiling data of a blockchain node. It is
// 0 by default, meaning performance profiling has been disabled
HTTPAdminPort int `yaml:"httpAdminPort"`
HTTPStatsPort int `yaml:"httpStatsPort"`
StartSubChainInterval time.Duration `yaml:"startSubChainInterval"`
SystemLogDBPath string `yaml:"systemLogDBPath"`
}
// ActPool is the actpool config
ActPool struct {
// MaxNumActsPerPool indicates maximum number of actions the whole actpool can hold
MaxNumActsPerPool uint64 `yaml:"maxNumActsPerPool"`
// MaxGasLimitPerPool indicates maximum gas limit the whole actpool can hold
MaxGasLimitPerPool uint64 `yaml:"maxGasLimitPerPool"`
// MaxNumActsPerAcct indicates maximum number of actions an account queue can hold
MaxNumActsPerAcct uint64 `yaml:"maxNumActsPerAcct"`
// ActionExpiry defines how long an action will be kept in action pool.
ActionExpiry time.Duration `yaml:"actionExpiry"`
// MinGasPriceStr defines the minimal gas price the delegate will accept for an action
MinGasPriceStr string `yaml:"minGasPrice"`
// BlackList lists the account address that are banned from initiating actions
BlackList []string `yaml:"blackList"`
}
// Indexer is the config for indexer
Indexer struct {
// RangeBloomFilterNumElements is the number of elements each rangeBloomfilter will store in bloomfilterIndexer
RangeBloomFilterNumElements uint64 `yaml:"rangeBloomFilterNumElements"`
// RangeBloomFilterSize is the size (in bits) of rangeBloomfilter
RangeBloomFilterSize uint64 `yaml:"rangeBloomFilterSize"`
// RangeBloomFilterNumHash is the number of hash functions of rangeBloomfilter
RangeBloomFilterNumHash uint64 `yaml:"rangeBloomFilterNumHash"`
}
// Config is the root config struct, each package's config should be put as its sub struct
Config struct {
Plugins map[int]interface{} `yaml:"plugins"`
Network Network `yaml:"network"`
Chain Chain `yaml:"chain"`
ActPool ActPool `yaml:"actPool"`
Consensus Consensus `yaml:"consensus"`
DardanellesUpgrade DardanellesUpgrade `yaml:"dardanellesUpgrade"`
BlockSync BlockSync `yaml:"blockSync"`
Dispatcher Dispatcher `yaml:"dispatcher"`
API API `yaml:"api"`
System System `yaml:"system"`
DB db.Config `yaml:"db"`
Indexer Indexer `yaml:"indexer"`
Log log.GlobalConfig `yaml:"log"`
SubLogs map[string]log.GlobalConfig `yaml:"subLogs"`
Genesis genesis.Genesis `yaml:"genesis"`
}
// Validate is the interface of validating the config
Validate func(Config) error
)
// New creates a config instance. It first loads the default configs. If the config path is not empty, it will read from
// the file and override the default configs. By default, it will apply all validation functions. To bypass validation,
// use DoNotValidate instead.
func New(validates ...Validate) (Config, error) {
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
if _overwritePath != "" {
opts = append(opts, uconfig.File(_overwritePath))
}
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// set network master key to private key
if cfg.Network.MasterKey == "" {
cfg.Network.MasterKey = cfg.Chain.ProducerPrivKey
}
// set plugins
for _, plugin := range _plugins {
switch strings.ToLower(plugin) {
case "gateway":
cfg.Plugins[GatewayPlugin] = nil
default:
return Config{}, errors.Errorf("Plugin %s is not supported", plugin)
}
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// NewSub create config for sub chain.
func NewSub(validates ...Validate) (Config, error) {
if _subChainPath == "" {
return Config{}, nil
}
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
opts = append(opts, uconfig.File(_subChainPath))
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// SetEVMNetworkID sets the extern chain ID
func SetEVMNetworkID(id uint32) {
loadChainID.Do(func() {
_evmNetworkID = id
})
}
// EVMNetworkID returns the extern chain ID
func EVMNetworkID() uint32 {
return atomic.LoadUint32(&_evmNetworkID)
}
// ProducerAddress returns the configured producer address derived from key
func (cfg Config) ProducerAddress() address.Address {
sk := cfg.ProducerPrivateKey()
addr, err := address.FromBytes(sk.PublicKey().Hash())
if err != nil {
log.L().Panic(
"Error when constructing producer address",
zap.Error(err),
)
}
return addr
}
// ProducerPrivateKey returns the configured private key
func (cfg Config) ProducerPrivateKey() crypto.PrivateKey {
sk, err := crypto.HexStringToPrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
log.L().Panic(
"Error when decoding private key",
zap.Error(err),
)
}
if !cfg.whitelistSignatureScheme(sk) {
log.L().Panic("The private key's signature scheme is not whitelisted")
}
return sk
}
func (cfg Config) whitelistSignatureScheme(sk crypto.PrivateKey) bool {
var sigScheme string
switch sk.EcdsaPrivateKey().(type) {
case *ecdsa.PrivateKey:
sigScheme = SigP256k1
case *crypto.P256sm2PrvKey:
sigScheme = SigP256sm2
}
if sigScheme == "" {
return false
}
for _, e := range cfg.Chain.SignatureScheme {
if sigScheme == e {
// signature scheme is whitelisted
return true
}
}
return false
}
func generateRandomKey(scheme string) string {
// generate a random key
switch scheme {
case SigP256k1:
sk, _ := crypto.GenerateKey()
return sk.HexString()
case SigP256sm2:
sk, _ := crypto.GenerateKeySm2()
return sk.HexString()
}
return ""
}
// MinGasPrice returns the minimal gas price threshold
func (ap ActPool) MinGasPrice() *big.Int {
mgp, ok := big.NewInt(0).SetString(ap.MinGasPriceStr, 10)
if !ok {
log.S().Panicf("Error when parsing minimal gas price string: %s", ap.MinGasPriceStr)
}
return mgp
}
// ValidateDispatcher validates the dispatcher configs
func ValidateDispatcher(cfg Config) error {
if cfg.Dispatcher.ActionChanSize <= 0 || cfg.Dispatcher.BlockChanSize <= 0 || cfg.Dispatcher.BlockSyncChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "dispatcher chan size should be greater than 0")
}
return nil
}
// ValidateRollDPoS validates the roll-DPoS configs
func ValidateRollDPoS(cfg Config) error {
if cfg.Consensus.Scheme != RollDPoSScheme {
return nil
}
rollDPoS := cfg.Consensus.RollDPoS
fsm := rollDPoS.FSM
if fsm.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event chan size should be greater than 0")
}
return nil
}
// ValidateArchiveMode validates the state factory setting
func ValidateArchiveMode(cfg Config) error {
if !cfg.Chain.EnableArchiveMode || !cfg.Chain.EnableTrielessStateDB {
return nil
}
return errors.Wrap(ErrInvalidCfg, "Archive mode is incompatible with trieless state DB")
}
// ValidateAPI validates the api configs
func ValidateAPI(cfg Config) error {
if cfg.API.TpsWindow <= 0 {
return errors.Wrap(ErrInvalidCfg, "tps window is not a positive integer when the api is enabled")
}
return nil
}
// ValidateActPool validates the given config
func ValidateActPool(cfg Config) error {
maxNumActPerPool := cfg.ActPool.MaxNumActsPerPool
maxNumActPerAcct := cfg.ActPool.MaxNumActsPerAcct
if maxNumActPerPool <= 0 || maxNumActPerAcct <= 0 {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool or per account cannot be zero or negative",
)
}
if maxNumActPerPool < maxNumActPerAcct {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool cannot be less than maximum number of actions per account",
)
}
return nil
}
// ValidateForkHeights validates the forked heights
func ValidateForkHeights(cfg Config) error {
hu := NewHeightUpgrade(&cfg.Genesis)
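// Activation heights must be non-decreasing in upgrade order; any
// out-of-order pair below is rejected.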
switch {
case hu.PacificBlockHeight() > hu.AleutianBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Pacific is higher than Aleutian")
case hu.AleutianBlockHeight() > hu.BeringBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Aleutian is higher than Bering")
case hu.BeringBlockHeight() > hu.CookBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Bering is higher than Cook")
case hu.CookBlockHeight() > hu.DardanellesBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Cook is higher than Dardanelles")
case hu.DardanellesBlockHeight() > hu.DaytonaBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Dardanelles is higher than Daytona")
case hu.DaytonaBlockHeight() > hu.EasterBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Daytona is higher than Easter")
case hu.EasterBlockHeight() > hu.FbkMigrationBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Easter is higher than FairbankMigration")
case hu.FbkMigrationBlockHeight() > hu.FairbankBlockHeight():
return errors.Wrap(ErrInvalidCfg, "FairbankMigration is higher than Fairbank")
case hu.FairbankBlockHeight() > hu.GreenlandBlockHeight():
return errors.Wrap(ErrInvalidCfg, "Fairbank is higher than Greenland")
}
return nil
}
// DoNotValidate validates the given config
func DoNotValidate(cfg Config) error { return nil }
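// Illustrative usage from a consuming package (a sketch only; the calling
// code, its logger and its error handling are assumptions, not part of this
// file — only New and the Validate functions above are defined here):
//
//	cfg, err := config.New(config.ValidateDispatcher, config.ValidateActPool)
//	if err != nil {
//		log.L().Fatal("failed to build config", zap.Error(err))
//	}
//	_ = cfg.ProducerAddress()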
| 1 | 23,286 | _plugins should be removed too | iotexproject-iotex-core | go |
@@ -0,0 +1,5 @@
+let nn = node.nodeName.toLowerCase();
+let validSetup =
+ node.hasAttribute('alt') && (nn === 'img' || nn === 'input' || nn === 'area');
+let validAttrValue = /^\s+$/.test(node.getAttribute('alt'));
+return validSetup && validAttrValue; | 1 | 1 | 13,664 | The rule selector will not include `<input>` and `<area>` elements. Why not make this a new, separate rule altogether? | dequelabs-axe-core | js |
|
@@ -1122,7 +1122,7 @@ class CommandDispatcher:
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
- except userscripts.UnsupportedError as e:
+ except (userscripts.UnsupportedError, userscripts.NotFoundError) as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window') | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
from PyQt5.QtWidgets import QApplication, QTabBar
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
try:
from PyQt5.QtWebKitWidgets import QWebPage
except ImportError:
QWebPage = None
try:
from PyQt5.QtWebEngineWidgets import QWebEnginePage
except ImportError:
QWebEnginePage = None
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configexc
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem)
from qutebrowser.browser.webkit import downloads
try:
from qutebrowser.browser.webkit import mhtml
except ImportError:
# Failing imports on QtWebEngine, only used in QtWebKit commands.
# FIXME:qtwebengine don't import this anymore at all
pass
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, typing)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import instances, sortfilter
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self):
"""Get a tabbed-browser from a new window."""
from qutebrowser.mainwindow import mainwindow
new_window = mainwindow.MainWindow()
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
explicit=True):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window.
explicit: Passed on to tabopen() when opening in a new or background tab.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window), 'tbw')
if window:
tabbed_browser = self._new_tabbed_browser()
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, explicit=explicit)
elif background:
tabbed_browser.tabopen(url, background=True, explicit=explicit)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, left, right, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((left, right, opposite), 'lro')
if left:
return QTabBar.SelectLeftTab
elif right:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.get('tabs', 'select-on-remove')
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs->select-on-remove' set to "
"'previous'!")
else: # pragma: no cover
raise ValueError("Invalid select-on-remove value "
"{!r}!".format(conf_selection))
return None
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, left=False, right=False, opposite=False, count=None):
"""Close the current/[count]th tab.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(left, right,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=usertypes.Completion.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, implicit=False,
bg=False, tab=False, window=False, count=None):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
implicit: If opening a new tab, treat the tab as implicit (like
clicking on a link).
count: The tab index to open the URL in, or None.
"""
if url is None:
if tab or bg or window:
urls = [config.get('general', 'default-page')]
else:
raise cmdexc.CommandError("No URL given, but -t/-b/-w is not "
"set!")
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if not window and i > 0:
tab = False
bg = True
if tab or bg or window:
self._open(cur_url, tab, bg, window, not implicit)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
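# Heuristic: if there are multiple lines but the first one is neither a URL
# nor an existing file path, treat the whole input as a single search query
# rather than a list of URLs.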
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
diag = QPrintPreviewDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() |
Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(tab.printing.to_printer)
diag.exec_()
elif pdf:
pdf = os.path.expanduser(pdf)
directory = os.path.dirname(pdf)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(pdf)
log.misc.debug("Print to file: {}".format(pdf))
else:
diag = QPrintDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.open(lambda: tab.printing.to_printer(diag.printer()))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs-are-windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser()
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg, explicit=True)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.get('tabs', 'show-favicons'):
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.get('tabs', 'tabs-are-windows'):
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(curtab.history.serialize())
newtab.zoom.set_factor(curtab.zoom.factor())
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_detach(self):
"""Detach the current tab to its own window."""
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach one tab.")
url = self._current_url()
self._open(url, window=True)
cur_widget = self._current_widget()
self._tabbed_browser.close_tab(cur_widget, add_undo=False)
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
for _ in range(count):
if forward:
if not widget.history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
widget.history.forward()
else:
if not widget.history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
widget.history.back()
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_perc(self, perc: float=None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str=None, bottom_navigate: str=None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/The-Compiler/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery(url)
for key in dict(url_query.queryItems()):
if key in config.get('general', 'yank-ignored-url-parameters'):
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.maybe_leave(self._win_id, KeyMode.caret,
"yank selected")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom: int=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
level = count if count is not None else zoom
if level is None:
level = config.get('ui', 'default-zoom')
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(level))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, left=False, right=False):
"""Close all tabs except for the current one.
Args:
left: Keep tabs to the left of the current.
right: Keep tabs to the right of the current.
"""
cmdutils.check_exclusive((left, right), 'lr')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
for i, tab in enumerate(self._tabbed_browser.widgets()):
if (i == cur_idx or (left and i < cur_idx) or
(right and i > cur_idx)):
continue
else:
self._tabbed_browser.close_tab(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open a closed tab (optionally skipping [count] closed tabs)."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/The-Compiler/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/The-Compiler/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated="Use :open {clipboard}")
def paste(self, sel=False, tab=False, bg=False, window=False):
"""Open a page from the clipboard.
If the pasted text contains newlines, each line gets opened in its own
tab.
Args:
sel: Use the primary selection instead of the clipboard.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in new window.
"""
force_search = False
if not utils.supports_selection():
sel = False
try:
text = utils.get_clipboard(selection=sel)
except utils.ClipboardError as e:
raise cmdexc.CommandError(e)
text_urls = [u for u in text.split('\n') if u.strip()]
if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and
urlutils.get_path_if_valid(
text_urls[0], check_exists=True) is None):
force_search = True
text_urls = [text]
for i, text_url in enumerate(text_urls):
if not window and i > 0:
tab = False
bg = True
try:
url = urlutils.fuzzy_url(text_url, force_search=force_search)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=usertypes.Completion.tab)
def buffer(self, index):
"""Select tab by index or url/title best match.
Focuses window if necessary.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = instances.get(usertypes.Completion.tab)
sf = sortfilter.CompletionFilterModel(source=model)
sf.set_pattern(index)
if sf.count() > 0:
index = sf.data(sf.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
window = objreg.window_registry[win_id]
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentIndex(idx-1)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int]=None, count=None):
"""Select the tab given as argument/[count].
If neither count nor index is given, it behaves like tab-next.
If both are given, count takes precedence.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
if index == 'last':
self._tab_focus_last()
return
index = count if count is not None else index
if index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int]=None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by
[count] positions (default: 1), or an absolute tab index
to move the tab to.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.get('tabs', 'wrap'):
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
tab = self._current_widget()
cur_idx = self._current_index()
icon = self._tabbed_browser.tabIcon(cur_idx)
label = self._tabbed_browser.page_title(cur_idx)
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.setUpdatesEnabled(False)
try:
color = self._tabbed_browser.tabBar().tab_data(
cur_idx, 'indicator-color')
self._tabbed_browser.removeTab(cur_idx)
self._tabbed_browser.insertTab(new_idx, tab, icon, label)
self._set_current_index(new_idx)
self._tabbed_browser.set_tab_indicator_color(new_idx, color)
finally:
self._tabbed_browser.setUpdatesEnabled(True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.get('general', 'startpage')[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.UnsupportedError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._win_id, self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name',
completion=usertypes.Completion.quickmark_by_name)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there is more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}!" if was_added else "Removed bookmark {}!"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=usertypes.Completion.bookmark_by_url)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window',
backend=usertypes.Backend.QtWebKit)
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
if dest is None:
target = None
else:
target = usertypes.FileDownloadTarget(dest)
download_manager.get(url, target=target)
elif mhtml_:
self._download_mhtml(dest)
else:
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
# pylint: disable=protected-access
qnam = tab._widget.page().networkAccessManager()
# pylint: enable=protected-access
if dest is None:
target = None
else:
target = usertypes.FileDownloadTarget(dest)
download_manager.get(self._current_url(), qnam=qnam, target=target)
def _download_mhtml(self, dest=None):
"""Download the current page as an MHTML file, including all assets.
Args:
dest: The file path to write the download to.
"""
tab = self._current_widget()
if dest is None:
suggested_fn = self._current_title() + ".mht"
suggested_fn = utils.sanitize_filename(suggested_fn)
filename, q = downloads.ask_for_filename(
suggested_fn, self._win_id, parent=tab,
)
if filename is not None:
mhtml.start_download_checked(filename, tab=tab)
else:
q.answered.connect(functools.partial(
mhtml.start_download_checked, tab=tab))
q.ask()
else:
mhtml.start_download_checked(dest, tab=tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page."""
# pylint: disable=no-member
# WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
def show_source_cb(source):
"""Show source as soon as it's ready."""
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(full=True,
linenos='table')
highlighted = pygments.highlight(source, lexer, formatter)
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
new_tab = self._tabbed_browser.tabopen(explicit=True)
new_tab.set_html(highlighted, current_url)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=usertypes.Completion.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__\->__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif '->' in topic:
parts = topic.split('->')
if len(parts) != 2:
raise cmdexc.CommandError("Invalid help topic {}!".format(
topic))
try:
config.get(*parts)
except configexc.NoSectionError:
raise cmdexc.CommandError("Invalid section {}!".format(
parts[0]))
except configexc.NoOptionError:
raise cmdexc.CommandError("Invalid option {}!".format(
parts[1]))
path = 'settings.html#{}'.format(topic.replace('->', '-'))
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='error', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.text(use_js=True)
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`general -> editor` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_text(text, use_js=True)
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher',
deprecated="Use :insert-text {primary}",
modes=[KeyMode.insert], hide=True, scope='window',
backend=usertypes.Backend.QtWebKit)
def paste_primary(self):
"""Paste the primary selection at cursor position."""
try:
self.insert_text(utils.get_clipboard(selection=True))
except utils.SelectionUnsupportedError:
self.insert_text(utils.get_clipboard())
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
if not tab.has_js():
raise cmdexc.CommandError("This command needs javascript enabled.")
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget=
usertypes.ClickTarget.normal):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element, otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found!")
return
try:
elem.click(target)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
tab.search.clear()
options = {
'ignore_case': config.get('general', 'ignore-case'),
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
if text:
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
else:
cb = None
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
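# Starting the search over already jumps to the first match, so it
# consumes one count.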
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
See http://doc.qt.io/qt-5/qwebpage.html#WebAction-enum for the
available actions.
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebKit:
assert QWebPage is not None
member = getattr(QWebPage, action, None)
base = QWebPage.WebAction
elif tab.backend == usertypes.Backend.QtWebEngine:
assert QWebEnginePage is not None
member = getattr(QWebEnginePage, action, None)
base = QWebEnginePage.WebAction
if not isinstance(member, base):
raise cmdexc.CommandError("{} is not a valid web action!".format(
action))
for _ in range(count):
# This whole command is backend-specific anyways, so it makes no
# sense to introduce some API for this.
# pylint: disable=protected-access
tab._widget.triggerPageAction(member)
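# A minimal, Qt-free sketch of the lookup-and-validate pattern debug_webaction
# uses above: resolve an action name with getattr() and reject anything that
# is not a member of the expected enum. The WebAction class below is a
# stand-in, not the real QWebPage/QWebEnginePage enum.
import enum

class WebAction(enum.Enum):
    Back = 0
    Forward = 1
    MoveToNextChar = 2

def resolve_action(name):
    member = getattr(WebAction, name, None)
    if not isinstance(member, WebAction):
        raise ValueError("{} is not a valid web action!".format(name))
    return member

print(resolve_action("Back"))       # WebAction.Back
# resolve_action("NoSuchAction")    # would raise ValueError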
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int]=None):
"""Evaluate a JavaScript string.
Args:
js_code: The string to evaluate.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
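# A small sketch of what the jseval callback above does with a result, without
# any Qt involved: None is reported as "no output", everything else is
# stringified and trimmed to 5000 characters so a huge JS value cannot hang
# the UI. format_js_result is a hypothetical helper, not qutebrowser API.
def format_js_result(out):
    if out is None:
        return 'No output or error'
    out = str(out)
    if len(out) > 5000:
        out = out[:5000] + ' [...trimmed...]'
    return out

print(format_js_result(None))              # No output or error
print(format_js_result({'a': 1}))          # {'a': 1}
print(format_js_result('x' * 9000)[-15:])  # [...trimmed...]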
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
try:
tab = objreg.get('tab', scope='tab', tab='current')
except objreg.RegistryUnavailableError:
raise cmdexc.CommandError("No focused webview!")
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
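# A minimal sketch of the press/release event pair :fake-key builds for one
# key, assuming PyQt5 is available. The real command above parses the
# keystring first and then either posts the pair to the focused window
# (global_) or sends it to the current webview.
from PyQt5.QtCore import QEvent, Qt
from PyQt5.QtGui import QKeyEvent

press = QKeyEvent(QEvent.KeyPress, Qt.Key_X, Qt.ControlModifier, '')
release = QKeyEvent(QEvent.KeyRelease, Qt.Key_X, Qt.ControlModifier, '')
print(press.key() == Qt.Key_X, release.type() == QEvent.KeyRelease)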
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`general -> editor` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window',
hide=True)
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
"""
if bg or tab or window or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window)
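# Sketch of the decision _open_if_changed makes above: after editing, the URL
# is only loaded in the current tab when it actually changed, while a new
# background tab, tab or window is always opened when requested.
def should_open(url, old_url, bg=False, tab=False, window=False):
    return bg or tab or window or url != old_url

print(should_open('http://a/', 'http://a/'))            # False - unchanged
print(should_open('http://b/', 'http://a/'))            # True  - URL was edited
print(should_open('http://a/', 'http://a/', tab=True))  # True  - new tab requested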
| 1 | 16,644 | @The-Compiler Is this style okay or would you prefer a common userscript exception base? | qutebrowser-qutebrowser | py |
@@ -98,7 +98,8 @@ SchemaBuffer.prototype.cast = function (value, doc, init) {
return value;
} else if (value instanceof Binary) {
- return new MongooseBuffer(value.value(true), [this.path, doc]);
+ var sub_type = value.sub_type || 0x00;
+ return (new MongooseBuffer(value.value(true), [this.path, doc])).toObject(sub_type);
}
if (null === value) return value; | 1 | /*!
* Module dependencies.
*/
var SchemaType = require('../schematype')
, CastError = SchemaType.CastError
, MongooseBuffer = require('../types').Buffer
, Binary = MongooseBuffer.Binary
, Query = require('../query')
, utils = require('../utils')
, Document
/**
* Buffer SchemaType constructor
*
* @param {String} key
* @param {SchemaType} cast
* @inherits SchemaType
* @api private
*/
function SchemaBuffer (key, options) {
SchemaType.call(this, key, options, 'Buffer');
};
/*!
* Inherits from SchemaType.
*/
SchemaBuffer.prototype.__proto__ = SchemaType.prototype;
/**
* Check required
*
* @api private
*/
SchemaBuffer.prototype.checkRequired = function (value, doc) {
if (SchemaType._isRef(this, value, doc, true)) {
return null != value;
} else {
return !!(value && value.length);
}
};
/**
* Casts contents
*
* @param {Object} value
* @param {Document} doc document that triggers the casting
* @param {Boolean} init
* @api private
*/
SchemaBuffer.prototype.cast = function (value, doc, init) {
if (SchemaType._isRef(this, value, doc, init)) {
// wait! we may need to cast this to a document
if (null == value) {
return value;
}
// lazy load
Document || (Document = require('./../document'));
if (value instanceof Document) {
value.$__.wasPopulated = true;
return value;
}
// setting a populated path
if (Buffer.isBuffer(value)) {
return value;
} else if (!utils.isObject(value)) {
throw new CastError('buffer', value, this.path);
}
// Handle the case where user directly sets a populated
// path to a plain object; cast to the Model used in
// the population query.
var path = doc.$__fullPath(this.path);
var owner = doc.ownerDocument ? doc.ownerDocument() : doc;
var pop = owner.populated(path, true);
var ret = new pop.options.model(value);
ret.$__.wasPopulated = true;
return ret;
}
// documents
if (value && value._id) {
value = value._id;
}
if (Buffer.isBuffer(value)) {
if (!(value instanceof MongooseBuffer)) {
value = new MongooseBuffer(value, [this.path, doc]);
}
return value;
} else if (value instanceof Binary) {
return new MongooseBuffer(value.value(true), [this.path, doc]);
}
if (null === value) return value;
var type = typeof value;
if ('string' == type || 'number' == type || Array.isArray(value)) {
return new MongooseBuffer(value, [this.path, doc]);
}
throw new CastError('buffer', value, this.path);
};
/*!
* ignore
*/
function handleSingle (val) {
return this.castForQuery(val);
}
function handleArray (val) {
var self = this;
return val.map( function (m) {
return self.castForQuery(m);
});
}
SchemaBuffer.prototype.$conditionalHandlers = {
'$ne' : handleSingle
, '$in' : handleArray
, '$nin': handleArray
, '$gt' : handleSingle
, '$lt' : handleSingle
, '$gte': handleSingle
, '$lte': handleSingle
};
/**
* Casts contents for queries.
*
* @param {String} $conditional
* @param {any} [value]
* @api private
*/
SchemaBuffer.prototype.castForQuery = function ($conditional, val) {
var handler;
if (arguments.length === 2) {
handler = this.$conditionalHandlers[$conditional];
if (!handler)
throw new Error("Can't use " + $conditional + " with Buffer.");
return handler.call(this, val);
} else {
val = $conditional;
return this.cast(val).toObject();
}
};
/*!
* Module exports.
*/
module.exports = SchemaBuffer;
| 1 | 11,906 | we'd need to return a MongooseBuffer here instead of the Binary. lets add the subtype option to the buffer schema type as referenced in #1000 instead. | Automattic-mongoose | js |
@@ -1158,7 +1158,6 @@ function updateDocuments(coll, selector, document, options, callback) {
if ('function' === typeof options) (callback = options), (options = null);
if (options == null) options = {};
if (!('function' === typeof callback)) callback = null;
-
// If we are not providing a selector or document throw
if (selector == null || typeof selector !== 'object')
return callback(toError('selector must be a valid JavaScript object')); | 1 | 'use strict';
const applyWriteConcern = require('../utils').applyWriteConcern;
const applyRetryableWrites = require('../utils').applyRetryableWrites;
const checkCollectionName = require('../utils').checkCollectionName;
const Code = require('../core').BSON.Code;
const createIndexDb = require('./db_ops').createIndex;
const decorateWithCollation = require('../utils').decorateWithCollation;
const decorateWithReadConcern = require('../utils').decorateWithReadConcern;
const ensureIndexDb = require('./db_ops').ensureIndex;
const evaluate = require('./db_ops').evaluate;
const executeCommand = require('./db_ops').executeCommand;
const executeDbAdminCommand = require('./db_ops').executeDbAdminCommand;
const formattedOrderClause = require('../utils').formattedOrderClause;
const resolveReadPreference = require('../utils').resolveReadPreference;
const handleCallback = require('../utils').handleCallback;
const indexInformationDb = require('./db_ops').indexInformation;
const Long = require('../core').BSON.Long;
const MongoError = require('../core').MongoError;
const ReadPreference = require('../core').ReadPreference;
const toError = require('../utils').toError;
let collection;
function loadCollection() {
if (!collection) {
collection = require('../collection');
}
return collection;
}
/**
* Group function helper
* @ignore
*/
// var groupFunction = function () {
// var c = db[ns].find(condition);
// var map = new Map();
// var reduce_function = reduce;
//
// while (c.hasNext()) {
// var obj = c.next();
// var key = {};
//
// for (var i = 0, len = keys.length; i < len; ++i) {
// var k = keys[i];
// key[k] = obj[k];
// }
//
// var aggObj = map.get(key);
//
// if (aggObj == null) {
// var newObj = Object.extend({}, key);
// aggObj = Object.extend(newObj, initial);
// map.put(key, aggObj);
// }
//
// reduce_function(obj, aggObj);
// }
//
// return { "result": map.values() };
// }.toString();
const groupFunction =
'function () {\nvar c = db[ns].find(condition);\nvar map = new Map();\nvar reduce_function = reduce;\n\nwhile (c.hasNext()) {\nvar obj = c.next();\nvar key = {};\n\nfor (var i = 0, len = keys.length; i < len; ++i) {\nvar k = keys[i];\nkey[k] = obj[k];\n}\n\nvar aggObj = map.get(key);\n\nif (aggObj == null) {\nvar newObj = Object.extend({}, key);\naggObj = Object.extend(newObj, initial);\nmap.put(key, aggObj);\n}\n\nreduce_function(obj, aggObj);\n}\n\nreturn { "result": map.values() };\n}';
/**
* Perform a bulkWrite operation. See Collection.prototype.bulkWrite for more information.
*
* @method
* @param {Collection} a Collection instance.
* @param {object[]} operations Bulk operations to perform.
* @param {object} [options] Optional settings. See Collection.prototype.bulkWrite for a list of options.
* @param {Collection~bulkWriteOpCallback} [callback] The command result callback
*/
function bulkWrite(coll, operations, options, callback) {
// Add ignoreUndefined
if (coll.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = coll.s.options.ignoreUndefined;
}
// Create the bulk operation
const bulk =
options.ordered === true || options.ordered == null
? coll.initializeOrderedBulkOp(options)
: coll.initializeUnorderedBulkOp(options);
// Do we have a collation
let collation = false;
// for each op go through and add to the bulk
try {
for (let i = 0; i < operations.length; i++) {
// Get the operation type
const key = Object.keys(operations[i])[0];
// Check if we have a collation
if (operations[i][key].collation) {
collation = true;
}
// Pass to the raw bulk
bulk.raw(operations[i]);
}
} catch (err) {
return callback(err, null);
}
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
const writeCon = finalOptions.writeConcern ? finalOptions.writeConcern : {};
const capabilities = coll.s.topology.capabilities();
// Did the user pass in a collation, check if our write server supports it
if (collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError('server/primary/mongos does not support collation'));
}
// Execute the bulk
bulk.execute(writeCon, finalOptions, (err, r) => {
// We have connection level error
if (!r && err) {
return callback(err, null);
}
r.insertedCount = r.nInserted;
r.matchedCount = r.nMatched;
r.modifiedCount = r.nModified || 0;
r.deletedCount = r.nRemoved;
r.upsertedCount = r.getUpsertedIds().length;
r.upsertedIds = {};
r.insertedIds = {};
// Update the n
r.n = r.insertedCount;
// Inserted documents
const inserted = r.getInsertedIds();
// Map inserted ids
for (let i = 0; i < inserted.length; i++) {
r.insertedIds[inserted[i].index] = inserted[i]._id;
}
// Upserted documents
const upserted = r.getUpsertedIds();
// Map upserted ids
for (let i = 0; i < upserted.length; i++) {
r.upsertedIds[upserted[i].index] = upserted[i]._id;
}
// Return the results
callback(null, r);
});
}
// Check the update operation to ensure it has atomic operators.
function checkForAtomicOperators(update) {
const keys = Object.keys(update);
// same errors as the server would give for update doc lacking atomic operators
if (keys.length === 0) {
return toError('The update operation document must contain at least one atomic operator.');
}
if (keys[0][0] !== '$') {
return toError('the update operation document must contain atomic operators.');
}
}
/**
* Count the number of documents in the collection that match the query.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} query The query for the count.
* @param {object} [options] Optional settings. See Collection.prototype.count for a list of options.
* @param {Collection~countCallback} [callback] The command result callback
*/
function count(coll, query, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options);
options.collectionName = coll.collectionName;
options.readPreference = resolveReadPreference(coll, options);
let cmd;
try {
cmd = buildCountCommand(coll, query, options);
} catch (err) {
return callback(err);
}
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, result.n);
});
}
function countDocuments(coll, query, options, callback) {
const skip = options.skip;
const limit = options.limit;
options = Object.assign({}, options);
const pipeline = [{ $match: query }];
// Add skip and limit if defined
if (typeof skip === 'number') {
pipeline.push({ $skip: skip });
}
if (typeof limit === 'number') {
pipeline.push({ $limit: limit });
}
pipeline.push({ $group: { _id: 1, n: { $sum: 1 } } });
delete options.limit;
delete options.skip;
coll.aggregate(pipeline, options).toArray((err, docs) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, docs.length ? docs[0].n : 0);
});
}
/**
* Build the count command.
*
* @method
* @param {collectionOrCursor} an instance of a collection or cursor
* @param {object} query The query for the count.
* @param {object} [options] Optional settings. See Collection.prototype.count and Cursor.prototype.count for a list of options.
*/
function buildCountCommand(collectionOrCursor, query, options) {
const skip = options.skip;
const limit = options.limit;
let hint = options.hint;
const maxTimeMS = options.maxTimeMS;
query = query || {};
// Final query
const cmd = {
count: options.collectionName,
query: query
};
// check if collectionOrCursor is a cursor by using cursor.s.numberOfRetries
if (collectionOrCursor.s.numberOfRetries) {
if (collectionOrCursor.s.options.hint) {
hint = collectionOrCursor.s.options.hint;
} else if (collectionOrCursor.s.cmd.hint) {
hint = collectionOrCursor.s.cmd.hint;
}
decorateWithCollation(cmd, collectionOrCursor, collectionOrCursor.s.cmd);
} else {
decorateWithCollation(cmd, collectionOrCursor, options);
}
// Add limit, skip and maxTimeMS if defined
if (typeof skip === 'number') cmd.skip = skip;
if (typeof limit === 'number') cmd.limit = limit;
if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS;
if (hint) cmd.hint = hint;
// Do we have a readConcern specified
decorateWithReadConcern(cmd, collectionOrCursor);
return cmd;
}
/**
* Create an index on the db and collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings. See Collection.prototype.createIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function createIndex(coll, fieldOrSpec, options, callback) {
createIndexDb(coll.s.db, coll.collectionName, fieldOrSpec, options, callback);
}
/**
* Create multiple indexes in the collection. This method is only supported for
* MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported
* error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/.
*
* @method
* @param {Collection} a Collection instance.
* @param {array} indexSpecs An array of index specifications to be created
* @param {Object} [options] Optional settings. See Collection.prototype.createIndexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function createIndexes(coll, indexSpecs, options, callback) {
const capabilities = coll.s.topology.capabilities();
// Ensure we generate the correct name if the parameter is not set
for (let i = 0; i < indexSpecs.length; i++) {
if (indexSpecs[i].name == null) {
const keys = [];
// Did the user pass in a collation, check if our write server supports it
if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError('server/primary/mongos does not support collation'));
}
for (let name in indexSpecs[i].key) {
keys.push(`${name}_${indexSpecs[i].key[name]}`);
}
// Set the name
indexSpecs[i].name = keys.join('_');
}
}
options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY });
// Execute the index
executeCommand(
coll.s.db,
{
createIndexes: coll.collectionName,
indexes: indexSpecs
},
options,
callback
);
}
function deleteCallback(err, r, callback) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.deletedCount = r.result.n;
if (callback) callback(null, r);
}
/**
* Delete multiple documents from the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the documents to remove
* @param {object} [options] Optional settings. See Collection.prototype.deleteMany for a list of options.
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
*/
function deleteMany(coll, filter, options, callback) {
options.single = false;
removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback));
}
/**
* Delete a single document from the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the document to remove
* @param {object} [options] Optional settings. See Collection.prototype.deleteOne for a list of options.
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
*/
function deleteOne(coll, filter, options, callback) {
options.single = true;
removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback));
}
/**
* Drop an index from this collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {string} indexName Name of the index to drop.
* @param {object} [options] Optional settings. See Collection.prototype.dropIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function dropIndex(coll, indexName, options, callback) {
// Delete index command
const cmd = { dropIndexes: coll.collectionName, index: indexName };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options);
// Execute command
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (typeof callback !== 'function') return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result);
});
}
/**
* Drop all indexes from this collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.dropIndexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function dropIndexes(coll, options, callback) {
dropIndex(coll, '*', options, err => {
if (err) return handleCallback(callback, err, false);
handleCallback(callback, null, true);
});
}
/**
* Ensure that an index exists. If the index does not exist, this function creates it.
*
* @method
* @param {Collection} a Collection instance.
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings. See Collection.prototype.ensureIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function ensureIndex(coll, fieldOrSpec, options, callback) {
ensureIndexDb(coll.s.db, coll.collectionName, fieldOrSpec, options, callback);
}
/**
* Find and update a document.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} doc The fields/vals to be updated.
* @param {object} [options] Optional settings. See Collection.prototype.findAndModify for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The command result callback
* @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead
*/
function findAndModify(coll, query, sort, doc, options, callback) {
// Create findAndModify command object
const queryObject = {
findAndModify: coll.collectionName,
query: query
};
sort = formattedOrderClause(sort);
if (sort) {
queryObject.sort = sort;
}
queryObject.new = options.new ? true : false;
queryObject.remove = options.remove ? true : false;
queryObject.upsert = options.upsert ? true : false;
const projection = options.projection || options.fields;
if (projection) {
queryObject.fields = projection;
}
if (options.arrayFilters) {
queryObject.arrayFilters = options.arrayFilters;
delete options.arrayFilters;
}
if (doc && !options.remove) {
queryObject.update = doc;
}
if (options.maxTimeMS) queryObject.maxTimeMS = options.maxTimeMS;
// Either use override on the function, or go back to default on either the collection
// level or db
options.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
// No check on the documents
options.checkKeys = false;
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// Decorate the findAndModify command with the write Concern
if (finalOptions.writeConcern) {
queryObject.writeConcern = finalOptions.writeConcern;
}
// Have we specified bypassDocumentValidation
if (finalOptions.bypassDocumentValidation === true) {
queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation;
}
finalOptions.readPreference = ReadPreference.primary;
// Have we specified collation
try {
decorateWithCollation(queryObject, coll, finalOptions);
} catch (err) {
return callback(err, null);
}
// Execute the command
executeCommand(coll.s.db, queryObject, finalOptions, (err, result) => {
if (err) return handleCallback(callback, err, null);
return handleCallback(callback, null, result);
});
}
/**
* Find and remove a document.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} [options] Optional settings. See Collection.prototype.findAndRemove for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
* @deprecated use findOneAndDelete instead
*/
function findAndRemove(coll, query, sort, options, callback) {
// Add the remove option
options.remove = true;
// Execute the callback
findAndModify(coll, query, sort, null, options, callback);
}
/**
* Fetch the first document that matches the query.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} query Query for find Operation
* @param {object} [options] Optional settings. See Collection.prototype.findOne for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function findOne(coll, query, options, callback) {
const cursor = coll
.find(query, options)
.limit(-1)
.batchSize(1);
// Return the item
cursor.next((err, item) => {
if (err != null) return handleCallback(callback, toError(err), null);
handleCallback(callback, null, item);
});
}
/**
* Find a document and delete it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter Document selection filter.
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndDelete for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndDelete(coll, filter, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.remove = true;
// Execute find and Modify
findAndModify(coll, filter, options.sort, null, finalOptions, callback);
}
/**
* Find a document and replace it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter Document selection filter.
* @param {object} replacement Document replacing the matching document.
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndReplace for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndReplace(coll, filter, replacement, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.update = true;
finalOptions.new = options.returnOriginal !== void 0 ? !options.returnOriginal : false;
finalOptions.upsert = options.upsert !== void 0 ? !!options.upsert : false;
// Execute findAndModify
findAndModify(coll, filter, options.sort, replacement, finalOptions, callback);
}
/**
* Find a document and update it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter Document selection filter.
* @param {object} update Update operations to be performed on the document
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndUpdate for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndUpdate(coll, filter, update, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.update = true;
finalOptions.new = typeof options.returnOriginal === 'boolean' ? !options.returnOriginal : false;
finalOptions.upsert = typeof options.upsert === 'boolean' ? options.upsert : false;
// Execute findAndModify
findAndModify(coll, filter, options.sort, update, finalOptions, callback);
}
/**
* Run a group command across a collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by.
* @param {object} condition An optional condition that must be true for a row to be considered.
* @param {object} initial Initial value of the aggregation counter object.
* @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated
* @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned.
* @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true.
* @param {object} [options] Optional settings. See Collection.prototype.group for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
* @deprecated MongoDB 3.6 or higher will no longer support the group command. We recommend rewriting using the aggregation framework.
*/
function group(coll, keys, condition, initial, reduce, finalize, command, options, callback) {
// Execute using the command
if (command) {
const reduceFunction = reduce && reduce._bsontype === 'Code' ? reduce : new Code(reduce);
const selector = {
group: {
ns: coll.collectionName,
$reduce: reduceFunction,
cond: condition,
initial: initial,
out: 'inline'
}
};
// if finalize is defined
if (finalize != null) selector.group['finalize'] = finalize;
// Set up group selector
if ('function' === typeof keys || (keys && keys._bsontype === 'Code')) {
selector.group.$keyf = keys && keys._bsontype === 'Code' ? keys : new Code(keys);
} else {
const hash = {};
keys.forEach(key => {
hash[key] = 1;
});
selector.group.key = hash;
}
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(coll, options);
// Do we have a readConcern specified
decorateWithReadConcern(selector, coll, options);
// Have we specified collation
try {
decorateWithCollation(selector, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute command
executeCommand(coll.s.db, selector, options, (err, result) => {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.retval);
});
} else {
// Create execution scope
const scope = reduce != null && reduce._bsontype === 'Code' ? reduce.scope : {};
scope.ns = coll.collectionName;
scope.keys = keys;
scope.condition = condition;
scope.initial = initial;
// Pass in the function text to execute within mongodb.
const groupfn = groupFunction.replace(/ reduce;/, reduce.toString() + ';');
evaluate(coll.s.db, new Code(groupfn, scope), null, options, (err, results) => {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, results.result || results);
});
}
}
/**
* Retrieve all the indexes on the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.indexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexes(coll, options, callback) {
options = Object.assign({}, { full: true }, options);
indexInformationDb(coll.s.db, coll.collectionName, options, callback);
}
/**
* Check if one or more indexes exist on the collection. This fails on the first index that doesn't exist.
*
* @method
* @param {Collection} a Collection instance.
* @param {(string|array)} indexes One or more index names to check.
* @param {Object} [options] Optional settings. See Collection.prototype.indexExists for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexExists(coll, indexes, options, callback) {
indexInformation(coll, options, (err, indexInformation) => {
// If we have an error return
if (err != null) return handleCallback(callback, err, null);
// Let's check for the index names
if (!Array.isArray(indexes))
return handleCallback(callback, null, indexInformation[indexes] != null);
// Check in list of indexes
for (let i = 0; i < indexes.length; i++) {
if (indexInformation[indexes[i]] == null) {
return handleCallback(callback, null, false);
}
}
// All keys found return true
return handleCallback(callback, null, true);
});
}
/**
* Retrieve this collection's index info.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} [options] Optional settings. See Collection.prototype.indexInformation for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexInformation(coll, options, callback) {
indexInformationDb(coll.s.db, coll.collectionName, options, callback);
}
function insertDocuments(coll, docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Ensure we are operating on an array of docs
docs = Array.isArray(docs) ? docs : [docs];
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// If keep going set unordered
if (finalOptions.keepGoing === true) finalOptions.ordered = false;
finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
docs = prepareDocs(coll, docs, options);
// File inserts
coll.s.topology.insert(coll.s.namespace, docs, finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Add docs to the list
result.ops = docs;
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Insert a single document into the collection. See Collection.prototype.insertOne for more information.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} doc Document to insert.
* @param {object} [options] Optional settings. See Collection.prototype.insertOne for a list of options.
* @param {Collection~insertOneWriteOpCallback} [callback] The command result callback
*/
function insertOne(coll, doc, options, callback) {
if (Array.isArray(doc)) {
return callback(
MongoError.create({ message: 'doc parameter must be an object', driver: true })
);
}
insertDocuments(coll, [doc], options, (err, r) => {
if (callback == null) return;
if (err && callback) return callback(err);
// Workaround for pre 2.6 servers
if (r == null) return callback(null, { result: { ok: 1 } });
// Add values to top level to ensure crud spec compatibility
r.insertedCount = r.result.n;
r.insertedId = doc._id;
if (callback) callback(null, r);
});
}
/**
* Inserts an array of documents into MongoDB. If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {Collection} a Collection instance.
* @param {object[]} docs Documents to insert.
* @param {object} [options] Optional settings. See Collection.prototype.insertMany for a list of options.
* @param {Collection~insertWriteOpCallback} [callback] The command result callback
*/
function insertMany(coll, docs, options, callback) {
if (!Array.isArray(docs)) {
return callback(
MongoError.create({ message: 'docs parameter must be an array of documents', driver: true })
);
}
// If keep going set unordered
options['serializeFunctions'] = options['serializeFunctions'] || coll.s.serializeFunctions;
docs = prepareDocs(coll, docs, options);
// Generate the bulk write operations
const operations = [
{
insertMany: docs
}
];
bulkWrite(coll, operations, options, (err, result) => {
if (err) return callback(err, null);
callback(null, mapInsertManyResults(docs, result));
});
}
function mapInsertManyResults(docs, r) {
const finalResult = {
result: { ok: 1, n: r.insertedCount },
ops: docs,
insertedCount: r.insertedCount,
insertedIds: r.insertedIds
};
if (r.getLastOp()) {
finalResult.result.opTime = r.getLastOp();
}
return finalResult;
}
/**
* Determine whether the collection is a capped collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.isCapped for a list of options.
* @param {Collection~resultCallback} [callback] The results callback
*/
function isCapped(coll, options, callback) {
optionsOp(coll, options, (err, document) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, !!(document && document.capped));
});
}
/**
* Return the options of the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.options for a list of options.
* @param {Collection~resultCallback} [callback] The results callback
*/
function optionsOp(coll, opts, callback) {
coll.s.db.listCollections({ name: coll.collectionName }, opts).toArray((err, collections) => {
if (err) return handleCallback(callback, err);
if (collections.length === 0) {
return handleCallback(
callback,
MongoError.create({ message: `collection ${coll.namespace} not found`, driver: true })
);
}
handleCallback(callback, err, collections[0].options || null);
});
}
/**
* Return N parallel cursors for a collection to allow parallel reading of the entire collection. There are
* no ordering guarantees for returned results.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} [options] Optional settings. See Collection.prototype.parallelCollectionScan for a list of options.
* @param {Collection~parallelCollectionScanCallback} [callback] The command result callback
*/
function parallelCollectionScan(coll, options, callback) {
// Create command object
const commandObject = {
parallelCollectionScan: coll.collectionName,
numCursors: options.numCursors
};
// Do we have a readConcern specified
decorateWithReadConcern(commandObject, coll, options);
// Store the raw value
const raw = options.raw;
delete options['raw'];
// Execute the command
executeCommand(coll.s.db, commandObject, options, (err, result) => {
if (err) return handleCallback(callback, err, null);
if (result == null)
return handleCallback(
callback,
new Error('no result returned for parallelCollectionScan'),
null
);
options = Object.assign({ explicitlyIgnoreSession: true }, options);
const cursors = [];
// Add the raw back to the option
if (raw) options.raw = raw;
// Create command cursors for each item
for (let i = 0; i < result.cursors.length; i++) {
const rawId = result.cursors[i].cursor.id;
// Convert cursorId to Long if needed
const cursorId = typeof rawId === 'number' ? Long.fromNumber(rawId) : rawId;
// Add a command cursor
cursors.push(coll.s.topology.cursor(coll.namespace, cursorId, options));
}
handleCallback(callback, null, cursors);
});
}
// modifies documents before being inserted or updated
function prepareDocs(coll, docs, options) {
const forceServerObjectId =
typeof options.forceServerObjectId === 'boolean'
? options.forceServerObjectId
: coll.s.db.options.forceServerObjectId;
// no need to modify the docs if server sets the ObjectId
if (forceServerObjectId === true) {
return docs;
}
return docs.map(doc => {
if (forceServerObjectId !== true && doc._id == null) {
doc._id = coll.s.pkFactory.createPk();
}
return doc;
});
}
/**
* Reindex all indexes on the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.reIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function reIndex(coll, options, callback) {
// Reindex
const cmd = { reIndex: coll.collectionName };
// Execute the command
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.ok ? true : false);
});
}
function removeDocuments(coll, selector, options, callback) {
if (typeof options === 'function') {
(callback = options), (options = {});
} else if (typeof selector === 'function') {
callback = selector;
options = {};
selector = {};
}
// Create an empty options object if the provided one is null
options = options || {};
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// If selector is null set empty
if (selector == null) selector = {};
// Build the op
const op = { q: selector, limit: 0 };
if (options.single) {
op.limit = 1;
} else if (finalOptions.retryWrites) {
finalOptions.retryWrites = false;
}
// Have we specified collation
try {
decorateWithCollation(finalOptions, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute the remove
coll.s.topology.remove(coll.s.namespace, [op], finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Rename the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {string} newName New name of the collection.
* @param {object} [options] Optional settings. See Collection.prototype.rename for a list of options.
* @param {Collection~collectionResultCallback} [callback] The results callback
*/
function rename(coll, newName, options, callback) {
let Collection = loadCollection();
// Check the collection name
checkCollectionName(newName);
// Build the command
const renameCollection = coll.s.namespace.toString();
const toCollection = coll.s.namespace.withCollection(newName);
const dropTarget = typeof options.dropTarget === 'boolean' ? options.dropTarget : false;
const cmd = { renameCollection: renameCollection, to: toCollection, dropTarget: dropTarget };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options);
// Execute against admin
executeDbAdminCommand(coll.s.db.admin().s.db, cmd, options, (err, doc) => {
if (err) return handleCallback(callback, err, null);
// We have an error
if (doc.errmsg) return handleCallback(callback, toError(doc), null);
try {
return handleCallback(
callback,
null,
new Collection(
coll.s.db,
coll.s.topology,
coll.databaseName,
newName,
coll.s.pkFactory,
coll.s.options
)
);
} catch (err) {
return handleCallback(callback, toError(err), null);
}
});
}
/**
* Replace a document in the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the document to update
* @param {object} doc The Document that replaces the matching document
* @param {object} [options] Optional settings. See Collection.prototype.replaceOne for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function replaceOne(coll, filter, doc, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(coll, filter, doc, options, (err, r) => {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
r.ops = [doc];
if (callback) callback(null, r);
});
}
/**
* Save a document.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} doc Document to save
* @param {object} [options] Optional settings. See Collection.prototype.save for a list of options.
* @param {Collection~writeOpCallback} [callback] The command result callback
* @deprecated use insertOne, insertMany, updateOne or updateMany
*/
function save(coll, doc, options, callback) {
// Get the write concern options
const finalOptions = applyWriteConcern(
Object.assign({}, options),
{ db: coll.s.db, collection: coll },
options
);
// Establish if we need to perform an insert or update
if (doc._id != null) {
finalOptions.upsert = true;
return updateDocuments(coll, { _id: doc._id }, doc, finalOptions, callback);
}
// Insert the document
insertDocuments(coll, [doc], finalOptions, (err, result) => {
if (callback == null) return;
if (doc == null) return handleCallback(callback, null, null);
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result);
});
}
function updateCallback(err, r, callback) {
if (callback == null) return;
if (err) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
callback(null, r);
}
function updateDocuments(coll, selector, document, options, callback) {
if ('function' === typeof options) (callback = options), (options = null);
if (options == null) options = {};
if (!('function' === typeof callback)) callback = null;
// If we are not providing a selector or document throw
if (selector == null || typeof selector !== 'object')
return callback(toError('selector must be a valid JavaScript object'));
if (document == null || typeof document !== 'object')
return callback(toError('document must be a valid JavaScript object'));
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// Do we return the actual result document
// Either use override on the function, or go back to default on either the collection
// level or db
finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
// Execute the operation
const op = { q: selector, u: document };
op.upsert = options.upsert !== void 0 ? !!options.upsert : false;
op.multi = options.multi !== void 0 ? !!options.multi : false;
if (finalOptions.arrayFilters) {
op.arrayFilters = finalOptions.arrayFilters;
delete finalOptions.arrayFilters;
}
if (finalOptions.retryWrites && op.multi) {
finalOptions.retryWrites = false;
}
// Have we specified collation
try {
decorateWithCollation(finalOptions, coll, options);
} catch (err) {
return callback(err, null);
}
// Update options
coll.s.topology.update(coll.s.namespace, [op], finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Update multiple documents in the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the documents to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options] Optional settings. See Collection.prototype.updateMany for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function updateMany(coll, filter, update, options, callback) {
// Set single document update
options.multi = true;
// Execute update
updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback));
}
/**
* Update a single document in the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the document to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options] Optional settings. See Collection.prototype.updateOne for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function updateOne(coll, filter, update, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback));
}
module.exports = {
bulkWrite,
checkForAtomicOperators,
count,
countDocuments,
buildCountCommand,
createIndex,
createIndexes,
deleteMany,
deleteOne,
dropIndex,
dropIndexes,
ensureIndex,
findAndModify,
findAndRemove,
findOne,
findOneAndDelete,
findOneAndReplace,
findOneAndUpdate,
group,
indexes,
indexExists,
indexInformation,
insertMany,
insertOne,
isCapped,
optionsOp,
parallelCollectionScan,
prepareDocs,
reIndex,
removeDocuments,
rename,
replaceOne,
save,
updateDocuments,
updateMany,
updateOne
};
| 1 | 15,889 | Let's remove this change. | mongodb-node-mongodb-native | js |
@@ -727,7 +727,7 @@ def processNegativeStates(role, states, reason, negativeStates=None):
# but only if it is either focused or this is something other than a change event.
# The condition stops "not selected" from being spoken in some broken controls
# when the state change for the previous focus is issued before the focus change.
- if role in (ROLE_LISTITEM, ROLE_TREEVIEWITEM, ROLE_TABLEROW) and STATE_SELECTABLE in states and (reason != REASON_CHANGE or STATE_FOCUSED in states):
+ if role in (ROLE_LISTITEM, ROLE_TREEVIEWITEM, ROLE_TABLEROW,ROLE_TABLECELL,ROLE_TABLECOLUMNHEADER,ROLE_TABLEROWHEADER) and STATE_SELECTABLE in states and (reason != REASON_CHANGE or STATE_FOCUSED in states):
speakNegatives.add(STATE_SELECTED)
# Restrict "not checked" in a similar way to "not selected".
if (role in (ROLE_CHECKBOX, ROLE_RADIOBUTTON, ROLE_CHECKMENUITEM) or STATE_CHECKABLE in states) and (STATE_HALFCHECKED not in states) and (reason != REASON_CHANGE or STATE_FOCUSED in states): | 1 | #controlTypes.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2016 NV Access Limited, Babbage B.V.
ROLE_UNKNOWN=0
ROLE_WINDOW=1
ROLE_TITLEBAR=2
ROLE_PANE=3
ROLE_DIALOG=4
ROLE_CHECKBOX=5
ROLE_RADIOBUTTON=6
ROLE_STATICTEXT=7
ROLE_EDITABLETEXT=8
ROLE_BUTTON=9
ROLE_MENUBAR=10
ROLE_MENUITEM=11
ROLE_POPUPMENU=12
ROLE_COMBOBOX=13
ROLE_LIST=14
ROLE_LISTITEM=15
ROLE_GRAPHIC=16
ROLE_HELPBALLOON=17
ROLE_TOOLTIP=18
ROLE_LINK=19
ROLE_TREEVIEW=20
ROLE_TREEVIEWITEM=21
ROLE_TAB=22
ROLE_TABCONTROL=23
ROLE_SLIDER=24
ROLE_PROGRESSBAR=25
ROLE_SCROLLBAR=26
ROLE_STATUSBAR=27
ROLE_TABLE=28
ROLE_TABLECELL=29
ROLE_TABLECOLUMN=30
ROLE_TABLEROW=31
ROLE_TABLECOLUMNHEADER=32
ROLE_TABLEROWHEADER=33
ROLE_FRAME=34
ROLE_TOOLBAR=35
ROLE_DROPDOWNBUTTON=36
ROLE_CLOCK=37
ROLE_SEPARATOR=38
ROLE_FORM=39
ROLE_HEADING=40
ROLE_HEADING1=41
ROLE_HEADING2=42
ROLE_HEADING3=43
ROLE_HEADING4=44
ROLE_HEADING5=45
ROLE_HEADING6=46
ROLE_PARAGRAPH=47
ROLE_BLOCKQUOTE=48
ROLE_TABLEHEADER=49
ROLE_TABLEBODY=50
ROLE_TABLEFOOTER=51
ROLE_DOCUMENT=52
ROLE_ANIMATION=53
ROLE_APPLICATION=54
ROLE_BOX=55
ROLE_GROUPING=56
ROLE_PROPERTYPAGE=57
ROLE_CANVAS=58
ROLE_CAPTION=59
ROLE_CHECKMENUITEM=60
ROLE_DATEEDITOR=61
ROLE_ICON=62
ROLE_DIRECTORYPANE=63
ROLE_EMBEDDEDOBJECT=64
ROLE_ENDNOTE=65
ROLE_FOOTER=66
ROLE_FOOTNOTE=67
ROLE_GLASSPANE=69
ROLE_HEADER=70
ROLE_IMAGEMAP=71
ROLE_INPUTWINDOW=72
ROLE_LABEL=73
ROLE_NOTE=74
ROLE_PAGE=75
ROLE_RADIOMENUITEM=76
ROLE_LAYEREDPANE=77
ROLE_REDUNDANTOBJECT=78
ROLE_ROOTPANE=79
ROLE_EDITBAR=80
ROLE_TERMINAL=82
ROLE_RICHEDIT=83
ROLE_RULER=84
ROLE_SCROLLPANE=85
ROLE_SECTION=86
ROLE_SHAPE=87
ROLE_SPLITPANE=88
ROLE_VIEWPORT=89
ROLE_TEAROFFMENU=90
ROLE_TEXTFRAME=91
ROLE_TOGGLEBUTTON=92
ROLE_BORDER=93
ROLE_CARET=94
ROLE_CHARACTER=95
ROLE_CHART=96
ROLE_CURSOR=97
ROLE_DIAGRAM=98
ROLE_DIAL=99
ROLE_DROPLIST=100
ROLE_SPLITBUTTON=101
ROLE_MENUBUTTON=102
ROLE_DROPDOWNBUTTONGRID=103
ROLE_MATH=104
ROLE_EQUATION=ROLE_MATH # Deprecated; for backwards compatibility.
ROLE_GRIP=105
ROLE_HOTKEYFIELD=106
ROLE_INDICATOR=107
ROLE_SPINBUTTON=108
ROLE_SOUND=109
ROLE_WHITESPACE=110
ROLE_TREEVIEWBUTTON=111
ROLE_IPADDRESS=112
ROLE_DESKTOPICON=113
ROLE_ALERT=114
ROLE_INTERNALFRAME=115
ROLE_DESKTOPPANE=116
ROLE_OPTIONPANE=117
ROLE_COLORCHOOSER=118
ROLE_FILECHOOSER=119
ROLE_FILLER=120
ROLE_MENU=121
ROLE_PANEL=122
ROLE_PASSWORDEDIT=123
ROLE_FONTCHOOSER=124
ROLE_LINE=125
ROLE_FONTNAME=126
ROLE_FONTSIZE=127
ROLE_BOLD=128
ROLE_ITALIC=129
ROLE_UNDERLINE=130
ROLE_FGCOLOR=131
ROLE_BGCOLOR=132
ROLE_SUPERSCRIPT=133
ROLE_SUBSCRIPT=134
ROLE_STYLE=135
ROLE_INDENT=136
ROLE_ALIGNMENT=137
ROLE_ALERT=138
ROLE_DATAGRID=139
ROLE_DATAITEM=140
ROLE_HEADERITEM=141
ROLE_THUMB=142
ROLE_CALENDAR=143
ROLE_VIDEO=144
ROLE_AUDIO=145
ROLE_CHARTELEMENT=146
ROLE_DELETED_CONTENT=147
ROLE_INSERTED_CONTENT=148
STATE_UNAVAILABLE=0X1
STATE_FOCUSED=0X2
STATE_SELECTED=0X4
STATE_BUSY=0X8
STATE_PRESSED=0X10
STATE_CHECKED=0X20
STATE_HALFCHECKED=0X40
STATE_READONLY=0X80
STATE_EXPANDED=0X100
STATE_COLLAPSED=0X200
STATE_INVISIBLE=0X400
STATE_VISITED=0X800
STATE_LINKED=0X1000
STATE_HASPOPUP=0X2000
STATE_PROTECTED=0X4000
STATE_REQUIRED=0X8000
STATE_DEFUNCT=0X10000
STATE_INVALID_ENTRY=0X20000
STATE_MODAL=0X40000
STATE_AUTOCOMPLETE=0x80000
STATE_MULTILINE=0X100000
STATE_ICONIFIED=0x200000
STATE_OFFSCREEN=0x400000
STATE_SELECTABLE=0x800000
STATE_FOCUSABLE=0x1000000
STATE_CLICKABLE=0x2000000
STATE_EDITABLE=0x4000000
STATE_CHECKABLE=0x8000000
STATE_DRAGGABLE=0x10000000
STATE_DRAGGING=0x20000000
STATE_DROPTARGET=0x40000000
STATE_SORTED=0x80000000
STATE_SORTED_ASCENDING=0x100000000
STATE_SORTED_DESCENDING=0x200000000
STATES_SORTED=frozenset([STATE_SORTED,STATE_SORTED_ASCENDING,STATE_SORTED_DESCENDING])
STATE_HASLONGDESC=0x400000000
STATE_PINNED=0x800000000
STATE_HASFORMULA=0x1000000000 #Mostly for spreadsheets
STATE_HASCOMMENT=0X2000000000
STATE_OBSCURED=0x4000000000
STATE_CROPPED=0x8000000000
STATE_OVERFLOWING=0x10000000000
STATE_UNLOCKED=0x20000000000
roleLabels={
# Translators: The word for an unknown control type.
ROLE_UNKNOWN:_("unknown"),
# Translators: The word for window of a program such as document window.
ROLE_WINDOW:_("window"),
# Translators: Used to identify title bar of a program.
ROLE_TITLEBAR:_("title bar"),
# Translators: The word used for pane such as desktop pane.
ROLE_PANE:_("pane"),
# Translators: The word used to denote a dialog box such as open dialog.
ROLE_DIALOG:_("dialog"),
# Translators: The text used to identify check boxes such as select check box.
ROLE_CHECKBOX:_("check box"),
# Translators: The text used to identify radio buttons such as yes or no radio button.
ROLE_RADIOBUTTON:_("radio button"),
# Translators: The word used to identify a static text such as dialog text.
ROLE_STATICTEXT:_("text"),
# Translators: The word used to identify edit fields such as subject edit field.
ROLE_EDITABLETEXT:_("edit"),
# Translators: The word used to identify a button such as OK button.
ROLE_BUTTON:_("button"),
# Translators: Text used to identify menu bar of a program.
ROLE_MENUBAR:_("menu bar"),
# Translators: Used to identify a menu item such as an item in file menu.
ROLE_MENUITEM:_("menu item"),
# Translators: The word used for menus such as edit menu.
ROLE_POPUPMENU:_("menu"),
# Translators: Used to identify combo boxes such as file type combo box.
ROLE_COMBOBOX:_("combo box"),
# Translators: The word used for lists such as folder list.
ROLE_LIST:_("list"),
# Translators: Used to identify a list item such as email list items.
ROLE_LISTITEM:_("list item"),
# Translators: The word used to identify graphics such as webpage graphics.
ROLE_GRAPHIC:_("graphic"),
# Translators: Used to identify help balloon (a circular window with helpful text such as notification text).
ROLE_HELPBALLOON:_("help balloon"),
# Translators: Used to identify a tooltip (a small window with additional text about selected item such as file information).
ROLE_TOOLTIP:_("tool tip"),
# Translators: Identifies a link in webpage documents.
ROLE_LINK:_("link"),
# Translators: Identifies a treeview (a tree-like structure such as treeviews for subfolders).
ROLE_TREEVIEW:_("tree view"),
# Translators: Identifies a tree view item.
ROLE_TREEVIEWITEM:_("tree view item"),
# Translators: The word presented for tabs in a tab enabled window.
ROLE_TAB: pgettext("controlType", "tab"),
# Translators: Identifies a tab control such as webpage tabs in web browsers.
ROLE_TABCONTROL:_("tab control"),
# Translators: Identifies a slider such as volume slider.
ROLE_SLIDER:_("slider"),
# Translators: Identifies a progress bar such as NvDA update progress.
ROLE_PROGRESSBAR:_("progress bar"),
# Translators: Identifies a scroll bar.
ROLE_SCROLLBAR:_("scroll bar"),
# Translators: Identifies a status bar (text at the bottom bar of the screen such as cursor position in a document).
ROLE_STATUSBAR:_("status bar"),
# Translators: Identifies a table such as ones used in various websites.
ROLE_TABLE:_("table"),
# Translators: Identifies a cell in a table.
ROLE_TABLECELL:_("cell"),
# Translators: Identifies a column (a group of vertical cells in a table).
ROLE_TABLECOLUMN:_("column"),
# Translators: Identifies a row (a group of horizontal cells in a table).
ROLE_TABLEROW:_("row"),
# Translators: Identifies a frame (a smaller window in a webpage or a document).
ROLE_FRAME:_("frame"),
# Translators: Identifies a tool bar.
ROLE_TOOLBAR:_("tool bar"),
# Translators: Identifies a column header in tables and spreadsheets.
ROLE_TABLECOLUMNHEADER:_("column header"),
# Translators: Identifies a row header in tables and spreadsheets.
ROLE_TABLEROWHEADER:_("row header"),
# Translators: Identifies a drop down button (a button that, when clicked, opens a menu of its own).
ROLE_DROPDOWNBUTTON:_("drop down button"),
# Translators: Identifies an element.
ROLE_CLOCK:_("clock"),
# Translators: Identifies a separator (a horizontal line drawn on the screen).
ROLE_SEPARATOR:_("separator"),
# Translators: Identifies a form (controls such as edit boxes, combo boxes and so on).
ROLE_FORM:_("form"),
# Translators: Identifies a heading (a bold text used for identifying a section).
ROLE_HEADING:_("heading"),
# Translators: Identifies a heading level.
ROLE_HEADING1:_("heading 1"),
# Translators: Identifies a heading level.
ROLE_HEADING2:_("heading 2"),
# Translators: Identifies a heading level.
ROLE_HEADING3:_("heading 3"),
# Translators: Identifies a heading level.
ROLE_HEADING4:_("heading 4"),
# Translators: Identifies a heading level.
ROLE_HEADING5:_("heading 5"),
# Translators: Identifies a heading level.
ROLE_HEADING6:_("heading 6"),
# Translators: Identifies a paragraph (a group of text surrounded by blank lines).
ROLE_PARAGRAPH:_("paragraph"),
# Translators: Presented for a section in a document which is a block quotation;
# i.e. a long quotation in a separate paragraph distinguished by indentation, etc.
# See http://en.wikipedia.org/wiki/Block_quotation
ROLE_BLOCKQUOTE:_("block quote"),
# Translators: Identifies a table header (a short text at the start of a table which describes what the table is about).
ROLE_TABLEHEADER:_("table header"),
# Translators: Identifies a table body (the main body of the table).
ROLE_TABLEBODY:_("table body"),
# Translators: Identifies a table footer (text placed at the end of the table).
ROLE_TABLEFOOTER:_("table footer"),
# Translators: Identifies a document (for example, a webpage document).
ROLE_DOCUMENT:_("document"),
# Translators: Identifies an animation in a document or a webpage.
ROLE_ANIMATION:_("animation"),
# Translators: Identifies an application in webpages.
ROLE_APPLICATION:_("application"),
# Translators: Identifies a box element.
ROLE_BOX:_("box"),
# Translators: Identifies a grouping (a number of related items grouped together, such as related options in dialogs).
ROLE_GROUPING:_("grouping"),
# Translators: Identifies a property page such as drive properties dialog.
ROLE_PROPERTYPAGE:_("property page"),
# Translators: Identifies a canvas element on webpages (a box with some background color with some text drawn on the box, like a canvas).
ROLE_CANVAS:_("canvas"),
# Translators: Identifies a caption (usually a short text identifying a picture or a graphic on websites).
ROLE_CAPTION:_("caption"),
# Translators: Identifies a check menu item (a menu item with a checkmark as part of the menu item's name).
ROLE_CHECKMENUITEM:_("check menu item"),
# Translators: Identifies a data edit field.
ROLE_DATEEDITOR:_("date edit"),
# Translators: Identifies an icon.
ROLE_ICON:_("icon"),
# Translators: Identifies a directory pane.
ROLE_DIRECTORYPANE:_("directory pane"),
# Translators: Identifies an embedded object such as flash content on webpages.
ROLE_EMBEDDEDOBJECT:_("embedded object"),
# Translators: Identifies an end note.
ROLE_ENDNOTE:_("end note"),
# Translators: Identifies a footer (usually text).
ROLE_FOOTER:_("footer"),
	# Translators: Identifies a foot note (text at the end of a passage or used for annotations).
ROLE_FOOTNOTE:_("foot note"),
# Translators: Reported for an object which is a glass pane; i.e.
# a pane that is guaranteed to be on top of all panes beneath it.
ROLE_GLASSPANE:_("glass pane"),
# Translators: Identifies a header (usually text at top of documents or on tops of pages).
ROLE_HEADER:_("header"),
# Translators: Identifies an image map (a type of graphical link).
ROLE_IMAGEMAP:_("image map"),
# Translators: Identifies an input window.
ROLE_INPUTWINDOW:_("input window"),
# Translators: Identifies a label.
ROLE_LABEL:_("label"),
# Translators: Identifies a note field.
ROLE_NOTE:_("note"),
# Translators: Identifies a page.
ROLE_PAGE:_("page"),
# Translators: Identifies a radio menu item.
ROLE_RADIOMENUITEM:_("radio menu item"),
# Translators: Identifies a layered pane.
ROLE_LAYEREDPANE:_("layered pane"),
# Translators: Identifies a redundant object.
ROLE_REDUNDANTOBJECT:_("redundant object"),
# Translators: Identifies a root pane.
ROLE_ROOTPANE:_("root pane"),
# Translators: May be reported for an editable text object in a toolbar.
# This is deprecated and is not often (if ever) used.
ROLE_EDITBAR:_("edit bar"),
# Translators: Identifies a terminal window such as command prompt.
ROLE_TERMINAL:_("terminal"),
	# Translators: Identifies a rich edit box (an edit box which allows entering formatting commands in addition to text; encountered on webpages and NVDA log viewer).
ROLE_RICHEDIT:_("rich edit"),
# Translators: Identifies a ruler object (commonly seen on some webpages and in some Office programs).
ROLE_RULER:_("ruler"),
# Translators: Identifies a scroll pane.
ROLE_SCROLLPANE:_("scroll pane"),
# Translators: Identifies a section of text.
ROLE_SECTION:_("section"),
# Translators: Identifies a shape.
ROLE_SHAPE:_("shape"),
# Translators: Identifies a split pane.
ROLE_SPLITPANE:_("split pane"),
# Translators: Reported for a view port; i.e. an object usually used in a scroll pane
# which represents the portion of the entire data that the user can see.
# As the user manipulates the scroll bars, the contents of the view port can change.
ROLE_VIEWPORT:_("view port"),
# Translators: Reported for an object that forms part of a menu system
# but which can be undocked from or torn off the menu system
# to exist as a separate window.
ROLE_TEAROFFMENU:_("tear off menu"),
# Translators: Identifies a text frame (a frame window which contains text).
ROLE_TEXTFRAME:_("text frame"),
# Translators: Identifies a toggle button (a button used to toggle something).
ROLE_TOGGLEBUTTON:_("toggle button"),
ROLE_BORDER:_("border"),
# Translators: Identifies a caret object.
ROLE_CARET:_("caret"),
# Translators: Identifies a character field (should not be confused with edit fields).
ROLE_CHARACTER:_("character"),
# Translators: Identifies a chart (commonly seen on some websites and in some Office documents).
ROLE_CHART:_("chart"),
# Translators: Identifies a cursor object.
ROLE_CURSOR:_("cursor"),
# Translators: Identifies a diagram (seen on some websites and on Office documents).
ROLE_DIAGRAM:_("diagram"),
# Translators: Identifies a dial object.
ROLE_DIAL:_("dial"),
# Translators: Identifies a drop list.
ROLE_DROPLIST:_("drop list"),
# Translators: Identifies a split button (a control which performs different actions when different parts are clicked).
ROLE_SPLITBUTTON:_("split button"),
# Translators: Identifies a menu button (a button which opens a menu of items).
ROLE_MENUBUTTON:_("menu button"),
# Translators: Reported for a button which expands a grid when it is pressed.
ROLE_DROPDOWNBUTTONGRID:_("drop down button grid"),
# Translators: Identifies mathematical content.
ROLE_MATH:_("math"),
# Translators: Identifies a grip control.
ROLE_GRIP:_("grip"),
# Translators: Identifies a hot key field (a field where one can enter a hot key for something, such as assigning shortcut for icons on the desktop).
ROLE_HOTKEYFIELD:_("hot key field"),
# Translators: Identifies an indicator control.
ROLE_INDICATOR:_("indicator"),
# Translators: Identifies a spin button (a button used to go through options in a spinning fashion).
ROLE_SPINBUTTON:_("spin button"),
# Translators: Identifies a sound clip on websites.
ROLE_SOUND:_("sound"),
# Translators: Identifies a whitespace.
ROLE_WHITESPACE:_("white space"),
# Translators: Identifies a tree view button.
ROLE_TREEVIEWBUTTON:_("tree view button"),
# Translators: Identifies an IP address (an IP address field element).
ROLE_IPADDRESS:_("IP address"),
# Translators: Identifies a desktop icon (the icons on the desktop such as computer and various shortcuts for programs).
ROLE_DESKTOPICON:_("desktop icon"),
	# Translators: Identifies an alert message such as file download alert in Internet Explorer 9 and above.
ROLE_ALERT:_("alert"),
# Translators: Identifies an internal frame. This is usually a frame on a web page; i.e. a web page embedded within a web page.
ROLE_INTERNALFRAME:_("frame"),
# Translators: Identifies desktop pane (the desktop window).
ROLE_DESKTOPPANE:_("desktop pane"),
# Translators: Identifies an option pane.
ROLE_OPTIONPANE:_("option pane"),
# Translators: Identifies a color chooser.
ROLE_COLORCHOOSER:_("color chooser"),
# Translators: Identifies a file chooser (to select a file or groups of files from a list).
ROLE_FILECHOOSER:_("file chooser"),
ROLE_FILLER:_("filler"),
# Translators: Identifies a menu such as file menu.
ROLE_MENU:_("menu"),
# Translators: Identifies a panel control for grouping related options.
ROLE_PANEL:_("panel"),
# Translators: Identifies a password field (a protected edit field for entering passwords such as when logging into web-based email sites).
ROLE_PASSWORDEDIT:_("password edit"),
# Translators: Identifies a font chooser.
ROLE_FONTCHOOSER:_("font chooser"),
ROLE_LINE:_("line"),
# Translators: Identifies a font name.
ROLE_FONTNAME:_("font name"),
# Translators: Identifies font size.
ROLE_FONTSIZE:_("font size"),
# Translators: Describes text formatting.
ROLE_BOLD:_("bold"),
# Translators: Describes text formatting.
ROLE_ITALIC:_("italic"),
# Translators: Describes text formatting.
ROLE_UNDERLINE:_("underline"),
# Translators: Describes text formatting.
ROLE_FGCOLOR:_("foreground color"),
# Translators: Describes text formatting.
ROLE_BGCOLOR:_("background color"),
# Translators: Describes text formatting.
ROLE_SUPERSCRIPT:_("superscript"),
# Translators: Describes text formatting.
ROLE_SUBSCRIPT:_("subscript"),
# Translators: Describes style of text.
ROLE_STYLE:_("style"),
# Translators: Describes text formatting.
ROLE_INDENT:_("indent"),
# Translators: Describes text formatting.
ROLE_ALIGNMENT:_("alignment"),
# Translators: Identifies an alert window or bar (usually on Internet Explorer 9 and above for alerts such as file downloads or pop-up blocker).
ROLE_ALERT:_("alert"),
# Translators: Identifies a data grid control (a grid which displays data).
ROLE_DATAGRID:_("data grid"),
ROLE_DATAITEM:_("data item"),
ROLE_HEADERITEM:_("header item"),
# Translators: Identifies a thumb control (a button-like control for changing options).
ROLE_THUMB:_("thumb control"),
ROLE_CALENDAR:_("calendar"),
ROLE_VIDEO:_("video"),
ROLE_AUDIO:_("audio"),
# Translators: Identifies a chart element.
ROLE_CHARTELEMENT:_("chart element"),
# Translators: Identifies deleted content.
ROLE_DELETED_CONTENT:_("deleted"),
# Translators: Identifies inserted content.
ROLE_INSERTED_CONTENT:_("inserted"),
}
stateLabels={
# Translators: This is presented when a control or document is unavailable.
STATE_UNAVAILABLE:_("unavailable"),
# Translators: This is presented when a control has focus.
STATE_FOCUSED:_("focused"),
# Translators: This is presented when the control is selected.
STATE_SELECTED:_("selected"),
# Translators: This is presented when a document is busy.
STATE_BUSY:_("busy"),
# Translators: This is presented when a button is pressed.
STATE_PRESSED:_("pressed"),
# Translators: This is presented when a check box is checked.
STATE_CHECKED:_("checked"),
# Translators: This is presented when a three state check box is half checked.
STATE_HALFCHECKED:_("half checked"),
# Translators: This is presented when the control is a read-only control such as read-only edit box.
STATE_READONLY:_("read only"),
# Translators: This is presented when a tree view or submenu item is expanded.
STATE_EXPANDED:_("expanded"),
# Translators: This is presented when a tree view or submenu is collapsed.
STATE_COLLAPSED:_("collapsed"),
# Translators: This is presented when a control or a document becomes invisible.
STATE_INVISIBLE:_("invisible"),
# Translators: This is presented when a visited link is encountered.
STATE_VISITED:_("visited"),
# Translators: This is presented when a link is encountered.
STATE_LINKED:_("linked"),
# Translators: This is presented when the control menu item has a submenu.
STATE_HASPOPUP:_("subMenu"),
# Translators: This is presented when a protected control or a document is encountered.
STATE_PROTECTED:_("protected"),
# Translators: This is presented when a required form field is encountered.
STATE_REQUIRED:_("required"),
# Translators: Reported when an object no longer exists in the user interface;
# i.e. it is dead and is no longer usable.
STATE_DEFUNCT:_("defunct"),
# Translators: This is presented when an invalid entry has been made.
STATE_INVALID_ENTRY:_("invalid entry"),
STATE_MODAL:_("modal"),
# Translators: This is presented when a field supports auto completion of entered text such as email address field in Microsoft Outlook.
STATE_AUTOCOMPLETE:_("has auto complete"),
# Translators: This is presented when an edit field allows typing multiple lines of text such as comment fields on websites.
STATE_MULTILINE:_("multi line"),
STATE_ICONIFIED:_("iconified"),
# Translators: Presented when the current control is located off screen.
STATE_OFFSCREEN:_("off screen"),
# Translators: Presented when the control allows selection such as text fields.
STATE_SELECTABLE:_("selectable"),
# Translators: Presented when a control can be moved to using system focus.
STATE_FOCUSABLE:_("focusable"),
# Translators: Presented when a control allows clicking via mouse (mostly presented on web controls).
STATE_CLICKABLE:_("clickable"),
STATE_EDITABLE:_("editable"),
STATE_CHECKABLE:_("checkable"),
STATE_DRAGGABLE:_("draggable"),
STATE_DRAGGING:_("dragging"),
# Translators: Reported where an object which is being dragged can be dropped.
# This is only reported for objects that support accessible drag and drop.
STATE_DROPTARGET:_("drop target"),
STATE_SORTED:_("sorted"),
STATE_SORTED_ASCENDING:_("sorted ascending"),
STATE_SORTED_DESCENDING:_("sorted descending"),
# Translators: a state that denotes that an object (usually a graphic) has a long description.
STATE_HASLONGDESC:_("has long description"),
# Translators: a state that denotes that an object is pinned in its current location
STATE_PINNED:_("pinned"),
	# Translators: a state that denotes the existence of a formula on a spreadsheet cell
STATE_HASFORMULA:_("has formula"),
	# Translators: a state that denotes the existence of a comment.
STATE_HASCOMMENT:_("has comment"),
# Translators: a state that denotes that the object is covered partially or fully by another object
STATE_OBSCURED:_("obscured"),
	# Translators: a state that denotes that the object (text) is cropped as it couldn't be accommodated in the allocated/available space
STATE_CROPPED:_("cropped"),
	# Translators: a state that denotes that the object (text) is overflowing into the adjacent space
STATE_OVERFLOWING:_("overflowing"),
# Translators: a state that denotes that the object is unlocked (such as an unlocked cell in a protected Excel spreadsheet).
STATE_UNLOCKED:_("unlocked"),
}
negativeStateLabels={
# Translators: This is presented when a selectable object (e.g. a list item) is not selected.
STATE_SELECTED:_("not selected"),
# Translators: This is presented when a button is not pressed.
STATE_PRESSED:_("not pressed"),
# Translators: This is presented when a checkbox is not checked.
STATE_CHECKED:_("not checked"),
# Translators: This is presented when drag and drop is finished.
# This is only reported for objects which support accessible drag and drop.
STATE_DROPTARGET:_("done dragging"),
}
silentRolesOnFocus={
ROLE_PANE,
ROLE_ROOTPANE,
ROLE_FRAME,
ROLE_UNKNOWN,
ROLE_APPLICATION,
ROLE_TABLECELL,
ROLE_LISTITEM,
ROLE_MENUITEM,
ROLE_CHECKMENUITEM,
ROLE_TREEVIEWITEM,
ROLE_STATICTEXT,
ROLE_BORDER,
}
silentValuesForRoles={
ROLE_CHECKBOX,
ROLE_RADIOBUTTON,
ROLE_LINK,
ROLE_MENUITEM,
ROLE_APPLICATION,
}
#{ Output reasons
# These constants are used to specify the reason that a given piece of output was generated.
#: An object to be reported due to a focus change or similar.
REASON_FOCUS="focus"
#: An ancestor of the focus object to be reported due to a focus change or similar.
REASON_FOCUSENTERED="focusEntered"
#: An item under the mouse.
REASON_MOUSE="mouse"
#: A response to a user query.
REASON_QUERY="query"
#: Reporting a change to an object.
REASON_CHANGE="change"
#: A generic, screen reader specific message.
REASON_MESSAGE="message"
#: Text reported as part of a say all.
REASON_SAYALL="sayAll"
#: Content reported due to caret movement or similar.
REASON_CARET="caret"
#: No output, but any state should be cached as if output had occurred.
REASON_ONLYCACHE="onlyCache"
#}
#: Text to use for 'current' values. These describe if an item is the current item
#: within a particular kind of selection.
isCurrentLabels = {
# Translators: Presented when an item is marked as current in a collection of items
True:_("current"),
# Translators: Presented when a page item is marked as current in a collection of page items
"page":_("current page"),
# Translators: Presented when a step item is marked as current in a collection of step items
"step":_("current step"),
# Translators: Presented when a location item is marked as current in a collection of location items
"location":_("current location"),
# Translators: Presented when a date item is marked as current in a collection of date items
"date":_("current date"),
# Translators: Presented when a time item is marked as current in a collection of time items
"time":_("current time"),
}
def processPositiveStates(role, states, reason, positiveStates=None):
"""Processes the states for an object and returns the positive states to output for a specified reason.
For example, if C{STATE_CHECKED} is in the returned states, it means that the processed object is checked.
	@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}).
@type role: int
@param states: The raw states for an object to process.
@type states: set
	@param reason: The reason to process the states (e.g. C{REASON_FOCUS}).
@type reason: str
@param positiveStates: Used for C{REASON_CHANGE}, specifies states changed from negative to positive;
@type positiveStates: set
@return: The processed positive states.
@rtype: set
"""
positiveStates = positiveStates.copy() if positiveStates is not None else states.copy()
# The user never cares about certain states.
if role==ROLE_EDITABLETEXT:
positiveStates.discard(STATE_EDITABLE)
if role!=ROLE_LINK:
positiveStates.discard(STATE_VISITED)
positiveStates.discard(STATE_SELECTABLE)
positiveStates.discard(STATE_FOCUSABLE)
positiveStates.discard(STATE_CHECKABLE)
if STATE_DRAGGING in positiveStates:
# It's obvious that the control is draggable if it's being dragged.
positiveStates.discard(STATE_DRAGGABLE)
if role == ROLE_COMBOBOX:
# Combo boxes inherently have a popup, so don't report it.
positiveStates.discard(STATE_HASPOPUP)
import config
if not config.conf['documentFormatting']['reportClickable'] or role in (ROLE_LINK, ROLE_BUTTON, ROLE_CHECKBOX, ROLE_RADIOBUTTON, ROLE_TOGGLEBUTTON, ROLE_MENUITEM, ROLE_TAB, ROLE_SLIDER, ROLE_DOCUMENT, ROLE_CHECKMENUITEM, ROLE_RADIOMENUITEM):
# This control is clearly clickable according to its role,
# or reporting clickable just isn't useful,
# or the user has explicitly requested no reporting clickable
positiveStates.discard(STATE_CLICKABLE)
if reason == REASON_QUERY:
return positiveStates
positiveStates.discard(STATE_DEFUNCT)
positiveStates.discard(STATE_MODAL)
positiveStates.discard(STATE_FOCUSED)
positiveStates.discard(STATE_OFFSCREEN)
positiveStates.discard(STATE_INVISIBLE)
if reason != REASON_CHANGE:
positiveStates.discard(STATE_LINKED)
if role in (ROLE_LISTITEM, ROLE_TREEVIEWITEM, ROLE_MENUITEM, ROLE_TABLEROW) and STATE_SELECTABLE in states:
positiveStates.discard(STATE_SELECTED)
if role not in (ROLE_EDITABLETEXT, ROLE_CHECKBOX):
positiveStates.discard(STATE_READONLY)
if role == ROLE_CHECKBOX:
positiveStates.discard(STATE_PRESSED)
if role == ROLE_MENUITEM:
# The user doesn't usually care if a menu item is expanded or collapsed.
positiveStates.discard(STATE_COLLAPSED)
positiveStates.discard(STATE_EXPANDED)
if STATE_FOCUSABLE not in states:
positiveStates.discard(STATE_EDITABLE)
return positiveStates
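# Illustrative example (editor-added sketch; assumes NVDA's configuration has been
# initialised, since this function consults config for clickable reporting):
# for a checked, focused, focusable and checkable check box,
#   processPositiveStates(ROLE_CHECKBOX,
#     {STATE_CHECKED, STATE_FOCUSED, STATE_FOCUSABLE, STATE_CHECKABLE}, REASON_FOCUS)
# returns a set containing only STATE_CHECKED - the focus/ability states are dropped.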
def processNegativeStates(role, states, reason, negativeStates=None):
"""Processes the states for an object and returns the negative states to output for a specified reason.
For example, if C{STATE_CHECKED} is in the returned states, it means that the processed object is not checked.
	@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}).
@type role: int
@param states: The raw states for an object to process.
@type states: set
	@param reason: The reason to process the states (e.g. C{REASON_FOCUS}).
@type reason: str
@param negativeStates: Used for C{REASON_CHANGE}, specifies states changed from positive to negative;
@type negativeStates: set
@return: The processed negative states.
@rtype: set
"""
if reason == REASON_CHANGE and not isinstance(negativeStates, set):
raise TypeError("negativeStates must be a set for this reason")
speakNegatives = set()
# Add the negative selected state if the control is selectable,
# but only if it is either focused or this is something other than a change event.
# The condition stops "not selected" from being spoken in some broken controls
# when the state change for the previous focus is issued before the focus change.
if role in (ROLE_LISTITEM, ROLE_TREEVIEWITEM, ROLE_TABLEROW) and STATE_SELECTABLE in states and (reason != REASON_CHANGE or STATE_FOCUSED in states):
speakNegatives.add(STATE_SELECTED)
# Restrict "not checked" in a similar way to "not selected".
if (role in (ROLE_CHECKBOX, ROLE_RADIOBUTTON, ROLE_CHECKMENUITEM) or STATE_CHECKABLE in states) and (STATE_HALFCHECKED not in states) and (reason != REASON_CHANGE or STATE_FOCUSED in states):
speakNegatives.add(STATE_CHECKED)
if role == ROLE_TOGGLEBUTTON:
speakNegatives.add(STATE_PRESSED)
if reason == REASON_CHANGE:
# We want to speak this state only if it is changing to negative.
speakNegatives.add(STATE_DROPTARGET)
# We were given states which have changed to negative.
# Return only those supplied negative states which should be spoken;
# i.e. the states in both sets.
speakNegatives &= negativeStates
# #6946: if HALFCHECKED is present but CHECKED isn't, we should make sure we add CHECKED to speakNegatives.
if (STATE_HALFCHECKED in negativeStates and STATE_CHECKED not in states):
speakNegatives.add(STATE_CHECKED)
if STATES_SORTED & negativeStates and not STATES_SORTED & states:
# If the object has just stopped being sorted, just report not sorted.
# The user doesn't care how it was sorted before.
speakNegatives.add(STATE_SORTED)
return speakNegatives
else:
# This is not a state change; only positive states were supplied.
# Return all negative states which should be spoken, excluding the positive states.
return speakNegatives - states
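# Illustrative example (editor-added sketch): for a focused, selectable list item
# whose raw states do not include STATE_SELECTED,
#   processNegativeStates(ROLE_LISTITEM, {STATE_SELECTABLE, STATE_FOCUSED}, REASON_FOCUS)
# returns {STATE_SELECTED}, i.e. "not selected" should be reported.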
def processAndLabelStates(role, states, reason, positiveStates=None, negativeStates=None, positiveStateLabelDict={}, negativeStateLabelDict={}):
"""Processes the states for an object and returns the appropriate state labels for both positive and negative states.
	@param role: The role of the object to process states for (e.g. C{ROLE_CHECKBOX}).
@type role: int
@param states: The raw states for an object to process.
@type states: set
	@param reason: The reason to process the states (e.g. C{REASON_FOCUS}).
@type reason: str
@param positiveStates: Used for C{REASON_CHANGE}, specifies states changed from negative to positive;
@type positiveStates: set
@param negativeStates: Used for C{REASON_CHANGE}, specifies states changed from positive to negative;
	@type negativeStates: set
@param positiveStateLabelDict: Dictionary containing state identifiers as keys and associated positive labels as their values.
@type positiveStateLabelDict: dict
@param negativeStateLabelDict: Dictionary containing state identifiers as keys and associated negative labels as their values.
@type negativeStateLabelDict: dict
@return: The labels of the relevant positive and negative states.
@rtype: [str, ...]
"""
mergedStateLabels=[]
positiveStates = processPositiveStates(role, states, reason, positiveStates)
negativeStates = processNegativeStates(role, states, reason, negativeStates)
for state in sorted(positiveStates | negativeStates):
if state in positiveStates:
mergedStateLabels.append(positiveStateLabelDict.get(state, stateLabels[state]))
elif state in negativeStates:
# Translators: Indicates that a particular state of an object is negated.
# Separate strings have now been defined for commonly negated states (e.g. not selected and not checked),
# but this still might be used in some other cases.
# %s will be replaced with the full identifier of the negated state (e.g. selected).
mergedStateLabels.append(negativeStateLabelDict.get(state, negativeStateLabels.get(state, _("not %s") % stateLabels[state])))
return mergedStateLabels
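# Illustrative example (editor-added sketch; assumes NVDA's configuration has been
# initialised): an unchecked, focusable check box yields only the negative label,
#   processAndLabelStates(ROLE_CHECKBOX, {STATE_FOCUSABLE}, REASON_FOCUS)
# which evaluates to ['not checked'] (subject to translation).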
| 1 | 23,530 | Could you split this into multiple lines? | nvaccess-nvda | py |
@@ -25,12 +25,17 @@ import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
-import Layout from '../../../components/layout/layout';
+import Data from 'googlesitekit-data';
import DashboardModuleHeader from '../../../components/dashboard/dashboard-module-header';
import DashboardPageSpeed from '../components/DashboardPageSpeed';
+import { STORE_NAME } from '../../../googlesitekit/datastore/site/constants';
+const { useSelect } = Data;
function DashboardSpeed() {
- const description = global.googlesitekit.permaLink ? __( 'How fast this page is.', 'google-site-kit' ) : __( 'How fast your home page is.', 'google-site-kit' );
+ const currentEntityURL = useSelect( ( select ) => select( STORE_NAME ).getCurrentEntityURL() );
+ const description = currentEntityURL
+ ? __( 'How fast your page loads, how quickly people can interact with your content, and how stable your content is.', 'google-site-kit' )
+ : __( 'How fast your home page loads, how quickly people can interact with your content, and how stable your content is.', 'google-site-kit' );
return (
<Fragment> | 1 | /**
* DashboardSpeed component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import { Fragment } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Layout from '../../../components/layout/layout';
import DashboardModuleHeader from '../../../components/dashboard/dashboard-module-header';
import DashboardPageSpeed from '../components/DashboardPageSpeed';
function DashboardSpeed() {
const description = global.googlesitekit.permaLink ? __( 'How fast this page is.', 'google-site-kit' ) : __( 'How fast your home page is.', 'google-site-kit' );
return (
<Fragment>
<div id="googlesitekit-pagespeed-header" className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<DashboardModuleHeader
title={ __( 'Speed', 'google-site-kit' ) }
description={ description }
/>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<Layout className="googlesitekit-pagespeed-report">
<DashboardPageSpeed />
</Layout>
</div>
</Fragment>
);
}
export default DashboardSpeed;
| 1 | 29,699 | This technically works, however it may be safer to use both `getCurrentReferenceURL` and `getCurrentEntityURL` and then make this condition `currentReferenceURL === currentEntityURL`. Currently, this code only works based on the internals of `getCurrentReferenceURL`, so by checking both we would decouple that. | google-site-kit-wp | js |
@@ -69,9 +69,9 @@ func (c *WSClient) Connect() (conn.Connection, error) {
if resp != nil {
body, errRead := ioutil.ReadAll(resp.Body)
if errRead == nil {
- respMsg = fmt.Sprintf("response code: %d, response body: %s", resp.StatusCode, string(body))
+ respMsg = fmt.Sprintf("Response code: %d, response body: %s", resp.StatusCode, string(body))
} else {
- respMsg = fmt.Sprintf("response code: %d", resp.StatusCode)
+ respMsg = fmt.Sprintf("Response code: %d", resp.StatusCode)
}
resp.Body.Close()
} | 1 | package client
import (
"fmt"
"io/ioutil"
"k8s.io/klog/v2"
"github.com/gorilla/websocket"
"github.com/kubeedge/viaduct/pkg/api"
"github.com/kubeedge/viaduct/pkg/conn"
"github.com/kubeedge/viaduct/pkg/lane"
"github.com/kubeedge/viaduct/pkg/utils"
)
// the client based on websocket
type WSClient struct {
options Options
exOpts api.WSClientOption
dialer *websocket.Dialer
}
// NewWSClient creates a new websocket client instance.
func NewWSClient(options Options, exOpts interface{}) *WSClient {
extendOption, ok := exOpts.(api.WSClientOption)
if !ok {
panic("bad websocket extend option")
}
return &WSClient{
options: options,
exOpts: extendOption,
dialer: &websocket.Dialer{
TLSClientConfig: options.TLSConfig,
HandshakeTimeout: options.HandshakeTimeout,
},
}
}
// Connect tries to connect to the remote server
func (c *WSClient) Connect() (conn.Connection, error) {
header := c.exOpts.Header
header.Add("ConnectionUse", string(c.options.ConnUse))
wsConn, resp, err := c.dialer.Dial(c.options.Addr, header)
if err == nil {
klog.Infof("dial %s successfully", c.options.Addr)
// do user's processing on connection or response
if c.exOpts.Callback != nil {
c.exOpts.Callback(wsConn, resp)
}
return conn.NewConnection(&conn.ConnectionOptions{
ConnType: api.ProtocolTypeWS,
ConnUse: c.options.ConnUse,
Base: wsConn,
Consumer: c.options.Consumer,
Handler: c.options.Handler,
CtrlLane: lane.NewLane(api.ProtocolTypeWS, wsConn),
State: &conn.ConnectionState{
State: api.StatConnected,
Headers: utils.DeepCopyHeader(c.exOpts.Header),
},
AutoRoute: c.options.AutoRoute,
}), nil
}
	// something went wrong!
var respMsg string
if resp != nil {
body, errRead := ioutil.ReadAll(resp.Body)
if errRead == nil {
respMsg = fmt.Sprintf("response code: %d, response body: %s", resp.StatusCode, string(body))
} else {
respMsg = fmt.Sprintf("response code: %d", resp.StatusCode)
}
resp.Body.Close()
}
klog.Errorf("dial websocket error(%+v), response message: %s", err, respMsg)
return nil, err
}
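// Illustrative usage (editor-added sketch; the address, timeout and option values
// below are assumptions, not taken from this package):
//
//	client := NewWSClient(Options{
//		Addr:             "wss://127.0.0.1:10000",
//		HandshakeTimeout: 30 * time.Second,
//	}, api.WSClientOption{Header: http.Header{}})
//	connection, err := client.Connect()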
| 1 | 22,406 | It does not recommend capitalization | kubeedge-kubeedge | go |
@@ -113,6 +113,17 @@ feature 'User creates a subscription' do
expect(page).to have_content(I18n.t('subscriptions.flashes.update.success'))
end
+ scenario 'updates Stripe subscription with declining credit card', :js => true do
+ FakeStripe.failure = true
+ sign_in_as_subscriber
+ visit my_account_path
+ submit_declining_credit_card_info
+
+ expect(current_path).to eq my_account_path
+ expect(page).to_not have_content(I18n.t('subscriptions.flashes.update.success'))
+ expect(page).to have_content 'Your credit card was declined'
+ end
+
scenario 'does not see option to update billing if not subscribing' do
visit my_account_path
| 1 | require 'spec_helper'
feature 'User creates a subscription' do
VALID_SANDBOX_CREDIT_CARD_NUMBER = '4111111111111111'
background do
create_subscribeable_product
sign_in
end
scenario 'creates a Stripe subscription with a valid credit card' do
subscribe_with_valid_credit_card
expect(current_user).to have_active_subscription
expect(current_path).to eq products_path
expect(page).to have_content(I18n.t('purchase.flashes.success', name: subscription_product.name))
end
scenario 'does not create a Stripe subscription with an invalid credit card' do
subscribe_with_invalid_credit_card
expect(current_user).not_to have_active_subscription
end
scenario 'does not see a group option' do
visit_subscription_product_page
expect(page).to_not have_content(I18n.t('products.show.purchase_for_company'))
end
scenario 'sees that the subscription is per month' do
start_purchasing_subscription
expect(page).to have_content('per month')
end
scenario 'does not see the option to pay with paypal' do
visit_subscription_product_page
click_purchase_link
expect(page).not_to have_css('#purchase_payment_method_paypal')
end
scenario 'does not see the coupon functionality' do
visit_subscription_product_page
click_purchase_link
expect(page).not_to have_content('Have a coupon code?')
end
scenario "user without github username sees github username input" do
current_user.github_username = nil
current_user.save!
start_purchasing_subscription
expect(page).to have_content('GitHub username')
expect(page).to have_css('input#github_username_1')
end
scenario "user with github username doesn't see github username input" do
current_user.github_username = 'cpyteltest'
current_user.save!
visit_subscription_product_page
click_purchase_link
expect(page).not_to have_content('GitHub username')
expect(page).not_to have_css('input#github_username_1')
end
scenario 'creates a Stripe subscription with a valid coupon', :js => true do
create_amount_stripe_coupon('5OFF', 'once', 500)
start_purchasing_subscription
expect(page).to have_content("$15 per month")
click_link "Have a coupon code?"
fill_in "Code", with: '5OFF'
click_button "Apply Coupon"
expect(page).to have_content("$10 the first month, then $15 per month")
fill_out_subscription_form_with VALID_SANDBOX_CREDIT_CARD_NUMBER
expect(current_path).to eq products_path
expect(page).to have_content(I18n.t('purchase.flashes.success', name: subscription_product.name))
end
scenario 'creates a Stripe subscription with an invalid coupon', :js => true do
start_purchasing_subscription
expect(page).to have_content("$15 per month")
click_link "Have a coupon code?"
fill_in "Code", with: '5OFF'
click_button "Apply Coupon"
expect(page).to have_content("The coupon code you supplied is not valid.")
end
scenario 'sees option to update billing for subscribers' do
sign_in_as_subscriber
visit my_account_path
expect(page).to have_content('Your Subscription Billing Info')
end
scenario 'updates Stripe subscription', :js => true do
sign_in_as_subscriber
visit my_account_path
submit_new_credit_card_info
expect(current_path).to eq my_account_path
expect(page).to have_content(I18n.t('subscriptions.flashes.update.success'))
end
scenario 'does not see option to update billing if not subscribing' do
visit my_account_path
expect(page).not_to have_content('Your Subscription Billing Info')
end
def submit_new_credit_card_info
credit_card_expires_on = Time.now.advance(years: 1)
month_selection = credit_card_expires_on.strftime('%-m - %B')
year_selection = credit_card_expires_on.strftime('%Y')
valid_cc_num = '4242424242424242'
fill_in 'Card Number', with: valid_cc_num
select month_selection, from: 'date[month]'
select year_selection, from: 'date[year]'
fill_in 'CVC', with: '333'
click_button 'Update Your Card'
end
def visit_subscription_product_page
visit products_path
end
def create_subscribeable_product
@subscription_product = create(:subscribeable_product)
end
def create_product_with_video
video_product = create :video_product
create :video, watchable: video_product
video_product
end
def sign_in
@current_user = create(:user)
visit root_path(as: @current_user)
end
def sign_in_as_subscriber
subscriber = create(:user, :with_subscription)
visit root_path(as: subscriber)
end
def subscribe_with_valid_credit_card
start_purchasing_subscription
fill_out_subscription_form_with VALID_SANDBOX_CREDIT_CARD_NUMBER
end
def subscribe_with_invalid_credit_card
start_purchasing_subscription
FakeStripe.failure = true
fill_out_subscription_form_with 'bad cc number'
end
def start_purchasing_subscription
visit_subscription_product_page
click_purchase_link
click_link I18n.t('products.show.purchase_subscription')
end
def click_purchase_link
click_link I18n.t('shared.subscription_call_to_action')
end
def current_user
@current_user
end
def subscription_product
@subscription_product
end
def fill_out_subscription_form_with(credit_card_number)
credit_card_expires_on = Time.now.advance(years: 1)
month_selection = credit_card_expires_on.strftime('%-m - %B')
year_selection = credit_card_expires_on.strftime('%Y')
fill_in 'GitHub username', with: 'cpytel'
fill_in 'Card Number', with: credit_card_number
select month_selection, from: 'date[month]'
select year_selection, from: 'date[year]'
fill_in 'CVC', with: '333'
click_button 'Submit Payment'
end
def create_amount_stripe_coupon(id, duration, amount_off)
Stripe::Coupon.create(
:id => id,
:duration => duration,
:amount_off => amount_off
)
end
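  # A percent-off variant could follow the same shape (editor-added sketch;
  # not used by the scenarios above):
  #
  #   def create_percent_stripe_coupon(id, duration, percent_off)
  #     Stripe::Coupon.create(id: id, duration: duration, percent_off: percent_off)
  #   end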
end
| 1 | 7,334 | Minor thing, but can you use 1.9 hash syntax for this? | thoughtbot-upcase | rb |
@@ -361,6 +361,7 @@ class LambdaExecutorReuseContainers(LambdaExecutorContainers):
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
+ lambda_cwd_on_host = Util.format_windows_path(lambda_cwd)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container | 1 | import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
LAMBDA_EVENT_FILE = 'event_file.json'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
EVENT_SOURCE_SQS = 'aws:sqs'
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
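# Illustrative example for _store_logs (editor-added): for a function named "my-func"
# invoked at 2020-01-01 00:00:00 UTC with container id "abc123", the computed names would be
#   log_group_name  = '/aws/lambda/my-func'
#   log_stream_name = '2020/01/01/[LATEST]abc123'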
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
# return final result
return result
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
env_vars=env_vars, stdin=True)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output))
return result
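    # Illustrative note for run_lambda_executor (editor-added): if the Lambda process writes
    #   'some log line\n{"ok": true}'
    # to its output stream, only the last line ('{"ok": true}') is returned as the
    # result and 'some log line' is appended to the stored log output.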
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
docker_host = config.DOCKER_HOST_FROM_CONTAINER
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
environment['_HANDLER'] = handler
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if is_java_lambda(runtime):
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'%s stop -t0 %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'%s rm %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
        :return: A list of localstack docker container names, one per function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
            -1 if the container exists but is not running,
            0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
            # If the container doesn't exist, return 0 so the caller can create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds,
        checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.next_port = 1
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = str(self.next_port + self.port_offset)
env_vars['DOCKER_LAMBDA_API_PORT'] = port
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
self.next_port = (self.next_port + 1) % self.max_port
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
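        # Illustrative shapes of the two commands built below (hypothetical values):
        #   LAMBDA_REMOTE_DOCKER:  CONTAINER_ID="$(docker create -i ... <image> <cmd>)"; docker cp "<cwd>/." "$CONTAINER_ID:/var/task"; docker start -ai "$CONTAINER_ID";
        #   volume mount:          docker run -i -v "<cwd>":/var/task ... <image> <cmd>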
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
docker_image, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
network_str, rm_flag, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
with CaptureOutput() as c:
process.run()
result = queue.get()
# TODO: Interweaving stdout/stderr currently not supported
log_output = ''
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
# store logs to CloudWatch
_store_logs(func_details, log_output)
return result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file, Util.get_java_classpath(main_file))
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
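        # e.g. (hypothetical setting) LAMBDA_JAVA_OPTS='-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=_debug_port_'
        # would get '_debug_port_' substituted with a free TCP port chosen at runtime.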
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[0]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
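        # Maps a path under config.TMP_FOLDER to the same relative path under config.HOST_TMP_FOLDER,
        # e.g. (hypothetical folders) '/tmp/localstack/zip.abc' -> '/host/.localstack/zip.abc'.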
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def docker_image_for_runtime(cls, runtime):
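        # Illustrative results, assuming the default 'lambci/lambda' registry: 'python3.6' maps to
        # "lambci/lambda:20191117-python3.6", while 'nodejs8.10' maps to "lambci/lambda:nodejs8.10".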
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
result = ':'.join(entries)
return result
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
| 1 | 10,934 | I tried to follow the code structure already present. | localstack-localstack | py |
@@ -191,7 +191,7 @@ public class FileHandler {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
- throw new IOException("Could not transfer all bytes.");
+ throw new IOException("Could not transfer all bytes of " + from.toPath());
}
}
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.io;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import org.openqa.selenium.Platform;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.util.List;
/**
* Utility methods for common filesystem activities
*/
public class FileHandler {
public static File unzip(InputStream resource) throws IOException {
File output = TemporaryFilesystem.getDefaultTmpFS().createTempDir("unzip", "stream");
new Zip().unzip(resource, output);
return output;
}
public static void copyResource(File outputDir, Class<?> forClassLoader, String... names)
throws IOException {
Zip zip = new Zip();
for (String name : names) {
InputStream is = locateResource(forClassLoader, name);
try {
zip.unzipFile(outputDir, is, name);
} finally {
is.close();
}
}
}
private static InputStream locateResource(Class<?> forClassLoader, String name)
throws IOException {
String arch = System.getProperty("os.arch").toLowerCase() + "/";
List<String> alternatives =
Lists.newArrayList(name, "/" + name, arch + name, "/" + arch + name);
if (Platform.getCurrent().is(Platform.MAC)) {
alternatives.add("mac/" + name);
alternatives.add("/mac/" + name);
}
// First look using our own classloader
for (String possibility : alternatives) {
InputStream stream = FileHandler.class.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
stream = forClassLoader.getResourceAsStream(possibility);
if (stream != null) {
return stream;
}
}
throw new IOException("Unable to locate: " + name);
}
public static boolean createDir(File dir) throws IOException {
if ((dir.exists() || dir.mkdirs()) && dir.canWrite())
return true;
if (dir.exists()) {
FileHandler.makeWritable(dir);
return dir.canWrite();
}
    // Iterate through the parent directories until we find one that exists,
// then sink down.
return createDir(dir.getParentFile());
}
public static boolean makeWritable(File file) throws IOException {
if (file.canWrite()) {
return true;
}
return file.setWritable(true);
}
public static boolean makeExecutable(File file) throws IOException {
if (canExecute(file)) {
return true;
}
return file.setExecutable(true);
}
public static Boolean canExecute(File file) {
return file.canExecute();
}
public static boolean isZipped(String fileName) {
return fileName.endsWith(".zip") || fileName.endsWith(".xpi");
}
public static boolean delete(File toDelete) {
boolean deleted = true;
if (toDelete.isDirectory()) {
File[] children = toDelete.listFiles();
if (children != null) {
for (File child : children) {
deleted &= child.canWrite() && delete(child);
}
}
}
return deleted && toDelete.canWrite() && toDelete.delete();
}
public static void copy(File from, File to) throws IOException {
copy(from, to, new NoFilter());
}
public static void copy(File source, File dest, String suffix) throws IOException {
copy(source, dest, suffix == null ? new NoFilter() : new FileSuffixFilter(suffix));
}
private static void copy(File source, File dest, Filter onlyCopy) throws IOException {
if (!source.exists()) {
return;
}
if (source.isDirectory()) {
copyDir(source, dest, onlyCopy);
} else {
copyFile(source, dest, onlyCopy);
}
}
private static void copyDir(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
// Create the target directory.
createDir(to);
// List children.
String[] children = from.list();
if (children == null) {
throw new IOException("Could not copy directory " + from.getPath());
}
for (String child : children) {
if (!".parentlock".equals(child) && !"parent.lock".equals(child)) {
copy(new File(from, child), new File(to, child), onlyCopy);
}
}
}
private static void copyFile(File from, File to, Filter onlyCopy) throws IOException {
if (!onlyCopy.isRequired(from)) {
return;
}
try (OutputStream out = new FileOutputStream(to)) {
final long copied = Files.copy(from.toPath(), out);
final long length = from.length();
if (copied != length) {
throw new IOException("Could not transfer all bytes.");
}
}
}
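  // Sketch of the reviewer's suggestion (not necessarily the committed change): report both endpoints,
  //   throw new IOException(
  //       "Could not transfer all bytes from " + from.toPath() + " to " + to.toPath());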
/**
* Used by file operations to determine whether or not to make use of a file.
*/
public interface Filter {
/**
* @param file File to be considered.
     * @return Whether or not to make use of the file in this operation.
*/
boolean isRequired(File file);
}
private static class FileSuffixFilter implements Filter {
private final String suffix;
public FileSuffixFilter(String suffix) {
this.suffix = suffix;
}
public boolean isRequired(File file) {
return file.isDirectory() || file.getAbsolutePath().endsWith(suffix);
}
}
private static class NoFilter implements Filter {
public boolean isRequired(File file) {
return true;
}
}
public static String readAsString(File toRead) throws IOException {
Reader reader = null;
try {
reader = new BufferedReader(new FileReader(toRead));
StringBuilder builder = new StringBuilder();
char[] buffer = new char[4096];
int read;
while ((read = reader.read(buffer)) != -1) {
char[] target = new char[read];
System.arraycopy(buffer, 0, target, 0, read);
builder.append(target);
}
return builder.toString();
} finally {
Closeables.close(reader, false);
}
}
}
| 1 | 13,214 | seems reasonable to also want to include the 'to' location? | SeleniumHQ-selenium | rb |
@@ -118,7 +118,7 @@ class ExperimentTestBaseClass(HelperTestCaseBase):
global g_myEnv
if not g_myEnv:
# Setup environment
- params = type('obj', (object,), {'installDir' : os.environ['NUPIC']})
+ params = type('obj', (object,), {'installDir' : resource_filename("nupic","")})
g_myEnv = MyTestEnvironment(params)
| 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import imp
import json
import logging
from optparse import OptionParser
import os
import pprint
import shutil
import string
import subprocess
import sys
from pkg_resources import resource_filename
import unittest2 as unittest
from nupic.data import dictutils
from nupic.database.ClientJobsDAO import ClientJobsDAO
from nupic.support import aggregationDivide
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
from nupic.swarming import HypersearchWorker
from nupic.swarming.permutationhelpers import PermuteChoices
from nupic.swarming.utils import generatePersistentJobGUID
from nupic.frameworks.opf.expdescriptionapi import OpfEnvironment
from nupic.frameworks.opf.exp_generator import ExpGenerator
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
LOGGER = logging.getLogger(__name__)
HOTGYM_INPUT = resource_filename("nupic.datafiles", "extra/hotgym/hotgym.csv")
g_debug = False
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
class MyTestEnvironment(object):
def __init__(self, options):
# Save all command line options
self.options = options
# Build installation root (e.g., ~/nupic/current)
installRootDir = os.path.abspath(options.installDir)
if not os.path.exists(installRootDir):
raise RuntimeError("install directory %s doesn't exist" % \
(options.installDir))
_debugOut("installRootDir=<%s>" % (installRootDir,))
# Where this script is running from (autotest expgenerator_test.py may have
# copied it from its original location)
self.testRunDir = os.path.dirname(os.path.abspath(__file__))
_debugOut("self.testRunDir=<%s>" % (self.testRunDir,))
# Where to place generated files
self.testOutDir = os.path.join(self.testRunDir, 'expGeneratorOut')
shutil.rmtree(self.testOutDir, ignore_errors=True)
os.makedirs(self.testOutDir)
LOGGER.info("Generating experiment description files in: %s", \
os.path.abspath(self.testOutDir))
def cleanUp(self):
shutil.rmtree(self.testOutDir, ignore_errors=True)
return
class ExperimentTestBaseClass(HelperTestCaseBase):
# We will load the description.py and permutations.py files as modules
# multiple times in order to verify that they are valid python scripts. To
# facilitate this, we reload with a unique module name
# ("expGenerator_generated_script%d") each time.
__pythonScriptImportCount = 0
@classmethod
def newScriptImportName(cls):
cls.__pythonScriptImportCount += 1
name = "expGenerator_generated_script%d" % cls.__pythonScriptImportCount
return name
def setUp(self):
""" Method called to prepare the test fixture. This is called by the
unittest framework immediately before calling the test method; any exception
raised by this method will be considered an error rather than a test
failure. The default implementation does nothing.
"""
global g_myEnv
if not g_myEnv:
# Setup environment
params = type('obj', (object,), {'installDir' : os.environ['NUPIC']})
g_myEnv = MyTestEnvironment(params)
def tearDown(self):
""" Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.
"""
self.resetExtraLogItems()
g_myEnv.cleanUp()
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
def checkPythonScript(self, scriptAbsPath):
self.assertTrue(os.path.isabs(scriptAbsPath))
self.assertTrue(os.path.isfile(scriptAbsPath),
("Expected python script to be present here: <%s>") % \
(scriptAbsPath))
# Test viability of the file as a python script by loading it
# An exception will be raised if this fails
mod = imp.load_source(self.newScriptImportName(), scriptAbsPath)
return mod
def getModules(self, expDesc, hsVersion='v2'):
""" This does the following:
1.) Calls ExpGenerator to generate a base description file and permutations
file from expDescription.
2.) Verifies that description.py and permutations.py are valid python
modules that can be loaded
3.) Returns the loaded base description module and permutations module
Parameters:
-------------------------------------------------------------------
expDesc: JSON format experiment description
hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)
retval: (baseModule, permutationsModule)
"""
#------------------------------------------------------------------
# Call ExpGenerator to generate the base description and permutations
# files.
shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True)
args = [
"--description=%s" % (json.dumps(expDesc)),
"--outDir=%s" % (g_myEnv.testOutDir),
"--version=%s" % (hsVersion)
]
self.addExtraLogItem({'args':args})
ExpGenerator.expGenerator(args)
#----------------------------------------
# Check that generated scripts are present
descriptionPyPath = os.path.join(g_myEnv.testOutDir, "description.py")
permutationsPyPath = os.path.join(g_myEnv.testOutDir, "permutations.py")
return (self.checkPythonScript(descriptionPyPath),
self.checkPythonScript(permutationsPyPath))
def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=2):
""" This does the following:
1.) Calls ExpGenerator to generate a base description file and permutations
file from expDescription.
2.) Verifies that description.py and permutations.py are valid python
modules that can be loaded
3.) Runs the base description.py as an experiment using OPF RunExperiment.
4.) Runs a Hypersearch using the generated permutations.py by passing it
to HypersearchWorker.
Parameters:
-------------------------------------------------------------------
expDesc: JSON format experiment description
hsVersion: which version of hypersearch to use ('v2'; 'v1' was dropped)
retval: list of model results
"""
# --------------------------------------------------------------------
# Generate the description.py and permutations.py. These get generated
# in the g_myEnv.testOutDir directory.
self.getModules(expDesc, hsVersion=hsVersion)
permutationsPyPath = os.path.join(g_myEnv.testOutDir, "permutations.py")
# ----------------------------------------------------------------
# Try running the base experiment
args = [g_myEnv.testOutDir]
from nupic.frameworks.opf.experiment_runner import runExperiment
LOGGER.info("")
LOGGER.info("============================================================")
LOGGER.info("RUNNING EXPERIMENT")
LOGGER.info("============================================================")
runExperiment(args)
# ----------------------------------------------------------------
# Try running the generated permutations
jobParams = {'persistentJobGUID' : generatePersistentJobGUID(),
'permutationsPyFilename': permutationsPyPath,
'hsVersion': hsVersion,
}
if maxModels is not None:
jobParams['maxModels'] = maxModels
args = ['ignoreThis', '--params=%s' % (json.dumps(jobParams))]
self.resetExtraLogItems()
self.addExtraLogItem({'params':jobParams})
LOGGER.info("")
LOGGER.info("============================================================")
LOGGER.info("RUNNING PERMUTATIONS")
LOGGER.info("============================================================")
jobID = HypersearchWorker.main(args)
# Make sure all models completed successfully
cjDAO = ClientJobsDAO.get()
models = cjDAO.modelsGetUpdateCounters(jobID)
modelIDs = [model.modelId for model in models]
results = cjDAO.modelsGetResultAndStatus(modelIDs)
if maxModels is not None:
self.assertEqual(len(results), maxModels, "Expected to get %d model "
"results but only got %d" % (maxModels, len(results)))
for result in results:
self.assertEqual(result.completionReason, cjDAO.CMPL_REASON_EOF,
"Model did not complete successfully:\n%s" % (result.completionMsg))
return results
def assertIsInt(self, x, msg=None):
xInt = int(round(x))
if msg is None:
msg = "%s is not a valid integer" % (str(x))
self.assertLess(abs(x - xInt), 0.0001 * x, msg)
def assertValidSwarmingAggregations(self, expDesc, expectedAttempts):
""" Test that the set of aggregations produced for a swarm are correct
Parameters:
-----------------------------------------------------------------------
expDesc: JSON experiment description
expectedAttempts: list of (minAggregationMultiple, predictionSteps) pairs
that we expect to find in the aggregation choices.
"""
# Extract out the minAggregation
minAggregation = dict(expDesc['streamDef']['aggregation'])
minAggregation.pop('fields')
# --------------------------------------------------------------------
(base, perms) = self.getModules(expDesc)
predictionSteps = expDesc['inferenceArgs']['predictionSteps'][0]
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
#self.assertEqual(base.config['modelParams']['clParams']['steps'],
# '%s' % (predictionSteps))
tmpAggregationInfo = dictutils.rCopy(
base.config['aggregationInfo'],
lambda value, _: value)
tmpAggregationInfo.pop('fields')
self.assertDictEqual(tmpAggregationInfo, minAggregation)
predictAheadTime = dict(minAggregation)
for key in predictAheadTime.iterkeys():
predictAheadTime[key] *= predictionSteps
self.assertEqual(base.config['predictAheadTime'],
predictAheadTime)
# And in the permutations file
self.assertEqual(
perms.minimize,
("multiStepBestPredictions:multiStep:errorMetric='altMAPE':"
"steps=\\[.*\\]:window=1000:field=consumption"))
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [predictionSteps],
'errorMetric': 'altMAPE'}),
metricTuples)
# ------------------------------------------------------------------------
# Get the aggregation periods to permute over, and make sure each is
# valid
aggPeriods = perms.permutations['aggregationInfo']
aggAttempts = []
for agg in aggPeriods.choices:
# Make sure it's an integer multiple of minAggregation
multipleOfMinAgg = aggregationDivide(agg, minAggregation)
self.assertIsInt(multipleOfMinAgg,
"invalid aggregation period %s is not an integer multiple" \
"of minAggregation (%s)" % (agg, minAggregation))
self.assertGreaterEqual(int(round(multipleOfMinAgg)), 1,
"invalid aggregation period %s is not >= minAggregation (%s)" % \
(agg, minAggregation))
# Make sure the predictAheadTime is an integer multiple of the aggregation
requiredSteps = aggregationDivide(predictAheadTime, agg)
self.assertIsInt(requiredSteps,
"invalid aggregation period %s is not an integer factor" \
"of predictAheadTime (%s)" % (agg, predictAheadTime))
self.assertGreaterEqual(int(round(requiredSteps)), 1,
"invalid aggregation period %s greater than " \
" predictAheadTime (%s)" % (agg, predictAheadTime))
# Make sure that computeInterval is an integer multiple of the aggregation
quotient = aggregationDivide(expDesc['computeInterval'], agg)
self.assertIsInt(quotient,
"invalid aggregation period %s is not an integer factor" \
"of computeInterval (%s)" % (agg, expDesc['computeInterval']))
self.assertGreaterEqual(int(round(quotient)), 1,
"Invalid aggregation period %s is greater than the computeInterval " \
"%s" % (agg, expDesc['computeInterval']))
aggAttempts.append((int(round(multipleOfMinAgg)), int(requiredSteps)))
# Print summary of aggregation attempts
LOGGER.info("This swarm will try the following \
(minAggregationMultiple, predictionSteps) combinations: %s", aggAttempts)
# ----------------------------------------------------------------------
# Were these the expected attempts?
aggAttempts.sort()
expectedAttempts.sort()
self.assertEqual(aggAttempts, expectedAttempts, "Expected this swarm to " \
"try the following (minAggMultiple, predictionSteps) " \
"attempts: %s, but instead it is going to try: %s" % \
(expectedAttempts, aggAttempts))
class PositiveExperimentTests(ExperimentTestBaseClass):
def test_ShowSchema(self):
""" Test showing the schema
"""
args = [
"--showSchema"
]
self.addExtraLogItem({'args':args})
#----------------------------------------
# Run it
ExpGenerator.expGenerator(args)
return
def test_PredictionElement(self):
""" Test correct behavior in response to different settings in the
prediction element
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"MultiStep",
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [1]
},
'environment':OpfEnvironment.Experiment,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"iterationCount": 10,
}
# --------------------------------------------------------------------
# Test it out with no prediction element
(_base, perms) = self.getModules(expDesc)
# Make sure we have the right optimization designation
self.assertEqual(perms.minimize,
("multiStepBestPredictions:multiStep:errorMetric='altMAPE':"
"steps=\\[1\\]:window=%d:field=consumption")
% ExpGenerator.METRIC_WINDOW,
msg="got: %s" % perms.minimize)
# Should not have any classifier info to permute over
self.assertNotIn('clAlpha', perms.permutations)
return
def assertMetric(self, base, perm, predictedField,
optimizeMetric, nupicScore,
movingBaseline,
oneGram,
trivialMetric,
legacyMetric=None):
print "base.control"
pprint.pprint(base.control)
#taskMetrics = base.control['tasks'][0]['taskControl']['metrics']
taskMetrics = base.control['metrics']
for metricSpec in taskMetrics:
print metricSpec.metric
self.assertTrue(metricSpec.metric in ["multiStep", optimizeMetric,
movingBaseline, oneGram,
nupicScore, trivialMetric,
legacyMetric],
"Unrecognized Metric type: %s"% metricSpec.metric)
if metricSpec.metric == trivialMetric:
self.assertEqual(metricSpec.metric, trivialMetric)
self.assertEqual(metricSpec.inferenceElement,
InferenceElement.prediction)
elif metricSpec.metric == movingBaseline:
self.assertTrue("errorMetric" in metricSpec.params)
elif metricSpec.metric == oneGram:
self.assertTrue("errorMetric" in metricSpec.params)
elif metricSpec.metric == "multiStep":
pass
else:
self.assertEqual(metricSpec.metric, optimizeMetric)
#optimizeString = "prediction:%s:window=%d:field=%s" % \
# (optimizeMetric, ExpGenerator.METRIC_WINDOW,
# predictedField)
optimizeString = ("multiStepBestPredictions:multiStep:"
"errorMetric='%s':steps=\[1\]"
":window=%d:field=%s" % \
(optimizeMetric, ExpGenerator.METRIC_WINDOW,
predictedField))
print "perm.minimize=",perm.minimize
print "optimizeString=",optimizeString
self.assertEqual(perm.minimize, optimizeString,
msg="got: %s" % perm.minimize)
def test_Metrics(self):
""" Test to make sure that the correct metrics are generated """
# =========================================================================
# Test category predicted field
# =========================================================================
streamDef = dict(
version = 1,
info = "test_category_predicted_field",
streams = [
# It doesn't matter if this stream source points to a real place or not.
dict(source="file://dummy",
info="dummy.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"MultiStep",
"inferenceArgs":{
"predictedField":"playType",
"predictionSteps": [1]
},
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "address",
"fieldType": "string"
},
{ "fieldName": "ydsToGo",
"fieldType": "float",
},
{ "fieldName": "playType",
"fieldType": "string",
},
],
}
# Make sure we have the right metric type
# (avg_err for categories, aae for scalars)
(base, perms) = self.getModules(expDesc)
self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'],
'avg_err',
'moving_mode',
'one_gram',
InferenceElement.prediction,
"trivial")
self.assertEqual(base.control['loggedMetrics'][0], ".*")
# =========================================================================
# Test scalar predicted field
# =========================================================================
expDesc['inferenceArgs']['predictedField'] = 'ydsToGo'
(base, perms) = self.getModules(expDesc)
self.assertMetric(base, perms, expDesc['inferenceArgs']['predictedField'],
'altMAPE',"moving_mean","one_gram",
InferenceElement.encodings, "trivial")
self.assertEqual(base.control['loggedMetrics'][0], ".*")
def test_IncludedFields(self):
""" Test correct behavior in response to different settings in the
includedFields element
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Experiment,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "address",
"fieldType": "string"
},
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"iterationCount": 10,
}
# --------------------------------------------------------------------
# Test it out with all fields
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['gym', 'address', 'timestamp',
'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'address',
'timestamp_timeOfDay', 'timestamp_dayOfWeek', 'timestamp_weekend',
'consumption']))
# --------------------------------------------------------------------
# Test with a subset of fields
expDesc['includedFields'] = [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['gym', 'consumption']))
self.assertEqual(actEncoderNames, set(['gym', 'consumption']))
# --------------------------------------------------------------------
# Test that min and max are honored
expDesc['includedFields'] = [
{ "fieldName": "consumption",
"fieldType": "float",
"minValue" : 42,
"maxValue" : 42.42,
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
    # Because both min and max were specified,
# the encoder should be non-adaptive
self.assertEqual(actEncoderTypes, set(['ScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
# --------------------------------------------------------------------
# Test that overriding the encoderType is supported
expDesc['includedFields'] = [
{ "fieldName": "consumption",
"fieldType": "float",
"minValue" : 42,
"maxValue" : 42.42,
"encoderType": 'AdaptiveScalarEncoder',
},
]
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected encoders
actEncoderFields = set()
actEncoderNames = set()
actEncoderTypes = set()
minValues = set()
maxValues = set()
for _, encoder in (
base.config['modelParams']['sensorParams']['encoders'].iteritems()):
actEncoderFields.add(encoder['fieldname'])
actEncoderNames.add(encoder['name'])
actEncoderTypes.add(encoder['type'])
minValues.add(encoder['minval'])
maxValues.add(encoder['maxval'])
# Make sure we have the right optimization designation
self.assertEqual(actEncoderFields, set(['consumption']))
self.assertEqual(actEncoderNames, set(['consumption']))
self.assertEqual(actEncoderTypes, set(['AdaptiveScalarEncoder']))
self.assertEqual(minValues, set([42]))
self.assertEqual(maxValues, set([42.42]))
# --------------------------------------------------------------------
# Test that fieldnames with funny characters (-?<>!@##'"\=...) are
# generated properly. Should throw exception for \ character
characters = string.punctuation
expDesc['includedFields'] = [{'fieldName':char+'helloField'+char,
"fieldType":"float"}
for char in characters]\
+[{'fieldName':'consumption',
'fieldType':'float'}]
try:
(base, _perms) = self.getModules(expDesc)
except:
LOGGER.info("Passed: Threw exception for bad fieldname.")
# --------------------------------------------------------------------
## Now test without backslash
characters = characters.replace('\\','')
#expDesc['includedFields'] = [{'fieldName':char+'helloField'+char,
# "fieldType":"float"}
# for char in characters]\
# +[{'fieldName':'consumption',
# 'fieldType':'float'}]
#(base, perms) = self.getModules(expDesc)
return
def test_Aggregation(self):
""" Test that aggregation gets pulled out of the streamDef as it should
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "TestAggregation",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
aggregation = {
'years': 1,
'months': 2,
'weeks': 3,
'days': 4,
'hours': 5,
'minutes': 6,
'seconds': 7,
'milliseconds': 8,
'microseconds': 9,
'fields': [('consumption', 'sum'),
('gym', 'first')]
},
sequenceIdField = 'gym',
providers = {
"order": ["weather"],
"weather":{
"locationField": "address",
"providerType": "NamedProvider",
"timestampField": "timestamp",
"weatherTypes":[
"TEMP"
]
}
}
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Experiment,
"streamDef":streamDef,
"includedFields": [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
{ "fieldName": "TEMP",
"fieldType": "float",
"minValue": -30.0,
"maxValue": 120.0,
},
],
"iterationCount": 10,
"resetPeriod": {"days" : 1, "hours" : 12},
}
# --------------------------------------------------------------------
# Test with aggregation
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected aggregation
aggInfo = base.config['aggregationInfo']
aggInfo['fields'].sort()
streamDef['aggregation']['fields'].sort()
self.assertEqual(aggInfo, streamDef['aggregation'])
# --------------------------------------------------------------------
# Test with no aggregation
expDesc['streamDef'].pop('aggregation')
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected aggregation
aggInfo = base.config['aggregationInfo']
expAggInfo = {
'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0,
'milliseconds': 0,
'microseconds': 0,
'fields': []
}
aggInfo['fields'].sort()
expAggInfo['fields'].sort()
self.assertEqual(aggInfo, expAggInfo)
return
def test_ResetPeriod(self):
""" Test that reset period gets handled correctly
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Experiment,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "gym",
"fieldType": "string"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": 10,
"resetPeriod": {
'weeks': 3,
'days': 4,
'hours': 5,
'minutes': 6,
'seconds': 7,
'milliseconds': 8,
'microseconds': 9,
},
}
# --------------------------------------------------------------------
# Test with reset period
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected reset info
resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
self.assertEqual(resetInfo, expDesc['resetPeriod'])
# --------------------------------------------------------------------
# Test no reset period
expDesc.pop('resetPeriod')
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected reset info
resetInfo = base.config['modelParams']['sensorParams']['sensorAutoReset']
self.assertEqual(resetInfo, None)
return
def test_RunningExperimentHSv2(self):
""" Try running a basic Hypersearch V2 experiment and permutations
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalMultiStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"iterationCount": 10,
}
# Test it out
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
return
def test_MultiStep(self):
""" Test the we correctly generate a multi-step prediction experiment
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"],
last_record=20),
],
aggregation = {
'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 1,
'minutes': 0,
'seconds': 0,
'milliseconds': 0,
'microseconds': 0,
'fields': [('consumption', 'sum'),
('gym', 'first'),
('timestamp', 'first')]
}
)
# Generate the experiment description
expDesc = {
'environment': OpfEnvironment.Nupic,
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [1, 5],
},
"inferenceType": "MultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": -1,
"runBaselines": True,
}
# --------------------------------------------------------------------
(base, perms) = self.getModules(expDesc)
print "base.config['modelParams']:"
pprint.pprint(base.config['modelParams'])
print "perms.permutations"
pprint.pprint(perms.permutations)
print "perms.minimize"
pprint.pprint(perms.minimize)
print "expDesc"
pprint.pprint(expDesc)
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'],
"TemporalMultiStep")
# Make sure there is a '_classifier_input' encoder with classifierOnly
# set to True
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['classifierOnly'], True)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['fieldname'],
expDesc['inferenceArgs']['predictedField'])
# And in the permutations file
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertEqual(perms.minimize,
"multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
+ "steps=\\[1, 5\\]:window=1000:field=consumption")
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
# Should permute over the _classifier_input encoder params
self.assertIn('_classifierInput',
perms.permutations['modelParams']['sensorParams']['encoders'])
# Should set inputPredictedField to "auto" (the default)
self.assertEqual(perms.inputPredictedField, "auto")
# Should have TP parameters being permuted
self.assertIn('activationThreshold',
perms.permutations['modelParams']['tpParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tpParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------
# If we put the 5 step first, we should still get a list of steps to
# optimize over
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceArgs']['predictionSteps'] = [5, 1]
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.minimize,
"multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
+ "steps=\\[5, 1\\]:window=1000:field=consumption")
# --------------------------------------
# If we specify NonTemporal, we shouldn't permute over TP parameters
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalMultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'],
expDesc2['inferenceType'])
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertNotIn('inferenceType', perms.permutations['modelParams'])
self.assertNotIn('activationThreshold',
perms.permutations['modelParams']['tpParams'])
self.assertNotIn('minThreshold',
perms.permutations['modelParams']['tpParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1, 5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------
# If we specify just generic MultiStep, we should permute over the inference
# type
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'MultiStep'
(base, perms) = self.getModules(expDesc2)
self.assertEqual(base.config['modelParams']['inferenceType'],
'TemporalMultiStep')
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc2['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc2['inferenceArgs']['predictedField'])
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
self.assertIn('inferenceType', perms.permutations['modelParams'])
self.assertIn('activationThreshold',
perms.permutations['modelParams']['tpParams'])
self.assertIn('minThreshold', perms.permutations['modelParams']['tpParams'])
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [1,5], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# ---------------------------------------------------------------------
# If the caller sets inferenceArgs.inputPredictedField, make
# sure the permutations file has the same setting
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "yes"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "yes")
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "no"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "no")
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "auto"
(base, perms) = self.getModules(expDesc2)
self.assertEqual(perms.inputPredictedField, "auto")
# ---------------------------------------------------------------------
# If the caller sets inferenceArgs.inputPredictedField to 'no', make
# sure there is no encoder for the predicted field
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "no"
(base, perms) = self.getModules(expDesc2)
self.assertNotIn(
'consumption',
base.config['modelParams']['sensorParams']['encoders'].keys())
def test_DeltaEncoders(self):
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"inferenceType":"TemporalMultiStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
"runDelta": True
},
],
}
(base, perms) = self.getModules(expDesc)
encoder = base.config["modelParams"]["sensorParams"]["encoders"]\
["consumption"]
encoderPerm = perms.permutations["modelParams"]["sensorParams"]\
["encoders"]["consumption"]
self.assertEqual(encoder["type"], "ScalarSpaceEncoder")
self.assertIsInstance(encoderPerm.kwArgs['space'], PermuteChoices)
expDesc = {
"inferenceType":"TemporalMultiStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
"runDelta": True,
"space": "delta"
},
],
}
(base, perms) = self.getModules(expDesc)
encoder = base.config["modelParams"]["sensorParams"] \
["encoders"]["consumption"]
encoderPerm = perms.permutations["modelParams"]["sensorParams"] \
["encoders"]["consumption"]
self.assertEqual(encoder["type"], "ScalarSpaceEncoder")
self.assertEqual(encoder["space"], "delta")
self.assertEqual(encoderPerm.kwArgs['space'], "delta")
def test_AggregationSwarming(self):
""" Test the we correctly generate a multi-step prediction experiment that
uses aggregation swarming
"""
# The min aggregation
minAggregation = {
'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 15,
'seconds': 0,
'milliseconds': 0,
'microseconds': 0,
}
streamAggregation = dict(minAggregation)
streamAggregation.update({
'fields': [('consumption', 'sum'),
('gym', 'first'),
('timestamp', 'first')]
})
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"],
last_record=10),
],
aggregation = streamAggregation,
)
# Generate the experiment description
expDesc = {
'environment': OpfEnvironment.Nupic,
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [24],
},
"inferenceType": "TemporalMultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": -1,
"runBaselines": False,
"computeInterval": {
'hours': 2
}
}
# ------------------------------------------------------------------------
# Test running it
#self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------------------------------------
# Check for consistency. (example 1)
# The expectedAttempts parameter is a list of
# (minAggregationMultiple, predictionSteps) pairs that will be attempted
self.assertValidSwarmingAggregations(expDesc = expDesc,
expectedAttempts = [(1, 24), (2, 12), (4, 6), (8, 3)])
# --------------------------------------------------------------------
# Try where there are lots of possible aggregations that we only try
# the last 5
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['streamDef']['aggregation']['minutes'] = 1
expDescTmp['inferenceArgs']['predictionSteps'] = \
[4*60/1] # 4 hours / 1 minute
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(24, 10), (30, 8), (40, 6), (60, 4), (120, 2)])
# --------------------------------------------------------------------
# Make sure computeInterval is honored (example 2)
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 3
expDescTmp['inferenceArgs']['predictionSteps'] = [16] # 4 hours
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1,16), (2, 8), (4, 4)])
# --------------------------------------------------------------------
# Make sure computeInterval in combination with predictAheadTime is honored
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 2
expDescTmp['inferenceArgs']['predictionSteps'] = [16] # 4 hours
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1,16), (2, 8), (4, 4), (8, 2)])
# --------------------------------------------------------------------
# Make sure we catch bad cases:
# computeInterval must be >= minAggregation
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 0
expDescTmp['computeInterval']['minutes'] = 1
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info("Got expected exception: %s", cm.exception)
# computeInterval must be an integer multiple of minAggregation
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['computeInterval']['hours'] = 0
expDescTmp['computeInterval']['minutes'] = 25
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info("Got expected exception: %s", cm.exception)
# More than 1 predictionSteps passed in
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['inferenceArgs']['predictionSteps'] = [1, 16]
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info("Got expected exception: %s", cm.exception)
# No stream aggregation
expDescTmp = copy.deepcopy(expDesc)
expDescTmp['streamDef']['aggregation']['minutes'] = 0
with self.assertRaises(Exception) as cm:
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])
LOGGER.info("Got expected exception: %s", cm.exception)
def test_SwarmSize(self):
""" Test correct behavior in response to different settings in the
swarmSize element
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"swarmSize": "large",
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
}
# --------------------------------------------------------------------
# Test out "large" swarm generation
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], -1,
msg="got: %s" % base.control['iterationCount'])
self.assertEqual(perms.minParticlesPerSwarm, 15,
msg="got: %s" % perms.minParticlesPerSwarm)
# Temporarily disable new large swarm features
#self.assertEqual(perms.killUselessSwarms, False,
# msg="got: %s" % perms.killUselessSwarms)
#self.assertEqual(perms.minFieldContribution, -1000,
# msg="got: %s" % perms.minFieldContribution)
#self.assertEqual(perms.maxFieldBranching, 10,
# msg="got: %s" % perms.maxFieldBranching)
#self.assertEqual(perms.tryAll3FieldCombinations, True,
# msg="got: %s" % perms.tryAll3FieldCombinations)
self.assertEqual(perms.tryAll3FieldCombinationsWTimestamps, True,
msg="got: %s" % perms.tryAll3FieldCombinationsWTimestamps)
self.assertFalse(hasattr(perms, 'maxModels'))
# Should set inputPredictedField to "auto"
self.assertEqual(perms.inputPredictedField, "auto")
# --------------------------------------------------------------------
# Test it out with medium swarm
expDesc["swarmSize"] = "medium"
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 4000,
msg="got: %s" % base.control['iterationCount'])
self.assertEqual(perms.minParticlesPerSwarm, 5,
msg="got: %s" % perms.minParticlesPerSwarm)
self.assertEqual(perms.maxModels, 200,
msg="got: %s" % perms.maxModels)
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
# Should set inputPredictedField to "auto"
self.assertEqual(perms.inputPredictedField, "auto")
# --------------------------------------------------------------------
# Test it out with small swarm
expDesc["swarmSize"] = "small"
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 100,
msg="got: %s" % base.control['iterationCount'])
self.assertEqual(perms.minParticlesPerSwarm, 3,
msg="got: %s" % perms.minParticlesPerSwarm)
self.assertEqual(perms.maxModels, 1,
msg="got: %s" % perms.maxModels)
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
# Should set inputPredictedField to "yes"
self.assertEqual(perms.inputPredictedField, "yes")
# --------------------------------------------------------------------
# Test it out with all of swarmSize, minParticlesPerSwarm, iteration
# count, and inputPredictedField specified
expDesc["swarmSize"] = "small"
expDesc["minParticlesPerSwarm"] = 2
expDesc["iterationCount"] = 42
expDesc["inferenceArgs"]["inputPredictedField"] = "auto"
(base, perms) = self.getModules(expDesc)
self.assertEqual(base.control['iterationCount'], 42,
msg="got: %s" % base.control['iterationCount'])
self.assertEqual(perms.minParticlesPerSwarm, 2,
msg="got: %s" % perms.minParticlesPerSwarm)
self.assertEqual(perms.maxModels, 1,
msg="got: %s" % perms.maxModels)
self.assertFalse(hasattr(perms, 'killUselessSwarms'))
self.assertFalse(hasattr(perms, 'minFieldContribution'))
self.assertFalse(hasattr(perms, 'maxFieldBranching'))
self.assertFalse(hasattr(perms, 'tryAll3FieldCombinations'))
self.assertEqual(perms.inputPredictedField, "auto")
# Test running it
modelResults = self.runBaseDescriptionAndPermutations(
expDesc, hsVersion='v2', maxModels=None)
self.assertEqual(len(modelResults), 1, "Expected to get %d model "
"results but only got %d" % (1, len(modelResults)))
def test_FixedFields(self):
""" Test correct behavior in response to setting the fixedFields swarming
option.
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
"swarmSize": "large",
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"fixedFields": ['consumption', 'timestamp'],
}
# --------------------------------------------------------------------
# Test out using fixedFields
(_base, perms) = self.getModules(expDesc)
self.assertEqual(perms.fixedFields, ['consumption', 'timestamp'],
msg="got: %s" % perms.fixedFields)
# Should be excluded from permutations script if not part of the JSON
# description
expDesc.pop('fixedFields')
(_base, perms) = self.getModules(expDesc)
self.assertFalse(hasattr(perms, 'fixedFields'))
def test_FastSwarmModelParams(self):
""" Test correct behavior in response to setting the fastSwarmModelParams
swarming option.
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
fastSwarmModelParams = {'this is': 'a test'}
# Generate the experiment description
expDesc = {
"swarmSize": "large",
"inferenceType":"TemporalNextStep",
"inferenceArgs":{
"predictedField":"consumption"
},
'environment':OpfEnvironment.Nupic,
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
"minValue": 0,
"maxValue": 200,
},
],
"resetPeriod": {"days" : 1, "hours" : 12},
"fastSwarmModelParams": fastSwarmModelParams,
}
# --------------------------------------------------------------------
# Test out using fastSwarmModelParams
(_base, perms) = self.getModules(expDesc)
self.assertEqual(perms.fastSwarmModelParams, fastSwarmModelParams,
msg="got: %s" % perms.fastSwarmModelParams)
# Should be excluded from permutations script if not part of the JSON
# description
expDesc.pop('fastSwarmModelParams')
(base, perms) = self.getModules(expDesc)
self.assertFalse(hasattr(perms, 'fastSwarmModelParams'))
def test_AnomalyParams(self):
""" Test correct behavior in response to setting the anomalyParams
experiment description options
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"]),
],
)
# Generate the experiment description
expDesc = {
'environment': OpfEnvironment.Nupic,
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [1],
},
"inferenceType": "TemporalAnomaly",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": -1,
"anomalyParams": {
"autoDetectThreshold": 1.1,
"autoDetectWaitRecords": 0,
"anomalyCacheRecords": 10
}
}
# --------------------------------------------------------------------
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'],
expDesc['inferenceType'])
self.assertEqual(base.config['modelParams']['anomalyParams'],
expDesc['anomalyParams'])
# Only TemporalAnomaly models will have and use anomalyParams
expDesc['inferenceType'] = 'TemporalNextStep'
(base, _perms) = self.getModules(expDesc)
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'],
expDesc['inferenceType'])
self.assertEqual(base.config['modelParams']['anomalyParams'],
expDesc['anomalyParams'])
def test_NontemporalClassification(self):
""" Test the we correctly generate a Nontemporal classification experiment
"""
# Form the stream definition
streamDef = dict(
version = 1,
info = "test_NoProviders",
streams = [
dict(source="file://%s" % HOTGYM_INPUT,
info="hotGym.csv",
columns=["*"],
last_record=10),
],
aggregation = {
'years': 0,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 1,
'minutes': 0,
'seconds': 0,
'milliseconds': 0,
'microseconds': 0,
'fields': [('consumption', 'sum'),
('gym', 'first'),
('timestamp', 'first')]
}
)
# Generate the experiment description
expDesc = {
'environment': OpfEnvironment.Nupic,
"inferenceArgs":{
"predictedField":"consumption",
"predictionSteps": [0],
},
"inferenceType": "TemporalMultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "timestamp",
"fieldType": "datetime"
},
{ "fieldName": "consumption",
"fieldType": "float",
},
],
"iterationCount": -1,
"runBaselines": True,
}
# --------------------------------------------------------------------
(base, perms) = self.getModules(expDesc)
# Make sure we have the expected info in the base description file
self.assertEqual(base.control['inferenceArgs']['predictionSteps'],
expDesc['inferenceArgs']['predictionSteps'])
self.assertEqual(base.control['inferenceArgs']['predictedField'],
expDesc['inferenceArgs']['predictedField'])
self.assertEqual(base.config['modelParams']['inferenceType'],
InferenceType.NontemporalClassification)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['classifierOnly'], True)
self.assertEqual(base.config['modelParams']['sensorParams']['encoders']
['_classifierInput']['fieldname'],
expDesc['inferenceArgs']['predictedField'])
self.assertNotIn('consumption',
base.config['modelParams']['sensorParams']['encoders'].keys())
# The SP and TP should both be disabled
self.assertFalse(base.config['modelParams']['spEnable'])
self.assertFalse(base.config['modelParams']['tpEnable'])
# Check permutations file
self.assertNotIn('inferenceType', perms.permutations['modelParams'])
self.assertEqual(perms.minimize,
"multiStepBestPredictions:multiStep:errorMetric='altMAPE':" \
+ "steps=\\[0\\]:window=1000:field=consumption")
self.assertIn('alpha', perms.permutations['modelParams']['clParams'])
# Should have no SP or TP params to permute over
self.assertEqual(perms.permutations['modelParams']['tpParams'], {})
self.assertEqual(perms.permutations['modelParams']['spParams'], {})
# Make sure the right metrics were put in
metrics = base.control['metrics']
metricTuples = [(metric.metric, metric.inferenceElement, metric.params) \
for metric in metrics]
self.assertIn(('multiStep',
'multiStepBestPredictions',
{'window': 1000, 'steps': [0], 'errorMetric': 'aae'}),
metricTuples)
# Test running it
self.runBaseDescriptionAndPermutations(expDesc, hsVersion='v2')
# --------------------------------------
# If we specify NonTemporalClassification, we should get the same
# description and permutations files
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalClassification'
(newBase, _newPerms) = self.getModules(expDesc2)
self.assertEqual(base.config, newBase.config)
# --------------------------------------
# If we specify NonTemporalClassification, prediction steps MUST be [0]
expDesc2 = copy.deepcopy(expDesc)
expDesc2['inferenceType'] = 'NontemporalClassification'
expDesc2['inferenceArgs']['predictionSteps'] = [1]
gotException = False
try:
(newBase, _newPerms) = self.getModules(expDesc2)
except:
gotException = True
self.assertTrue(gotException)
# --------------------------------------
# If we specify NonTemporalClassification, inferenceArgs.inputPredictedField
# can not be 'yes'
expDesc2 = copy.deepcopy(expDesc)
expDesc2["inferenceArgs"]["inputPredictedField"] = "yes"
gotException = False
try:
(newBase, _newPerms) = self.getModules(expDesc2)
except:
gotException = True
self.assertTrue(gotException)
return
def _executeExternalCmdAndReapStdout(args):
"""
args: Args list as defined for the args parameter in subprocess.Popen()
Returns: result dictionary:
{
'exitStatus':<exit-status-of-external-command>,
'stdoutData':"string",
'stderrData':"string"
}
"""
_debugOut(("_executeExternalCmdAndReapStdout: Starting...\n<%s>") % \
(args,))
p = subprocess.Popen(args,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_debugOut(("Process started for <%s>") % (args,))
(stdoutData, stderrData) = p.communicate()
_debugOut(("Process completed for <%s>: exit status=%s, " +
"stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") % \
(args, p.returncode, type(stdoutData), stdoutData, stderrData))
result = dict(
exitStatus = p.returncode,
stdoutData = stdoutData,
stderrData = stderrData,
)
_debugOut(("_executeExternalCmdAndReapStdout for <%s>: result=\n%s") % \
(args, pprint.pformat(result, indent=4)))
return result
def _debugOut(text):
if g_debug:
LOGGER.info(text)
return
def _getTestList():
""" Get the list of tests that can be run from this module"""
suiteNames = ['PositiveExperimentTests']
testNames = []
for suite in suiteNames:
for f in dir(eval(suite)):
if f.startswith('test'):
testNames.append('%s.%s' % (suite, f))
return testNames
if __name__ == '__main__':
LOGGER.info("\nCURRENT DIRECTORY: %s", os.getcwd())
helpString = \
"""%prog [options] [suitename.testname | suitename]...
Run the Hypersearch unit tests. Available suitename.testnames: """
# Update help string
allTests = _getTestList()
for test in allTests:
helpString += "\n %s" % (test)
# ============================================================================
# Process command line arguments
parser = OptionParser(helpString)
# Our custom options (that don't get passed to unittest):
customOptions = ['--installDir', '--verbosity', '--logLevel']
parser.add_option(
"--installDir", dest="installDir", default=os.environ['NUPIC'],
help="Path to the NTA install directory [default: %default].")
parser.add_option("--verbosity", default=0, type="int",
help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
parser.add_option("--logLevel", action="store", type="int",
default=logging.INFO,
help="override default log level. Pass in an integer value that "
"represents the desired logging level (10=logging.DEBUG, "
"20=logging.INFO, etc.) [default: %default].")
# The following are put here to document what is accepted by the unittest
# module - we don't actually use them in this code base.
parser.add_option("--verbose", dest="verbose", default=os.environ['NUPIC'],
help="Verbose output")
parser.add_option("--quiet", dest="quiet", default=None,
help="Minimal output")
parser.add_option("--failfast", dest="failfast", default=None,
help="Stop on first failure")
parser.add_option("--catch", dest="catch", default=None,
help="Catch control-C and display results")
parser.add_option("--buffer", dest="buffer", default=None,
help="Buffer stdout and stderr during test runs")
(options, args) = parser.parse_args()
# Setup our environment
g_myEnv = MyTestEnvironment(options)
# Remove our private options
args = sys.argv[:]
for arg in sys.argv:
for option in customOptions:
if arg.startswith(option):
args.remove(arg)
break
# Run the tests
unittest.main(argv=args)
| 1 | 18,606 | Please leave 1 space char between commas here and other places in this PR. | numenta-nupic | py |
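The style nit above concerns argument and tuple lists such as the expectedAttempts pairs in the test file; a minimal illustration of the requested spacing, reusing a call from the test with only the whitespace changed:
# preferred: one space after every comma, e.g. (1, 16) rather than (1,16)
self.assertValidSwarmingAggregations(expDesc = expDescTmp,
    expectedAttempts = [(1, 16), (2, 8), (4, 4), (8, 2)])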
@@ -100,8 +100,8 @@ bool ThreadsExec::spawn() {
pthread_attr_t attr;
- if (0 == pthread_attr_init(&attr) ||
- 0 == pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) ||
+ if (0 == pthread_attr_init(&attr) &&
+ 0 == pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) &&
0 == pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) {
pthread_t pt;
| 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#if defined(KOKKOS_ENABLE_THREADS)
#include <Kokkos_Core_fwd.hpp>
/* Standard 'C' Linux libraries */
#include <pthread.h>
#include <sched.h>
#include <errno.h>
/* Standard C++ libraries */
#include <cstdlib>
#include <string>
#include <iostream>
#include <stdexcept>
#include <Kokkos_Threads.hpp>
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
namespace {
pthread_mutex_t host_internal_pthread_mutex = PTHREAD_MUTEX_INITIALIZER;
// Pthreads compatible driver.
// Recovery from an exception would require constant intra-thread health
// verification; which would negatively impact runtime. As such simply
// abort the process.
void* internal_pthread_driver(void*) {
try {
ThreadsExec::driver();
} catch (const std::exception& x) {
std::cerr << "Exception thrown from worker thread: " << x.what()
<< std::endl;
std::cerr.flush();
std::abort();
} catch (...) {
std::cerr << "Exception thrown from worker thread" << std::endl;
std::cerr.flush();
std::abort();
}
return nullptr;
}
} // namespace
//----------------------------------------------------------------------------
// Spawn a thread
bool ThreadsExec::spawn() {
bool result = false;
pthread_attr_t attr;
if (0 == pthread_attr_init(&attr) ||
0 == pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) ||
0 == pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) {
pthread_t pt;
result = 0 == pthread_create(&pt, &attr, internal_pthread_driver, nullptr);
}
pthread_attr_destroy(&attr);
return result;
}
//----------------------------------------------------------------------------
bool ThreadsExec::is_process() {
static const pthread_t master_pid = pthread_self();
return pthread_equal(master_pid, pthread_self());
}
void ThreadsExec::global_lock() {
pthread_mutex_lock(&host_internal_pthread_mutex);
}
void ThreadsExec::global_unlock() {
pthread_mutex_unlock(&host_internal_pthread_mutex);
}
//----------------------------------------------------------------------------
void ThreadsExec::wait_yield(volatile int& flag, const int value) {
while (value == flag) {
sched_yield();
}
}
} // namespace Impl
} // namespace Kokkos
/* end #if defined( KOKKOS_ENABLE_THREADS ) */
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
#elif defined(KOKKOS_ENABLE_WINTHREAD)
#include <Kokkos_Core_fwd.hpp>
/* Windows libraries */
#include <winsock2.h>
#include <windows.h>
#include <process.h>
/* Standard C++ libraries */
#include <cstdlib>
#include <string>
#include <iostream>
#include <stdexcept>
#include <Kokkos_Threads.hpp>
//----------------------------------------------------------------------------
// Driver for each created pthread
namespace Kokkos {
namespace Impl {
namespace {
unsigned WINAPI internal_winthread_driver(void* arg) {
ThreadsExec::driver();
return 0;
}
class ThreadLockWindows {
private:
CRITICAL_SECTION m_handle;
~ThreadLockWindows() { DeleteCriticalSection(&m_handle); }
ThreadLockWindows() { InitializeCriticalSection(&m_handle); }
ThreadLockWindows(const ThreadLockWindows&);
ThreadLockWindows& operator=(const ThreadLockWindows&);
public:
static ThreadLockWindows& singleton();
void lock() { EnterCriticalSection(&m_handle); }
void unlock() { LeaveCriticalSection(&m_handle); }
};
ThreadLockWindows& ThreadLockWindows::singleton() {
static ThreadLockWindows self;
return self;
}
} // namespace
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
// Spawn this thread
bool ThreadsExec::spawn() {
unsigned Win32ThreadID = 0;
HANDLE handle =
_beginthreadex(0, 0, internal_winthread_driver, 0, 0, &Win32ThreadID);
return !handle;
}
bool ThreadsExec::is_process() { return true; }
void ThreadsExec::global_lock() { ThreadLockWindows::singleton().lock(); }
void ThreadsExec::global_unlock() { ThreadLockWindows::singleton().unlock(); }
void ThreadsExec::wait_yield(volatile int& flag, const int value) {
while (value == flag) {
Sleep(0);
}
}
} // namespace Impl
} // namespace Kokkos
#else
void KOKKOS_CORE_SRC_THREADS_EXEC_BASE_PREVENT_LINK_ERROR() {}
#endif /* end #elif defined( KOKKOS_ENABLE_WINTHREAD ) */
| 1 | 29,849 | hm is this really &&? Not ||? Was it initially correct if any of these things are not set that it needs to be recreated? | kokkos-kokkos | cpp |
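The question above hinges on short-circuit evaluation: with ||, the chain stops at the first pthread_attr_* call that returns 0 (success), so the remaining attribute calls are skipped and pthread_create can run against a partially configured pthread_attr_t; with &&, pthread_create only runs once all three calls have succeeded. A minimal sketch of the intended logic, using the same POSIX calls as the patch (the ok flag is illustrative):
bool result = false;
pthread_attr_t attr;
bool ok = (0 == pthread_attr_init(&attr)) &&
          (0 == pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)) &&
          (0 == pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
if (ok) {
  pthread_t pt;
  /* spawn only when every attribute call succeeded */
  result = (0 == pthread_create(&pt, &attr, internal_pthread_driver, nullptr));
}
pthread_attr_destroy(&attr);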
@@ -149,7 +149,8 @@ public class PackageTool extends SolrCLI.ToolBase {
String version = parsedVersion.second();
boolean noprompt = cli.hasOption('y');
boolean isUpdate = cli.hasOption("update") || cli.hasOption('u');
- packageManager.deploy(packageName, version, PackageUtils.validateCollections(cli.getOptionValue("collections").split(",")), cli.getOptionValues("param"), isUpdate, noprompt);
+ String collections[] = cli.hasOption("collections")? PackageUtils.validateCollections(cli.getOptionValue("collections").split(",")): new String[] {};
+ packageManager.deploy(packageName, version, collections, cli.hasOption("cluster"), cli.getOptionValues("param"), isUpdate, noprompt);
break;
}
case "undeploy": | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.util;
import static org.apache.solr.packagemanager.PackageUtils.printGreen;
import static org.apache.solr.packagemanager.PackageUtils.print;
import java.io.File;
import java.lang.invoke.MethodHandles;
import java.nio.file.Paths;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.io.FileUtils;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.lucene.util.SuppressForbidden;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.Pair;
import org.apache.solr.packagemanager.PackageManager;
import org.apache.solr.packagemanager.PackageUtils;
import org.apache.solr.packagemanager.RepositoryManager;
import org.apache.solr.packagemanager.SolrPackage;
import org.apache.solr.packagemanager.SolrPackage.SolrPackageRelease;
import org.apache.solr.packagemanager.SolrPackageInstance;
import org.apache.solr.util.SolrCLI.StatusTool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PackageTool extends SolrCLI.ToolBase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@SuppressForbidden(reason = "Need to turn off logging, and SLF4J doesn't seem to provide for a way.")
public PackageTool() {
// Need a logging free, clean output going through to the user.
Configurator.setRootLevel(Level.OFF);
}
@Override
public String getName() {
return "package";
}
public static String solrUrl = null;
public static String solrBaseUrl = null;
public PackageManager packageManager;
public RepositoryManager repositoryManager;
@Override
@SuppressForbidden(reason = "We really need to print the stacktrace here, otherwise "
+ "there shall be little else information to debug problems. Other SolrCLI tools "
+ "don't print stack traces, hence special treatment is needed here.")
protected void runImpl(CommandLine cli) throws Exception {
try {
solrUrl = cli.getOptionValues("solrUrl")[cli.getOptionValues("solrUrl").length-1];
solrBaseUrl = solrUrl.replaceAll("\\/solr$", ""); // strip out ending "/solr"
log.info("Solr url:{}, solr base url: {}", solrUrl, solrBaseUrl);
String zkHost = getZkHost(cli);
log.info("ZK: {}", zkHost);
String cmd = cli.getArgList().size() == 0? "help": cli.getArgs()[0];
try (HttpSolrClient solrClient = new HttpSolrClient.Builder(solrBaseUrl).build()) {
if (cmd != null) {
packageManager = new PackageManager(solrClient, solrBaseUrl, zkHost);
try {
repositoryManager = new RepositoryManager(solrClient, packageManager);
switch (cmd) {
case "add-repo":
String repoName = cli.getArgs()[1];
String repoUrl = cli.getArgs()[2];
repositoryManager.addRepository(repoName, repoUrl);
PackageUtils.printGreen("Added repository: " + repoName);
break;
case "add-key":
String keyFilename = cli.getArgs()[1];
repositoryManager.addKey(FileUtils.readFileToByteArray(new File(keyFilename)), Paths.get(keyFilename).getFileName().toString());
break;
case "list-installed":
PackageUtils.printGreen("Installed packages:\n-----");
for (SolrPackageInstance pkg: packageManager.fetchInstalledPackageInstances()) {
PackageUtils.printGreen(pkg);
}
break;
case "list-available":
PackageUtils.printGreen("Available packages:\n-----");
for (SolrPackage pkg: repositoryManager.getPackages()) {
PackageUtils.printGreen(pkg.name + " \t\t"+pkg.description);
for (SolrPackageRelease version: pkg.versions) {
PackageUtils.printGreen("\tVersion: "+version.version);
}
}
break;
case "list-deployed":
if (cli.hasOption('c')) {
String collection = cli.getArgs()[1];
Map<String, SolrPackageInstance> packages = packageManager.getPackagesDeployed(collection);
PackageUtils.printGreen("Packages deployed on " + collection + ":");
for (String packageName: packages.keySet()) {
PackageUtils.printGreen("\t" + packages.get(packageName));
}
} else {
String packageName = cli.getArgs()[1];
Map<String, String> deployedCollections = packageManager.getDeployedCollections(packageName);
if (deployedCollections.isEmpty() == false) {
PackageUtils.printGreen("Collections on which package " + packageName + " was deployed:");
for (String collection: deployedCollections.keySet()) {
PackageUtils.printGreen("\t" + collection + "("+packageName+":"+deployedCollections.get(collection)+")");
}
} else {
PackageUtils.printGreen("Package "+packageName+" not deployed on any collection.");
}
}
break;
case "install":
{
Pair<String, String> parsedVersion = parsePackageVersion(cli.getArgList().get(1).toString());
String packageName = parsedVersion.first();
String version = parsedVersion.second();
repositoryManager.install(packageName, version);
PackageUtils.printGreen(packageName + " installed.");
break;
}
case "deploy":
{
Pair<String, String> parsedVersion = parsePackageVersion(cli.getArgList().get(1).toString());
String packageName = parsedVersion.first();
String version = parsedVersion.second();
boolean noprompt = cli.hasOption('y');
boolean isUpdate = cli.hasOption("update") || cli.hasOption('u');
packageManager.deploy(packageName, version, PackageUtils.validateCollections(cli.getOptionValue("collections").split(",")), cli.getOptionValues("param"), isUpdate, noprompt);
break;
}
case "undeploy":
{
Pair<String, String> parsedVersion = parsePackageVersion(cli.getArgList().get(1).toString());
if (parsedVersion.second() != null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Only package name expected, without a version. Actual: " + cli.getArgList().get(1));
}
String packageName = parsedVersion.first();
packageManager.undeploy(packageName, cli.getOptionValue("collections").split(","));
break;
}
case "help":
case "usage":
print("Package Manager\n---------------");
printGreen("./solr package add-repo <repository-name> <repository-url>");
print("Add a repository to Solr.");
print("");
printGreen("./solr package install <package-name>[:<version>] ");
print("Install a package into Solr. This copies over the artifacts from the repository into Solr's internal package store and sets up classloader for this package to be used.");
print("");
printGreen("./solr package deploy <package-name>[:<version>] [-y] [--update] -collections <comma-separated-collections> [-p <param1>=<val1> -p <param2>=<val2> ...] ");
print("Bootstraps a previously installed package into the specified collections. It the package accepts parameters for its setup commands, they can be specified (as per package documentation).");
print("");
printGreen("./solr package list-installed");
print("Print a list of packages installed in Solr.");
print("");
printGreen("./solr package list-available");
print("Print a list of packages available in the repositories.");
print("");
printGreen("./solr package list-deployed -c <collection>");
print("Print a list of packages deployed on a given collection.");
print("");
printGreen("./solr package list-deployed <package-name>");
print("Print a list of collections on which a given package has been deployed.");
print("");
printGreen("./solr package undeploy <package-name> -collections <comma-separated-collections>");
print("Undeploys a package from specified collection(s)");
print("\n");
print("Note: (a) Please add '-solrUrl http://host:port' parameter if needed (usually on Windows).");
print(" (b) Please make sure that all Solr nodes are started with '-Denable.packages=true' parameter.");
print("\n");
break;
default:
throw new RuntimeException("Unrecognized command: "+cmd);
};
} finally {
packageManager.close();
}
}
}
log.info("Finished: {}", cmd);
} catch (Exception ex) {
ex.printStackTrace(); // We need to print this since SolrCLI drops the stack trace in favour of brevity. Package tool should surely print full stacktraces!
throw ex;
}
}
/**
* Parses package name and version in the format "name:version" or "name"
* @return A pair of package name (first) and version (second)
*/
private Pair<String, String> parsePackageVersion(String arg) {
String[] splits = arg.split(":");
if (splits.length > 2) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid package name: " + arg +
". Didn't match the pattern: <packagename>:<version> or <packagename>");
}
String packageName = splits[0];
String version = splits.length == 2? splits[1]: null;
return new Pair<>(packageName, version);
}
public Option[] getOptions() {
return new Option[] {
Option.builder("solrUrl")
.argName("URL")
.hasArg()
.required(true)
.desc("Address of the Solr Web application, defaults to: " + SolrCLI.DEFAULT_SOLR_URL)
.build(),
Option.builder("collections")
.argName("COLLECTIONS")
.hasArg()
.required(false)
.desc("List of collections. Run './solr package help' for more details.")
.build(),
Option.builder("p")
.argName("PARAMS")
.hasArgs()
.required(false)
.desc("List of parameters to be used with deploy command. Run './solr package help' for more details.")
.longOpt("param")
.build(),
Option.builder("u")
.required(false)
.desc("If a deployment is an update over a previous deployment. Run './solr package help' for more details.")
.longOpt("update")
.build(),
Option.builder("c")
.required(false)
.desc("Run './solr package help' for more details.")
.longOpt("collection")
.build(),
Option.builder("y")
.required(false)
.desc("Run './solr package help' for more details.")
.longOpt("noprompt")
.build()
};
}
private String getZkHost(CommandLine cli) throws Exception {
String zkHost = cli.getOptionValue("zkHost");
if (zkHost != null)
return zkHost;
String systemInfoUrl = solrUrl+"/admin/info/system";
CloseableHttpClient httpClient = SolrCLI.getHttpClient();
try {
// hit Solr to get system info
Map<String,Object> systemInfo = SolrCLI.getJson(httpClient, systemInfoUrl, 2, true);
// convert raw JSON into user-friendly output
StatusTool statusTool = new StatusTool();
Map<String,Object> status = statusTool.reportStatus(solrUrl+"/", systemInfo, httpClient);
@SuppressWarnings({"unchecked"})
Map<String,Object> cloud = (Map<String, Object>)status.get("cloud");
if (cloud != null) {
String zookeeper = (String) cloud.get("ZooKeeper");
if (zookeeper.endsWith("(embedded)")) {
zookeeper = zookeeper.substring(0, zookeeper.length() - "(embedded)".length());
}
zkHost = zookeeper;
}
} finally {
HttpClientUtil.close(httpClient);
}
return zkHost;
}
} | 1 | 35,323 | Please don't use C-style array declarations. IMO our pre-commit ought to be enhanced to not allow this | apache-lucene-solr | java |
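The nit above targets the String collections[] declaration introduced by the patch; Java accepts the C-style suffix form, but the conventional form keeps the brackets on the type. The same statement in the preferred style, logic unchanged from the patch:
String[] collections = cli.hasOption("collections")
    ? PackageUtils.validateCollections(cli.getOptionValue("collections").split(","))
    : new String[] {};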
@@ -259,6 +259,12 @@ bool pmix_value_cmp(pmix_value_t *p, pmix_value_t *p1)
case PMIX_STRING:
rc = strcmp(p->data.string, p1->data.string);
break;
+ case PMIX_COMPRESSED_STRING:
+ if (p->data.bo.size != p1->data.bo.size) {
+ return false;
+ } else {
+ return true;
+ }
case PMIX_STATUS:
rc = (p->data.status == p1->data.status);
break; | 1 | /*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2014-2016 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include <src/include/pmix_config.h>
#include "src/util/argv.h"
#include "src/util/error.h"
#include "src/util/output.h"
#include "src/buffer_ops/buffer_ops.h"
#include "src/buffer_ops/internal.h"
pmix_status_t pmix_bfrop_copy(void **dest, void *src, pmix_data_type_t type)
{
pmix_bfrop_type_info_t *info;
/* check for error */
if (NULL == dest) {
PMIX_ERROR_LOG(PMIX_ERR_BAD_PARAM);
return PMIX_ERR_BAD_PARAM;
}
if (NULL == src) {
PMIX_ERROR_LOG(PMIX_ERR_BAD_PARAM);
return PMIX_ERR_BAD_PARAM;
}
/* Lookup the copy function for this type and call it */
if (NULL == (info = (pmix_bfrop_type_info_t*)pmix_pointer_array_get_item(&pmix_bfrop_types, type))) {
PMIX_ERROR_LOG(PMIX_ERR_UNKNOWN_DATA_TYPE);
return PMIX_ERR_UNKNOWN_DATA_TYPE;
}
return info->odti_copy_fn(dest, src, type);
}
pmix_status_t pmix_bfrop_copy_payload(pmix_buffer_t *dest, pmix_buffer_t *src)
{
size_t to_copy = 0;
char *ptr;
/* deal with buffer type */
if( NULL == dest->base_ptr ){
/* destination buffer is empty - derive src buffer type */
dest->type = src->type;
} else if( dest->type != src->type ){
/* buffer types mismatch */
PMIX_ERROR_LOG(PMIX_ERR_BAD_PARAM);
return PMIX_ERR_BAD_PARAM;
}
to_copy = src->pack_ptr - src->unpack_ptr;
if( NULL == (ptr = pmix_bfrop_buffer_extend(dest, to_copy)) ){
PMIX_ERROR_LOG(PMIX_ERR_OUT_OF_RESOURCE);
return PMIX_ERR_OUT_OF_RESOURCE;
}
memcpy(ptr,src->unpack_ptr, to_copy);
dest->bytes_used += to_copy;
dest->pack_ptr += to_copy;
return PMIX_SUCCESS;
}
/*
* STANDARD COPY FUNCTION - WORKS FOR EVERYTHING NON-STRUCTURED
*/
pmix_status_t pmix_bfrop_std_copy(void **dest, void *src, pmix_data_type_t type)
{
size_t datasize;
uint8_t *val = NULL;
switch(type) {
case PMIX_BOOL:
datasize = sizeof(bool);
break;
case PMIX_INT:
case PMIX_UINT:
datasize = sizeof(int);
break;
case PMIX_SIZE:
datasize = sizeof(size_t);
break;
case PMIX_PID:
datasize = sizeof(pid_t);
break;
case PMIX_BYTE:
case PMIX_INT8:
case PMIX_UINT8:
datasize = 1;
break;
case PMIX_INT16:
case PMIX_UINT16:
datasize = 2;
break;
case PMIX_INT32:
case PMIX_UINT32:
datasize = 4;
break;
case PMIX_INT64:
case PMIX_UINT64:
datasize = 8;
break;
case PMIX_FLOAT:
datasize = sizeof(float);
break;
case PMIX_TIMEVAL:
datasize = sizeof(struct timeval);
break;
case PMIX_TIME:
datasize = sizeof(time_t);
break;
case PMIX_STATUS:
datasize = sizeof(pmix_status_t);
break;
case PMIX_PROC_RANK:
datasize = sizeof(pmix_rank_t);
break;
case PMIX_PERSIST:
datasize = sizeof(pmix_persistence_t);
break;
case PMIX_POINTER:
datasize = sizeof(char*);
break;
case PMIX_SCOPE:
datasize = sizeof(pmix_scope_t);
break;
case PMIX_DATA_RANGE:
datasize = sizeof(pmix_data_range_t);
break;
case PMIX_COMMAND:
datasize = sizeof(pmix_cmd_t);
break;
case PMIX_INFO_DIRECTIVES:
datasize = sizeof(pmix_info_directives_t);
break;
case PMIX_PROC_STATE:
datasize = sizeof(pmix_proc_state_t);
break;
default:
return PMIX_ERR_UNKNOWN_DATA_TYPE;
}
val = (uint8_t*)malloc(datasize);
if (NULL == val) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
memcpy(val, src, datasize);
*dest = val;
return PMIX_SUCCESS;
}
/* COPY FUNCTIONS FOR NON-STANDARD SYSTEM TYPES */
/*
* STRING
*/
pmix_status_t pmix_bfrop_copy_string(char **dest, char *src, pmix_data_type_t type)
{
if (NULL == src) { /* got zero-length string/NULL pointer - store NULL */
*dest = NULL;
} else {
*dest = strdup(src);
}
return PMIX_SUCCESS;
}
/* compare function for pmix_value_t */
bool pmix_value_cmp(pmix_value_t *p, pmix_value_t *p1)
{
bool rc = false;
if (p->type != p1->type) {
return rc;
}
switch (p->type) {
case PMIX_UNDEF:
rc = true;
break;
case PMIX_BOOL:
rc = (p->data.flag == p1->data.flag);
break;
case PMIX_BYTE:
rc = (p->data.byte == p1->data.byte);
break;
case PMIX_SIZE:
rc = (p->data.size == p1->data.size);
break;
case PMIX_INT:
rc = (p->data.integer == p1->data.integer);
break;
case PMIX_INT8:
rc = (p->data.int8 == p1->data.int8);
break;
case PMIX_INT16:
rc = (p->data.int16 == p1->data.int16);
break;
case PMIX_INT32:
rc = (p->data.int32 == p1->data.int32);
break;
case PMIX_INT64:
rc = (p->data.int64 == p1->data.int64);
break;
case PMIX_UINT:
rc = (p->data.uint == p1->data.uint);
break;
case PMIX_UINT8:
rc = (p->data.uint8 == p1->data.int8);
break;
case PMIX_UINT16:
rc = (p->data.uint16 == p1->data.uint16);
break;
case PMIX_UINT32:
rc = (p->data.uint32 == p1->data.uint32);
break;
case PMIX_UINT64:
rc = (p->data.uint64 == p1->data.uint64);
break;
case PMIX_STRING:
rc = strcmp(p->data.string, p1->data.string);
break;
case PMIX_STATUS:
rc = (p->data.status == p1->data.status);
break;
default:
pmix_output(0, "COMPARE-PMIX-VALUE: UNSUPPORTED TYPE %d", (int)p->type);
}
return rc;
}
/* COPY FUNCTIONS FOR GENERIC PMIX TYPES - we
* are not allocating memory and so we cannot
* use the regular copy functions */
PMIX_EXPORT pmix_status_t pmix_value_xfer(pmix_value_t *p, pmix_value_t *src)
{
size_t n, m;
pmix_status_t rc;
char **prarray, **strarray;
pmix_value_t *pv, *sv;
pmix_info_t *p1, *s1;
pmix_app_t *pa, *sa;
pmix_pdata_t *pd, *sd;
pmix_buffer_t *pb, *sb;
pmix_byte_object_t *pbo, *sbo;
pmix_kval_t *pk, *sk;
pmix_modex_data_t *pm, *sm;
pmix_proc_info_t *pi, *si;
pmix_query_t *pq, *sq;
/* copy the right field */
p->type = src->type;
switch (src->type) {
case PMIX_UNDEF:
break;
case PMIX_BOOL:
p->data.flag = src->data.flag;
break;
case PMIX_BYTE:
p->data.byte = src->data.byte;
break;
case PMIX_STRING:
if (NULL != src->data.string) {
p->data.string = strdup(src->data.string);
} else {
p->data.string = NULL;
}
break;
case PMIX_SIZE:
p->data.size = src->data.size;
break;
case PMIX_PID:
p->data.pid = src->data.pid;
break;
case PMIX_INT:
/* to avoid alignment issues */
memcpy(&p->data.integer, &src->data.integer, sizeof(int));
break;
case PMIX_INT8:
p->data.int8 = src->data.int8;
break;
case PMIX_INT16:
/* to avoid alignment issues */
memcpy(&p->data.int16, &src->data.int16, 2);
break;
case PMIX_INT32:
/* to avoid alignment issues */
memcpy(&p->data.int32, &src->data.int32, 4);
break;
case PMIX_INT64:
/* to avoid alignment issues */
memcpy(&p->data.int64, &src->data.int64, 8);
break;
case PMIX_UINT:
/* to avoid alignment issues */
memcpy(&p->data.uint, &src->data.uint, sizeof(unsigned int));
break;
case PMIX_UINT8:
p->data.uint8 = src->data.uint8;
break;
case PMIX_UINT16:
/* to avoid alignment issues */
memcpy(&p->data.uint16, &src->data.uint16, 2);
break;
case PMIX_UINT32:
/* to avoid alignment issues */
memcpy(&p->data.uint32, &src->data.uint32, 4);
break;
case PMIX_UINT64:
/* to avoid alignment issues */
memcpy(&p->data.uint64, &src->data.uint64, 8);
break;
case PMIX_FLOAT:
p->data.fval = src->data.fval;
break;
case PMIX_DOUBLE:
p->data.dval = src->data.dval;
break;
case PMIX_TIMEVAL:
memcpy(&p->data.tv, &src->data.tv, sizeof(struct timeval));
break;
case PMIX_TIME:
memcpy(&p->data.time, &src->data.time, sizeof(time_t));
break;
case PMIX_STATUS:
memcpy(&p->data.status, &src->data.status, sizeof(pmix_status_t));
break;
case PMIX_PROC:
memcpy(&p->data.proc, &src->data.proc, sizeof(pmix_proc_t));
break;
case PMIX_PROC_RANK:
memcpy(&p->data.proc, &src->data.rank, sizeof(pmix_rank_t));
break;
case PMIX_BYTE_OBJECT:
memset(&p->data.bo, 0, sizeof(pmix_byte_object_t));
if (NULL != src->data.bo.bytes && 0 < src->data.bo.size) {
p->data.bo.bytes = malloc(src->data.bo.size);
memcpy(p->data.bo.bytes, src->data.bo.bytes, src->data.bo.size);
p->data.bo.size = src->data.bo.size;
} else {
p->data.bo.bytes = NULL;
p->data.bo.size = 0;
}
break;
case PMIX_PERSIST:
memcpy(&p->data.persist, &src->data.persist, sizeof(pmix_persistence_t));
break;
case PMIX_SCOPE:
memcpy(&p->data.scope, &src->data.scope, sizeof(pmix_scope_t));
break;
case PMIX_DATA_RANGE:
memcpy(&p->data.range, &src->data.range, sizeof(pmix_data_range_t));
break;
case PMIX_PROC_STATE:
memcpy(&p->data.state, &src->data.state, sizeof(pmix_proc_state_t));
break;
case PMIX_PROC_INFO:
PMIX_PROC_INFO_CREATE(p->data.pinfo, 1);
if (NULL != src->data.pinfo->hostname) {
p->data.pinfo->hostname = strdup(src->data.pinfo->hostname);
}
if (NULL != src->data.pinfo->executable_name) {
p->data.pinfo->executable_name = strdup(src->data.pinfo->executable_name);
}
memcpy(&p->data.pinfo->pid, &src->data.pinfo->pid, sizeof(pid_t));
memcpy(&p->data.pinfo->exit_code, &src->data.pinfo->exit_code, sizeof(int));
memcpy(&p->data.pinfo->state, &src->data.pinfo->state, sizeof(pmix_proc_state_t));
break;
case PMIX_DATA_ARRAY:
p->data.darray = (pmix_data_array_t*)calloc(1, sizeof(pmix_data_array_t));
p->data.darray->type = src->data.darray->type;
p->data.darray->size = src->data.darray->size;
if (0 == p->data.darray->size || NULL == src->data.darray->array) {
p->data.darray->array = NULL;
p->data.darray->size = 0;
break;
}
/* allocate space and do the copy */
switch (src->type) {
case PMIX_UINT8:
case PMIX_INT8:
case PMIX_BYTE:
p->data.darray->array = (char*)malloc(src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size);
break;
case PMIX_UINT16:
case PMIX_INT16:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(uint16_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(uint16_t));
break;
case PMIX_UINT32:
case PMIX_INT32:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(uint32_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(uint32_t));
break;
case PMIX_UINT64:
case PMIX_INT64:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(uint64_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(uint64_t));
break;
case PMIX_BOOL:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(bool));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(bool));
break;
case PMIX_SIZE:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(size_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(size_t));
break;
case PMIX_PID:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(pid_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pid_t));
break;
case PMIX_STRING:
p->data.darray->array = (char**)malloc(src->data.darray->size * sizeof(char*));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
prarray = (char**)p->data.darray->array;
strarray = (char**)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (NULL != strarray[n]) {
prarray[n] = strdup(strarray[n]);
}
}
break;
case PMIX_INT:
case PMIX_UINT:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(int));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(int));
break;
case PMIX_FLOAT:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(float));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(float));
break;
case PMIX_DOUBLE:
p->data.darray->array = (char*)malloc(src->data.darray->size * sizeof(double));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(double));
break;
case PMIX_TIMEVAL:
p->data.darray->array = (struct timeval*)malloc(src->data.darray->size * sizeof(struct timeval));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(struct timeval));
break;
case PMIX_TIME:
p->data.darray->array = (time_t*)malloc(src->data.darray->size * sizeof(time_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(time_t));
break;
case PMIX_STATUS:
p->data.darray->array = (pmix_status_t*)malloc(src->data.darray->size * sizeof(pmix_status_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_status_t));
break;
case PMIX_VALUE:
PMIX_VALUE_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pv = (pmix_value_t*)p->data.darray->array;
sv = (pmix_value_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (PMIX_SUCCESS != (rc = pmix_value_xfer(&pv[n], &sv[n]))) {
PMIX_VALUE_FREE(pv, src->data.darray->size);
return rc;
}
}
break;
case PMIX_PROC:
PMIX_PROC_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_proc_t));
break;
case PMIX_APP:
PMIX_APP_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pa = (pmix_app_t*)p->data.darray->array;
sa = (pmix_app_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (NULL != sa[n].cmd) {
pa[n].cmd = strdup(sa[n].cmd);
}
if (NULL != sa[n].argv) {
pa[n].argv = pmix_argv_copy(sa[n].argv);
}
if (NULL != sa[n].env) {
pa[n].env = pmix_argv_copy(sa[n].env);
}
if (NULL != sa[n].cwd) {
pa[n].cwd = strdup(sa[n].cwd);
}
pa[n].maxprocs = sa[n].maxprocs;
if (0 < sa[n].ninfo && NULL != sa[n].info) {
PMIX_INFO_CREATE(pa[n].info, sa[n].ninfo);
if (NULL == pa[n].info) {
PMIX_APP_FREE(pa, src->data.darray->size);
return PMIX_ERR_NOMEM;
}
pa[n].ninfo = sa[n].ninfo;
for (m=0; m < pa[n].ninfo; m++) {
PMIX_INFO_XFER(&pa[n].info[m], &sa[n].info[m]);
}
}
}
break;
case PMIX_INFO:
PMIX_INFO_CREATE(p->data.darray->array, src->data.darray->size);
p1 = (pmix_info_t*)p->data.darray->array;
s1 = (pmix_info_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
PMIX_INFO_LOAD(&p1[n], s1[n].key, &s1[n].value.data.flag, s1[n].value.type);
}
break;
case PMIX_PDATA:
PMIX_PDATA_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pd = (pmix_pdata_t*)p->data.darray->array;
sd = (pmix_pdata_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
PMIX_PDATA_LOAD(&pd[n], &sd[n].proc, sd[n].key, &sd[n].value.data.flag, sd[n].value.type);
}
break;
case PMIX_BUFFER:
p->data.darray->array = (pmix_buffer_t*)malloc(src->data.darray->size * sizeof(pmix_buffer_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pb = (pmix_buffer_t*)p->data.darray->array;
sb = (pmix_buffer_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
PMIX_CONSTRUCT(&pb[n], pmix_buffer_t);
pmix_bfrop.copy_payload(&pb[n], &sb[n]);
}
break;
case PMIX_BYTE_OBJECT:
p->data.darray->array = (pmix_byte_object_t*)malloc(src->data.darray->size * sizeof(pmix_byte_object_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pbo = (pmix_byte_object_t*)p->data.darray->array;
sbo = (pmix_byte_object_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (NULL != sbo[n].bytes && 0 < sbo[n].size) {
pbo[n].size = sbo[n].size;
pbo[n].bytes = (char*)malloc(pbo[n].size);
memcpy(pbo[n].bytes, sbo[n].bytes, pbo[n].size);
} else {
pbo[n].bytes = NULL;
pbo[n].size = 0;
}
}
break;
case PMIX_KVAL:
p->data.darray->array = (pmix_kval_t*)calloc(src->data.darray->size , sizeof(pmix_kval_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pk = (pmix_kval_t*)p->data.darray->array;
sk = (pmix_kval_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (NULL != sk[n].key) {
pk[n].key = strdup(sk[n].key);
}
if (NULL != sk[n].value) {
PMIX_VALUE_CREATE(pk[n].value, 1);
if (NULL == pk[n].value) {
free(p->data.darray->array);
return PMIX_ERR_NOMEM;
}
if (PMIX_SUCCESS != (rc = pmix_value_xfer(pk[n].value, sk[n].value))) {
return rc;
}
}
}
break;
case PMIX_MODEX:
PMIX_MODEX_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pm = (pmix_modex_data_t*)p->data.darray->array;
sm = (pmix_modex_data_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
memcpy(&pm[n], &sm[n], sizeof(pmix_modex_data_t));
if (NULL != sm[n].blob && 0 < sm[n].size) {
pm[n].blob = (uint8_t*)malloc(sm[n].size);
if (NULL == pm[n].blob) {
return PMIX_ERR_NOMEM;
}
memcpy(pm[n].blob, sm[n].blob, sm[n].size);
pm[n].size = sm[n].size;
} else {
pm[n].blob = NULL;
pm[n].size = 0;
}
}
break;
case PMIX_PERSIST:
p->data.darray->array = (pmix_persistence_t*)malloc(src->data.darray->size * sizeof(pmix_persistence_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_persistence_t));
break;
case PMIX_POINTER:
p->data.darray->array = (char**)malloc(src->data.darray->size * sizeof(char*));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
prarray = (char**)p->data.darray->array;
strarray = (char**)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
prarray[n] = strarray[n];
}
break;
case PMIX_SCOPE:
p->data.darray->array = (pmix_scope_t*)malloc(src->data.darray->size * sizeof(pmix_scope_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_scope_t));
break;
case PMIX_DATA_RANGE:
p->data.darray->array = (pmix_data_range_t*)malloc(src->data.darray->size * sizeof(pmix_data_range_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_data_range_t));
break;
case PMIX_COMMAND:
p->data.darray->array = (pmix_cmd_t*)malloc(src->data.darray->size * sizeof(pmix_cmd_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_cmd_t));
break;
case PMIX_INFO_DIRECTIVES:
p->data.darray->array = (pmix_info_directives_t*)malloc(src->data.darray->size * sizeof(pmix_info_directives_t));
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
memcpy(p->data.darray->array, src->data.darray->array, src->data.darray->size * sizeof(pmix_info_directives_t));
break;
case PMIX_PROC_INFO:
PMIX_PROC_INFO_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pi = (pmix_proc_info_t*)p->data.darray->array;
si = (pmix_proc_info_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
memcpy(&pi[n].proc, &si[n].proc, sizeof(pmix_proc_t));
if (NULL != si[n].hostname) {
pi[n].hostname = strdup(si[n].hostname);
} else {
pi[n].hostname = NULL;
}
if (NULL != si[n].executable_name) {
pi[n].executable_name = strdup(si[n].executable_name);
} else {
pi[n].executable_name = NULL;
}
pi[n].pid = si[n].pid;
pi[n].exit_code = si[n].exit_code;
pi[n].state = si[n].state;
}
break;
case PMIX_DATA_ARRAY:
return PMIX_ERR_NOT_SUPPORTED; // don't support iterative arrays
case PMIX_QUERY:
PMIX_QUERY_CREATE(p->data.darray->array, src->data.darray->size);
if (NULL == p->data.darray->array) {
return PMIX_ERR_NOMEM;
}
pq = (pmix_query_t*)p->data.darray->array;
sq = (pmix_query_t*)src->data.darray->array;
for (n=0; n < src->data.darray->size; n++) {
if (NULL != sq[n].keys) {
pq[n].keys = pmix_argv_copy(sq[n].keys);
}
if (NULL != sq[n].qualifiers && 0 < sq[n].nqual) {
PMIX_INFO_CREATE(pq[n].qualifiers, sq[n].nqual);
if (NULL == pq[n].qualifiers) {
PMIX_QUERY_FREE(pq, src->data.darray->size);
return PMIX_ERR_NOMEM;
}
for (m=0; m < sq[n].nqual; m++) {
PMIX_INFO_XFER(&pq[n].qualifiers[m], &sq[n].qualifiers[m]);
}
pq[n].nqual = sq[n].nqual;
} else {
pq[n].qualifiers = NULL;
pq[n].nqual = 0;
}
}
break;
default:
return PMIX_ERR_UNKNOWN_DATA_TYPE;
}
break;
case PMIX_POINTER:
memcpy(&p->data.ptr, &src->data.ptr, sizeof(void*));
break;
/**** DEPRECATED ****/
case PMIX_INFO_ARRAY:
p->data.array->size = src->data.array->size;
if (0 < src->data.array->size) {
p->data.array->array = (pmix_info_t*)malloc(src->data.array->size * sizeof(pmix_info_t));
if (NULL == p->data.array->array) {
return PMIX_ERR_NOMEM;
}
p1 = (pmix_info_t*)p->data.array->array;
s1 = (pmix_info_t*)src->data.array->array;
            for (n=0; n < src->data.array->size; n++) {
PMIX_INFO_LOAD(&p1[n], s1[n].key, &s1[n].value.data.flag, s1[n].value.type);
}
}
break;
/********************/
default:
pmix_output(0, "COPY-PMIX-VALUE: UNSUPPORTED TYPE %d", (int)src->type);
return PMIX_ERROR;
}
return PMIX_SUCCESS;
}
/* PMIX_VALUE */
pmix_status_t pmix_bfrop_copy_value(pmix_value_t **dest, pmix_value_t *src,
pmix_data_type_t type)
{
pmix_value_t *p;
/* create the new object */
*dest = (pmix_value_t*)malloc(sizeof(pmix_value_t));
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
p = *dest;
/* copy the type */
p->type = src->type;
/* copy the data */
return pmix_value_xfer(p, src);
}
pmix_status_t pmix_bfrop_copy_info(pmix_info_t **dest, pmix_info_t *src,
pmix_data_type_t type)
{
*dest = (pmix_info_t*)malloc(sizeof(pmix_info_t));
(void)strncpy((*dest)->key, src->key, PMIX_MAX_KEYLEN);
(*dest)->flags = src->flags;
return pmix_value_xfer(&(*dest)->value, &src->value);
}
pmix_status_t pmix_bfrop_copy_buf(pmix_buffer_t **dest, pmix_buffer_t *src,
pmix_data_type_t type)
{
*dest = PMIX_NEW(pmix_buffer_t);
pmix_bfrop.copy_payload(*dest, src);
return PMIX_SUCCESS;
}
pmix_status_t pmix_bfrop_copy_app(pmix_app_t **dest, pmix_app_t *src,
pmix_data_type_t type)
{
size_t j;
*dest = (pmix_app_t*)malloc(sizeof(pmix_app_t));
(*dest)->cmd = strdup(src->cmd);
(*dest)->argv = pmix_argv_copy(src->argv);
(*dest)->env = pmix_argv_copy(src->env);
if (NULL != src->cwd) {
(*dest)->cwd = strdup(src->cwd);
}
(*dest)->maxprocs = src->maxprocs;
(*dest)->ninfo = src->ninfo;
(*dest)->info = (pmix_info_t*)malloc(src->ninfo * sizeof(pmix_info_t));
for (j=0; j < src->ninfo; j++) {
(void)strncpy((*dest)->info[j].key, src->info[j].key, PMIX_MAX_KEYLEN);
pmix_value_xfer(&(*dest)->info[j].value, &src->info[j].value);
}
return PMIX_SUCCESS;
}
pmix_status_t pmix_bfrop_copy_kval(pmix_kval_t **dest, pmix_kval_t *src,
pmix_data_type_t type)
{
pmix_kval_t *p;
/* create the new object */
*dest = PMIX_NEW(pmix_kval_t);
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
p = *dest;
/* copy the type */
p->value->type = src->value->type;
/* copy the data */
return pmix_value_xfer(p->value, src->value);
}
pmix_status_t pmix_bfrop_copy_proc(pmix_proc_t **dest, pmix_proc_t *src,
pmix_data_type_t type)
{
*dest = (pmix_proc_t*)malloc(sizeof(pmix_proc_t));
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
(void)strncpy((*dest)->nspace, src->nspace, PMIX_MAX_NSLEN);
(*dest)->rank = src->rank;
return PMIX_SUCCESS;
}
#if PMIX_HAVE_HWLOC
pmix_status_t pmix_bfrop_copy_topo(hwloc_topology_t *dest,
hwloc_topology_t src,
pmix_data_type_t type)
{
/* use the hwloc dup function */
return hwloc_topology_dup(dest, src);
}
#endif
pmix_status_t pmix_bfrop_copy_modex(pmix_modex_data_t **dest, pmix_modex_data_t *src,
pmix_data_type_t type)
{
*dest = (pmix_modex_data_t*)malloc(sizeof(pmix_modex_data_t));
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
(*dest)->blob = NULL;
(*dest)->size = 0;
if (NULL != src->blob) {
(*dest)->blob = (uint8_t*)malloc(src->size * sizeof(uint8_t));
if (NULL == (*dest)->blob) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
memcpy((*dest)->blob, src->blob, src->size * sizeof(uint8_t));
(*dest)->size = src->size;
}
return PMIX_SUCCESS;
}
pmix_status_t pmix_bfrop_copy_persist(pmix_persistence_t **dest, pmix_persistence_t *src,
pmix_data_type_t type)
{
*dest = (pmix_persistence_t*)malloc(sizeof(pmix_persistence_t));
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
memcpy(*dest, src, sizeof(pmix_persistence_t));
return PMIX_SUCCESS;
}
pmix_status_t pmix_bfrop_copy_bo(pmix_byte_object_t **dest, pmix_byte_object_t *src,
pmix_data_type_t type)
{
*dest = (pmix_byte_object_t*)malloc(sizeof(pmix_byte_object_t));
if (NULL == *dest) {
return PMIX_ERR_OUT_OF_RESOURCE;
}
(*dest)->bytes = (char*)malloc(src->size);
memcpy((*dest)->bytes, src->bytes, src->size);
(*dest)->size = src->size;
return PMIX_SUCCESS;
}
pmix_status_t pmix_bfrop_copy_pdata(pmix_pdata_t **dest, pmix_pdata_t *src,
pmix_data_type_t type)
{
*dest = (pmix_pdata_t*)malloc(sizeof(pmix_pdata_t));
(void)strncpy((*dest)->proc.nspace, src->proc.nspace, PMIX_MAX_NSLEN);
(*dest)->proc.rank = src->proc.rank;
(void)strncpy((*dest)->key, src->key, PMIX_MAX_KEYLEN);
return pmix_value_xfer(&(*dest)->value, &src->value);
}
pmix_status_t pmix_bfrop_copy_pinfo(pmix_proc_info_t **dest, pmix_proc_info_t *src,
pmix_data_type_t type)
{
    /* use calloc so the optional string fields start out NULL */
    *dest = (pmix_proc_info_t*)calloc(1, sizeof(pmix_proc_info_t));
    if (NULL == *dest) {
        return PMIX_ERR_OUT_OF_RESOURCE;
    }
    (void)strncpy((*dest)->proc.nspace, src->proc.nspace, PMIX_MAX_NSLEN);
    (*dest)->proc.rank = src->proc.rank;
    if (NULL != src->hostname) {
        (*dest)->hostname = strdup(src->hostname);
    }
    if (NULL != src->executable_name) {
        (*dest)->executable_name = strdup(src->executable_name);
    }
(*dest)->pid = src->pid;
(*dest)->exit_code = src->exit_code;
(*dest)->state = src->state;
return PMIX_SUCCESS;
}
/* the pmix_data_array_t is a little different in that it
* is an array of values, and so we cannot just copy one
* value at a time. So handle all value types here */
pmix_status_t pmix_bfrop_copy_darray(pmix_data_array_t **dest,
pmix_data_array_t *src,
pmix_data_type_t type)
{
pmix_data_array_t *p;
size_t n, m;
pmix_status_t rc;
char **prarray, **strarray;
pmix_value_t *pv, *sv;
pmix_app_t *pa, *sa;
pmix_info_t *p1, *s1;
pmix_pdata_t *pd, *sd;
pmix_buffer_t *pb, *sb;
pmix_byte_object_t *pbo, *sbo;
pmix_kval_t *pk, *sk;
pmix_modex_data_t *pm, *sm;
pmix_proc_info_t *pi, *si;
pmix_query_t *pq, *sq;
p = (pmix_data_array_t*)calloc(1, sizeof(pmix_data_array_t));
if (NULL == p) {
return PMIX_ERR_NOMEM;
}
p->type = src->type;
p->size = src->size;
    /* an empty source array needs no element-wise copy - hand back an
     * empty destination array (note: statements placed between "switch"
     * and its first "case" label would never execute, so this check must
     * live before the switch) */
    if (0 == p->size || NULL == src->array) {
        p->array = NULL;
        p->size = 0;
        (*dest) = p;
        return PMIX_SUCCESS;
    }
    /* process based on type of array element */
    switch (src->type) {
case PMIX_UINT8:
case PMIX_INT8:
case PMIX_BYTE:
p->array = (char*)malloc(src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size);
break;
case PMIX_UINT16:
case PMIX_INT16:
p->array = (char*)malloc(src->size * sizeof(uint16_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(uint16_t));
break;
case PMIX_UINT32:
case PMIX_INT32:
p->array = (char*)malloc(src->size * sizeof(uint32_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(uint32_t));
break;
case PMIX_UINT64:
case PMIX_INT64:
p->array = (char*)malloc(src->size * sizeof(uint64_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(uint64_t));
break;
case PMIX_BOOL:
p->array = (char*)malloc(src->size * sizeof(bool));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(bool));
break;
case PMIX_SIZE:
p->array = (char*)malloc(src->size * sizeof(size_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(size_t));
break;
case PMIX_PID:
p->array = (char*)malloc(src->size * sizeof(pid_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pid_t));
break;
case PMIX_STRING:
p->array = (char**)malloc(src->size * sizeof(char*));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
prarray = (char**)p->array;
strarray = (char**)src->array;
for (n=0; n < src->size; n++) {
if (NULL != strarray[n]) {
prarray[n] = strdup(strarray[n]);
}
}
break;
case PMIX_INT:
case PMIX_UINT:
p->array = (char*)malloc(src->size * sizeof(int));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(int));
break;
case PMIX_FLOAT:
p->array = (char*)malloc(src->size * sizeof(float));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(float));
break;
case PMIX_DOUBLE:
p->array = (char*)malloc(src->size * sizeof(double));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(double));
break;
case PMIX_TIMEVAL:
p->array = (struct timeval*)malloc(src->size * sizeof(struct timeval));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(struct timeval));
break;
case PMIX_TIME:
p->array = (time_t*)malloc(src->size * sizeof(time_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(time_t));
break;
case PMIX_STATUS:
p->array = (pmix_status_t*)malloc(src->size * sizeof(pmix_status_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_status_t));
break;
case PMIX_VALUE:
PMIX_VALUE_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pv = (pmix_value_t*)p->array;
sv = (pmix_value_t*)src->array;
for (n=0; n < src->size; n++) {
if (PMIX_SUCCESS != (rc = pmix_value_xfer(&pv[n], &sv[n]))) {
PMIX_VALUE_FREE(pv, src->size);
free(p);
return rc;
}
}
break;
case PMIX_PROC:
PMIX_PROC_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_proc_t));
break;
case PMIX_PROC_RANK:
p->array = (char*)malloc(src->size * sizeof(pmix_rank_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
            memcpy(p->array, src->array, src->size * sizeof(pmix_rank_t));
break;
case PMIX_APP:
PMIX_APP_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pa = (pmix_app_t*)p->array;
sa = (pmix_app_t*)src->array;
for (n=0; n < src->size; n++) {
if (NULL != sa[n].cmd) {
pa[n].cmd = strdup(sa[n].cmd);
}
if (NULL != sa[n].argv) {
pa[n].argv = pmix_argv_copy(sa[n].argv);
}
if (NULL != sa[n].env) {
pa[n].env = pmix_argv_copy(sa[n].env);
}
if (NULL != sa[n].cwd) {
pa[n].cwd = strdup(sa[n].cwd);
}
pa[n].maxprocs = sa[n].maxprocs;
if (0 < sa[n].ninfo && NULL != sa[n].info) {
PMIX_INFO_CREATE(pa[n].info, sa[n].ninfo);
if (NULL == pa[n].info) {
PMIX_APP_FREE(pa, p->size);
free(p);
return PMIX_ERR_NOMEM;
}
pa[n].ninfo = sa[n].ninfo;
for (m=0; m < pa[n].ninfo; m++) {
PMIX_INFO_XFER(&pa[n].info[m], &sa[n].info[m]);
}
}
}
break;
case PMIX_INFO:
PMIX_INFO_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
p1 = (pmix_info_t*)p->array;
s1 = (pmix_info_t*)src->array;
for (n=0; n < src->size; n++) {
PMIX_INFO_LOAD(&p1[n], s1[n].key, &s1[n].value.data.flag, s1[n].value.type);
}
break;
case PMIX_PDATA:
PMIX_PDATA_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pd = (pmix_pdata_t*)p->array;
sd = (pmix_pdata_t*)src->array;
for (n=0; n < src->size; n++) {
PMIX_PDATA_LOAD(&pd[n], &sd[n].proc, sd[n].key, &sd[n].value, sd[n].value.type);
}
break;
case PMIX_BUFFER:
p->array = (pmix_buffer_t*)malloc(src->size * sizeof(pmix_buffer_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pb = (pmix_buffer_t*)p->array;
sb = (pmix_buffer_t*)src->array;
for (n=0; n < src->size; n++) {
PMIX_CONSTRUCT(&pb[n], pmix_buffer_t);
pmix_bfrop.copy_payload(&pb[n], &sb[n]);
}
break;
case PMIX_BYTE_OBJECT:
p->array = (pmix_byte_object_t*)malloc(src->size * sizeof(pmix_byte_object_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pbo = (pmix_byte_object_t*)p->array;
sbo = (pmix_byte_object_t*)src->array;
for (n=0; n < src->size; n++) {
if (NULL != sbo[n].bytes && 0 < sbo[n].size) {
pbo[n].size = sbo[n].size;
pbo[n].bytes = (char*)malloc(pbo[n].size);
memcpy(pbo[n].bytes, sbo[n].bytes, pbo[n].size);
} else {
pbo[n].bytes = NULL;
pbo[n].size = 0;
}
}
break;
case PMIX_KVAL:
p->array = (pmix_kval_t*)calloc(src->size , sizeof(pmix_kval_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pk = (pmix_kval_t*)p->array;
sk = (pmix_kval_t*)src->array;
for (n=0; n < src->size; n++) {
if (NULL != sk[n].key) {
pk[n].key = strdup(sk[n].key);
}
if (NULL != sk[n].value) {
PMIX_VALUE_CREATE(pk[n].value, 1);
if (NULL == pk[n].value) {
PMIX_VALUE_FREE(pk[n].value, 1);
free(p);
return PMIX_ERR_NOMEM;
}
if (PMIX_SUCCESS != (rc = pmix_value_xfer(pk[n].value, sk[n].value))) {
PMIX_VALUE_FREE(pk[n].value, 1);
free(p);
return rc;
}
}
}
break;
case PMIX_MODEX:
PMIX_MODEX_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pm = (pmix_modex_data_t*)p->array;
sm = (pmix_modex_data_t*)src->array;
for (n=0; n < src->size; n++) {
memcpy(&pm[n], &sm[n], sizeof(pmix_modex_data_t));
if (NULL != sm[n].blob && 0 < sm[n].size) {
pm[n].blob = (uint8_t*)malloc(sm[n].size);
if (NULL == pm[n].blob) {
PMIX_MODEX_FREE(pm, src->size);
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(pm[n].blob, sm[n].blob, sm[n].size);
pm[n].size = sm[n].size;
} else {
pm[n].blob = NULL;
pm[n].size = 0;
}
}
break;
case PMIX_PERSIST:
p->array = (pmix_persistence_t*)malloc(src->size * sizeof(pmix_persistence_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_persistence_t));
break;
case PMIX_POINTER:
        p->array = (char**)malloc(src->size * sizeof(char*));
        if (NULL == p->array) {
            free(p);
            return PMIX_ERR_NOMEM;
        }
        prarray = (char**)p->array;
strarray = (char**)src->array;
for (n=0; n < src->size; n++) {
prarray[n] = strarray[n];
}
break;
case PMIX_SCOPE:
p->array = (pmix_scope_t*)malloc(src->size * sizeof(pmix_scope_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_scope_t));
break;
case PMIX_DATA_RANGE:
p->array = (pmix_data_range_t*)malloc(src->size * sizeof(pmix_data_range_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_data_range_t));
break;
case PMIX_COMMAND:
p->array = (pmix_cmd_t*)malloc(src->size * sizeof(pmix_cmd_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_cmd_t));
break;
case PMIX_INFO_DIRECTIVES:
p->array = (pmix_info_directives_t*)malloc(src->size * sizeof(pmix_info_directives_t));
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
memcpy(p->array, src->array, src->size * sizeof(pmix_info_directives_t));
break;
case PMIX_PROC_INFO:
PMIX_PROC_INFO_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pi = (pmix_proc_info_t*)p->array;
si = (pmix_proc_info_t*)src->array;
for (n=0; n < src->size; n++) {
memcpy(&pi[n].proc, &si[n].proc, sizeof(pmix_proc_t));
if (NULL != si[n].hostname) {
pi[n].hostname = strdup(si[n].hostname);
} else {
pi[n].hostname = NULL;
}
if (NULL != si[n].executable_name) {
pi[n].executable_name = strdup(si[n].executable_name);
} else {
pi[n].executable_name = NULL;
}
pi[n].pid = si[n].pid;
pi[n].exit_code = si[n].exit_code;
pi[n].state = si[n].state;
}
break;
case PMIX_DATA_ARRAY:
free(p);
return PMIX_ERR_NOT_SUPPORTED; // don't support iterative arrays
case PMIX_QUERY:
PMIX_QUERY_CREATE(p->array, src->size);
if (NULL == p->array) {
free(p);
return PMIX_ERR_NOMEM;
}
pq = (pmix_query_t*)p->array;
sq = (pmix_query_t*)src->array;
for (n=0; n < src->size; n++) {
if (NULL != sq[n].keys) {
pq[n].keys = pmix_argv_copy(sq[n].keys);
}
if (NULL != sq[n].qualifiers && 0 < sq[n].nqual) {
PMIX_INFO_CREATE(pq[n].qualifiers, sq[n].nqual);
if (NULL == pq[n].qualifiers) {
PMIX_INFO_FREE(pq[n].qualifiers, sq[n].nqual);
free(p);
return PMIX_ERR_NOMEM;
}
for (m=0; m < sq[n].nqual; m++) {
PMIX_INFO_XFER(&pq[n].qualifiers[m], &sq[n].qualifiers[m]);
}
pq[n].nqual = sq[n].nqual;
} else {
pq[n].qualifiers = NULL;
pq[n].nqual = 0;
}
}
break;
default:
free(p);
return PMIX_ERR_UNKNOWN_DATA_TYPE;
}
(*dest) = p;
return PMIX_SUCCESS;
}
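/* Illustrative usage sketch (not part of the original file): drives the
 * darray copy above for a simple PMIX_UINT32 payload. Only symbols already
 * used in this file are assumed; richer element types would normally be
 * released through the matching PMIX_*_FREE helpers rather than free(). */
static pmix_status_t example_copy_uint32_darray(void)
{
    pmix_data_array_t src, *copy = NULL;
    uint32_t vals[3] = { 1, 2, 3 };
    pmix_status_t rc;

    src.type = PMIX_UINT32;   /* element type selects the switch branch above */
    src.size = 3;             /* number of elements, not bytes */
    src.array = vals;

    rc = pmix_bfrop_copy_darray(&copy, &src, PMIX_DATA_ARRAY);
    if (PMIX_SUCCESS != rc) {
        return rc;
    }
    /* copy->array now owns an independent 3-element uint32_t buffer */
    free(copy->array);
    free(copy);
    return PMIX_SUCCESS;
}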
pmix_status_t pmix_bfrop_copy_query(pmix_query_t **dest,
pmix_query_t *src,
pmix_data_type_t type)
{
pmix_status_t rc;
*dest = (pmix_query_t*)malloc(sizeof(pmix_query_t));
if (NULL != src->keys) {
(*dest)->keys = pmix_argv_copy(src->keys);
}
(*dest)->nqual = src->nqual;
if (NULL != src->qualifiers) {
if (PMIX_SUCCESS != (rc = pmix_bfrop_copy_info(&((*dest)->qualifiers), src->qualifiers, PMIX_INFO))) {
free(*dest);
return rc;
}
}
return PMIX_SUCCESS;
}
/**** DEPRECATED ****/
pmix_status_t pmix_bfrop_copy_array(pmix_info_array_t **dest,
pmix_info_array_t *src,
pmix_data_type_t type)
{
pmix_info_t *d1, *s1;
*dest = (pmix_info_array_t*)malloc(sizeof(pmix_info_array_t));
(*dest)->size = src->size;
(*dest)->array = (pmix_info_t*)malloc(src->size * sizeof(pmix_info_t));
d1 = (pmix_info_t*)(*dest)->array;
s1 = (pmix_info_t*)src->array;
memcpy(d1, s1, src->size * sizeof(pmix_info_t));
return PMIX_SUCCESS;
}
/*******************/
| 1 | 6,819 | @rhc54 This doesn't look like a comprehensive comparison. For `PMIX_STRING` we seem to actually compare the content of the data while here we only compare meta-information which doesn't ensure that values are the same. | openpmix-openpmix | c |
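A minimal sketch of the content-level check the review comment above is asking for, assuming a pmix_byte_object_t payload; the helper name is illustrative and not part of the PMIx API:

static bool example_bo_content_equal(const pmix_byte_object_t *a, const pmix_byte_object_t *b)
{
    /* matching sizes alone (meta-information) are not enough - compare the bytes */
    if (a->size != b->size) {
        return false;
    }
    if (0 == a->size) {
        return true;
    }
    if (NULL == a->bytes || NULL == b->bytes) {
        return a->bytes == b->bytes;
    }
    return 0 == memcmp(a->bytes, b->bytes, a->size);
}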
@@ -1601,6 +1601,7 @@ void nano::json_handler::bootstrap ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
+ const bool confirmed_frontiers = request.get<bool> ("confirmed_frontiers", false);
boost::system::error_code address_ec;
auto address (boost::asio::ip::address_v6::from_string (address_text, address_ec));
if (!address_ec) | 1 | #include <nano/lib/config.hpp>
#include <nano/lib/json_error_response.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/common.hpp>
#include <nano/node/ipc.hpp>
#include <nano/node/json_handler.hpp>
#include <nano/node/json_payment_observer.hpp>
#include <nano/node/node.hpp>
#include <nano/node/node_rpc_config.hpp>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/thread/thread_time.hpp>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <fstream>
#include <future>
#include <iostream>
#include <thread>
namespace
{
void construct_json (nano::seq_con_info_component * component, boost::property_tree::ptree & parent);
using ipc_json_handler_no_arg_func_map = std::unordered_map<std::string, std::function<void(nano::json_handler *)>>;
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ();
auto ipc_json_handler_no_arg_funcs = create_ipc_json_handler_no_arg_func_map ();
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed);
const char * epoch_as_string (nano::epoch);
}
nano::json_handler::json_handler (nano::node & node_a, nano::node_rpc_config const & node_rpc_config_a, std::string const & body_a, std::function<void(std::string const &)> const & response_a, std::function<void()> stop_callback_a) :
body (body_a),
node (node_a),
response (response_a),
stop_callback (stop_callback_a),
node_rpc_config (node_rpc_config_a)
{
}
void nano::json_handler::process_request (bool unsafe_a)
{
try
{
std::stringstream istream (body);
boost::property_tree::read_json (istream, request);
action = request.get<std::string> ("action");
auto no_arg_func_iter = ipc_json_handler_no_arg_funcs.find (action);
if (no_arg_func_iter != ipc_json_handler_no_arg_funcs.cend ())
{
// First try the map of options with no arguments
no_arg_func_iter->second (this);
}
else
{
// Try the rest of the options
if (action == "wallet_seed")
{
if (unsafe_a || node.network_params.network.is_test_network ())
{
wallet_seed ();
}
else
{
json_error_response (response, "Unsafe RPC not allowed");
}
}
else if (action == "chain")
{
chain ();
}
else if (action == "successors")
{
chain (true);
}
else if (action == "history")
{
request.put ("head", request.get<std::string> ("hash"));
account_history ();
}
else if (action == "knano_from_raw" || action == "krai_from_raw")
{
mnano_from_raw (nano::kxrb_ratio);
}
else if (action == "knano_to_raw" || action == "krai_to_raw")
{
mnano_to_raw (nano::kxrb_ratio);
}
else if (action == "nano_from_raw" || action == "rai_from_raw")
{
mnano_from_raw (nano::xrb_ratio);
}
else if (action == "nano_to_raw" || action == "rai_to_raw")
{
mnano_to_raw (nano::xrb_ratio);
}
else if (action == "mnano_from_raw" || action == "mrai_from_raw")
{
mnano_from_raw ();
}
else if (action == "mnano_to_raw" || action == "mrai_to_raw")
{
mnano_to_raw ();
}
else if (action == "password_valid")
{
password_valid ();
}
else if (action == "wallet_locked")
{
password_valid (true);
}
else
{
json_error_response (response, "Unknown command");
}
}
}
catch (std::runtime_error const &)
{
json_error_response (response, "Unable to parse JSON");
}
catch (...)
{
json_error_response (response, "Internal server error in RPC");
}
}
void nano::json_handler::response_errors ()
{
if (ec || response_l.empty ())
{
boost::property_tree::ptree response_error;
response_error.put ("error", ec ? ec.message () : "Empty response");
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_error);
response (ostream.str ());
}
else
{
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
}
std::shared_ptr<nano::wallet> nano::json_handler::wallet_impl ()
{
if (!ec)
{
std::string wallet_text (request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
auto existing (node.wallets.items.find (wallet));
if (existing != node.wallets.items.end ())
{
return existing->second;
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
else
{
ec = nano::error_common::bad_wallet_number;
}
}
return nullptr;
}
bool nano::json_handler::wallet_locked_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> wallet_a)
{
bool result (false);
if (!ec)
{
if (!wallet_a->store.valid_password (transaction_a))
{
ec = nano::error_common::wallet_locked;
result = true;
}
}
return result;
}
bool nano::json_handler::wallet_account_impl (nano::transaction const & transaction_a, std::shared_ptr<nano::wallet> wallet_a, nano::account const & account_a)
{
bool result (false);
if (!ec)
{
if (wallet_a->store.find (transaction_a, account_a) != wallet_a->store.end ())
{
result = true;
}
else
{
ec = nano::error_common::account_not_found_wallet;
}
}
return result;
}
nano::account nano::json_handler::account_impl (std::string account_text, std::error_code ec_a)
{
nano::account result (0);
if (!ec)
{
if (account_text.empty ())
{
account_text = request.get<std::string> ("account");
}
if (result.decode_account (account_text))
{
ec = ec_a;
}
else if (account_text[3] == '-' || account_text[4] == '-')
{
// nano- and xrb- prefixes are deprecated
response_l.put ("deprecated_account_format", "1");
}
}
return result;
}
nano::account_info nano::json_handler::account_info_impl (nano::transaction const & transaction_a, nano::account const & account_a)
{
nano::account_info result;
if (!ec)
{
if (node.store.account_get (transaction_a, account_a, result))
{
ec = nano::error_common::account_not_found;
node.bootstrap_initiator.bootstrap_lazy (account_a, false, false);
}
}
return result;
}
nano::amount nano::json_handler::amount_impl ()
{
nano::amount result (0);
if (!ec)
{
std::string amount_text (request.get<std::string> ("amount"));
if (result.decode_dec (amount_text))
{
ec = nano::error_common::invalid_amount;
}
}
return result;
}
std::shared_ptr<nano::block> nano::json_handler::block_impl (bool signature_work_required)
{
std::shared_ptr<nano::block> result;
if (!ec)
{
std::string block_text (request.get<std::string> ("block"));
boost::property_tree::ptree block_l;
std::stringstream block_stream (block_text);
boost::property_tree::read_json (block_stream, block_l);
if (!signature_work_required)
{
block_l.put ("signature", "0");
block_l.put ("work", "0");
}
result = nano::deserialize_block_json (block_l);
if (result == nullptr)
{
ec = nano::error_blocks::invalid_block;
}
}
return result;
}
std::shared_ptr<nano::block> nano::json_handler::block_json_impl (bool signature_work_required)
{
std::shared_ptr<nano::block> result;
if (!ec)
{
auto block_l (request.get_child ("block"));
if (!signature_work_required)
{
block_l.put ("signature", "0");
block_l.put ("work", "0");
}
result = nano::deserialize_block_json (block_l);
if (result == nullptr)
{
ec = nano::error_blocks::invalid_block;
}
}
return result;
}
nano::block_hash nano::json_handler::hash_impl (std::string search_text)
{
nano::block_hash result (0);
if (!ec)
{
std::string hash_text (request.get<std::string> (search_text));
if (result.decode_hex (hash_text))
{
ec = nano::error_blocks::invalid_block_hash;
}
}
return result;
}
nano::amount nano::json_handler::threshold_optional_impl ()
{
nano::amount result (0);
boost::optional<std::string> threshold_text (request.get_optional<std::string> ("threshold"));
if (!ec && threshold_text.is_initialized ())
{
if (result.decode_dec (threshold_text.get ()))
{
ec = nano::error_common::bad_threshold;
}
}
return result;
}
uint64_t nano::json_handler::work_optional_impl ()
{
uint64_t result (0);
boost::optional<std::string> work_text (request.get_optional<std::string> ("work"));
if (!ec && work_text.is_initialized ())
{
if (nano::from_string_hex (work_text.get (), result))
{
ec = nano::error_common::bad_work_format;
}
}
return result;
}
uint64_t nano::json_handler::difficulty_optional_impl ()
{
uint64_t difficulty (node.network_params.network.publish_threshold);
boost::optional<std::string> difficulty_text (request.get_optional<std::string> ("difficulty"));
if (!ec && difficulty_text.is_initialized ())
{
if (nano::from_string_hex (difficulty_text.get (), difficulty))
{
ec = nano::error_rpc::bad_difficulty_format;
}
}
return difficulty;
}
double nano::json_handler::multiplier_optional_impl (uint64_t & difficulty)
{
double multiplier (1.);
boost::optional<std::string> multiplier_text (request.get_optional<std::string> ("multiplier"));
if (!ec && multiplier_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<double> (multiplier_text.get (), multiplier);
if (success && multiplier > 0.)
{
difficulty = nano::difficulty::from_multiplier (multiplier, node.network_params.network.publish_threshold);
}
else
{
ec = nano::error_rpc::bad_multiplier_format;
}
}
return multiplier;
}
namespace
{
bool decode_unsigned (std::string const & text, uint64_t & number)
{
bool result;
size_t end;
try
{
number = std::stoull (text, &end);
result = false;
}
catch (std::invalid_argument const &)
{
result = true;
}
catch (std::out_of_range const &)
{
result = true;
}
result = result || end != text.size ();
return result;
}
}
uint64_t nano::json_handler::count_impl ()
{
uint64_t result (0);
if (!ec)
{
std::string count_text (request.get<std::string> ("count"));
if (decode_unsigned (count_text, result) || result == 0)
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::count_optional_impl (uint64_t result)
{
boost::optional<std::string> count_text (request.get_optional<std::string> ("count"));
if (!ec && count_text.is_initialized ())
{
if (decode_unsigned (count_text.get (), result))
{
ec = nano::error_common::invalid_count;
}
}
return result;
}
uint64_t nano::json_handler::offset_optional_impl (uint64_t result)
{
boost::optional<std::string> offset_text (request.get_optional<std::string> ("offset"));
if (!ec && offset_text.is_initialized ())
{
if (decode_unsigned (offset_text.get (), result))
{
ec = nano::error_rpc::invalid_offset;
}
}
return result;
}
void nano::json_handler::account_balance ()
{
auto account (account_impl ());
if (!ec)
{
auto balance (node.balance_pending (account));
response_l.put ("balance", balance.first.convert_to<std::string> ());
response_l.put ("pending", balance.second.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::account_block_count ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("block_count", std::to_string (info.block_count));
}
}
response_errors ();
}
void nano::json_handler::account_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
nano::account new_key;
auto index_text (rpc_l->request.get_optional<std::string> ("index"));
if (index_text.is_initialized ())
{
uint64_t index;
if (decode_unsigned (index_text.get (), index) || index > static_cast<uint64_t> (std::numeric_limits<uint32_t>::max ()))
{
rpc_l->ec = nano::error_common::invalid_index;
}
else
{
new_key = wallet->deterministic_insert (static_cast<uint32_t> (index), generate_work);
}
}
else
{
new_key = wallet->deterministic_insert (generate_work);
}
if (!rpc_l->ec)
{
if (!new_key.is_zero ())
{
rpc_l->response_l.put ("account", new_key.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_get ()
{
std::string key_text (request.get<std::string> ("key"));
nano::public_key pub;
if (!pub.decode_hex (key_text))
{
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_public_key;
}
response_errors ();
}
void nano::json_handler::account_info ()
{
auto account (account_impl ());
if (!ec)
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
uint64_t confirmation_height;
if (node.store.confirmation_height_get (transaction, account, confirmation_height))
{
ec = nano::error_common::account_not_found;
}
if (!ec)
{
response_l.put ("frontier", info.head.to_string ());
response_l.put ("open_block", info.open_block.to_string ());
response_l.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
response_l.put ("balance", balance);
response_l.put ("modified_timestamp", std::to_string (info.modified));
response_l.put ("block_count", std::to_string (info.block_count));
response_l.put ("account_version", epoch_as_string (info.epoch ()));
response_l.put ("confirmation_height", std::to_string (confirmation_height));
if (representative)
{
response_l.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_l.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
response_l.put ("pending", account_pending.convert_to<std::string> ());
}
}
}
response_errors ();
}
void nano::json_handler::account_key ()
{
auto account (account_impl ());
if (!ec)
{
response_l.put ("key", account.to_string ());
}
response_errors ();
}
void nano::json_handler::account_list ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), j (wallet->store.end ()); i != j; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", nano::account (i->first).to_account ());
accounts.push_back (std::make_pair ("", entry));
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::account_move ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string source_text (rpc_l->request.get<std::string> ("source"));
auto accounts_text (rpc_l->request.get_child ("accounts"));
nano::wallet_id source;
if (!source.decode_hex (source_text))
{
auto existing (rpc_l->node.wallets.items.find (source));
if (existing != rpc_l->node.wallets.items.end ())
{
auto source (existing->second);
std::vector<nano::public_key> accounts;
for (auto i (accounts_text.begin ()), n (accounts_text.end ()); i != n; ++i)
{
auto account (rpc_l->account_impl (i->second.get<std::string> ("")));
accounts.push_back (account);
}
auto transaction (rpc_l->node.wallets.tx_begin_write ());
auto error (wallet->store.move (transaction, source->store, accounts));
rpc_l->response_l.put ("moved", error ? "0" : "1");
}
else
{
rpc_l->ec = nano::error_rpc::source_not_found;
}
}
else
{
rpc_l->ec = nano::error_rpc::bad_source;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_remove ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.erase (transaction, account);
rpc_l->response_l.put ("removed", "1");
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::account_representative ()
{
auto account (account_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto info (account_info_impl (transaction, account));
if (!ec)
{
response_l.put ("representative", info.representative.to_account ());
}
}
response_errors ();
}
void nano::json_handler::account_representative_set ()
{
auto rpc_l (shared_from_this ());
// clang-format off
node.worker.push_task ([ rpc_l, work_generation_enabled = node.work_generation_enabled () ]() {
// clang-format on
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec && work)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
auto block_transaction (rpc_l->node.store.tx_begin_read ());
auto info (rpc_l->account_info_impl (block_transaction, account));
if (!rpc_l->ec)
{
if (nano::work_validate (info.head, work))
{
rpc_l->ec = nano::error_common::invalid_work;
}
}
}
}
else if (!rpc_l->ec) // work == 0
{
if (!work_generation_enabled)
{
rpc_l->ec = nano::error_common::disabled_work_generation;
}
}
if (!rpc_l->ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (rpc_l->response);
auto response_data (std::make_shared<boost::property_tree::ptree> (rpc_l->response_l));
// clang-format off
wallet->change_async(account, representative, [response_a, response_data](std::shared_ptr<nano::block> block) {
if (block != nullptr)
{
response_data->put("block", block->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, *response_data);
response_a(ostream.str());
}
else
{
json_error_response(response_a, "Error generating block");
}
},
work, generate_work);
// clang-format on
}
}
// Because of change_async
if (rpc_l->ec)
{
rpc_l->response_errors ();
}
});
}
void nano::json_handler::account_weight ()
{
auto account (account_impl ());
if (!ec)
{
auto balance (node.weight (account));
response_l.put ("weight", balance.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::accounts_balances ()
{
boost::property_tree::ptree balances;
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree entry;
auto balance (node.balance_pending (account));
entry.put ("balance", balance.first.convert_to<std::string> ());
entry.put ("pending", balance.second.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
response_errors ();
}
void nano::json_handler::accounts_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto count (rpc_l->count_impl ());
if (!rpc_l->ec)
{
const bool generate_work = rpc_l->request.get<bool> ("work", false);
boost::property_tree::ptree accounts;
for (auto i (0); accounts.size () < count; ++i)
{
nano::account new_key (wallet->deterministic_insert (generate_work));
if (!new_key.is_zero ())
{
boost::property_tree::ptree entry;
entry.put ("", new_key.to_account ());
accounts.push_back (std::make_pair ("", entry));
}
}
rpc_l->response_l.add_child ("accounts", accounts);
}
rpc_l->response_errors ();
});
}
void nano::json_handler::accounts_frontiers ()
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
auto latest (node.ledger.latest (transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
}
response_l.add_child ("frontiers", frontiers);
response_errors ();
}
void nano::json_handler::accounts_pending ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !sorting); // if simple, response is a list of hashes for each account
boost::property_tree::ptree pending;
auto transaction (node.store.tx_begin_read ());
for (auto & accounts : request.get_child ("accounts"))
{
auto account (account_impl (accounts.second.data ()));
if (!ec)
{
boost::property_tree::ptree peers_l;
for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
pending_tree.put ("source", info.source.to_account ());
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (sorting && !simple)
{
if (source)
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("amount") > child2.second.template get<nano::uint128_t> ("amount");
});
}
else
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("") > child2.second.template get<nano::uint128_t> ("");
});
}
}
pending.add_child (account.to_account (), peers_l);
}
}
response_l.add_child ("blocks", pending);
response_errors ();
}
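// Illustrative response shapes for the handler above (account, hash and
// amount values are placeholders, not real data):
//   simple (no threshold, source or sorting):
//     "blocks": { "<account>": [ "<hash>", ... ] }
//   with "source" requested:
//     "blocks": { "<account>": { "<hash>": { "amount": "<raw>", "source": "<account>" } } }
//   otherwise:
//     "blocks": { "<account>": { "<hash>": "<raw amount>" } }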
void nano::json_handler::active_difficulty ()
{
auto include_trend (request.get<bool> ("include_trend", false));
response_l.put ("network_minimum", nano::to_string_hex (node.network_params.network.publish_threshold));
auto difficulty_active = node.active.active_difficulty ();
response_l.put ("network_current", nano::to_string_hex (difficulty_active));
auto multiplier = nano::difficulty::to_multiplier (difficulty_active, node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (multiplier));
if (include_trend)
{
boost::property_tree::ptree trend_entry_l;
auto trend_l (node.active.difficulty_trend ());
for (auto multiplier_l : trend_l)
{
boost::property_tree::ptree entry;
entry.put ("", nano::to_string (multiplier_l));
trend_entry_l.push_back (std::make_pair ("", entry));
}
response_l.add_child ("difficulty_trend", trend_entry_l);
}
response_errors ();
}
void nano::json_handler::available_supply ()
{
auto genesis_balance (node.balance (node.network_params.ledger.genesis_account)); // Cold storage genesis
auto landing_balance (node.balance (nano::account ("059F68AAB29DE0D3A27443625C7EA9CDDB6517A8B76FE37727EF6A4D76832AD5"))); // Active unavailable account
auto faucet_balance (node.balance (nano::account ("8E319CE6F3025E5B2DF66DA7AB1467FE48F1679C13DD43BFDB29FA2E9FC40D3B"))); // Faucet account
auto burned_balance ((node.balance_pending (nano::account (0))).second); // Burning 0 account
auto available (node.network_params.ledger.genesis_amount - genesis_balance - landing_balance - faucet_balance - burned_balance);
response_l.put ("available", available.convert_to<std::string> ());
response_errors ();
}
void state_subtype (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::uint128_t const & balance_a, boost::property_tree::ptree & tree_a)
{
// Subtype check
auto previous_balance (node_a.ledger.balance (transaction_a, block_a->previous ()));
if (balance_a < previous_balance)
{
tree_a.put ("subtype", "send");
}
else
{
if (block_a->link ().is_zero ())
{
tree_a.put ("subtype", "change");
}
else if (balance_a == previous_balance && node_a.ledger.is_epoch_link (block_a->link ()))
{
tree_a.put ("subtype", "epoch");
}
else
{
tree_a.put ("subtype", "receive");
}
}
}
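// Summary of the subtype decision implemented above:
//   balance < previous balance                        -> "send"
//   link is zero                                      -> "change"
//   balance == previous balance and link is an epoch  -> "epoch"
//   anything else                                     -> "receive"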
void nano::json_handler::block_info ()
{
auto hash (hash_impl ());
if (!ec)
{
nano::block_sideband sideband;
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash, &sideband));
if (block != nullptr)
{
nano::account account (block->account ().is_zero () ? sideband.account : block->account ());
response_l.put ("block_account", account.to_account ());
auto amount (node.ledger.amount (transaction, hash));
response_l.put ("amount", amount.convert_to<std::string> ());
auto balance (node.ledger.balance (transaction, hash));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("height", std::to_string (sideband.height));
response_l.put ("local_timestamp", std::to_string (sideband.timestamp));
auto confirmed (node.ledger.block_confirmed (transaction, hash));
response_l.put ("confirmed", confirmed);
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
state_subtype (transaction, node, block, balance, response_l);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_confirm ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block_l (node.store.block_get (transaction, hash));
if (block_l != nullptr)
{
if (!node.ledger.block_confirmed (transaction, hash))
{
// Start new confirmation for unconfirmed block
node.block_confirm (std::move (block_l));
}
else
{
// Add record in confirmation history for confirmed block
nano::election_status status{ block_l, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, nano::election_status_type::active_confirmation_height };
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
node.active.confirmed.push_back (status);
if (node.active.confirmed.size () > node.config.confirmation_history_size)
{
node.active.confirmed.pop_front ();
}
}
// Trigger callback for confirmed block
node.block_arrival.add (hash);
auto account (node.ledger.account (transaction, hash));
auto amount (node.ledger.amount (transaction, hash));
bool is_state_send (false);
if (auto state = dynamic_cast<nano::state_block *> (block_l.get ()))
{
is_state_send = node.ledger.is_send (transaction, *state);
}
node.observers.blocks.notify (status, account, amount, is_state_send);
}
response_l.put ("started", "1");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::blocks ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
blocks.add_child (hash_text, block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
blocks.put (hash_text, contents);
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
response_l.add_child ("blocks", blocks);
response_errors ();
}
void nano::json_handler::blocks_info ()
{
const bool pending = request.get<bool> ("pending", false);
const bool source = request.get<bool> ("source", false);
const bool json_block_l = request.get<bool> ("json_block", false);
const bool include_not_found = request.get<bool> ("include_not_found", false);
boost::property_tree::ptree blocks;
boost::property_tree::ptree blocks_not_found;
auto transaction (node.store.tx_begin_read ());
for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
{
if (!ec)
{
std::string hash_text = hashes.second.data ();
nano::block_hash hash;
if (!hash.decode_hex (hash_text))
{
nano::block_sideband sideband;
auto block (node.store.block_get (transaction, hash, &sideband));
if (block != nullptr)
{
boost::property_tree::ptree entry;
nano::account account (block->account ().is_zero () ? sideband.account : block->account ());
entry.put ("block_account", account.to_account ());
auto amount (node.ledger.amount (transaction, hash));
entry.put ("amount", amount.convert_to<std::string> ());
auto balance (node.ledger.balance (transaction, hash));
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("height", std::to_string (sideband.height));
entry.put ("local_timestamp", std::to_string (sideband.timestamp));
auto confirmed (node.ledger.block_confirmed (transaction, hash));
entry.put ("confirmed", confirmed);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
entry.put ("contents", contents);
}
if (block->type () == nano::block_type::state)
{
state_subtype (transaction, node, block, balance, entry);
}
if (pending)
{
bool exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
}
entry.put ("pending", exists ? "1" : "0");
}
if (source)
{
nano::block_hash source_hash (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block_get (transaction, source_hash));
if (block_a != nullptr)
{
auto source_account (node.ledger.account (transaction, source_hash));
entry.put ("source_account", source_account.to_account ());
}
else
{
entry.put ("source_account", "0");
}
}
blocks.push_back (std::make_pair (hash_text, entry));
}
else if (include_not_found)
{
boost::property_tree::ptree entry;
entry.put ("", hash_text);
blocks_not_found.push_back (std::make_pair ("", entry));
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
}
if (!ec)
{
response_l.add_child ("blocks", blocks);
if (include_not_found)
{
response_l.add_child ("blocks_not_found", blocks_not_found);
}
}
response_errors ();
}
void nano::json_handler::block_account ()
{
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block_exists (transaction, hash))
{
auto account (node.ledger.account (transaction, hash));
response_l.put ("account", account.to_account ());
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::block_count ()
{
auto transaction (node.store.tx_begin_read ());
response_l.put ("count", std::to_string (node.store.block_count (transaction).sum ()));
response_l.put ("unchecked", std::to_string (node.store.unchecked_count (transaction)));
response_l.put ("cemented", std::to_string (node.ledger.cemented_count));
response_errors ();
}
void nano::json_handler::block_count_type ()
{
auto transaction (node.store.tx_begin_read ());
nano::block_counts count (node.store.block_count (transaction));
response_l.put ("send", std::to_string (count.send));
response_l.put ("receive", std::to_string (count.receive));
response_l.put ("open", std::to_string (count.open));
response_l.put ("change", std::to_string (count.change));
response_l.put ("state", std::to_string (count.state));
response_errors ();
}
void nano::json_handler::block_create ()
{
if (!ec)
{
std::string type (request.get<std::string> ("type"));
nano::wallet_id wallet (0);
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (wallet_text.is_initialized ())
{
if (wallet.decode_hex (wallet_text.get ()))
{
ec = nano::error_common::bad_wallet_number;
}
}
nano::account account (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (!ec && account_text.is_initialized ())
{
account = account_impl (account_text.get ());
}
nano::account representative (0);
boost::optional<std::string> representative_text (request.get_optional<std::string> ("representative"));
if (!ec && representative_text.is_initialized ())
{
representative = account_impl (representative_text.get (), nano::error_rpc::bad_representative_number);
}
nano::account destination (0);
boost::optional<std::string> destination_text (request.get_optional<std::string> ("destination"));
if (!ec && destination_text.is_initialized ())
{
destination = account_impl (destination_text.get (), nano::error_rpc::bad_destination);
}
nano::block_hash source (0);
boost::optional<std::string> source_text (request.get_optional<std::string> ("source"));
if (!ec && source_text.is_initialized ())
{
if (source.decode_hex (source_text.get ()))
{
ec = nano::error_rpc::bad_source;
}
}
nano::amount amount (0);
boost::optional<std::string> amount_text (request.get_optional<std::string> ("amount"));
if (!ec && amount_text.is_initialized ())
{
if (amount.decode_dec (amount_text.get ()))
{
ec = nano::error_common::invalid_amount;
}
}
auto work (work_optional_impl ());
nano::raw_key prv;
prv.data.clear ();
nano::block_hash previous (0);
nano::amount balance (0);
if (work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec && wallet != 0 && account != 0)
{
auto existing (node.wallets.items.find (wallet));
if (existing != node.wallets.items.end ())
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, existing->second);
wallet_account_impl (transaction, existing->second, account);
if (!ec)
{
existing->second->store.fetch (transaction, account, prv);
previous = node.ledger.latest (block_transaction, account);
balance = node.ledger.account_balance (block_transaction, account);
}
}
else
{
ec = nano::error_common::wallet_not_found;
}
}
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (!ec && key_text.is_initialized ())
{
if (prv.data.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
boost::optional<std::string> previous_text (request.get_optional<std::string> ("previous"));
if (!ec && previous_text.is_initialized ())
{
if (previous.decode_hex (previous_text.get ()))
{
ec = nano::error_rpc::bad_previous;
}
}
boost::optional<std::string> balance_text (request.get_optional<std::string> ("balance"));
if (!ec && balance_text.is_initialized ())
{
if (balance.decode_dec (balance_text.get ()))
{
ec = nano::error_rpc::invalid_balance;
}
}
nano::link link (0);
boost::optional<std::string> link_text (request.get_optional<std::string> ("link"));
if (!ec && link_text.is_initialized ())
{
if (link.decode_account (link_text.get ()))
{
if (link.decode_hex (link_text.get ()))
{
ec = nano::error_rpc::bad_link;
}
}
}
else
{
// Retrieve link from source or destination
if (source.is_zero ())
{
link = destination;
}
else
{
link = source;
}
}
if (!ec)
{
if (prv.data != 0)
{
nano::account pub (nano::pub_key (prv.as_private_key ()));
// Fetching account balance & previous for send blocks (if aren't given directly)
if (!previous_text.is_initialized () && !balance_text.is_initialized ())
{
auto transaction (node.store.tx_begin_read ());
previous = node.ledger.latest (transaction, pub);
balance = node.ledger.account_balance (transaction, pub);
}
// Double check current balance if previous block is specified
else if (previous_text.is_initialized () && balance_text.is_initialized () && type == "send")
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block_exists (transaction, previous) && node.store.block_balance (transaction, previous) != balance.number ())
{
ec = nano::error_rpc::block_create_balance_mismatch;
}
}
// Check for incorrect account key
if (!ec && account_text.is_initialized ())
{
if (account != pub)
{
ec = nano::error_rpc::block_create_public_key_mismatch;
}
}
if (type == "state")
{
if (previous_text.is_initialized () && !representative.is_zero () && (!link.is_zero () || link_text.is_initialized ()))
{
if (work == 0)
{
nano::root root;
if (previous.is_zero ())
{
root = pub;
}
else
{
root = previous;
}
auto opt_work_l (node.work_generate_blocking (root, nano::account (pub)));
if (opt_work_l.is_initialized ())
{
work = *opt_work_l;
}
else
{
ec = nano::error_common::failure_work_generation;
}
}
if (!ec)
{
nano::state_block state (pub, previous, representative, balance, link, prv, pub, work);
response_l.put ("hash", state.hash ().to_string ());
bool json_block_l = request.get<bool> ("json_block", false);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
state.serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
state.serialize_json (contents);
response_l.put ("block", contents);
}
}
}
else
{
ec = nano::error_rpc::block_create_requirements_state;
}
}
else if (type == "open")
{
if (representative != 0 && source != 0)
{
if (work == 0)
{
auto opt_work_l (node.work_generate_blocking (pub, nano::account (pub)));
if (opt_work_l.is_initialized ())
{
work = *opt_work_l;
}
else
{
ec = nano::error_common::failure_work_generation;
}
}
if (!ec)
{
nano::open_block open (source, representative, pub, prv, pub, work);
response_l.put ("hash", open.hash ().to_string ());
std::string contents;
open.serialize_json (contents);
response_l.put ("block", contents);
}
}
else
{
ec = nano::error_rpc::block_create_requirements_open;
}
}
else if (type == "receive")
{
if (source != 0 && previous != 0)
{
if (work == 0)
{
auto opt_work_l (node.work_generate_blocking (previous, nano::account (pub)));
if (opt_work_l.is_initialized ())
{
work = *opt_work_l;
}
else
{
ec = nano::error_common::failure_work_generation;
}
}
if (!ec)
{
nano::receive_block receive (previous, source, prv, pub, work);
response_l.put ("hash", receive.hash ().to_string ());
std::string contents;
receive.serialize_json (contents);
response_l.put ("block", contents);
}
}
else
{
ec = nano::error_rpc::block_create_requirements_receive;
}
}
else if (type == "change")
{
if (representative != 0 && previous != 0)
{
if (work == 0)
{
auto opt_work_l (node.work_generate_blocking (previous, nano::account (pub)));
if (opt_work_l.is_initialized ())
{
work = *opt_work_l;
}
else
{
ec = nano::error_common::failure_work_generation;
}
}
if (!ec)
{
nano::change_block change (previous, representative, prv, pub, work);
response_l.put ("hash", change.hash ().to_string ());
std::string contents;
change.serialize_json (contents);
response_l.put ("block", contents);
}
}
else
{
ec = nano::error_rpc::block_create_requirements_change;
}
}
else if (type == "send")
{
if (destination != 0 && previous != 0 && balance != 0 && amount != 0)
{
if (balance.number () >= amount.number ())
{
if (work == 0)
{
auto opt_work_l (node.work_generate_blocking (previous, nano::account (pub)));
if (opt_work_l.is_initialized ())
{
work = *opt_work_l;
}
else
{
ec = nano::error_common::failure_work_generation;
}
}
if (!ec)
{
nano::send_block send (previous, destination, balance.number () - amount.number (), prv, pub, work);
response_l.put ("hash", send.hash ().to_string ());
std::string contents;
send.serialize_json (contents);
response_l.put ("block", contents);
}
}
else
{
ec = nano::error_common::insufficient_balance;
}
}
else
{
ec = nano::error_rpc::block_create_requirements_send;
}
}
else
{
ec = nano::error_blocks::invalid_type;
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
}
response_errors ();
}
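/*
 * Computes and returns the hash of the block supplied in the request without processing it;
 * the "json_block" option selects whether the block is read as a JSON subtree or a string.
 */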
void nano::json_handler::block_hash ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
std::shared_ptr<nano::block> block;
if (json_block_l)
{
block = block_json_impl (true);
}
else
{
block = block_impl (true);
}
if (!ec)
{
response_l.put ("hash", block->hash ().to_string ());
}
response_errors ();
}
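/*
 * Initiates legacy bootstrap against a single peer given by "address" (IPv6 or v4-mapped) and "port";
 * returns an error if legacy bootstrap is disabled via node flags.
 * Illustrative request body (the surrounding action/envelope fields are assumed, not shown here):
 * { "address": "::ffff:192.0.2.1", "port": "7075" }
 */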
void nano::json_handler::bootstrap ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
boost::system::error_code address_ec;
auto address (boost::asio::ip::address_v6::from_string (address_text, address_ec));
if (!address_ec)
{
uint16_t port;
if (!nano::parse_port (port_text, port))
{
if (!node.flags.disable_legacy_bootstrap)
{
node.bootstrap_initiator.bootstrap (nano::endpoint (address, port));
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
}
else
{
ec = nano::error_common::invalid_port;
}
}
else
{
ec = nano::error_common::invalid_ip_address;
}
response_errors ();
}
void nano::json_handler::bootstrap_any ()
{
if (!node.flags.disable_legacy_bootstrap)
{
node.bootstrap_initiator.bootstrap ();
response_l.put ("success", "");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_legacy;
}
response_errors ();
}
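/*
 * Starts lazy bootstrap from the block given by "hash"; the optional "force" flag is forwarded
 * to the bootstrap initiator. Returns an error if lazy bootstrap is disabled via node flags.
 */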
void nano::json_handler::bootstrap_lazy ()
{
auto hash (hash_impl ());
const bool force = request.get<bool> ("force", false);
if (!ec)
{
if (!node.flags.disable_lazy_bootstrap)
{
node.bootstrap_initiator.bootstrap_lazy (hash, force);
response_l.put ("started", "1");
}
else
{
ec = nano::error_rpc::disabled_bootstrap_lazy;
}
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::bootstrap_status ()
{
auto attempt (node.bootstrap_initiator.current_attempt ());
if (attempt != nullptr)
{
response_l.put ("clients", std::to_string (attempt->clients.size ()));
response_l.put ("pulls", std::to_string (attempt->pulls.size ()));
response_l.put ("pulling", std::to_string (attempt->pulling));
response_l.put ("connections", std::to_string (attempt->connections));
response_l.put ("idle", std::to_string (attempt->idle.size ()));
response_l.put ("target_connections", std::to_string (attempt->target_connections (attempt->pulls.size ())));
response_l.put ("total_blocks", std::to_string (attempt->total_blocks));
response_l.put ("runs_count", std::to_string (attempt->runs_count));
std::string mode_text;
if (attempt->mode == nano::bootstrap_mode::legacy)
{
mode_text = "legacy";
}
else if (attempt->mode == nano::bootstrap_mode::lazy)
{
mode_text = "lazy";
}
else if (attempt->mode == nano::bootstrap_mode::wallet_lazy)
{
mode_text = "wallet_lazy";
}
response_l.put ("mode", mode_text);
response_l.put ("lazy_blocks", std::to_string (attempt->lazy_blocks.size ()));
response_l.put ("lazy_state_backlog", std::to_string (attempt->lazy_state_backlog.size ()));
response_l.put ("lazy_balances", std::to_string (attempt->lazy_balances.size ()));
response_l.put ("lazy_destinations", std::to_string (attempt->lazy_destinations.size ()));
response_l.put ("lazy_undefined_links", std::to_string (attempt->lazy_undefined_links.size ()));
response_l.put ("lazy_pulls", std::to_string (attempt->lazy_pulls.size ()));
response_l.put ("lazy_keys", std::to_string (attempt->lazy_keys.size ()));
if (!attempt->lazy_keys.empty ())
{
response_l.put ("lazy_key_1", (*(attempt->lazy_keys.begin ())).to_string ());
}
response_l.put ("duration", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt->attempt_start).count ());
}
else
{
response_l.put ("active", "0");
}
response_errors ();
}
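/*
 * Walks the account chain starting at "block", following previous links or successors depending on
 * the successors flag as flipped by the "reverse" option. "count" limits and "offset" skips the
 * returned hashes.
 */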
void nano::json_handler::chain (bool successors)
{
successors = successors != request.get<bool> ("reverse", false);
auto hash (hash_impl ("block"));
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
while (!hash.is_zero () && blocks.size () < count)
{
auto block_l (node.store.block_get (transaction, hash));
if (block_l != nullptr)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
hash = successors ? node.store.block_successor (transaction, hash) : block_l->previous ();
}
else
{
hash.clear ();
}
}
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
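/*
 * Lists the roots of currently active elections that are neither confirmed nor stopped; the optional
 * "announcements" parameter filters by minimum confirmation request count.
 */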
void nano::json_handler::confirmation_active ()
{
uint64_t announcements (0);
boost::optional<std::string> announcements_text (request.get_optional<std::string> ("announcements"));
if (announcements_text.is_initialized ())
{
announcements = strtoul (announcements_text.get ().c_str (), NULL, 10);
}
boost::property_tree::ptree elections;
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
for (auto i (node.active.roots.begin ()), n (node.active.roots.end ()); i != n; ++i)
{
if (i->election->confirmation_request_count >= announcements && !i->election->confirmed && !i->election->stopped)
{
boost::property_tree::ptree entry;
entry.put ("", i->root.to_string ());
elections.push_back (std::make_pair ("", entry));
}
}
}
response_l.add_child ("confirmations", elections);
response_errors ();
}
void nano::json_handler::confirmation_height_currently_processing ()
{
auto hash = node.pending_confirmation_height.current ();
if (!hash.is_zero ())
{
response_l.put ("hash", node.pending_confirmation_height.current ().to_string ());
}
else
{
ec = nano::error_rpc::confirmation_height_not_processing;
}
response_errors ();
}
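/*
 * Reports recently confirmed elections, optionally filtered by winner "hash", together with
 * simple duration statistics (count and average election duration).
 */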
void nano::json_handler::confirmation_history ()
{
boost::property_tree::ptree elections;
boost::property_tree::ptree confirmation_stats;
std::chrono::milliseconds running_total (0);
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
if (!ec)
{
auto confirmed (node.active.list_confirmed ());
for (auto i (confirmed.begin ()), n (confirmed.end ()); i != n; ++i)
{
if (hash.is_zero () || i->winner->hash () == hash)
{
boost::property_tree::ptree election;
election.put ("hash", i->winner->hash ().to_string ());
election.put ("duration", i->election_duration.count ());
election.put ("time", i->election_end.count ());
election.put ("tally", i->tally.to_string_dec ());
election.put ("request_count", i->confirmation_request_count);
elections.push_back (std::make_pair ("", election));
}
running_total += i->election_duration;
}
}
confirmation_stats.put ("count", elections.size ());
if (elections.size () >= 1)
{
confirmation_stats.put ("average", (running_total.count ()) / elections.size ());
}
response_l.add_child ("confirmation_stats", confirmation_stats);
response_l.add_child ("confirmations", elections);
response_errors ();
}
void nano::json_handler::confirmation_info ()
{
const bool representatives = request.get<bool> ("representatives", false);
const bool contents = request.get<bool> ("contents", true);
const bool json_block_l = request.get<bool> ("json_block", false);
std::string root_text (request.get<std::string> ("root"));
nano::qualified_root root;
if (!root.decode_hex (root_text))
{
nano::lock_guard<std::mutex> lock (node.active.mutex);
auto conflict_info (node.active.roots.find (root));
if (conflict_info != node.active.roots.end ())
{
response_l.put ("announcements", std::to_string (conflict_info->election->confirmation_request_count));
auto election (conflict_info->election);
nano::uint128_t total (0);
response_l.put ("last_winner", election->status.winner->hash ().to_string ());
auto tally_l (election->tally ());
boost::property_tree::ptree blocks;
for (auto i (tally_l.begin ()), n (tally_l.end ()); i != n; ++i)
{
boost::property_tree::ptree entry;
auto const & tally (i->first);
entry.put ("tally", tally.convert_to<std::string> ());
total += tally;
if (contents)
{
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
i->second->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents_l;
i->second->serialize_json (contents_l);
entry.put ("contents", contents_l);
}
}
if (representatives)
{
std::multimap<nano::uint128_t, nano::account, std::greater<nano::uint128_t>> representatives_l;
for (auto ii (election->last_votes.begin ()), nn (election->last_votes.end ()); ii != nn; ++ii)
{
if (i->second->hash () == ii->second.hash)
{
nano::account const & representative (ii->first);
auto amount (node.ledger.rep_weights.representation_get (representative));
representatives_l.emplace (std::move (amount), representative);
}
}
boost::property_tree::ptree representatives_list;
for (auto ii (representatives_l.begin ()), nn (representatives_l.end ()); ii != nn; ++ii)
{
representatives_list.put (ii->second.to_account (), ii->first.convert_to<std::string> ());
}
entry.add_child ("representatives", representatives_list);
}
blocks.add_child ((i->second->hash ()).to_string (), entry);
}
response_l.put ("total_tally", total.convert_to<std::string> ());
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_rpc::confirmation_not_found;
}
}
else
{
ec = nano::error_rpc::invalid_root;
}
response_errors ();
}
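/*
 * Reports the voting quorum parameters (delta, online weight quorum and minimum, online and peer
 * stake totals); "peer_details" additionally lists each representative peer with its weight.
 */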
void nano::json_handler::confirmation_quorum ()
{
response_l.put ("quorum_delta", node.delta ().convert_to<std::string> ());
response_l.put ("online_weight_quorum_percent", std::to_string (node.config.online_weight_quorum));
response_l.put ("online_weight_minimum", node.config.online_weight_minimum.to_string_dec ());
response_l.put ("online_stake_total", node.online_reps.online_stake ().convert_to<std::string> ());
response_l.put ("peers_stake_total", node.rep_crawler.total_weight ().convert_to<std::string> ());
response_l.put ("peers_stake_required", std::max (node.config.online_weight_minimum.number (), node.delta ()).convert_to<std::string> ());
if (request.get<bool> ("peer_details", false))
{
boost::property_tree::ptree peers;
for (auto & peer : node.rep_crawler.representatives ())
{
boost::property_tree::ptree peer_node;
peer_node.put ("account", peer.account.to_account ());
peer_node.put ("ip", peer.channel->to_string ());
peer_node.put ("weight", peer.weight.to_string_dec ());
peers.push_back (std::make_pair ("", peer_node));
}
response_l.add_child ("peers", peers);
}
response_errors ();
}
void nano::json_handler::database_txn_tracker ()
{
boost::property_tree::ptree json;
if (node.config.diagnostics_config.txn_tracking.enable)
{
unsigned min_read_time_milliseconds = 0;
boost::optional<std::string> min_read_time_text (request.get_optional<std::string> ("min_read_time"));
if (min_read_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_read_time_text, min_read_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
unsigned min_write_time_milliseconds = 0;
if (!ec)
{
boost::optional<std::string> min_write_time_text (request.get_optional<std::string> ("min_write_time"));
if (min_write_time_text.is_initialized ())
{
auto success = boost::conversion::try_lexical_convert<unsigned> (*min_write_time_text, min_write_time_milliseconds);
if (!success)
{
ec = nano::error_common::invalid_amount;
}
}
}
if (!ec)
{
node.store.serialize_mdb_tracker (json, std::chrono::milliseconds (min_read_time_milliseconds), std::chrono::milliseconds (min_write_time_milliseconds));
response_l.put_child ("txn_tracking", json);
}
}
else
{
ec = nano::error_common::tracking_not_enabled;
}
response_errors ();
}
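/*
 * Lists every account whose representative is the given "account", together with its balance.
 * This walks the entire accounts table, so it can be slow on large ledgers.
 */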
void nano::json_handler::delegators ()
{
auto account (account_impl ());
if (!ec)
{
boost::property_tree::ptree delegators;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == account)
{
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
nano::account const & account (i->first);
delegators.put (account.to_account (), balance);
}
}
response_l.add_child ("delegators", delegators);
}
response_errors ();
}
void nano::json_handler::delegators_count ()
{
auto account (account_impl ());
if (!ec)
{
uint64_t count (0);
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
if (info.representative == account)
{
++count;
}
}
response_l.put ("count", std::to_string (count));
}
response_errors ();
}
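/*
 * Derives the private key, public key and account for the given "seed" and "index" using
 * deterministic key derivation, without touching any wallet.
 */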
void nano::json_handler::deterministic_key ()
{
std::string seed_text (request.get<std::string> ("seed"));
std::string index_text (request.get<std::string> ("index"));
nano::raw_key seed;
if (!seed.data.decode_hex (seed_text))
{
try
{
uint32_t index (std::stoul (index_text));
nano::private_key prv = nano::deterministic_key (seed, index);
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
catch (std::logic_error const &)
{
ec = nano::error_common::invalid_index;
}
}
else
{
ec = nano::error_common::bad_seed;
}
response_errors ();
}
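/*
 * Background task started by the epoch_upgrade RPC: publishes epoch state blocks, first for
 * existing accounts with an older epoch and then for unopened accounts that only have pending
 * blocks, in batches of up to upgrade_batch_size per pass until nothing is left to upgrade.
 */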
void epoch_upgrader (std::shared_ptr<nano::node> node_a, nano::private_key const & prv_a, nano::epoch epoch_a, uint64_t count_limit)
{
uint64_t const upgrade_batch_size = 1000;
nano::block_builder builder;
auto link (node_a->ledger.epoch_link (epoch_a));
nano::raw_key raw_key;
raw_key.data = prv_a;
auto signer (nano::pub_key (prv_a));
assert (signer == node_a->ledger.epoch_signer (link));
class account_upgrade_item final
{
public:
nano::account account{ 0 };
uint64_t modified{ 0 };
};
class account_tag
{
};
class modified_tag
{
};
boost::multi_index_container<
account_upgrade_item,
boost::multi_index::indexed_by<
boost::multi_index::ordered_non_unique<boost::multi_index::tag<modified_tag>, boost::multi_index::member<account_upgrade_item, uint64_t, &account_upgrade_item::modified>, std::greater<uint64_t>>,
boost::multi_index::hashed_unique<boost::multi_index::tag<account_tag>, boost::multi_index::member<account_upgrade_item, nano::account, &account_upgrade_item::account>>>>
accounts_list;
bool finished_upgrade (false);
while (!finished_upgrade && !node_a->stopped)
{
bool finished_accounts (false);
uint64_t total_upgraded_accounts (0);
while (!finished_accounts && count_limit != 0 && !node_a->stopped)
{
{
auto transaction (node_a->store.tx_begin_read ());
// Collect accounts to upgrade
for (auto i (node_a->store.latest_begin (transaction)), n (node_a->store.latest_end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info const & info (i->second);
if (info.epoch () < epoch_a)
{
release_assert (nano::epochs::is_sequential (info.epoch (), epoch_a));
accounts_list.insert (account_upgrade_item{ account, info.modified });
}
}
}
/* Upgrade accounts
Repeat until no accounts with the previous epoch remain in the latest table */
uint64_t upgraded_accounts (0);
for (auto i (accounts_list.get<modified_tag> ().begin ()), n (accounts_list.get<modified_tag> ().end ()); i != n && upgraded_accounts < upgrade_batch_size && upgraded_accounts < count_limit && !node_a->stopped; ++i)
{
auto transaction (node_a->store.tx_begin_read ());
nano::account_info info;
if (!node_a->store.account_get (transaction, i->account, info) && info.epoch () < epoch_a)
{
auto epoch = builder.state ()
.account (i->account)
.previous (info.head)
.representative (info.representative)
.balance (info.balance)
.link (link)
.sign (raw_key, signer)
.work (node_a->work_generate_blocking (info.head).value_or (0))
.build ();
bool valid_signature (!nano::validate_message (signer, epoch->hash (), epoch->block_signature ()));
bool valid_work (!nano::work_validate (*epoch.get ()));
nano::process_result result (nano::process_result::old);
if (valid_signature && valid_work)
{
result = node_a->process_local (std::move (epoch)).code;
}
if (result == nano::process_result::progress)
{
++upgraded_accounts;
}
else
{
bool fork (result == nano::process_result::fork);
node_a->logger.always_log (boost::str (boost::format ("Failed to upgrade account %1%. Valid signature: %2%. Valid work: %3%. Block processor fork: %4%") % i->account.to_account () % valid_signature % valid_work % fork));
}
}
}
total_upgraded_accounts += upgraded_accounts;
count_limit -= upgraded_accounts;
if (!accounts_list.empty ())
{
node_a->logger.always_log (boost::str (boost::format ("%1% accounts were upgraded to new epoch, %2% remain...") % total_upgraded_accounts % (accounts_list.size () - upgraded_accounts)));
accounts_list.clear ();
}
else
{
node_a->logger.always_log (boost::str (boost::format ("%1% total accounts were upgraded to new epoch") % total_upgraded_accounts));
finished_accounts = true;
}
}
// Pending blocks upgrade
bool finished_pending (false);
uint64_t total_upgraded_pending (0);
while (!finished_pending && count_limit != 0 && !node_a->stopped)
{
uint64_t upgraded_pending (0);
auto transaction (node_a->store.tx_begin_read ());
for (auto i (node_a->store.pending_begin (transaction, nano::pending_key (1, 0))), n (node_a->store.pending_end ()); i != n && upgraded_pending < upgrade_batch_size && upgraded_pending < count_limit && !node_a->stopped;)
{
bool to_next_account (false);
nano::pending_key const & key (i->first);
if (!node_a->store.account_exists (transaction, key.account))
{
nano::pending_info const & info (i->second);
if (info.epoch < epoch_a)
{
release_assert (nano::epochs::is_sequential (info.epoch, epoch_a));
auto epoch = builder.state ()
.account (key.account)
.previous (0)
.representative (0)
.balance (0)
.link (link)
.sign (raw_key, signer)
.work (node_a->work_generate_blocking (key.account).value_or (0))
.build ();
bool valid_signature (!nano::validate_message (signer, epoch->hash (), epoch->block_signature ()));
bool valid_work (!nano::work_validate (*epoch.get ()));
nano::process_result result (nano::process_result::old);
if (valid_signature && valid_work)
{
result = node_a->process_local (std::move (epoch)).code;
}
if (result == nano::process_result::progress)
{
++upgraded_pending;
to_next_account = true;
}
else
{
bool fork (result == nano::process_result::fork);
node_a->logger.always_log (boost::str (boost::format ("Failed to upgrade account with pending blocks %1%. Valid signature: %2%. Valid work: %3%. Block processor fork: %4%") % key.account.to_account () % valid_signature % valid_work % fork));
}
}
}
else
{
to_next_account = true;
}
if (to_next_account)
{
// Move to next account if pending account exists or was upgraded
if (key.account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
else
{
i = node_a->store.pending_begin (transaction, nano::pending_key (key.account.number () + 1, 0));
}
}
else
{
// Move to next pending item
++i;
}
}
total_upgraded_pending += upgraded_pending;
count_limit -= upgraded_pending;
// Repeat if some pending accounts were upgraded
if (upgraded_pending != 0)
{
node_a->logger.always_log (boost::str (boost::format ("%1% unopened accounts with pending blocks were upgraded to new epoch...") % total_upgraded_pending));
}
else
{
node_a->logger.always_log (boost::str (boost::format ("%1% total unopened accounts with pending blocks were upgraded to new epoch") % total_upgraded_pending));
finished_pending = true;
}
}
finished_upgrade = (total_upgraded_accounts == 0) && (total_upgraded_pending == 0);
}
node_a->logger.always_log ("Epoch upgrade is completed");
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::epoch_upgrade ()
{
nano::epoch epoch (nano::epoch::invalid);
uint8_t epoch_int (request.get<uint8_t> ("epoch"));
switch (epoch_int)
{
case 1:
epoch = nano::epoch::epoch_1;
break;
case 2:
epoch = nano::epoch::epoch_2;
break;
default:
break;
}
if (epoch != nano::epoch::invalid)
{
uint64_t count_limit (count_optional_impl ());
std::string key_text (request.get<std::string> ("key"));
nano::private_key prv;
if (!prv.decode_hex (key_text))
{
if (nano::pub_key (prv) == node.ledger.epoch_signer (node.ledger.epoch_link (epoch)))
{
auto node_l (node.shared ());
node.worker.push_task ([node_l, prv, epoch, count_limit]() {
epoch_upgrader (node_l, prv, epoch, count_limit);
});
response_l.put ("started", "1");
}
else
{
ec = nano::error_rpc::invalid_epoch_signer;
}
}
else
{
ec = nano::error_common::bad_private_key;
}
}
else
{
ec = nano::error_rpc::invalid_epoch;
}
response_errors ();
}
void nano::json_handler::frontiers ()
{
auto start (account_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && frontiers.size () < count; ++i)
{
frontiers.put (i->first.to_account (), i->second.head.to_string ());
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::account_count ()
{
auto transaction (node.store.tx_begin_read ());
auto size (node.store.account_count (transaction));
response_l.put ("count", std::to_string (size));
response_errors ();
}
namespace
{
class history_visitor : public nano::block_visitor
{
public:
history_visitor (nano::json_handler & handler_a, bool raw_a, nano::transaction & transaction_a, boost::property_tree::ptree & tree_a, nano::block_hash const & hash_a, std::vector<nano::public_key> const & accounts_filter_a) :
handler (handler_a),
raw (raw_a),
transaction (transaction_a),
tree (tree_a),
hash (hash_a),
accounts_filter (accounts_filter_a)
{
}
virtual ~history_visitor () = default;
void send_block (nano::send_block const & block_a)
{
if (should_ignore_account (block_a.hashables.destination))
{
return;
}
tree.put ("type", "send");
auto account (block_a.hashables.destination.to_account ());
tree.put ("account", account);
auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
tree.put ("amount", amount);
if (raw)
{
tree.put ("destination", account);
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void receive_block (nano::receive_block const & block_a)
{
tree.put ("type", "receive");
auto account (handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
tree.put ("account", account);
auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
tree.put ("amount", amount);
if (raw)
{
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void open_block (nano::open_block const & block_a)
{
if (raw)
{
tree.put ("type", "open");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("source", block_a.hashables.source.to_string ());
tree.put ("opened", block_a.hashables.account.to_account ());
}
else
{
// Report opens as a receive
tree.put ("type", "receive");
}
if (block_a.hashables.source != network_params.ledger.genesis_account)
{
tree.put ("account", handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
tree.put ("amount", handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
}
else
{
tree.put ("account", network_params.ledger.genesis_account.to_account ());
tree.put ("amount", network_params.ledger.genesis_amount.convert_to<std::string> ());
}
}
void change_block (nano::change_block const & block_a)
{
if (raw && accounts_filter.empty ())
{
tree.put ("type", "change");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
}
void state_block (nano::state_block const & block_a)
{
if (raw)
{
tree.put ("type", "state");
tree.put ("representative", block_a.hashables.representative.to_account ());
tree.put ("link", block_a.hashables.link.to_string ());
tree.put ("balance", block_a.hashables.balance.to_string_dec ());
tree.put ("previous", block_a.hashables.previous.to_string ());
}
auto balance (block_a.hashables.balance.number ());
auto previous_balance (handler.node.ledger.balance (transaction, block_a.hashables.previous));
if (balance < previous_balance)
{
if (should_ignore_account (block_a.hashables.link))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "send");
}
else
{
tree.put ("type", "send");
}
tree.put ("account", block_a.hashables.link.to_account ());
tree.put ("amount", (previous_balance - balance).convert_to<std::string> ());
}
else
{
if (block_a.hashables.link.is_zero ())
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "change");
}
}
else if (balance == previous_balance && handler.node.ledger.is_epoch_link (block_a.hashables.link))
{
if (raw && accounts_filter.empty ())
{
tree.put ("subtype", "epoch");
tree.put ("account", handler.node.ledger.epoch_signer (block_a.link ()).to_account ());
}
}
else
{
auto account (handler.node.ledger.account (transaction, block_a.hashables.link));
if (should_ignore_account (account))
{
tree.clear ();
return;
}
if (raw)
{
tree.put ("subtype", "receive");
}
else
{
tree.put ("type", "receive");
}
tree.put ("account", account.to_account ());
tree.put ("amount", (balance - previous_balance).convert_to<std::string> ());
}
}
}
bool should_ignore_account (nano::public_key const & account)
{
bool ignore (false);
if (!accounts_filter.empty ())
{
if (std::find (accounts_filter.begin (), accounts_filter.end (), account) == accounts_filter.end ())
{
ignore = true;
}
}
return ignore;
}
nano::json_handler & handler;
bool raw;
nano::transaction & transaction;
boost::property_tree::ptree & tree;
nano::block_hash const & hash;
nano::network_params network_params;
std::vector<nano::public_key> const & accounts_filter;
};
}
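/*
 * Returns the transaction history of an account, starting either from the supplied "head" hash or
 * from the account frontier (the open block when "reverse" is set). Supports "count", "offset",
 * "raw" output and an "account_filter" list, applied via the history_visitor above.
 */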
void nano::json_handler::account_history ()
{
std::vector<nano::public_key> accounts_to_filter;
const auto accounts_filter_node = request.get_child_optional ("account_filter");
if (accounts_filter_node.is_initialized ())
{
for (auto & a : (*accounts_filter_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
nano::account account;
nano::block_hash hash;
bool reverse (request.get_optional<bool> ("reverse") == true);
auto head_str (request.get_optional<std::string> ("head"));
auto transaction (node.store.tx_begin_read ());
auto count (count_impl ());
auto offset (offset_optional_impl (0));
if (head_str)
{
if (!hash.decode_hex (*head_str))
{
if (node.store.block_exists (transaction, hash))
{
account = node.ledger.account (transaction, hash);
}
else
{
ec = nano::error_blocks::not_found;
}
}
else
{
ec = nano::error_blocks::bad_hash_number;
}
}
else
{
account = account_impl ();
if (!ec)
{
if (reverse)
{
auto info (account_info_impl (transaction, account));
if (!ec)
{
hash = info.open_block;
}
}
else
{
hash = node.ledger.latest (transaction, account);
}
}
}
if (!ec)
{
boost::property_tree::ptree history;
bool output_raw (request.get_optional<bool> ("raw") == true);
response_l.put ("account", account.to_account ());
nano::block_sideband sideband;
auto block (node.store.block_get (transaction, hash, &sideband));
while (block != nullptr && count > 0)
{
if (offset > 0)
{
--offset;
}
else
{
boost::property_tree::ptree entry;
history_visitor visitor (*this, output_raw, transaction, entry, hash, accounts_to_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("local_timestamp", std::to_string (sideband.timestamp));
entry.put ("height", std::to_string (sideband.height));
entry.put ("hash", hash.to_string ());
if (output_raw)
{
entry.put ("work", nano::to_string_hex (block->block_work ()));
entry.put ("signature", block->block_signature ().to_string ());
}
history.push_back (std::make_pair ("", entry));
--count;
}
}
hash = reverse ? node.store.block_successor (transaction, hash) : block->previous ();
block = node.store.block_get (transaction, hash, &sideband);
}
response_l.add_child ("history", history);
if (!hash.is_zero ())
{
response_l.put (reverse ? "next" : "previous", hash.to_string ());
}
}
response_errors ();
}
void nano::json_handler::keepalive ()
{
if (!ec)
{
std::string address_text (request.get<std::string> ("address"));
std::string port_text (request.get<std::string> ("port"));
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.keepalive (address_text, port);
response_l.put ("started", "1");
}
else
{
ec = nano::error_common::invalid_port;
}
}
response_errors ();
}
void nano::json_handler::key_create ()
{
nano::keypair pair;
response_l.put ("private", pair.prv.data.to_string ());
response_l.put ("public", pair.pub.to_string ());
response_l.put ("account", pair.pub.to_account ());
response_errors ();
}
void nano::json_handler::key_expand ()
{
std::string key_text (request.get<std::string> ("key"));
nano::private_key prv;
if (!prv.decode_hex (key_text))
{
nano::public_key pub (nano::pub_key (prv));
response_l.put ("private", prv.to_string ());
response_l.put ("public", pub.to_string ());
response_l.put ("account", pub.to_account ());
}
else
{
ec = nano::error_common::bad_private_key;
}
response_errors ();
}
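/*
 * Dumps account frontiers and metadata starting from an optional "account", honouring "count",
 * "threshold", "modified_since" and the "sorting", "representative", "weight" and "pending"
 * options. The sorting path loads all matching accounts into memory before slicing.
 */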
void nano::json_handler::ledger ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
nano::account start (0);
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
const bool sorting = request.get<bool> ("sorting", false);
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
boost::property_tree::ptree accounts;
auto transaction (node.store.tx_begin_read ());
if (!ec && !sorting) // Simple
{
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && accounts.size () < count; ++i)
{
nano::account_info const & info (i->second);
if (info.modified >= modified_since && (pending || info.balance.number () >= threshold.number ()))
{
nano::account const & account (i->first);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
else if (!ec) // Sorting
{
std::vector<std::pair<nano::uint128_union, nano::account>> ledger_l;
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n; ++i)
{
nano::account_info const & info (i->second);
nano::uint128_union balance (info.balance);
if (info.modified >= modified_since)
{
ledger_l.emplace_back (balance, i->first);
}
}
std::sort (ledger_l.begin (), ledger_l.end ());
std::reverse (ledger_l.begin (), ledger_l.end ());
nano::account_info info;
for (auto i (ledger_l.begin ()), n (ledger_l.end ()); i != n && accounts.size () < count; ++i)
{
node.store.account_get (transaction, i->second, info);
if (pending || info.balance.number () >= threshold.number ())
{
nano::account const & account (i->second);
boost::property_tree::ptree response_a;
if (pending)
{
auto account_pending (node.ledger.account_pending (transaction, account));
if (info.balance.number () + account_pending < threshold.number ())
{
continue;
}
response_a.put ("pending", account_pending.convert_to<std::string> ());
}
response_a.put ("frontier", info.head.to_string ());
response_a.put ("open_block", info.open_block.to_string ());
response_a.put ("representative_block", node.ledger.representative (transaction, info.head).to_string ());
std::string balance;
(i->first).encode_dec (balance);
response_a.put ("balance", balance);
response_a.put ("modified_timestamp", std::to_string (info.modified));
response_a.put ("block_count", std::to_string (info.block_count));
if (representative)
{
response_a.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
response_a.put ("weight", account_weight.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), response_a));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
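/*
 * Unit conversion helpers: mnano_from_raw divides and mnano_to_raw multiplies the supplied
 * "amount" by the given ratio, with an overflow check on multiplication.
 */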
void nano::json_handler::mnano_from_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () / ratio);
response_l.put ("amount", result.convert_to<std::string> ());
}
response_errors ();
}
void nano::json_handler::mnano_to_raw (nano::uint128_t ratio)
{
auto amount (amount_impl ());
if (!ec)
{
auto result (amount.number () * ratio);
if (result > amount.number ())
{
response_l.put ("amount", result.convert_to<std::string> ());
}
else
{
ec = nano::error_common::invalid_amount_big;
}
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id ()
{
if (!ec)
{
response_l.put ("private", node.node_id.prv.data.to_string ());
response_l.put ("public", node.node_id.pub.to_string ());
response_l.put ("as_account", node.node_id.pub.to_account ());
response_l.put ("node_id", node.node_id.pub.to_node_id ());
}
response_errors ();
}
/*
* @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
*/
void nano::json_handler::node_id_delete ()
{
response_l.put ("deprecated", "1");
response_errors ();
}
void nano::json_handler::password_change ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_locked_impl (transaction, wallet);
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
bool error (wallet->store.rekey (transaction, password_text));
rpc_l->response_l.put ("changed", error ? "0" : "1");
if (!error)
{
rpc_l->node.logger.try_log ("Wallet password changed");
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::password_enter ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string password_text (rpc_l->request.get<std::string> ("password"));
auto transaction (wallet->wallets.tx_begin_write ());
auto error (wallet->enter_password (transaction, password_text));
rpc_l->response_l.put ("valid", error ? "0" : "1");
}
rpc_l->response_errors ();
});
}
void nano::json_handler::password_valid (bool wallet_locked)
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
if (!wallet_locked)
{
response_l.put ("valid", valid ? "1" : "0");
}
else
{
response_l.put ("locked", valid ? "0" : "1");
}
}
response_errors ();
}
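/*
 * Lists known network peers sorted by endpoint; with "peer_details" each entry also carries the
 * protocol version, node ID (if known) and transport type (tcp/udp).
 */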
void nano::json_handler::peers ()
{
boost::property_tree::ptree peers_l;
const bool peer_details = request.get<bool> ("peer_details", false);
auto peers_list (node.network.list (std::numeric_limits<size_t>::max ()));
std::sort (peers_list.begin (), peers_list.end (), [](const auto & lhs, const auto & rhs) {
return lhs->get_endpoint () < rhs->get_endpoint ();
});
for (auto i (peers_list.begin ()), n (peers_list.end ()); i != n; ++i)
{
std::stringstream text;
auto channel (*i);
text << channel->to_string ();
if (peer_details)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("protocol_version", std::to_string (channel->get_network_version ()));
auto node_id_l (channel->get_node_id_optional ());
if (node_id_l.is_initialized ())
{
pending_tree.put ("node_id", node_id_l.get ().to_node_id ());
}
else
{
pending_tree.put ("node_id", "");
}
pending_tree.put ("type", channel->get_type () == nano::transport::transport_type::tcp ? "tcp" : "udp");
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), pending_tree));
}
else
{
peers_l.push_back (boost::property_tree::ptree::value_type (text.str (), boost::property_tree::ptree (std::to_string (channel->get_network_version ()))));
}
}
response_l.add_child ("peers", peers_l);
response_errors ();
}
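/*
 * Lists pending (receivable) blocks for an account. With a zero threshold and no extra options the
 * response is a plain list of hashes; "threshold", "source", "min_version" and "sorting" switch to a
 * richer per-hash form, and "include_active"/"include_only_confirmed" control confirmation filtering.
 */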
void nano::json_handler::pending ()
{
auto account (account_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
const bool sorting = request.get<bool> ("sorting", false);
auto simple (threshold.is_zero () && !source && !min_version && !sorting); // if simple, response is a list of hashes
if (!ec)
{
boost::property_tree::ptree peers_l;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
{
nano::pending_key const & key (i->first);
if (block_confirmed (node, transaction, key.hash, include_active, include_only_confirmed))
{
if (simple)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info const & info (i->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (sorting && !simple)
{
if (source || min_version)
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("amount") > child2.second.template get<nano::uint128_t> ("amount");
});
}
else
{
peers_l.sort ([](const auto & child1, const auto & child2) -> bool {
return child1.second.template get<nano::uint128_t> ("") > child2.second.template get<nano::uint128_t> ("");
});
}
}
response_l.add_child ("blocks", peers_l);
}
response_errors ();
}
void nano::json_handler::pending_exists ()
{
auto hash (hash_impl ());
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
auto exists (false);
auto destination (node.ledger.block_destination (transaction, *block));
if (!destination.is_zero ())
{
exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
}
exists = exists && (block_confirmed (node, transaction, block->hash (), include_active, include_only_confirmed));
response_l.put ("exists", exists ? "1" : "0");
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::payment_begin ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
std::string id_text (rpc_l->request.get<std::string> ("wallet"));
nano::wallet_id id;
if (!id.decode_hex (id_text))
{
auto existing (rpc_l->node.wallets.items.find (id));
if (existing != rpc_l->node.wallets.items.end ())
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
std::shared_ptr<nano::wallet> wallet (existing->second);
if (wallet->store.valid_password (transaction))
{
nano::account account (0);
do
{
auto existing (wallet->free_accounts.begin ());
if (existing != wallet->free_accounts.end ())
{
account = *existing;
wallet->free_accounts.erase (existing);
if (wallet->store.find (transaction, account) == wallet->store.end ())
{
rpc_l->node.logger.always_log (boost::str (boost::format ("Transaction wallet %1% externally modified listing account %2% as free but no longer exists") % id.to_string () % account.to_account ()));
account.clear ();
}
else
{
auto block_transaction (rpc_l->node.store.tx_begin_read ());
if (!rpc_l->node.ledger.account_balance (block_transaction, account).is_zero ())
{
rpc_l->node.logger.always_log (boost::str (boost::format ("Skipping account %1% for use as a transaction account: non-zero balance") % account.to_account ()));
account.clear ();
}
}
}
else
{
account = wallet->deterministic_insert (transaction);
break;
}
} while (account.is_zero ());
if (!account.is_zero ())
{
rpc_l->response_l.put ("deprecated", "1");
rpc_l->response_l.put ("account", account.to_account ());
}
else
{
rpc_l->ec = nano::error_rpc::payment_unable_create_account;
}
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::wallet_not_found;
}
}
else
{
rpc_l->ec = nano::error_common::bad_wallet_number;
}
rpc_l->response_errors ();
});
}
void nano::json_handler::payment_init ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
wallet->init_free_accounts (transaction);
rpc_l->response_l.put ("deprecated", "1");
rpc_l->response_l.put ("status", "Ready");
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::payment_end ()
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
if (node.ledger.account_balance (block_transaction, account).is_zero ())
{
wallet->free_accounts.insert (account);
response_l.put ("deprecated", "1");
response_l.put ("ended", "1");
}
else
{
ec = nano::error_rpc::payment_account_balance;
}
}
}
response_errors ();
}
void nano::json_handler::payment_wait ()
{
std::string timeout_text (request.get<std::string> ("timeout"));
auto account (account_impl ());
auto amount (amount_impl ());
if (!ec)
{
uint64_t timeout;
if (!decode_unsigned (timeout_text, timeout))
{
{
auto observer (std::make_shared<nano::json_payment_observer> (node, response, account, amount));
observer->start (timeout);
node.payment_observer_processor.add (account, observer);
}
node.payment_observer_processor.observer_action (account);
}
else
{
ec = nano::error_rpc::bad_timeout;
}
}
if (ec)
{
response_errors ();
}
}
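/*
 * Validates and processes a locally supplied block. For state blocks an optional "subtype" ("send",
 * "receive", "open", "change", "epoch") is checked against the ledger first; the block's work is
 * validated, and "force" pushes a forked block through the block processor.
 */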
void nano::json_handler::process ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
const bool json_block_l = rpc_l->request.get<bool> ("json_block", false);
const bool watch_work_l = rpc_l->request.get<bool> ("watch_work", true);
std::shared_ptr<nano::block> block;
if (json_block_l)
{
block = rpc_l->block_json_impl (true);
}
else
{
block = rpc_l->block_impl (true);
}
// State blocks subtype check
if (!rpc_l->ec && block->type () == nano::block_type::state)
{
std::string subtype_text (rpc_l->request.get<std::string> ("subtype", ""));
if (!subtype_text.empty ())
{
std::shared_ptr<nano::state_block> block_state (std::static_pointer_cast<nano::state_block> (block));
auto transaction (rpc_l->node.store.tx_begin_read ());
if (!block_state->hashables.previous.is_zero () && !rpc_l->node.store.block_exists (transaction, block_state->hashables.previous))
{
rpc_l->ec = nano::error_process::gap_previous;
}
else
{
auto balance (rpc_l->node.ledger.account_balance (transaction, block_state->hashables.account));
if (subtype_text == "send")
{
if (balance <= block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// Send with previous == 0 fails balance check. No previous != 0 check required
}
else if (subtype_text == "receive")
{
if (balance > block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
// A receive can point to an open block. No previous != 0 check required
}
else if (subtype_text == "open")
{
if (!block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "change")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (block_state->hashables.previous.is_zero ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_previous;
}
}
else if (subtype_text == "epoch")
{
if (balance != block_state->hashables.balance.number ())
{
rpc_l->ec = nano::error_rpc::invalid_subtype_balance;
}
else if (!rpc_l->node.ledger.is_epoch_link (block_state->hashables.link))
{
rpc_l->ec = nano::error_rpc::invalid_subtype_epoch_link;
}
}
else
{
rpc_l->ec = nano::error_rpc::invalid_subtype;
}
}
}
}
if (!rpc_l->ec)
{
if (!nano::work_validate (*block))
{
auto result (rpc_l->node.process_local (block, watch_work_l));
switch (result.code)
{
case nano::process_result::progress:
{
rpc_l->response_l.put ("hash", block->hash ().to_string ());
break;
}
case nano::process_result::gap_previous:
{
rpc_l->ec = nano::error_process::gap_previous;
break;
}
case nano::process_result::gap_source:
{
rpc_l->ec = nano::error_process::gap_source;
break;
}
case nano::process_result::old:
{
rpc_l->ec = nano::error_process::old;
break;
}
case nano::process_result::bad_signature:
{
rpc_l->ec = nano::error_process::bad_signature;
break;
}
case nano::process_result::negative_spend:
{
// TODO once we get RPC versioning, this should be changed to "negative spend"
rpc_l->ec = nano::error_process::negative_spend;
break;
}
case nano::process_result::balance_mismatch:
{
rpc_l->ec = nano::error_process::balance_mismatch;
break;
}
case nano::process_result::unreceivable:
{
rpc_l->ec = nano::error_process::unreceivable;
break;
}
case nano::process_result::block_position:
{
rpc_l->ec = nano::error_process::block_position;
break;
}
case nano::process_result::fork:
{
const bool force = rpc_l->request.get<bool> ("force", false);
if (force)
{
rpc_l->node.active.erase (*block);
rpc_l->node.block_processor.force (block);
rpc_l->response_l.put ("hash", block->hash ().to_string ());
}
else
{
rpc_l->ec = nano::error_process::fork;
}
break;
}
default:
{
rpc_l->ec = nano::error_process::other;
break;
}
}
}
else
{
rpc_l->ec = nano::error_blocks::work_low;
}
}
rpc_l->response_errors ();
});
}
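/*
 * Receives a specific pending block ("block") into a wallet account. Work may be supplied via
 * "work" (validated against the account head) or generated by the node; the response is produced
 * asynchronously from the receive_async callback.
 */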
void nano::json_handler::receive ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
auto hash (hash_impl ("block"));
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
auto block_transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (block_transaction, hash));
if (block != nullptr)
{
if (node.store.pending_exists (block_transaction, nano::pending_key (account, hash)))
{
auto work (work_optional_impl ());
if (!ec && work)
{
nano::account_info info;
nano::root head;
if (!node.store.account_get (block_transaction, account, info))
{
head = info.head;
}
else
{
head = account;
}
if (nano::work_validate (head, work))
{
ec = nano::error_common::invalid_work;
}
}
else if (!ec) // && work == 0
{
if (!node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
}
if (!ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
auto response_a (response);
// clang-format off
wallet->receive_async(std::move(block), account, node.network_params.ledger.genesis_amount, [response_a](std::shared_ptr<nano::block> block_a) {
if (block_a != nullptr)
{
boost::property_tree::ptree response_l;
response_l.put("block", block_a->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, response_l);
response_a(ostream.str());
}
else
{
json_error_response(response_a, "Error generating block");
}
},
work, generate_work);
// clang-format on
}
}
else
{
ec = nano::error_process::unreceivable;
}
}
else
{
ec = nano::error_blocks::not_found;
}
}
}
// Because of receive_async the response is sent from the callback above; only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::receive_minimum ()
{
if (!ec)
{
response_l.put ("amount", node.config.receive_minimum.to_string_dec ());
}
response_errors ();
}
void nano::json_handler::receive_minimum_set ()
{
auto amount (amount_impl ());
if (!ec)
{
node.config.receive_minimum = amount;
response_l.put ("success", "");
}
response_errors ();
}
void nano::json_handler::representatives ()
{
auto count (count_optional_impl ());
if (!ec)
{
const bool sorting = request.get<bool> ("sorting", false);
boost::property_tree::ptree representatives;
auto rep_amounts = node.ledger.rep_weights.get_rep_amounts ();
if (!sorting) // Simple
{
std::map<nano::account, nano::uint128_t> ordered (rep_amounts.begin (), rep_amounts.end ());
for (auto & rep_amount : ordered) // iterate the ordered copy for deterministic output
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representatives.put (account.to_account (), amount.convert_to<std::string> ());
if (representatives.size () > count)
{
break;
}
}
}
else // Sorting
{
std::vector<std::pair<nano::uint128_t, std::string>> representation;
for (auto & rep_amount : rep_amounts)
{
auto const & account (rep_amount.first);
auto const & amount (rep_amount.second);
representation.emplace_back (amount, account.to_account ());
}
std::sort (representation.begin (), representation.end ());
std::reverse (representation.begin (), representation.end ());
for (auto i (representation.begin ()), n (representation.end ()); i != n && representatives.size () < count; ++i)
{
representatives.put (i->second, (i->first).convert_to<std::string> ());
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
void nano::json_handler::representatives_online ()
{
const auto accounts_node = request.get_child_optional ("accounts");
const bool weight = request.get<bool> ("weight", false);
std::vector<nano::public_key> accounts_to_filter;
if (accounts_node.is_initialized ())
{
for (auto & a : (*accounts_node))
{
auto account (account_impl (a.second.get<std::string> ("")));
if (!ec)
{
accounts_to_filter.push_back (account);
}
else
{
break;
}
}
}
if (!ec)
{
boost::property_tree::ptree representatives;
auto reps (node.online_reps.list ());
for (auto & i : reps)
{
if (accounts_node.is_initialized ())
{
if (accounts_to_filter.empty ())
{
break;
}
auto found_acc = std::find (accounts_to_filter.begin (), accounts_to_filter.end (), i);
if (found_acc == accounts_to_filter.end ())
{
continue;
}
else
{
accounts_to_filter.erase (found_acc);
}
}
if (weight)
{
boost::property_tree::ptree weight_node;
auto account_weight (node.ledger.weight (i));
weight_node.put ("weight", account_weight.convert_to<std::string> ());
representatives.add_child (i.to_account (), weight_node);
}
else
{
boost::property_tree::ptree entry;
entry.put ("", i.to_account ());
representatives.push_back (std::make_pair ("", entry));
}
}
response_l.add_child ("representatives", representatives);
}
response_errors ();
}
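/*
 * Rebroadcasts blocks starting at "hash" along the account chain (up to "count" blocks), optionally
 * including parts of the source chains ("sources") and destination chains ("destinations").
 */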
void nano::json_handler::republish ()
{
auto count (count_optional_impl (1024U));
uint64_t sources (0);
uint64_t destinations (0);
boost::optional<std::string> sources_text (request.get_optional<std::string> ("sources"));
if (!ec && sources_text.is_initialized ())
{
if (decode_unsigned (sources_text.get (), sources))
{
ec = nano::error_rpc::invalid_sources;
}
}
boost::optional<std::string> destinations_text (request.get_optional<std::string> ("destinations"));
if (!ec && destinations_text.is_initialized ())
{
if (decode_unsigned (destinations_text.get (), destinations))
{
ec = nano::error_rpc::invalid_destinations;
}
}
auto hash (hash_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
auto transaction (node.store.tx_begin_read ());
auto block (node.store.block_get (transaction, hash));
if (block != nullptr)
{
std::deque<std::shared_ptr<nano::block>> republish_bundle;
for (auto i (0); !hash.is_zero () && i < count; ++i)
{
block = node.store.block_get (transaction, hash);
if (sources != 0) // Republish source chain
{
nano::block_hash source (node.ledger.block_source (transaction, *block));
auto block_a (node.store.block_get (transaction, source));
std::vector<nano::block_hash> hashes;
while (block_a != nullptr && hashes.size () < sources)
{
hashes.push_back (source);
source = block_a->previous ();
block_a = node.store.block_get (transaction, source);
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash_l : hashes)
{
block_a = node.store.block_get (transaction, hash_l);
republish_bundle.push_back (std::move (block_a));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
republish_bundle.push_back (std::move (block)); // Republish block
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
if (destinations != 0) // Republish destination chain
{
auto block_b (node.store.block_get (transaction, hash));
auto destination (node.ledger.block_destination (transaction, *block_b));
if (!destination.is_zero ())
{
if (!node.store.pending_exists (transaction, nano::pending_key (destination, hash)))
{
nano::block_hash previous (node.ledger.latest (transaction, destination));
auto block_d (node.store.block_get (transaction, previous));
nano::block_hash source;
std::vector<nano::block_hash> hashes;
while (block_d != nullptr && hash != source)
{
hashes.push_back (previous);
source = node.ledger.block_source (transaction, *block_d);
previous = block_d->previous ();
block_d = node.store.block_get (transaction, previous);
}
std::reverse (hashes.begin (), hashes.end ());
if (hashes.size () > destinations)
{
hashes.resize (destinations);
}
for (auto & hash_l : hashes)
{
block_d = node.store.block_get (transaction, hash_l);
republish_bundle.push_back (std::move (block_d));
boost::property_tree::ptree entry_l;
entry_l.put ("", hash_l.to_string ());
blocks.push_back (std::make_pair ("", entry_l));
}
}
}
}
hash = node.store.block_successor (transaction, hash);
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.put ("success", ""); // obsolete
response_l.add_child ("blocks", blocks);
}
else
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::search_pending ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto error (wallet->search_pending ());
response_l.put ("started", !error);
}
response_errors ();
}
void nano::json_handler::search_pending_all ()
{
if (!ec)
{
node.wallets.search_pending_all ();
response_l.put ("success", "");
}
response_errors ();
}
void nano::json_handler::send ()
{
auto wallet (wallet_impl ());
auto amount (amount_impl ());
// Sending 0 amount is invalid with state blocks
if (!ec && amount.is_zero ())
{
ec = nano::error_common::invalid_amount;
}
std::string source_text (request.get<std::string> ("source"));
auto source (account_impl (source_text, nano::error_rpc::bad_source));
std::string destination_text (request.get<std::string> ("destination"));
auto destination (account_impl (destination_text, nano::error_rpc::bad_destination));
if (!ec)
{
auto work (work_optional_impl ());
nano::uint128_t balance (0);
if (!ec && work == 0 && !node.work_generation_enabled ())
{
ec = nano::error_common::disabled_work_generation;
}
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, source);
auto info (account_info_impl (block_transaction, source));
if (!ec)
{
balance = (info.balance).number ();
}
if (!ec && work)
{
if (nano::work_validate (info.head, work))
{
ec = nano::error_common::invalid_work;
}
}
}
if (!ec)
{
bool generate_work (work == 0); // Disable work generation if "work" option is provided
boost::optional<std::string> send_id (request.get_optional<std::string> ("id"));
auto response_a (response);
auto response_data (std::make_shared<boost::property_tree::ptree> (response_l));
// clang-format off
wallet->send_async(source, destination, amount.number(), [balance, amount, response_a, response_data](std::shared_ptr<nano::block> block_a) {
if (block_a != nullptr)
{
response_data->put("block", block_a->hash().to_string());
std::stringstream ostream;
boost::property_tree::write_json(ostream, *response_data);
response_a(ostream.str());
}
else
{
if (balance >= amount.number())
{
json_error_response(response_a, "Error generating block");
}
else
{
std::error_code ec(nano::error_common::insufficient_balance);
json_error_response(response_a, ec.message());
}
}
},
work, generate_work, send_id);
// clang-format on
}
}
	// On success the response is sent from the send_async callback, so only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::sign ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
// Retrieving hash
nano::block_hash hash (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
if (hash_text.is_initialized ())
{
hash = hash_impl ();
}
// Retrieving block
std::shared_ptr<nano::block> block;
boost::optional<std::string> block_text (request.get_optional<std::string> ("block"));
if (!ec && block_text.is_initialized ())
{
if (json_block_l)
{
block = block_json_impl (true);
}
else
{
block = block_impl (true);
}
if (block != nullptr)
{
hash = block->hash ();
}
}
	// Neither hash nor block is initialized
if (!ec && hash.is_zero ())
{
ec = nano::error_blocks::invalid_block;
}
// Hash is initialized without config permission
else if (!ec && !hash.is_zero () && block == nullptr && !node_rpc_config.enable_sign_hash)
{
ec = nano::error_rpc::sign_hash_disabled;
}
if (!ec)
{
nano::raw_key prv;
prv.data.clear ();
// Retrieving private key from request
boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
if (key_text.is_initialized ())
{
if (prv.data.decode_hex (key_text.get ()))
{
ec = nano::error_common::bad_private_key;
}
}
else
{
// Retrieving private key from wallet
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
if (wallet_text.is_initialized () && account_text.is_initialized ())
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_locked_impl (transaction, wallet);
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
wallet->store.fetch (transaction, account, prv);
}
}
}
}
// Signing
if (prv.data != 0)
{
nano::public_key pub (nano::pub_key (prv.as_private_key ()));
nano::signature signature (nano::sign_message (prv, pub, hash));
response_l.put ("signature", signature.to_string ());
if (block != nullptr)
{
block->signature_set (signature);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
block->serialize_json (block_node_l);
response_l.add_child ("block", block_node_l);
}
else
{
std::string contents;
block->serialize_json (contents);
response_l.put ("block", contents);
}
}
}
else
{
ec = nano::error_rpc::block_create_key_required;
}
}
response_errors ();
}
void nano::json_handler::stats ()
{
auto sink = node.stats.log_sink_json ();
std::string type (request.get<std::string> ("type", ""));
bool use_sink = false;
if (type == "counters")
{
node.stats.log_counters (*sink);
use_sink = true;
}
else if (type == "objects")
{
construct_json (collect_seq_con_info (node, "node").get (), response_l);
}
else if (type == "samples")
{
node.stats.log_samples (*sink);
use_sink = true;
}
else
{
ec = nano::error_rpc::invalid_missing_type;
}
if (!ec && use_sink)
{
auto stat_tree_l (*static_cast<boost::property_tree::ptree *> (sink->to_object ()));
stat_tree_l.put ("stat_duration_seconds", node.stats.last_reset ().count ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, stat_tree_l);
response (ostream.str ());
}
else
{
response_errors ();
}
}
void nano::json_handler::stats_clear ()
{
node.stats.clear ();
response_l.put ("success", "");
std::stringstream ostream;
boost::property_tree::write_json (ostream, response_l);
response (ostream.str ());
}
void nano::json_handler::stop ()
{
response_l.put ("success", "");
response_errors ();
if (!ec)
{
node.stop ();
stop_callback ();
}
}
void nano::json_handler::unchecked ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction)), n (node.store.unchecked_end ()); i != n && unchecked.size () < count; ++i)
{
nano::unchecked_info const & info (i->second);
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
unchecked.add_child (info.block->hash ().to_string (), block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
unchecked.put (info.block->hash ().to_string (), contents);
}
}
response_l.add_child ("blocks", unchecked);
}
response_errors ();
}
void nano::json_handler::unchecked_clear ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto transaction (rpc_l->node.store.tx_begin_write ());
rpc_l->node.store.unchecked_clear (transaction);
rpc_l->response_l.put ("success", "");
rpc_l->response_errors ();
});
}
void nano::json_handler::unchecked_get ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto hash (hash_impl ());
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction)), n (node.store.unchecked_end ()); i != n; ++i)
{
nano::unchecked_key const & key (i->first);
if (key.hash == hash)
{
nano::unchecked_info const & info (i->second);
response_l.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
response_l.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
response_l.put ("contents", contents);
}
break;
}
}
if (response_l.empty ())
{
ec = nano::error_blocks::not_found;
}
}
response_errors ();
}
void nano::json_handler::unchecked_keys ()
{
const bool json_block_l = request.get<bool> ("json_block", false);
auto count (count_optional_impl ());
nano::block_hash key (0);
boost::optional<std::string> hash_text (request.get_optional<std::string> ("key"));
if (!ec && hash_text.is_initialized ())
{
if (key.decode_hex (hash_text.get ()))
{
ec = nano::error_rpc::bad_key;
}
}
if (!ec)
{
boost::property_tree::ptree unchecked;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.unchecked_begin (transaction, nano::unchecked_key (key, 0))), n (node.store.unchecked_end ()); i != n && unchecked.size () < count; ++i)
{
boost::property_tree::ptree entry;
nano::unchecked_info const & info (i->second);
entry.put ("key", i->first.key ().to_string ());
entry.put ("hash", info.block->hash ().to_string ());
entry.put ("modified_timestamp", std::to_string (info.modified));
if (json_block_l)
{
boost::property_tree::ptree block_node_l;
info.block->serialize_json (block_node_l);
entry.add_child ("contents", block_node_l);
}
else
{
std::string contents;
info.block->serialize_json (contents);
entry.put ("contents", contents);
}
unchecked.push_back (std::make_pair ("", entry));
}
response_l.add_child ("unchecked", unchecked);
}
response_errors ();
}
void nano::json_handler::unopened ()
{
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
nano::account start (1); // exclude burn account by default
boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
if (account_text.is_initialized ())
{
start = account_impl (account_text.get ());
}
if (!ec)
{
auto transaction (node.store.tx_begin_read ());
auto iterator (node.store.pending_begin (transaction, nano::pending_key (start, 0)));
auto end (node.store.pending_end ());
nano::account current_account (start);
nano::uint128_t current_account_sum{ 0 };
boost::property_tree::ptree accounts;
while (iterator != end && accounts.size () < count)
{
nano::pending_key key (iterator->first);
nano::account account (key.account);
nano::pending_info info (iterator->second);
if (node.store.account_exists (transaction, account))
{
if (account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
// Skip existing accounts
iterator = node.store.pending_begin (transaction, nano::pending_key (account.number () + 1, 0));
}
else
{
if (account != current_account)
{
if (current_account_sum > 0)
{
if (current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
current_account_sum = 0;
}
current_account = account;
}
current_account_sum += info.amount.number ();
++iterator;
}
}
// last one after iterator reaches end
if (accounts.size () < count && current_account_sum > 0 && current_account_sum >= threshold.number ())
{
accounts.put (current_account.to_account (), current_account_sum.convert_to<std::string> ());
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::uptime ()
{
response_l.put ("seconds", std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - node.startup_time).count ());
response_errors ();
}
void nano::json_handler::version ()
{
response_l.put ("rpc_version", "1");
response_l.put ("store_version", std::to_string (node.store_version ()));
response_l.put ("protocol_version", std::to_string (node.network_params.protocol.protocol_version));
response_l.put ("node_vendor", boost::str (boost::format ("Nano %1%") % NANO_VERSION_STRING));
response_l.put ("network", node.network_params.network.get_current_network_as_string ());
response_l.put ("network_identifier", nano::genesis ().hash ().to_string ());
response_l.put ("build_info", BUILD_INFO);
response_errors ();
}
void nano::json_handler::validate_account_number ()
{
auto account (account_impl ());
(void)account;
response_l.put ("valid", ec ? "0" : "1");
ec = std::error_code (); // error is just invalid account
response_errors ();
}
void nano::json_handler::wallet_add ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string key_text (rpc_l->request.get<std::string> ("key"));
nano::raw_key key;
if (!key.data.decode_hex (key_text))
{
const bool generate_work = rpc_l->request.get<bool> ("work", true);
auto pub (wallet->insert_adhoc (key, generate_work));
if (!pub.is_zero ())
{
rpc_l->response_l.put ("account", pub.to_account ());
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_private_key;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_add_watch ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
for (auto & accounts : rpc_l->request.get_child ("accounts"))
{
auto account (rpc_l->account_impl (accounts.second.data ()));
if (!rpc_l->ec)
{
if (wallet->insert_watch (transaction, account))
{
rpc_l->ec = nano::error_common::bad_public_key;
}
}
}
if (!rpc_l->ec)
{
rpc_l->response_l.put ("success", "");
}
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_info ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::uint128_t balance (0);
nano::uint128_t pending (0);
uint64_t count (0);
uint64_t deterministic_count (0);
uint64_t adhoc_count (0);
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
balance = balance + node.ledger.account_balance (block_transaction, account);
pending = pending + node.ledger.account_pending (block_transaction, account);
nano::key_type key_type (wallet->store.key_type (i->second));
if (key_type == nano::key_type::deterministic)
{
deterministic_count++;
}
else if (key_type == nano::key_type::adhoc)
{
adhoc_count++;
}
count++;
}
uint32_t deterministic_index (wallet->store.deterministic_index_get (transaction));
response_l.put ("balance", balance.convert_to<std::string> ());
response_l.put ("pending", pending.convert_to<std::string> ());
response_l.put ("accounts_count", std::to_string (count));
response_l.put ("deterministic_count", std::to_string (deterministic_count));
response_l.put ("adhoc_count", std::to_string (adhoc_count));
response_l.put ("deterministic_index", std::to_string (deterministic_index));
}
response_errors ();
}
void nano::json_handler::wallet_balances ()
{
auto wallet (wallet_impl ());
auto threshold (threshold_optional_impl ());
if (!ec)
{
boost::property_tree::ptree balances;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::uint128_t balance = node.ledger.account_balance (block_transaction, account);
if (balance >= threshold.number ())
{
boost::property_tree::ptree entry;
nano::uint128_t pending = node.ledger.account_pending (block_transaction, account);
entry.put ("balance", balance.convert_to<std::string> ());
entry.put ("pending", pending.convert_to<std::string> ());
balances.push_back (std::make_pair (account.to_account (), entry));
}
}
response_l.add_child ("balances", balances);
}
response_errors ();
}
void nano::json_handler::wallet_change_seed ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
if (!rpc_l->ec)
{
std::string seed_text (rpc_l->request.get<std::string> ("seed"));
nano::raw_key seed;
if (!seed.data.decode_hex (seed_text))
{
auto count (static_cast<uint32_t> (rpc_l->count_optional_impl (0)));
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction))
{
nano::public_key account (wallet->change_seed (transaction, seed, count));
rpc_l->response_l.put ("success", "");
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
else
{
rpc_l->ec = nano::error_common::bad_seed;
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_contains ()
{
auto account (account_impl ());
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto exists (wallet->store.find (transaction, account) != wallet->store.end ());
response_l.put ("exists", exists ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_create ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
nano::raw_key seed;
auto seed_text (rpc_l->request.get_optional<std::string> ("seed"));
if (seed_text.is_initialized () && seed.data.decode_hex (seed_text.get ()))
{
rpc_l->ec = nano::error_common::bad_seed;
}
if (!rpc_l->ec)
{
auto wallet_id = random_wallet_id ();
auto wallet (rpc_l->node.wallets.create (wallet_id));
auto existing (rpc_l->node.wallets.items.find (wallet_id));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->response_l.put ("wallet", wallet_id.to_string ());
}
else
{
rpc_l->ec = nano::error_common::wallet_lmdb_max_dbs;
}
if (!rpc_l->ec && seed_text.is_initialized ())
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
nano::public_key account (wallet->change_seed (transaction, seed));
rpc_l->response_l.put ("last_restored_account", account.to_account ());
auto index (wallet->store.deterministic_index_get (transaction));
assert (index > 0);
rpc_l->response_l.put ("restored_count", std::to_string (index));
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_destroy ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
std::string wallet_text (rpc_l->request.get<std::string> ("wallet"));
nano::wallet_id wallet;
if (!wallet.decode_hex (wallet_text))
{
auto existing (rpc_l->node.wallets.items.find (wallet));
if (existing != rpc_l->node.wallets.items.end ())
{
rpc_l->node.wallets.destroy (wallet);
bool destroyed (rpc_l->node.wallets.items.find (wallet) == rpc_l->node.wallets.items.end ());
rpc_l->response_l.put ("destroyed", destroyed ? "1" : "0");
}
else
{
rpc_l->ec = nano::error_common::wallet_not_found;
}
}
else
{
rpc_l->ec = nano::error_common::bad_wallet_number;
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_export ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
std::string json;
wallet->store.serialize_json (transaction, json);
response_l.put ("json", json);
}
response_errors ();
}
void nano::json_handler::wallet_frontiers ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::json_handler::wallet_history ()
{
uint64_t modified_since (1);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
if (decode_unsigned (modified_since_text.get (), modified_since))
{
ec = nano::error_rpc::invalid_timestamp;
}
}
auto wallet (wallet_impl ());
if (!ec)
{
std::multimap<uint64_t, boost::property_tree::ptree, std::greater<uint64_t>> entries;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account_get (block_transaction, account, info))
{
auto timestamp (info.modified);
auto hash (info.head);
while (timestamp >= modified_since && !hash.is_zero ())
{
nano::block_sideband sideband;
auto block (node.store.block_get (block_transaction, hash, &sideband));
timestamp = sideband.timestamp;
if (block != nullptr && timestamp >= modified_since)
{
boost::property_tree::ptree entry;
std::vector<nano::public_key> no_filter;
history_visitor visitor (*this, false, block_transaction, entry, hash, no_filter);
block->visit (visitor);
if (!entry.empty ())
{
entry.put ("block_account", account.to_account ());
entry.put ("hash", hash.to_string ());
entry.put ("local_timestamp", std::to_string (timestamp));
entries.insert (std::make_pair (timestamp, entry));
}
hash = block->previous ();
}
else
{
hash.clear ();
}
}
}
}
boost::property_tree::ptree history;
for (auto i (entries.begin ()), n (entries.end ()); i != n; ++i)
{
history.push_back (std::make_pair ("", i->second));
}
response_l.add_child ("history", history);
}
response_errors ();
}
void nano::json_handler::wallet_key_valid ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
auto valid (wallet->store.valid_password (transaction));
response_l.put ("valid", valid ? "1" : "0");
}
response_errors ();
}
void nano::json_handler::wallet_ledger ()
{
const bool representative = request.get<bool> ("representative", false);
const bool weight = request.get<bool> ("weight", false);
const bool pending = request.get<bool> ("pending", false);
uint64_t modified_since (0);
boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
if (modified_since_text.is_initialized ())
{
modified_since = strtoul (modified_since_text.get ().c_str (), NULL, 10);
}
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree accounts;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!node.store.account_get (block_transaction, account, info))
{
if (info.modified >= modified_since)
{
boost::property_tree::ptree entry;
entry.put ("frontier", info.head.to_string ());
entry.put ("open_block", info.open_block.to_string ());
entry.put ("representative_block", node.ledger.representative (block_transaction, info.head).to_string ());
std::string balance;
nano::uint128_union (info.balance).encode_dec (balance);
entry.put ("balance", balance);
entry.put ("modified_timestamp", std::to_string (info.modified));
entry.put ("block_count", std::to_string (info.block_count));
if (representative)
{
entry.put ("representative", info.representative.to_account ());
}
if (weight)
{
auto account_weight (node.ledger.weight (account));
entry.put ("weight", account_weight.convert_to<std::string> ());
}
if (pending)
{
auto account_pending (node.ledger.account_pending (block_transaction, account));
entry.put ("pending", account_pending.convert_to<std::string> ());
}
accounts.push_back (std::make_pair (account.to_account (), entry));
}
}
}
response_l.add_child ("accounts", accounts);
}
response_errors ();
}
void nano::json_handler::wallet_lock ()
{
auto wallet (wallet_impl ());
if (!ec)
{
nano::raw_key empty;
empty.data.clear ();
wallet->store.password.value_set (empty);
response_l.put ("locked", "1");
node.logger.try_log ("Wallet locked");
}
response_errors ();
}
void nano::json_handler::wallet_pending ()
{
auto wallet (wallet_impl ());
auto count (count_optional_impl ());
auto threshold (threshold_optional_impl ());
const bool source = request.get<bool> ("source", false);
const bool min_version = request.get<bool> ("min_version", false);
const bool include_active = request.get<bool> ("include_active", false);
const bool include_only_confirmed = request.get<bool> ("include_only_confirmed", false);
if (!ec)
{
boost::property_tree::ptree pending;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
boost::property_tree::ptree peers_l;
for (auto ii (node.store.pending_begin (block_transaction, nano::pending_key (account, 0))); nano::pending_key (ii->first).account == account && peers_l.size () < count; ++ii)
{
nano::pending_key key (ii->first);
if (block_confirmed (node, block_transaction, key.hash, include_active, include_only_confirmed))
{
if (threshold.is_zero () && !source)
{
boost::property_tree::ptree entry;
entry.put ("", key.hash.to_string ());
peers_l.push_back (std::make_pair ("", entry));
}
else
{
nano::pending_info info (ii->second);
if (info.amount.number () >= threshold.number ())
{
if (source || min_version)
{
boost::property_tree::ptree pending_tree;
pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
if (source)
{
pending_tree.put ("source", info.source.to_account ());
}
if (min_version)
{
pending_tree.put ("min_version", epoch_as_string (info.epoch));
}
peers_l.add_child (key.hash.to_string (), pending_tree);
}
else
{
peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
}
}
}
}
}
if (!peers_l.empty ())
{
pending.add_child (account.to_account (), peers_l);
}
}
response_l.add_child ("blocks", pending);
}
response_errors ();
}
void nano::json_handler::wallet_representative ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
response_l.put ("representative", wallet->store.representative (transaction).to_account ());
}
response_errors ();
}
void nano::json_handler::wallet_representative_set ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
std::string representative_text (rpc_l->request.get<std::string> ("representative"));
auto representative (rpc_l->account_impl (representative_text, nano::error_rpc::bad_representative_number));
if (!rpc_l->ec)
{
bool update_existing_accounts (rpc_l->request.get<bool> ("update_existing_accounts", false));
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
if (wallet->store.valid_password (transaction) || !update_existing_accounts)
{
wallet->store.representative_set (transaction, representative);
rpc_l->response_l.put ("set", "1");
}
else
{
rpc_l->ec = nano::error_common::wallet_locked;
}
}
// Change representative for all wallet accounts
if (!rpc_l->ec && update_existing_accounts)
{
std::vector<nano::account> accounts;
{
auto transaction (rpc_l->node.wallets.tx_begin_read ());
auto block_transaction (rpc_l->node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
nano::account_info info;
if (!rpc_l->node.store.account_get (block_transaction, account, info))
{
if (info.representative != representative)
{
accounts.push_back (account);
}
}
}
}
for (auto & account : accounts)
{
// clang-format off
wallet->change_async(account, representative, [](std::shared_ptr<nano::block>) {}, 0, false);
// clang-format on
}
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::wallet_republish ()
{
auto wallet (wallet_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree blocks;
std::deque<std::shared_ptr<nano::block>> republish_bundle;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
std::shared_ptr<nano::block> block;
std::vector<nano::block_hash> hashes;
while (!latest.is_zero () && hashes.size () < count)
{
hashes.push_back (latest);
block = node.store.block_get (block_transaction, latest);
latest = block->previous ();
}
std::reverse (hashes.begin (), hashes.end ());
for (auto & hash : hashes)
{
block = node.store.block_get (block_transaction, hash);
republish_bundle.push_back (std::move (block));
boost::property_tree::ptree entry;
entry.put ("", hash.to_string ());
blocks.push_back (std::make_pair ("", entry));
}
}
node.network.flood_block_many (std::move (republish_bundle), nullptr, 25);
response_l.add_child ("blocks", blocks);
}
response_errors ();
}
void nano::json_handler::wallet_seed ()
{
auto wallet (wallet_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
if (wallet->store.valid_password (transaction))
{
nano::raw_key seed;
wallet->store.seed (seed, transaction);
response_l.put ("seed", seed.data.to_string ());
}
else
{
ec = nano::error_common::wallet_locked;
}
}
response_errors ();
}
void nano::json_handler::wallet_work_get ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree works;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account const & account (i->first);
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
works.put (account.to_account (), nano::to_string_hex (work));
}
response_l.add_child ("works", works);
}
response_errors ();
}
void nano::json_handler::work_generate ()
{
boost::optional<nano::account> account;
auto account_opt (request.get_optional<std::string> ("account"));
if (account_opt.is_initialized ())
{
account = account_impl (account_opt.get ());
}
if (!ec)
{
auto hash (hash_impl ());
auto difficulty (difficulty_optional_impl ());
multiplier_optional_impl (difficulty);
if (!ec && (difficulty > node.config.max_work_generate_difficulty || difficulty < node.network_params.network.publish_threshold))
{
ec = nano::error_rpc::difficulty_limit;
}
if (!ec)
{
auto use_peers (request.get<bool> ("use_peers", false));
auto rpc_l (shared_from_this ());
auto callback = [rpc_l, hash, this](boost::optional<uint64_t> const & work_a) {
if (work_a)
{
boost::property_tree::ptree response_l;
response_l.put ("hash", hash.to_string ());
uint64_t work (work_a.value ());
response_l.put ("work", nano::to_string_hex (work));
std::stringstream ostream;
uint64_t result_difficulty;
nano::work_validate (hash, work, &result_difficulty);
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, this->node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (result_multiplier));
boost::property_tree::write_json (ostream, response_l);
rpc_l->response (ostream.str ());
}
else
{
json_error_response (rpc_l->response, "Cancelled");
}
};
if (!use_peers)
{
if (node.local_work_generation_enabled ())
{
node.work.generate (hash, callback, difficulty);
}
else
{
ec = nano::error_common::disabled_local_work_generation;
}
}
else
{
if (!account_opt.is_initialized ())
{
// Fetch account from block if not given
auto transaction_l (node.store.tx_begin_read ());
if (node.store.block_exists (transaction_l, hash))
{
account = node.store.block_account (transaction_l, hash);
}
}
auto secondary_work_peers_l (request.get<bool> ("secondary_work_peers", false));
auto const & peers_l (secondary_work_peers_l ? node.config.secondary_work_peers : node.config.work_peers);
if (node.work_generation_enabled (peers_l))
{
node.work_generate (hash, callback, difficulty, account, secondary_work_peers_l);
}
else
{
ec = nano::error_common::disabled_work_generation;
}
}
}
}
	// On success the response is sent from the work generation callback, so only report errors here
if (ec)
{
response_errors ();
}
}
void nano::json_handler::work_cancel ()
{
auto hash (hash_impl ());
if (!ec)
{
node.observers.work_cancel.notify (hash);
}
response_errors ();
}
void nano::json_handler::work_get ()
{
auto wallet (wallet_impl ());
auto account (account_impl ());
if (!ec)
{
auto transaction (node.wallets.tx_begin_read ());
wallet_account_impl (transaction, wallet, account);
if (!ec)
{
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
response_l.put ("work", nano::to_string_hex (work));
}
}
response_errors ();
}
void nano::json_handler::work_set ()
{
auto rpc_l (shared_from_this ());
node.worker.push_task ([rpc_l]() {
auto wallet (rpc_l->wallet_impl ());
auto account (rpc_l->account_impl ());
auto work (rpc_l->work_optional_impl ());
if (!rpc_l->ec)
{
auto transaction (rpc_l->node.wallets.tx_begin_write ());
rpc_l->wallet_account_impl (transaction, wallet, account);
if (!rpc_l->ec)
{
wallet->store.work_put (transaction, account, work);
rpc_l->response_l.put ("success", "");
}
}
rpc_l->response_errors ();
});
}
void nano::json_handler::work_validate ()
{
auto hash (hash_impl ());
auto work (work_optional_impl ());
auto difficulty (difficulty_optional_impl ());
multiplier_optional_impl (difficulty);
if (!ec)
{
uint64_t result_difficulty (0);
nano::work_validate (hash, work, &result_difficulty);
response_l.put ("valid", (result_difficulty >= difficulty) ? "1" : "0");
response_l.put ("difficulty", nano::to_string_hex (result_difficulty));
auto result_multiplier = nano::difficulty::to_multiplier (result_difficulty, node.network_params.network.publish_threshold);
response_l.put ("multiplier", nano::to_string (result_multiplier));
}
response_errors ();
}
void nano::json_handler::work_peer_add ()
{
std::string address_text = request.get<std::string> ("address");
std::string port_text = request.get<std::string> ("port");
uint16_t port;
if (!nano::parse_port (port_text, port))
{
node.config.work_peers.push_back (std::make_pair (address_text, port));
response_l.put ("success", "");
}
else
{
ec = nano::error_common::invalid_port;
}
response_errors ();
}
void nano::json_handler::work_peers ()
{
boost::property_tree::ptree work_peers_l;
for (auto i (node.config.work_peers.begin ()), n (node.config.work_peers.end ()); i != n; ++i)
{
boost::property_tree::ptree entry;
entry.put ("", boost::str (boost::format ("%1%:%2%") % i->first % i->second));
work_peers_l.push_back (std::make_pair ("", entry));
}
response_l.add_child ("work_peers", work_peers_l);
response_errors ();
}
void nano::json_handler::work_peers_clear ()
{
node.config.work_peers.clear ();
response_l.put ("success", "");
response_errors ();
}
namespace
{
void construct_json (nano::seq_con_info_component * component, boost::property_tree::ptree & parent)
{
	// We are a leaf node: record its count and size under its name in the parent tree, then return
if (!component->is_composite ())
{
auto & leaf_info = static_cast<nano::seq_con_info_leaf *> (component)->get_info ();
boost::property_tree::ptree child;
child.put ("count", leaf_info.count);
child.put ("size", leaf_info.count * leaf_info.sizeof_element);
parent.add_child (leaf_info.name, child);
return;
}
auto composite = static_cast<nano::seq_con_info_composite *> (component);
boost::property_tree::ptree current;
for (auto & child : composite->get_children ())
{
construct_json (child.get (), current);
}
parent.add_child (composite->get_name (), current);
}
// Any RPC handlers which require no arguments (excluding default arguments) should go here.
// This prevents large if/else chains, for which compilers can have limits (MSVC, for instance, allows 128).
ipc_json_handler_no_arg_func_map create_ipc_json_handler_no_arg_func_map ()
{
ipc_json_handler_no_arg_func_map no_arg_funcs;
no_arg_funcs.emplace ("account_balance", &nano::json_handler::account_balance);
no_arg_funcs.emplace ("account_block_count", &nano::json_handler::account_block_count);
no_arg_funcs.emplace ("account_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("account_create", &nano::json_handler::account_create);
no_arg_funcs.emplace ("account_get", &nano::json_handler::account_get);
no_arg_funcs.emplace ("account_history", &nano::json_handler::account_history);
no_arg_funcs.emplace ("account_info", &nano::json_handler::account_info);
no_arg_funcs.emplace ("account_key", &nano::json_handler::account_key);
no_arg_funcs.emplace ("account_list", &nano::json_handler::account_list);
no_arg_funcs.emplace ("account_move", &nano::json_handler::account_move);
no_arg_funcs.emplace ("account_remove", &nano::json_handler::account_remove);
no_arg_funcs.emplace ("account_representative", &nano::json_handler::account_representative);
no_arg_funcs.emplace ("account_representative_set", &nano::json_handler::account_representative_set);
no_arg_funcs.emplace ("account_weight", &nano::json_handler::account_weight);
no_arg_funcs.emplace ("accounts_balances", &nano::json_handler::accounts_balances);
no_arg_funcs.emplace ("accounts_create", &nano::json_handler::accounts_create);
no_arg_funcs.emplace ("accounts_frontiers", &nano::json_handler::accounts_frontiers);
no_arg_funcs.emplace ("accounts_pending", &nano::json_handler::accounts_pending);
no_arg_funcs.emplace ("active_difficulty", &nano::json_handler::active_difficulty);
no_arg_funcs.emplace ("available_supply", &nano::json_handler::available_supply);
no_arg_funcs.emplace ("block_info", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block", &nano::json_handler::block_info);
no_arg_funcs.emplace ("block_confirm", &nano::json_handler::block_confirm);
no_arg_funcs.emplace ("blocks", &nano::json_handler::blocks);
no_arg_funcs.emplace ("blocks_info", &nano::json_handler::blocks_info);
no_arg_funcs.emplace ("block_account", &nano::json_handler::block_account);
no_arg_funcs.emplace ("block_count", &nano::json_handler::block_count);
no_arg_funcs.emplace ("block_count_type", &nano::json_handler::block_count_type);
no_arg_funcs.emplace ("block_create", &nano::json_handler::block_create);
no_arg_funcs.emplace ("block_hash", &nano::json_handler::block_hash);
no_arg_funcs.emplace ("bootstrap", &nano::json_handler::bootstrap);
no_arg_funcs.emplace ("bootstrap_any", &nano::json_handler::bootstrap_any);
no_arg_funcs.emplace ("bootstrap_lazy", &nano::json_handler::bootstrap_lazy);
no_arg_funcs.emplace ("bootstrap_status", &nano::json_handler::bootstrap_status);
no_arg_funcs.emplace ("confirmation_active", &nano::json_handler::confirmation_active);
no_arg_funcs.emplace ("confirmation_height_currently_processing", &nano::json_handler::confirmation_height_currently_processing);
no_arg_funcs.emplace ("confirmation_history", &nano::json_handler::confirmation_history);
no_arg_funcs.emplace ("confirmation_info", &nano::json_handler::confirmation_info);
no_arg_funcs.emplace ("confirmation_quorum", &nano::json_handler::confirmation_quorum);
no_arg_funcs.emplace ("database_txn_tracker", &nano::json_handler::database_txn_tracker);
no_arg_funcs.emplace ("delegators", &nano::json_handler::delegators);
no_arg_funcs.emplace ("delegators_count", &nano::json_handler::delegators_count);
no_arg_funcs.emplace ("deterministic_key", &nano::json_handler::deterministic_key);
no_arg_funcs.emplace ("epoch_upgrade", &nano::json_handler::epoch_upgrade);
no_arg_funcs.emplace ("frontiers", &nano::json_handler::frontiers);
no_arg_funcs.emplace ("frontier_count", &nano::json_handler::account_count);
no_arg_funcs.emplace ("keepalive", &nano::json_handler::keepalive);
no_arg_funcs.emplace ("key_create", &nano::json_handler::key_create);
no_arg_funcs.emplace ("key_expand", &nano::json_handler::key_expand);
no_arg_funcs.emplace ("ledger", &nano::json_handler::ledger);
no_arg_funcs.emplace ("node_id", &nano::json_handler::node_id);
no_arg_funcs.emplace ("node_id_delete", &nano::json_handler::node_id_delete);
no_arg_funcs.emplace ("password_change", &nano::json_handler::password_change);
no_arg_funcs.emplace ("password_enter", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("wallet_unlock", &nano::json_handler::password_enter);
no_arg_funcs.emplace ("payment_begin", &nano::json_handler::payment_begin);
no_arg_funcs.emplace ("payment_init", &nano::json_handler::payment_init);
no_arg_funcs.emplace ("payment_end", &nano::json_handler::payment_end);
no_arg_funcs.emplace ("payment_wait", &nano::json_handler::payment_wait);
no_arg_funcs.emplace ("peers", &nano::json_handler::peers);
no_arg_funcs.emplace ("pending", &nano::json_handler::pending);
no_arg_funcs.emplace ("pending_exists", &nano::json_handler::pending_exists);
no_arg_funcs.emplace ("process", &nano::json_handler::process);
no_arg_funcs.emplace ("receive", &nano::json_handler::receive);
no_arg_funcs.emplace ("receive_minimum", &nano::json_handler::receive_minimum);
no_arg_funcs.emplace ("receive_minimum_set", &nano::json_handler::receive_minimum_set);
no_arg_funcs.emplace ("representatives", &nano::json_handler::representatives);
no_arg_funcs.emplace ("representatives_online", &nano::json_handler::representatives_online);
no_arg_funcs.emplace ("republish", &nano::json_handler::republish);
no_arg_funcs.emplace ("search_pending", &nano::json_handler::search_pending);
no_arg_funcs.emplace ("search_pending_all", &nano::json_handler::search_pending_all);
no_arg_funcs.emplace ("send", &nano::json_handler::send);
no_arg_funcs.emplace ("sign", &nano::json_handler::sign);
no_arg_funcs.emplace ("stats", &nano::json_handler::stats);
no_arg_funcs.emplace ("stats_clear", &nano::json_handler::stats_clear);
no_arg_funcs.emplace ("stop", &nano::json_handler::stop);
no_arg_funcs.emplace ("unchecked", &nano::json_handler::unchecked);
no_arg_funcs.emplace ("unchecked_clear", &nano::json_handler::unchecked_clear);
no_arg_funcs.emplace ("unchecked_get", &nano::json_handler::unchecked_get);
no_arg_funcs.emplace ("unchecked_keys", &nano::json_handler::unchecked_keys);
no_arg_funcs.emplace ("unopened", &nano::json_handler::unopened);
no_arg_funcs.emplace ("uptime", &nano::json_handler::uptime);
no_arg_funcs.emplace ("validate_account_number", &nano::json_handler::validate_account_number);
no_arg_funcs.emplace ("version", &nano::json_handler::version);
no_arg_funcs.emplace ("wallet_add", &nano::json_handler::wallet_add);
no_arg_funcs.emplace ("wallet_add_watch", &nano::json_handler::wallet_add_watch);
no_arg_funcs.emplace ("wallet_balances", &nano::json_handler::wallet_balances);
no_arg_funcs.emplace ("wallet_change_seed", &nano::json_handler::wallet_change_seed);
no_arg_funcs.emplace ("wallet_contains", &nano::json_handler::wallet_contains);
no_arg_funcs.emplace ("wallet_create", &nano::json_handler::wallet_create);
no_arg_funcs.emplace ("wallet_destroy", &nano::json_handler::wallet_destroy);
no_arg_funcs.emplace ("wallet_export", &nano::json_handler::wallet_export);
no_arg_funcs.emplace ("wallet_frontiers", &nano::json_handler::wallet_frontiers);
no_arg_funcs.emplace ("wallet_history", &nano::json_handler::wallet_history);
no_arg_funcs.emplace ("wallet_info", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_balance_total", &nano::json_handler::wallet_info);
no_arg_funcs.emplace ("wallet_key_valid", &nano::json_handler::wallet_key_valid);
no_arg_funcs.emplace ("wallet_ledger", &nano::json_handler::wallet_ledger);
no_arg_funcs.emplace ("wallet_lock", &nano::json_handler::wallet_lock);
no_arg_funcs.emplace ("wallet_pending", &nano::json_handler::wallet_pending);
no_arg_funcs.emplace ("wallet_representative", &nano::json_handler::wallet_representative);
no_arg_funcs.emplace ("wallet_representative_set", &nano::json_handler::wallet_representative_set);
no_arg_funcs.emplace ("wallet_republish", &nano::json_handler::wallet_republish);
no_arg_funcs.emplace ("wallet_work_get", &nano::json_handler::wallet_work_get);
no_arg_funcs.emplace ("work_generate", &nano::json_handler::work_generate);
no_arg_funcs.emplace ("work_cancel", &nano::json_handler::work_cancel);
no_arg_funcs.emplace ("work_get", &nano::json_handler::work_get);
no_arg_funcs.emplace ("work_set", &nano::json_handler::work_set);
no_arg_funcs.emplace ("work_validate", &nano::json_handler::work_validate);
no_arg_funcs.emplace ("work_peer_add", &nano::json_handler::work_peer_add);
no_arg_funcs.emplace ("work_peers", &nano::json_handler::work_peers);
no_arg_funcs.emplace ("work_peers_clear", &nano::json_handler::work_peers_clear);
return no_arg_funcs;
}
/** Due to the asynchronous nature of updating confirmation heights, it can also be necessary to check active roots */
bool block_confirmed (nano::node & node, nano::transaction & transaction, nano::block_hash const & hash, bool include_active, bool include_only_confirmed)
{
bool is_confirmed = false;
if (include_active && !include_only_confirmed)
{
is_confirmed = true;
}
// Check whether the confirmation height is set
else if (node.ledger.block_confirmed (transaction, hash))
{
is_confirmed = true;
}
// This just checks it's not currently undergoing an active transaction
else if (!include_only_confirmed)
{
auto block (node.store.block_get (transaction, hash));
is_confirmed = (block != nullptr && !node.active.active (*block));
}
return is_confirmed;
}
const char * epoch_as_string (nano::epoch epoch)
{
switch (epoch)
{
case nano::epoch::epoch_2:
return "2";
case nano::epoch::epoch_1:
return "1";
default:
return "0";
}
}
}
| 1 | 15,961 | I think `bypass_frontier_confirmation` conveys the intention better (default false as well) | nanocurrency-nano-node | cpp |
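The review message in the row above suggests renaming the boolean option to `bypass_frontier_confirmation`, defaulting to false. A minimal, hypothetical sketch of how such an option could be read in the request-parsing style used elsewhere in this handler file; the option name and the branch body are assumptions taken from the review comment, not code from the file:

// Hypothetical sketch, not part of the nano source above: read the suggested
// option with a false default, mirroring other request.get<bool> calls in this file.
const bool bypass_frontier_confirmation = request.get<bool> ("bypass_frontier_confirmation", false);
if (!ec && bypass_frontier_confirmation)
{
	// Skip waiting for frontier confirmation before continuing (behaviour proposed in the review, not implemented above)
}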
@@ -5,12 +5,9 @@ import (
"fmt"
"testing"
- "github.com/ipfs/go-cid"
- "github.com/ipfs/go-hamt-ipld"
- "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
+ "github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/core" | 1 | package core_test
import (
"context"
"fmt"
"testing"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/core"
th "github.com/filecoin-project/go-filecoin/testhelpers"
tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags"
"github.com/filecoin-project/go-filecoin/types"
)
func TestUpdateMessagePool(t *testing.T) {
tf.UnitTest(t)
ctx := context.Background()
type msgs []*types.SignedMessage
type msgsSet [][]*types.SignedMessage
var mockSigner, _ = types.NewMockSignersAndKeyInfo(10)
t.Run("Replace head", func(t *testing.T) {
// Msg pool: [m0, m1], Chain: b[]
// to
// Msg pool: [m0], Chain: b[m1]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(2, mockSigner)
mustAdd(ib, m[0], m[1])
blk := types.Block{Height: 0}
parent := th.MustNewTipSet(&blk)
oldChain := core.NewChainWithMessages(store, parent, msgsSet{})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, parent, msgsSet{msgs{m[1]}})
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[0])
})
t.Run("Replace head with self", func(t *testing.T) {
// Msg pool: [m0, m1], Chain: b[m2]
// to
// Msg pool: [m0, m1], Chain: b[m2]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(3, mockSigner)
mustAdd(ib, m[0], m[1])
oldChain := core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{m[2]}})
oldTipSet := headOf(oldChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, oldTipSet)) // sic
assertPoolEquals(t, p, m[0], m[1])
})
t.Run("Replace head with a long chain", func(t *testing.T) {
// Msg pool: [m2, m5], Chain: b[m0, m1]
// to
// Msg pool: [m1], Chain: b[m2, m3] -> b[m4] -> b[m0] -> b[] -> b[m5, m6]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(7, mockSigner)
mustAdd(ib, m[2], m[5])
blk := types.Block{Height: 0}
parent := th.MustNewTipSet(&blk)
oldChain := core.NewChainWithMessages(store, parent, msgsSet{msgs{m[0], m[1]}})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, parent,
msgsSet{msgs{m[2], m[3]}},
msgsSet{msgs{m[4]}},
msgsSet{msgs{m[0]}},
msgsSet{msgs{}},
msgsSet{msgs{m[5], m[6]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[1])
})
t.Run("Replace head with multi-block tipset chains", func(t *testing.T) {
// Msg pool: [m2, m5], Chain: {b[m0], b[m1]}
// to
// Msg pool: [m1], Chain: b[m2, m3] -> {b[m4], b[m0], b[], b[]} -> {b[], b[m6,m5]}
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(7, mockSigner)
mustAdd(ib, m[2], m[5])
blk := types.Block{Height: 0}
parent := th.MustNewTipSet(&blk)
oldChain := core.NewChainWithMessages(store, parent, msgsSet{msgs{m[0]}, msgs{m[1]}})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, parent,
msgsSet{msgs{m[2], m[3]}},
msgsSet{msgs{m[4]}, msgs{m[0]}, msgs{}, msgs{}},
msgsSet{msgs{}, msgs{m[5], m[6]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[1])
})
t.Run("Replace internal node (second one)", func(t *testing.T) {
// Msg pool: [m3, m5], Chain: b[m0] -> b[m1] -> b[m2]
// to
// Msg pool: [m1, m2], Chain: b[m0] -> b[m3] -> b[m4, m5]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(6, mockSigner)
mustAdd(ib, m[3], m[5])
oldChain := core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{m[0]}}, msgsSet{msgs{m[1]}}, msgsSet{msgs{m[2]}})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, oldChain[0], msgsSet{msgs{m[3]}}, msgsSet{msgs{m[4], m[5]}})
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[1], m[2])
})
t.Run("Replace internal node (second one) with a long chain", func(t *testing.T) {
// Msg pool: [m6], Chain: b[m0] -> b[m1] -> b[m2]
// to
// Msg pool: [m6], Chain: b[m0] -> b[m3] -> b[m4] -> b[m5] -> b[m1, m2]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(7, mockSigner)
mustAdd(ib, m[6])
oldChain := core.NewChainWithMessages(store, types.TipSet{},
msgsSet{msgs{m[0]}},
msgsSet{msgs{m[1]}},
msgsSet{msgs{m[2]}},
)
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, oldChain[0],
msgsSet{msgs{m[3]}},
msgsSet{msgs{m[4]}},
msgsSet{msgs{m[5]}},
msgsSet{msgs{m[1], m[2]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[6])
})
t.Run("Replace internal node with multi-block tipset chains", func(t *testing.T) {
// Msg pool: [m6], Chain: {b[m0], b[m1]} -> b[m2]
// to
// Msg pool: [m6], Chain: {b[m0], b[m1]} -> b[m3] -> b[m4] -> {b[m5], b[m1, m2]}
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(7, mockSigner)
mustAdd(ib, m[6])
oldChain := core.NewChainWithMessages(store, types.TipSet{},
msgsSet{msgs{m[0]}, msgs{m[1]}},
msgsSet{msgs{m[2]}},
)
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, oldChain[0],
msgsSet{msgs{m[3]}},
msgsSet{msgs{m[4]}},
msgsSet{msgs{m[5]}, msgs{m[1], m[2]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[6])
})
t.Run("Replace with same messages in different block structure", func(t *testing.T) {
// Msg pool: [m3, m5], Chain: b[m0] -> b[m1] -> b[m2]
// to
// Msg pool: [m3, m5], Chain: {b[m0], b[m1], b[m2]}
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(6, mockSigner)
mustAdd(ib, m[3], m[5])
blk := types.Block{Height: 0}
parent := th.MustNewTipSet(&blk)
oldChain := core.NewChainWithMessages(store, parent,
msgsSet{msgs{m[0]}},
msgsSet{msgs{m[1]}},
msgsSet{msgs{m[2]}},
)
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, parent,
msgsSet{msgs{m[0]}, msgs{m[1]}, msgs{m[2]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[3], m[5])
})
t.Run("Truncate to internal node", func(t *testing.T) {
// Msg pool: [], Chain: b[m0] -> b[m1] -> b[m2] -> b[m3]
// to
// Msg pool: [m2, m3], Chain: b[m0] -> b[m1]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(4, mockSigner)
oldChain := core.NewChainWithMessages(store, types.TipSet{},
msgsSet{msgs{m[0]}},
msgsSet{msgs{m[1]}},
msgsSet{msgs{m[2]}},
msgsSet{msgs{m[3]}},
)
oldTipSet := headOf(oldChain)
oldTipSetPrev := oldChain[1]
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, oldTipSetPrev))
assertPoolEquals(t, p, m[2], m[3])
})
t.Run("Extend head", func(t *testing.T) {
// Msg pool: [m0, m1], Chain: b[]
// to
// Msg pool: [m0], Chain: b[] -> b[m1, m2]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(3, mockSigner)
mustAdd(ib, m[0], m[1])
oldChain := core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{}})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, oldChain[len(oldChain)-1], msgsSet{msgs{m[1], m[2]}})
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p, m[0])
})
t.Run("Extend head with a longer chain and more messages", func(t *testing.T) {
// Msg pool: [m2, m5], Chain: b[m0] -> b[m1]
// to
// Msg pool: [], Chain: b[m0] -> b[m1] -> b[m2, m3] -> b[m4] -> b[m5, m6]
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
ib := core.NewInbox(p, 10, chainProvider)
m := types.NewSignedMsgs(7, mockSigner)
mustAdd(ib, m[2], m[5])
oldChain := core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{m[0]}}, msgsSet{msgs{m[1]}})
oldTipSet := headOf(oldChain)
newChain := core.NewChainWithMessages(store, oldChain[1],
msgsSet{msgs{m[2], m[3]}},
msgsSet{msgs{m[4]}},
msgsSet{msgs{m[5], m[6]}},
)
newTipSet := headOf(newChain)
assert.NoError(t, ib.HandleNewHead(ctx, oldTipSet, newTipSet))
assertPoolEquals(t, p)
})
t.Run("Times out old messages", func(t *testing.T) {
var err error
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
maxAge := uint(10)
ib := core.NewInbox(p, maxAge, chainProvider)
m := types.NewSignedMsgs(maxAge, mockSigner)
head := headOf(core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{}}))
// Add a message at each block height until maxAge is reached
for i := uint(0); i < maxAge; i++ {
			// chainProvider.height determines the block height at which the message is added
chainProvider.height, err = head.Height()
require.NoError(t, err)
mustAdd(ib, m[i])
// update pool with tipset that has no messages
next := headOf(core.NewChainWithMessages(store, head, msgsSet{msgs{}}))
assert.NoError(t, ib.HandleNewHead(ctx, head, next))
// assert all added messages still in pool
assertPoolEquals(t, p, m[:i+1]...)
head = next
}
// next tipset times out first message only
next := headOf(core.NewChainWithMessages(store, head, msgsSet{msgs{}}))
assert.NoError(t, ib.HandleNewHead(ctx, head, next))
assertPoolEquals(t, p, m[1:]...)
// adding a chain of multiple tipsets times out based on final state
for i := 0; i < 4; i++ {
next = headOf(core.NewChainWithMessages(store, next, msgsSet{msgs{}}))
}
assert.NoError(t, ib.HandleNewHead(ctx, head, next))
assertPoolEquals(t, p, m[5:]...)
})
t.Run("Message timeout is unaffected by null tipsets", func(t *testing.T) {
var err error
store, chainProvider := newStoreAndProvider(0)
p := core.NewMessagePool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator())
maxAge := uint(10)
ib := core.NewInbox(p, maxAge, chainProvider)
m := types.NewSignedMsgs(maxAge, mockSigner)
head := headOf(core.NewChainWithMessages(store, types.TipSet{}, msgsSet{msgs{}}))
// Add a message at each block height until maxAge is reached
for i := uint(0); i < maxAge; i++ {
			// chainProvider.height determines the block height at which the message is added
chainProvider.height, err = head.Height()
require.NoError(t, err)
mustAdd(ib, m[i])
// update pool with tipset that has no messages
height, err := head.Height()
require.NoError(t, err)
// create a tipset at given height with one block containing no messages
nextHeight := types.Uint64(height + 5) // simulate 4 null blocks
blk := &types.Block{
Height: nextHeight,
Parents: head.Key(),
}
core.MustPut(store, blk)
next := th.MustNewTipSet(blk)
assert.NoError(t, ib.HandleNewHead(ctx, head, next))
// assert all added messages still in pool
assertPoolEquals(t, p, m[:i+1]...)
head = next
}
// next tipset times out first message only
next := headOf(core.NewChainWithMessages(store, head, msgsSet{msgs{}}))
assert.NoError(t, ib.HandleNewHead(ctx, head, next))
assertPoolEquals(t, p, m[1:]...)
})
}
func newStoreAndProvider(height uint64) (*hamt.CborIpldStore, *fakeChainProvider) {
store := hamt.NewCborStore()
return store, &fakeChainProvider{height, store}
}
type fakeChainProvider struct {
height uint64
store *hamt.CborIpldStore
}
func (p *fakeChainProvider) GetBlock(ctx context.Context, cid cid.Cid) (*types.Block, error) {
var blk types.Block
if err := p.store.Get(ctx, cid, &blk); err != nil {
return nil, errors.Wrapf(err, "failed to get block %s", cid)
}
return &blk, nil
}
func (p *fakeChainProvider) GetTipSet(tsKey types.TipSetKey) (types.TipSet, error) {
ctx := context.TODO() // Should GetTipSet require a context everywhere?
return chain.LoadTipSetBlocks(ctx, p, tsKey)
}
func (p *fakeChainProvider) BlockHeight() (uint64, error) {
return p.height, nil
}
func mustAdd(ib *core.Inbox, msgs ...*types.SignedMessage) {
ctx := context.Background()
for _, m := range msgs {
if _, err := ib.Add(ctx, m); err != nil {
panic(err)
}
}
}
func msgAsString(msg *types.SignedMessage) string {
// When using NewMessageForTestGetter msg.Method is set
// to "msgN" so we print that (it will correspond
// to a variable of the same name in the tests
// below).
return msg.Message.Method
}
func msgsAsString(msgs []*types.SignedMessage) string {
s := ""
for _, m := range msgs {
s = fmt.Sprintf("%s%s ", s, msgAsString(m))
}
return "[" + s + "]"
}
// assertPoolEquals asserts that p contains exactly the expected messages.
func assertPoolEquals(t *testing.T, p *core.MessagePool, expMsgs ...*types.SignedMessage) {
msgs := p.Pending()
if len(msgs) != len(expMsgs) {
assert.Failf(t, "wrong messages in pool", "expMsgs %v, got msgs %v", msgsAsString(expMsgs), msgsAsString(msgs))
}
for _, m1 := range expMsgs {
found := false
for _, m2 := range msgs {
if types.SmsgCidsEqual(m1, m2) {
found = true
break
}
}
if !found {
assert.Failf(t, "wrong messages in pool", "expMsgs %v, got msgs %v (msgs doesn't contain %v)", msgsAsString(expMsgs), msgsAsString(msgs), msgAsString(m1))
}
}
}
func headOf(chain []types.TipSet) types.TipSet {
return chain[len(chain)-1]
}
| 1 | 20,257 | This looks like it goes beyond the scope of removing `BlockHeight()`, in the future please do this in a separate commit at a minimum -- separate PR is fine too. | filecoin-project-venus | go |
@@ -666,9 +666,10 @@ class AbstractAudio(QObject):
muted_changed = pyqtSignal(bool)
recently_audible_changed = pyqtSignal(bool)
- def __init__(self, parent=None):
+ def __init__(self, tab, parent=None):
super().__init__(parent)
self._widget = None
+ self._tab = tab
def set_muted(self, muted: bool):
"""Set this tab as muted or not.""" | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a wrapper over QWebView/QWebEngineView."""
import enum
import itertools
import attr
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget, QApplication, QDialog
from PyQt5.QtPrintSupport import QPrintDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.keyinput import modeman
from qutebrowser.config import config
from qutebrowser.utils import (utils, objreg, usertypes, log, qtutils,
urlutils, message)
from qutebrowser.misc import miscwidgets, objects
from qutebrowser.browser import mouse, hints
from qutebrowser.qt import sip
tab_id_gen = itertools.count(0)
def create(win_id, private, parent=None):
"""Get a QtWebKit/QtWebEngine tab object.
Args:
win_id: The window ID where the tab will be shown.
private: Whether the tab is a private/off the record tab.
parent: The Qt parent to set.
"""
# Importing modules here so we don't depend on QtWebEngine without the
# argument and to avoid circular imports.
mode_manager = modeman.instance(win_id)
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
tab_class = webenginetab.WebEngineTab
else:
from qutebrowser.browser.webkit import webkittab
tab_class = webkittab.WebKitTab
return tab_class(win_id=win_id, mode_manager=mode_manager, private=private,
parent=parent)
def init():
"""Initialize backend-specific modules."""
if objects.backend == usertypes.Backend.QtWebEngine:
from qutebrowser.browser.webengine import webenginetab
webenginetab.init()
class WebTabError(Exception):
"""Base class for various errors."""
class UnsupportedOperationError(WebTabError):
"""Raised when an operation is not supported with the given backend."""
TerminationStatus = enum.Enum('TerminationStatus', [
'normal',
'abnormal', # non-zero exit status
'crashed', # e.g. segfault
'killed',
'unknown',
])
@attr.s
class TabData:
"""A simple namespace with a fixed set of attributes.
Attributes:
keep_icon: Whether the (e.g. cloned) icon should not be cleared on page
load.
inspector: The QWebInspector used for this webview.
viewing_source: Set if we're currently showing a source view.
Only used when sources are shown via pygments.
open_target: Where to open the next link.
Only used for QtWebKit.
override_target: Override for open_target for fake clicks (like hints).
Only used for QtWebKit.
pinned: Flag to pin the tab.
fullscreen: Whether the tab has a video shown fullscreen currently.
netrc_used: Whether netrc authentication was performed.
input_mode: current input mode for the tab.
"""
keep_icon = attr.ib(False)
viewing_source = attr.ib(False)
inspector = attr.ib(None)
open_target = attr.ib(usertypes.ClickTarget.normal)
override_target = attr.ib(None)
pinned = attr.ib(False)
fullscreen = attr.ib(False)
netrc_used = attr.ib(False)
input_mode = attr.ib(usertypes.KeyMode.normal)
def should_show_icon(self):
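        # Note: `and` binds tighter than `or`, so this evaluates as
        # show == 'always' or (show == 'pinned' and self.pinned).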
return (config.val.tabs.favicons.show == 'always' or
config.val.tabs.favicons.show == 'pinned' and self.pinned)
class AbstractAction:
"""Attribute of AbstractTab for Qt WebActions.
Class attributes (overridden by subclasses):
action_class: The class actions are defined on (QWeb{Engine,}Page)
action_base: The type of the actions (QWeb{Engine,}Page.WebAction)
"""
action_class = None
action_base = None
def __init__(self, tab):
self._widget = None
self._tab = tab
def exit_fullscreen(self):
"""Exit the fullscreen mode."""
raise NotImplementedError
def save_page(self):
"""Save the current page."""
raise NotImplementedError
def run_string(self, name):
"""Run a webaction based on its name."""
member = getattr(self.action_class, name, None)
if not isinstance(member, self.action_base):
raise WebTabError("{} is not a valid web action!".format(name))
self._widget.triggerPageAction(member)
def show_source(self,
pygments=False): # pylint: disable=redefined-outer-name
"""Show the source of the current page in a new tab."""
raise NotImplementedError
def _show_source_pygments(self):
def show_source_cb(source):
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table')
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
new_tab = tb.tabopen(background=False, related=True)
new_tab.set_html(highlighted, self._tab.url())
new_tab.data.viewing_source = True
self._tab.dump_async(show_source_cb)
class AbstractPrinting:
"""Attribute of AbstractTab for printing the page."""
def __init__(self, tab):
self._widget = None
self._tab = tab
def check_pdf_support(self):
raise NotImplementedError
def check_printer_support(self):
raise NotImplementedError
def check_preview_support(self):
raise NotImplementedError
def to_pdf(self, filename):
raise NotImplementedError
def to_printer(self, printer, callback=None):
"""Print the tab.
Args:
printer: The QPrinter to print to.
callback: Called with a boolean
(True if printing succeeded, False otherwise)
"""
raise NotImplementedError
def show_dialog(self):
"""Print with a QPrintDialog."""
self.check_printer_support()
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
self.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(self._tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
class AbstractSearch(QObject):
"""Attribute of AbstractTab for doing searches.
Attributes:
text: The last thing this view was searched for.
search_displayed: Whether we're currently displaying search results in
this view.
_flags: The flags of the last search (needs to be set by subclasses).
_widget: The underlying WebView widget.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._widget = None
self.text = None
self.search_displayed = False
def _is_case_sensitive(self, ignore_case):
"""Check if case-sensitivity should be used.
This assumes self.text is already set properly.
Arguments:
ignore_case: The ignore_case value from the config.
"""
mapping = {
'smart': not self.text.islower(),
'never': True,
'always': False,
}
return mapping[ignore_case]
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
"""Find the given text on the page.
Args:
text: The text to search for.
            ignore_case: Search case-insensitively. ('always'/'never'/'smart')
reverse: Reverse search direction.
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def clear(self):
"""Clear the current search."""
raise NotImplementedError
def prev_result(self, *, result_cb=None):
"""Go to the previous result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
def next_result(self, *, result_cb=None):
"""Go to the next result of the current search.
Args:
result_cb: Called with a bool indicating whether a match was found.
"""
raise NotImplementedError
class AbstractZoom(QObject):
"""Attribute of AbstractTab for controlling zoom.
Attributes:
_neighborlist: A NeighborList with the zoom levels.
_default_zoom_changed: Whether the zoom was changed from the default.
"""
def __init__(self, tab, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self._default_zoom_changed = False
self._init_neighborlist()
config.instance.changed.connect(self._on_config_changed)
self._zoom_factor = float(config.val.zoom.default) / 100
# # FIXME:qtwebengine is this needed?
# # For some reason, this signal doesn't get disconnected automatically
# # when the WebView is destroyed on older PyQt versions.
# # See https://github.com/qutebrowser/qutebrowser/issues/390
# self.destroyed.connect(functools.partial(
# cfg.changed.disconnect, self.init_neighborlist))
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['zoom.levels', 'zoom.default']:
if not self._default_zoom_changed:
factor = float(config.val.zoom.default) / 100
self.set_factor(factor)
self._init_neighborlist()
def _init_neighborlist(self):
"""Initialize self._neighborlist."""
levels = config.val.zoom.levels
self._neighborlist = usertypes.NeighborList(
levels, mode=usertypes.NeighborList.Modes.edge)
self._neighborlist.fuzzyval = config.val.zoom.default
def offset(self, offset):
"""Increase/Decrease the zoom level by the given offset.
Args:
offset: The offset in the zoom level list.
Return:
The new zoom percentage.
"""
level = self._neighborlist.getitem(offset)
self.set_factor(float(level) / 100, fuzzyval=False)
return level
def _set_factor_internal(self, factor):
raise NotImplementedError
def set_factor(self, factor, *, fuzzyval=True):
"""Zoom to a given zoom factor.
Args:
factor: The zoom factor as float.
fuzzyval: Whether to set the NeighborLists fuzzyval.
"""
if fuzzyval:
self._neighborlist.fuzzyval = int(factor * 100)
if factor < 0:
raise ValueError("Can't zoom to factor {}!".format(factor))
default_zoom_factor = float(config.val.zoom.default) / 100
self._default_zoom_changed = (factor != default_zoom_factor)
self._zoom_factor = factor
self._set_factor_internal(factor)
def factor(self):
return self._zoom_factor
def set_default(self):
self._set_factor_internal(float(config.val.zoom.default) / 100)
def set_current(self):
self._set_factor_internal(self._zoom_factor)
class AbstractCaret(QObject):
"""Attribute of AbstractTab for caret browsing.
Signals:
selection_toggled: Emitted when the selection was toggled.
arg: Whether the selection is now active.
"""
selection_toggled = pyqtSignal(bool)
def __init__(self, tab, mode_manager, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self.selection_enabled = False
mode_manager.entered.connect(self._on_mode_entered)
mode_manager.left.connect(self._on_mode_left)
def _on_mode_entered(self, mode):
raise NotImplementedError
def _on_mode_left(self, mode):
raise NotImplementedError
def move_to_next_line(self, count=1):
raise NotImplementedError
def move_to_prev_line(self, count=1):
raise NotImplementedError
def move_to_next_char(self, count=1):
raise NotImplementedError
def move_to_prev_char(self, count=1):
raise NotImplementedError
def move_to_end_of_word(self, count=1):
raise NotImplementedError
def move_to_next_word(self, count=1):
raise NotImplementedError
def move_to_prev_word(self, count=1):
raise NotImplementedError
def move_to_start_of_line(self):
raise NotImplementedError
def move_to_end_of_line(self):
raise NotImplementedError
def move_to_start_of_next_block(self, count=1):
raise NotImplementedError
def move_to_start_of_prev_block(self, count=1):
raise NotImplementedError
def move_to_end_of_next_block(self, count=1):
raise NotImplementedError
def move_to_end_of_prev_block(self, count=1):
raise NotImplementedError
def move_to_start_of_document(self):
raise NotImplementedError
def move_to_end_of_document(self):
raise NotImplementedError
def toggle_selection(self):
raise NotImplementedError
def drop_selection(self):
raise NotImplementedError
def selection(self, callback):
raise NotImplementedError
def _follow_enter(self, tab):
"""Follow a link by faking an enter press."""
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
def follow_selected(self, *, tab=False):
raise NotImplementedError
class AbstractScroller(QObject):
"""Attribute of AbstractTab to manage scroll position."""
perc_changed = pyqtSignal(int, int)
def __init__(self, tab, parent=None):
super().__init__(parent)
self._tab = tab
self._widget = None
self.perc_changed.connect(self._log_scroll_pos_change)
@pyqtSlot()
def _log_scroll_pos_change(self):
log.webview.vdebug("Scroll position changed to {}".format(
self.pos_px()))
def _init_widget(self, widget):
self._widget = widget
def pos_px(self):
raise NotImplementedError
def pos_perc(self):
raise NotImplementedError
def to_perc(self, x=None, y=None):
raise NotImplementedError
def to_point(self, point):
raise NotImplementedError
def to_anchor(self, name):
raise NotImplementedError
def delta(self, x=0, y=0):
raise NotImplementedError
def delta_page(self, x=0, y=0):
raise NotImplementedError
def up(self, count=1):
raise NotImplementedError
def down(self, count=1):
raise NotImplementedError
def left(self, count=1):
raise NotImplementedError
def right(self, count=1):
raise NotImplementedError
def top(self):
raise NotImplementedError
def bottom(self):
raise NotImplementedError
def page_up(self, count=1):
raise NotImplementedError
def page_down(self, count=1):
raise NotImplementedError
def at_top(self):
raise NotImplementedError
def at_bottom(self):
raise NotImplementedError
class AbstractHistory:
"""The history attribute of a AbstractTab."""
def __init__(self, tab):
self._tab = tab
self._history = None
def __len__(self):
return len(self._history)
def __iter__(self):
return iter(self._history.items())
def current_idx(self):
raise NotImplementedError
def back(self, count=1):
"""Go back in the tab's history."""
idx = self.current_idx() - count
if idx >= 0:
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(0))
raise WebTabError("At beginning of history.")
def forward(self, count=1):
"""Go forward in the tab's history."""
idx = self.current_idx() + count
if idx < len(self):
self._go_to_item(self._item_at(idx))
else:
self._go_to_item(self._item_at(len(self) - 1))
raise WebTabError("At end of history.")
def can_go_back(self):
raise NotImplementedError
def can_go_forward(self):
raise NotImplementedError
def _item_at(self, i):
raise NotImplementedError
def _go_to_item(self, item):
raise NotImplementedError
def serialize(self):
"""Serialize into an opaque format understood by self.deserialize."""
raise NotImplementedError
def deserialize(self, data):
"""Serialize from a format produced by self.serialize."""
raise NotImplementedError
def load_items(self, items):
"""Deserialize from a list of WebHistoryItems."""
raise NotImplementedError
class AbstractElements:
"""Finding and handling of elements on the page."""
def __init__(self, tab):
self._widget = None
self._tab = tab
def find_css(self, selector, callback, *, only_visible=False):
"""Find all HTML elements matching a given selector async.
Args:
callback: The callback to be called when the search finished.
selector: The CSS selector to search for.
only_visible: Only show elements which are visible on screen.
"""
raise NotImplementedError
def find_id(self, elem_id, callback):
"""Find the HTML element with the given ID async.
Args:
callback: The callback to be called when the search finished.
elem_id: The ID to search for.
"""
raise NotImplementedError
def find_focused(self, callback):
"""Find the focused element on the page async.
Args:
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
def find_at_pos(self, pos, callback):
"""Find the element at the given position async.
This is also called "hit test" elsewhere.
Args:
pos: The QPoint to get the element for.
callback: The callback to be called when the search finished.
Called with a WebEngineElement or None.
"""
raise NotImplementedError
class AbstractAudio(QObject):
"""Handling of audio/muting for this tab."""
muted_changed = pyqtSignal(bool)
recently_audible_changed = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent)
self._widget = None
def set_muted(self, muted: bool):
"""Set this tab as muted or not."""
raise NotImplementedError
def is_muted(self):
"""Whether this tab is muted."""
raise NotImplementedError
def toggle_muted(self):
self.set_muted(not self.is_muted())
def is_recently_audible(self):
"""Whether this tab has had audio playing recently."""
raise NotImplementedError
class AbstractTab(QWidget):
"""A wrapper over the given widget to hide its API and expose another one.
We use this to unify QWebView and QWebEngineView.
Attributes:
history: The AbstractHistory for the current tab.
registry: The ObjectRegistry associated with this tab.
private: Whether private browsing is turned on for this tab.
_load_status: loading status of this page
Accessible via load_status() method.
_has_ssl_errors: Whether SSL errors happened.
Needs to be set by subclasses.
for properties, see WebView/WebEngineView docs.
Signals:
See related Qt signals.
new_tab_requested: Emitted when a new tab should be opened with the
given URL.
load_status_changed: The loading status changed
fullscreen_requested: Fullscreen display was requested by the page.
arg: True if fullscreen should be turned on,
False if it should be turned off.
renderer_process_terminated: Emitted when the underlying renderer
process terminated.
arg 0: A TerminationStatus member.
arg 1: The exit code.
predicted_navigation: Emitted before we tell Qt to open a URL.
"""
window_close_requested = pyqtSignal()
link_hovered = pyqtSignal(str)
load_started = pyqtSignal()
load_progress = pyqtSignal(int)
load_finished = pyqtSignal(bool)
icon_changed = pyqtSignal(QIcon)
title_changed = pyqtSignal(str)
load_status_changed = pyqtSignal(str)
new_tab_requested = pyqtSignal(QUrl)
url_changed = pyqtSignal(QUrl)
shutting_down = pyqtSignal()
contents_size_changed = pyqtSignal(QSizeF)
add_history_item = pyqtSignal(QUrl, QUrl, str) # url, requested url, title
fullscreen_requested = pyqtSignal(bool)
renderer_process_terminated = pyqtSignal(TerminationStatus, int)
predicted_navigation = pyqtSignal(QUrl)
def __init__(self, *, win_id, mode_manager, private, parent=None):
self.private = private
self.win_id = win_id
self.tab_id = next(tab_id_gen)
super().__init__(parent)
self.registry = objreg.ObjectRegistry()
tab_registry = objreg.get('tab-registry', scope='window',
window=win_id)
tab_registry[self.tab_id] = self
objreg.register('tab', self, registry=self.registry)
self.data = TabData()
self._layout = miscwidgets.WrapperLayout(self)
self._widget = None
self._progress = 0
self._has_ssl_errors = False
self._mode_manager = mode_manager
self._load_status = usertypes.LoadStatus.none
self._mouse_event_filter = mouse.MouseEventFilter(
self, parent=self)
self.backend = None
# FIXME:qtwebengine Should this be public api via self.hints?
# Also, should we get it out of objreg?
hintmanager = hints.HintManager(win_id, self.tab_id, parent=self)
objreg.register('hintmanager', hintmanager, scope='tab',
window=self.win_id, tab=self.tab_id)
self.predicted_navigation.connect(self._on_predicted_navigation)
def _set_widget(self, widget):
# pylint: disable=protected-access
self._widget = widget
self._layout.wrap(self, widget)
self.history._history = widget.history()
self.scroller._init_widget(widget)
self.caret._widget = widget
self.zoom._widget = widget
self.search._widget = widget
self.printing._widget = widget
self.action._widget = widget
self.elements._widget = widget
self.audio._widget = widget
self.settings._settings = widget.settings()
self._install_event_filter()
self.zoom.set_default()
def _install_event_filter(self):
raise NotImplementedError
def _set_load_status(self, val):
"""Setter for load_status."""
if not isinstance(val, usertypes.LoadStatus):
raise TypeError("Type {} is no LoadStatus member!".format(val))
log.webview.debug("load status for {}: {}".format(repr(self), val))
self._load_status = val
self.load_status_changed.emit(val.name)
def event_target(self):
"""Return the widget events should be sent to."""
raise NotImplementedError
def send_event(self, evt):
"""Send the given event to the underlying widget.
The event will be sent via QApplication.postEvent.
Note that a posted event may not be re-used in any way!
"""
# This only gives us some mild protection against re-using events, but
# it's certainly better than a segfault.
if getattr(evt, 'posted', False):
raise utils.Unreachable("Can't re-use an event which was already "
"posted!")
recipient = self.event_target()
if recipient is None:
# https://github.com/qutebrowser/qutebrowser/issues/3888
log.webview.warning("Unable to find event target!")
return
evt.posted = True
QApplication.postEvent(recipient, evt)
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""Adjust the title if we are going to visit an URL soon."""
qtutils.ensure_valid(url)
url_string = url.toDisplayString()
log.webview.debug("Predicted navigation: {}".format(url_string))
self.title_changed.emit(url_string)
@pyqtSlot(QUrl)
def _on_url_changed(self, url):
"""Update title when URL has changed and no title is available."""
if url.isValid() and not self.title():
self.title_changed.emit(url.toDisplayString())
self.url_changed.emit(url)
@pyqtSlot()
def _on_load_started(self):
self._progress = 0
self._has_ssl_errors = False
self.data.viewing_source = False
self._set_load_status(usertypes.LoadStatus.loading)
self.load_started.emit()
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
"""Handle common acceptNavigationRequest code."""
url = utils.elide(navigation.url.toDisplayString(), 100)
log.webview.debug("navigation request: url {}, type {}, is_main_frame "
"{}".format(url,
navigation.navigation_type,
navigation.is_main_frame))
if not navigation.url.isValid():
# Also a WORKAROUND for missing IDNA 2008 support in QUrl, see
# https://bugreports.qt.io/browse/QTBUG-60364
if navigation.navigation_type == navigation.Type.link_clicked:
msg = urlutils.get_errstring(navigation.url,
"Invalid link clicked")
message.error(msg)
self.data.open_target = usertypes.ClickTarget.normal
log.webview.debug("Ignoring invalid URL {} in "
"acceptNavigationRequest: {}".format(
navigation.url.toDisplayString(),
navigation.url.errorString()))
navigation.accepted = False
def handle_auto_insert_mode(self, ok):
"""Handle `input.insert_mode.auto_load` after loading finished."""
if not config.val.input.insert_mode.auto_load or not ok:
return
cur_mode = self._mode_manager.mode
if cur_mode == usertypes.KeyMode.insert:
return
def _auto_insert_mode_cb(elem):
"""Called from JS after finding the focused element."""
if elem is None:
log.webview.debug("No focused element!")
return
if elem.is_editable():
modeman.enter(self.win_id, usertypes.KeyMode.insert,
'load finished', only_if_normal=True)
self.elements.find_focused(_auto_insert_mode_cb)
@pyqtSlot(bool)
def _on_load_finished(self, ok):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
sess_manager = objreg.get('session-manager')
sess_manager.save_autosave()
if ok and not self._has_ssl_errors:
if self.url().scheme() == 'https':
self._set_load_status(usertypes.LoadStatus.success_https)
else:
self._set_load_status(usertypes.LoadStatus.success)
elif ok:
self._set_load_status(usertypes.LoadStatus.warn)
else:
self._set_load_status(usertypes.LoadStatus.error)
self.load_finished.emit(ok)
if not self.title():
self.title_changed.emit(self.url().toDisplayString())
self.zoom.set_current()
@pyqtSlot()
def _on_history_trigger(self):
"""Emit add_history_item when triggered by backend-specific signal."""
raise NotImplementedError
@pyqtSlot(int)
def _on_load_progress(self, perc):
self._progress = perc
self.load_progress.emit(perc)
def url(self, requested=False):
raise NotImplementedError
def progress(self):
return self._progress
def load_status(self):
return self._load_status
def _openurl_prepare(self, url, *, predict=True):
qtutils.ensure_valid(url)
if predict:
self.predicted_navigation.emit(url)
def openurl(self, url, *, predict=True):
raise NotImplementedError
def reload(self, *, force=False):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def clear_ssl_errors(self):
raise NotImplementedError
def key_press(self, key, modifier=Qt.NoModifier):
"""Send a fake key event to this tab."""
raise NotImplementedError
def dump_async(self, callback, *, plain=False):
"""Dump the current page's html asynchronously.
The given callback will be called with the result when dumping is
complete.
"""
raise NotImplementedError
def run_js_async(self, code, callback=None, *, world=None):
"""Run javascript async.
The given callback will be called with the result when running JS is
complete.
Args:
code: The javascript code to run.
callback: The callback to call with the result, or None.
world: A world ID (int or usertypes.JsWorld member) to run the JS
in the main world or in another isolated world.
"""
raise NotImplementedError
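    # Illustrative usage only (assumes a concrete WebKitTab/WebEngineTab
    # instance named `tab`; the callback receives the JavaScript result,
    # which may be None):
    #
    #   def _title_cb(result):
    #       log.webview.debug("document.title: {}".format(result))
    #
    #   tab.run_js_async("document.title", _title_cb)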
def shutdown(self):
raise NotImplementedError
def title(self):
raise NotImplementedError
def icon(self):
raise NotImplementedError
def set_html(self, html, base_url=QUrl()):
raise NotImplementedError
def networkaccessmanager(self):
"""Get the QNetworkAccessManager for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def user_agent(self):
"""Get the user agent for this tab.
This is only implemented for QtWebKit.
For QtWebEngine, always returns None.
"""
raise NotImplementedError
def __repr__(self):
try:
url = utils.elide(self.url().toDisplayString(QUrl.EncodeUnicode),
100)
except (AttributeError, RuntimeError) as exc:
url = '<{}>'.format(exc.__class__.__name__)
return utils.get_repr(self, tab_id=self.tab_id, url=url)
def is_deleted(self):
return sip.isdeleted(self._widget)
| 1 | 22,155 | You'll also need to adjust `FakeWebTabAudio` in `tests/helpers/stubs.py`. | qutebrowser-qutebrowser | py |
@@ -1,19 +1,3 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
package helpers
import ( | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helpers
import (
"fmt"
"hash/fnv"
"k8s.io/apimachinery/pkg/util/validation"
)
// GetName returns a name given a base ("deployment-5") and a suffix ("deploy")
// It will first attempt to join them with a dash. If the resulting name is longer
// than maxLength: if the suffix is too long, it will truncate the base name and add
// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long,
// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix]
func GetName(base, suffix string, maxLength int) string {
if maxLength <= 0 {
return ""
}
name := fmt.Sprintf("%s-%s", base, suffix)
if len(name) <= maxLength {
return name
}
baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix)
// if the suffix is too long, ignore it
if baseLength < 0 {
prefix := base[0:min(len(base), max(0, maxLength-9))]
// Calculate hash on initial base-suffix string
shortName := fmt.Sprintf("%s-%s", prefix, hash(name))
return shortName[:min(maxLength, len(shortName))]
}
prefix := base[0:baseLength]
// Calculate hash on initial base-suffix string
return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix)
}
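// Illustrative examples only (the 8-character hash shown is a placeholder,
// not a real FNV-1a digest):
//
//   GetName("mycluster", "provision", 63)              // -> "mycluster-provision"
//   GetName(strings.Repeat("a", 70), "provision", 63)  // -> 44 "a"s + "-xxxxxxxx-provision" (63 chars)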
// GetResourceName returns a generated name with the default max length
// for most kubernetes resources. This should only be used for resources that
// have default name validation.
func GetResourceName(base, suffix string) string {
return GetName(base, suffix, validation.DNS1123LabelMaxLength)
}
// max returns the greater of its 2 inputs
func max(a, b int) int {
if b > a {
return b
}
return a
}
// min returns the lesser of its 2 inputs
func min(a, b int) int {
if b < a {
return b
}
return a
}
// hash calculates the hexadecimal representation (8-chars)
// of the hash of the passed-in string using the FNV-1a algorithm
func hash(s string) string {
hash := fnv.New32a()
hash.Write([]byte(s))
intHash := hash.Sum32()
result := fmt.Sprintf("%08x", intHash)
return result
}
| 1 | 6,589 | @csrwng what should we do here? This one is from origin. | openshift-hive | go |
@@ -17,6 +17,7 @@
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
+#include <LightGBM/utils/LocaleContext.h>
#include <string>
#include <cstdio> | 1 | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/c_api.h>
#include <LightGBM/boosting.h>
#include <LightGBM/config.h>
#include <LightGBM/dataset.h>
#include <LightGBM/dataset_loader.h>
#include <LightGBM/metric.h>
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
#include "application/predictor.hpp"
namespace LightGBM {
inline int LGBM_APIHandleException(const std::exception& ex) {
LGBM_SetLastError(ex.what());
return -1;
}
inline int LGBM_APIHandleException(const std::string& ex) {
LGBM_SetLastError(ex.c_str());
return -1;
}
#define API_BEGIN() try {
#define API_END() } \
catch(std::exception& ex) { return LGBM_APIHandleException(ex); } \
catch(std::string& ex) { return LGBM_APIHandleException(ex); } \
catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;
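// Usage sketch (illustrative only; LGBM_SomeFunction is a stand-in name): each
// exported C API entry point below wraps its body in these macros so that any
// C++ exception is recorded via LGBM_SetLastError and converted into a -1
// return code (0 on success):
//
//   int LGBM_SomeFunction(/* ... */) {
//     API_BEGIN();
//     // body that may throw
//     API_END();
//   }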
const int PREDICTOR_TYPES = 4;
// Single row predictor to abstract away caching logic
class SingleRowPredictor {
public:
PredictFunction predict_function;
int64_t num_pred_in_one_row;
SingleRowPredictor(int predict_type, Boosting* boosting, const Config& config, int iter) {
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
early_stop_ = config.pred_early_stop;
early_stop_freq_ = config.pred_early_stop_freq;
early_stop_margin_ = config.pred_early_stop_margin;
iter_ = iter;
predictor_.reset(new Predictor(boosting, iter_, is_raw_score, is_predict_leaf, predict_contrib,
early_stop_, early_stop_freq_, early_stop_margin_));
num_pred_in_one_row = boosting->NumPredictOneRow(iter_, is_predict_leaf, predict_contrib);
predict_function = predictor_->GetPredictFunction();
num_total_model_ = boosting->NumberOfTotalModel();
}
~SingleRowPredictor() {}
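  // Returns true when this cached predictor was built with the same
  // early-stopping settings, iteration count and total model size, so
  // Booster::PredictSingleRow can reuse it instead of rebuilding a Predictor.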
bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
return early_stop_ == config.pred_early_stop &&
early_stop_freq_ == config.pred_early_stop_freq &&
early_stop_margin_ == config.pred_early_stop_margin &&
iter_ == iter &&
num_total_model_ == boosting->NumberOfTotalModel();
}
private:
std::unique_ptr<Predictor> predictor_;
bool early_stop_;
int early_stop_freq_;
double early_stop_margin_;
int iter_;
int num_total_model_;
};
class Booster {
public:
explicit Booster(const char* filename) {
boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
}
Booster(const Dataset* train_data,
const char* parameters) {
auto param = Config::Str2Map(parameters);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
// create boosting
if (config_.input_model.size() > 0) {
Log::Warning("Continued train from model is not supported for c_api,\n"
"please use continued train with input score");
}
boosting_.reset(Boosting::CreateBoosting(config_.boosting, nullptr));
train_data_ = train_data;
CreateObjectiveAndMetrics();
// initialize the boosting
if (config_.tree_learner == std::string("feature")) {
Log::Fatal("Do not support feature parallel in c api");
}
if (Network::num_machines() == 1 && config_.tree_learner != std::string("serial")) {
Log::Warning("Only find one worker, will switch to serial tree learner");
config_.tree_learner = "serial";
}
boosting_->Init(&config_, train_data_, objective_fun_.get(),
Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
void MergeFrom(const Booster* other) {
std::lock_guard<std::mutex> lock(mutex_);
boosting_->MergeFrom(other->boosting_.get());
}
~Booster() {
}
void CreateObjectiveAndMetrics() {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
// create training metric
train_metric_.clear();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(
Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(train_data_->metadata(), train_data_->num_data());
train_metric_.push_back(std::move(metric));
}
train_metric_.shrink_to_fit();
}
void ResetTrainingData(const Dataset* train_data) {
if (train_data != train_data_) {
std::lock_guard<std::mutex> lock(mutex_);
train_data_ = train_data;
CreateObjectiveAndMetrics();
// reset the boosting
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
}
static void CheckDatasetResetConfig(
const Config& old_config,
const std::unordered_map<std::string, std::string>& new_param) {
Config new_config;
new_config.Set(new_param);
if (new_param.count("data_random_seed") &&
new_config.data_random_seed != old_config.data_random_seed) {
Log::Fatal("Cannot change data_random_seed after constructed Dataset handle.");
}
if (new_param.count("max_bin") &&
new_config.max_bin != old_config.max_bin) {
Log::Fatal("Cannot change max_bin after constructed Dataset handle.");
}
if (new_param.count("max_bin_by_feature") &&
new_config.max_bin_by_feature != old_config.max_bin_by_feature) {
Log::Fatal(
"Cannot change max_bin_by_feature after constructed Dataset handle.");
}
if (new_param.count("bin_construct_sample_cnt") &&
new_config.bin_construct_sample_cnt !=
old_config.bin_construct_sample_cnt) {
Log::Fatal(
"Cannot change bin_construct_sample_cnt after constructed Dataset "
"handle.");
}
if (new_param.count("min_data_in_bin") &&
new_config.min_data_in_bin != old_config.min_data_in_bin) {
Log::Fatal(
"Cannot change min_data_in_bin after constructed Dataset handle.");
}
if (new_param.count("use_missing") &&
new_config.use_missing != old_config.use_missing) {
Log::Fatal("Cannot change use_missing after constructed Dataset handle.");
}
if (new_param.count("zero_as_missing") &&
new_config.zero_as_missing != old_config.zero_as_missing) {
Log::Fatal(
"Cannot change zero_as_missing after constructed Dataset handle.");
}
if (new_param.count("categorical_feature") &&
new_config.categorical_feature != old_config.categorical_feature) {
Log::Fatal(
"Cannot change categorical_feature after constructed Dataset "
"handle.");
}
if (new_param.count("feature_pre_filter") &&
new_config.feature_pre_filter != old_config.feature_pre_filter) {
Log::Fatal(
"Cannot change feature_pre_filter after constructed Dataset handle.");
}
if (new_param.count("is_enable_sparse") &&
new_config.is_enable_sparse != old_config.is_enable_sparse) {
Log::Fatal(
"Cannot change is_enable_sparse after constructed Dataset handle.");
}
if (new_param.count("pre_partition") &&
new_config.pre_partition != old_config.pre_partition) {
Log::Fatal(
"Cannot change pre_partition after constructed Dataset handle.");
}
if (new_param.count("enable_bundle") &&
new_config.enable_bundle != old_config.enable_bundle) {
Log::Fatal(
"Cannot change enable_bundle after constructed Dataset handle.");
}
if (new_param.count("header") && new_config.header != old_config.header) {
Log::Fatal("Cannot change header after constructed Dataset handle.");
}
if (new_param.count("two_round") &&
new_config.two_round != old_config.two_round) {
Log::Fatal("Cannot change two_round after constructed Dataset handle.");
}
if (new_param.count("label_column") &&
new_config.label_column != old_config.label_column) {
Log::Fatal(
"Cannot change label_column after constructed Dataset handle.");
}
if (new_param.count("weight_column") &&
new_config.weight_column != old_config.weight_column) {
Log::Fatal(
"Cannot change weight_column after constructed Dataset handle.");
}
if (new_param.count("group_column") &&
new_config.group_column != old_config.group_column) {
Log::Fatal(
"Cannot change group_column after constructed Dataset handle.");
}
if (new_param.count("ignore_column") &&
new_config.ignore_column != old_config.ignore_column) {
Log::Fatal(
"Cannot change ignore_column after constructed Dataset handle.");
}
if (new_param.count("forcedbins_filename")) {
Log::Fatal("Cannot change forced bins after constructed Dataset handle.");
}
if (new_param.count("min_data_in_leaf") &&
new_config.min_data_in_leaf < old_config.min_data_in_leaf &&
old_config.feature_pre_filter) {
Log::Fatal(
"Reducing `min_data_in_leaf` with `feature_pre_filter=true` may "
"cause unexpected behaviour "
"for features that were pre-filtered by the larger "
"`min_data_in_leaf`.\n"
"You need to set `feature_pre_filter=false` to dynamically change "
"the `min_data_in_leaf`.");
}
}
void ResetConfig(const char* parameters) {
std::lock_guard<std::mutex> lock(mutex_);
auto param = Config::Str2Map(parameters);
if (param.count("num_class")) {
Log::Fatal("Cannot change num_class during training");
}
if (param.count("boosting")) {
Log::Fatal("Cannot change boosting during training");
}
if (param.count("metric")) {
Log::Fatal("Cannot change metric during training");
}
CheckDatasetResetConfig(config_, param);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
if (param.count("objective")) {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
boosting_->ResetConfig(&config_);
}
void AddValidData(const Dataset* valid_data) {
std::lock_guard<std::mutex> lock(mutex_);
valid_metrics_.emplace_back();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(valid_data->metadata(), valid_data->num_data());
valid_metrics_.back().push_back(std::move(metric));
}
valid_metrics_.back().shrink_to_fit();
boosting_->AddValidDataset(valid_data,
Common::ConstPtrInVectorWrapper<Metric>(valid_metrics_.back()));
}
bool TrainOneIter() {
std::lock_guard<std::mutex> lock(mutex_);
return boosting_->TrainOneIter(nullptr, nullptr);
}
void Refit(const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
std::lock_guard<std::mutex> lock(mutex_);
std::vector<std::vector<int32_t>> v_leaf_preds(nrow, std::vector<int32_t>(ncol, 0));
for (int i = 0; i < nrow; ++i) {
for (int j = 0; j < ncol; ++j) {
v_leaf_preds[i][j] = leaf_preds[i * ncol + j];
}
}
boosting_->RefitTree(v_leaf_preds);
}
bool TrainOneIter(const score_t* gradients, const score_t* hessians) {
std::lock_guard<std::mutex> lock(mutex_);
return boosting_->TrainOneIter(gradients, hessians);
}
void RollbackOneIter() {
std::lock_guard<std::mutex> lock(mutex_);
boosting_->RollbackOneIter();
}
void PredictSingleRow(int num_iteration, int predict_type, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
std::lock_guard<std::mutex> lock(mutex_);
if (single_row_predictor_[predict_type].get() == nullptr ||
!single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
single_row_predictor_[predict_type].reset(new SingleRowPredictor(predict_type, boosting_.get(),
config, num_iteration));
}
auto one_row = get_row_fun(0);
auto pred_wrt_ptr = out_result;
single_row_predictor_[predict_type]->predict_function(one_row, pred_wrt_ptr);
*out_len = single_row_predictor_[predict_type]->num_pred_in_one_row;
}
void Predict(int num_iteration, int predict_type, int nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n" \
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
std::lock_guard<std::mutex> lock(mutex_);
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
Predictor predictor(boosting_.get(), num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
int64_t num_pred_in_one_row = boosting_->NumPredictOneRow(num_iteration, is_predict_leaf, predict_contrib);
auto pred_fun = predictor.GetPredictFunction();
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
auto one_row = get_row_fun(i);
auto pred_wrt_ptr = out_result + static_cast<size_t>(num_pred_in_one_row) * i;
pred_fun(one_row, pred_wrt_ptr);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
*out_len = num_pred_in_one_row * nrow;
}
void Predict(int num_iteration, int predict_type, const char* data_filename,
int data_has_header, const Config& config,
const char* result_filename) {
std::lock_guard<std::mutex> lock(mutex_);
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
Predictor predictor(boosting_.get(), num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
bool bool_data_has_header = data_has_header > 0 ? true : false;
predictor.Predict(data_filename, result_filename, bool_data_has_header, config.predict_disable_shape_check);
}
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) {
boosting_->GetPredictAt(data_idx, out_result, out_len);
}
void SaveModelToFile(int start_iteration, int num_iteration, const char* filename) {
boosting_->SaveModelToFile(start_iteration, num_iteration, filename);
}
void LoadModelFromString(const char* model_str) {
size_t len = std::strlen(model_str);
boosting_->LoadModelFromString(model_str, len);
}
std::string SaveModelToString(int start_iteration, int num_iteration) {
return boosting_->SaveModelToString(start_iteration, num_iteration);
}
std::string DumpModel(int start_iteration, int num_iteration) {
return boosting_->DumpModel(start_iteration, num_iteration);
}
std::vector<double> FeatureImportance(int num_iteration, int importance_type) {
return boosting_->FeatureImportance(num_iteration, importance_type);
}
double UpperBoundValue() const {
std::lock_guard<std::mutex> lock(mutex_);
return boosting_->GetUpperBoundValue();
}
double LowerBoundValue() const {
std::lock_guard<std::mutex> lock(mutex_);
return boosting_->GetLowerBoundValue();
}
double GetLeafValue(int tree_idx, int leaf_idx) const {
return dynamic_cast<GBDTBase*>(boosting_.get())->GetLeafValue(tree_idx, leaf_idx);
}
void SetLeafValue(int tree_idx, int leaf_idx, double val) {
std::lock_guard<std::mutex> lock(mutex_);
dynamic_cast<GBDTBase*>(boosting_.get())->SetLeafValue(tree_idx, leaf_idx, val);
}
void ShuffleModels(int start_iter, int end_iter) {
std::lock_guard<std::mutex> lock(mutex_);
boosting_->ShuffleModels(start_iter, end_iter);
}
int GetEvalCounts() const {
int ret = 0;
for (const auto& metric : train_metric_) {
ret += static_cast<int>(metric->GetName().size());
}
return ret;
}
int GetEvalNames(char** out_strs) const {
int idx = 0;
for (const auto& metric : train_metric_) {
for (const auto& name : metric->GetName()) {
std::memcpy(out_strs[idx], name.c_str(), name.size() + 1);
++idx;
}
}
return idx;
}
int GetFeatureNames(char** out_strs) const {
int idx = 0;
for (const auto& name : boosting_->FeatureNames()) {
std::memcpy(out_strs[idx], name.c_str(), name.size() + 1);
++idx;
}
return idx;
}
const Boosting* GetBoosting() const { return boosting_.get(); }
private:
const Dataset* train_data_;
std::unique_ptr<Boosting> boosting_;
std::unique_ptr<SingleRowPredictor> single_row_predictor_[PREDICTOR_TYPES];
/*! \brief All configs */
Config config_;
/*! \brief Metric for training data */
std::vector<std::unique_ptr<Metric>> train_metric_;
/*! \brief Metrics for validation data */
std::vector<std::vector<std::unique_ptr<Metric>>> valid_metrics_;
/*! \brief Training objective function */
std::unique_ptr<ObjectiveFunction> objective_fun_;
  /*! \brief Mutex for thread-safe calls */
mutable std::mutex mutex_;
};
} // namespace LightGBM
using namespace LightGBM;
// some help functions used to convert data
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type);
std::function<std::vector<std::pair<int, double>>(int idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
const void* data, int data_type, int64_t nindptr, int64_t nelem);
// Row iterator over one column of a CSC matrix
class CSC_RowIterator {
public:
CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
~CSC_RowIterator() {}
  // Returns the value at idx; values may only be accessed in ascending index order
  double Get(int idx);
  // Returns the next non-zero pair; a returned index < 0 means there is no more data
std::pair<int, double> NextNonZero();
private:
int nonzero_idx_ = 0;
int cur_idx_ = -1;
double cur_val_ = 0.0f;
bool is_end_ = false;
std::function<std::pair<int, double>(int idx)> iter_fun_;
};
// start of c_api functions
const char* LGBM_GetLastError() {
return LastErrorMsg();
}
int LGBM_DatasetCreateFromFile(const char* filename,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, filename);
if (reference == nullptr) {
if (Network::num_machines() == 1) {
*out = loader.LoadFromFile(filename);
} else {
*out = loader.LoadFromFile(filename, Network::rank(), Network::num_machines());
}
} else {
*out = loader.LoadFromFileAlignWithOtherDataset(filename,
reinterpret_cast<const Dataset*>(reference));
}
API_END();
}
int LGBM_DatasetCreateFromSampledColumn(double** sample_data,
int** sample_indices,
int32_t ncol,
const int* num_per_col,
int32_t num_sample_row,
int32_t num_total_row,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, nullptr);
*out = loader.CostructFromSampleData(sample_data, sample_indices, ncol, num_per_col,
num_sample_row,
static_cast<data_size_t>(num_total_row));
API_END();
}
int LGBM_DatasetCreateByReference(const DatasetHandle reference,
int64_t num_total_row,
DatasetHandle* out) {
API_BEGIN();
std::unique_ptr<Dataset> ret;
ret.reset(new Dataset(static_cast<data_size_t>(num_total_row)));
ret->CreateValid(reinterpret_cast<const Dataset*>(reference));
*out = ret.release();
API_END();
}
int LGBM_DatasetPushRows(DatasetHandle dataset,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int32_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
if (start_row + nrow == p_dataset->num_data()) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetPushRowsByCSR(DatasetHandle dataset,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t,
int64_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromCSR(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid,
static_cast<data_size_t>(start_row + i), one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
if (start_row + nrow == static_cast<int64_t>(p_dataset->num_data())) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetCreateFromMat(const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
return LGBM_DatasetCreateFromMats(1,
&data,
data_type,
&nrow,
ncol,
is_row_major,
parameters,
reference,
out);
}
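// Creates a Dataset from one or more dense matrices. Without a reference
// dataset, feature bin mappers are built from a random row sample
// (bin_construct_sample_cnt); with a reference, its bin mappers are reused.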
int LGBM_DatasetCreateFromMats(int32_t nmat,
const void** data,
int data_type,
int32_t* nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t total_nrow = 0;
for (int j = 0; j < nmat; ++j) {
total_nrow += nrow[j];
}
std::vector<std::function<std::vector<double>(int row_idx)>> get_row_fun;
for (int j = 0; j < nmat; ++j) {
get_row_fun.push_back(RowFunctionFromDenseMatric(data[j], nrow[j], ncol, data_type, is_row_major));
}
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(total_nrow < config.bin_construct_sample_cnt ? total_nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(total_nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol);
std::vector<std::vector<int>> sample_idx(ncol);
int offset = 0;
int j = 0;
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
while ((idx - offset) >= nrow[j]) {
offset += nrow[j];
++j;
}
auto row = get_row_fun[j](static_cast<int>(idx - offset));
for (size_t k = 0; k < row.size(); ++k) {
if (std::fabs(row[k]) > kZeroThreshold || std::isnan(row[k])) {
sample_values[k].emplace_back(row[k]);
sample_idx[k].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
ncol,
Common::VectorSize<double>(sample_values).data(),
sample_cnt, total_nrow));
} else {
ret.reset(new Dataset(total_nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
int32_t start_row = 0;
for (int j = 0; j < nmat; ++j) {
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow[j]; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun[j](i);
ret->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
start_row += nrow[j];
}
ret->FinishLoad();
*out = ret.release();
API_END();
}
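// Same flow as LGBM_DatasetCreateFromMats, but rows are supplied as a CSR
// (row-compressed sparse) matrix.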
int LGBM_DatasetCreateFromCSR(const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
auto get_row_fun = RowFunctionFromCSR(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
auto row = get_row_fun(static_cast<int>(idx));
for (std::pair<int, double>& inner_data : row) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nindptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
ret->PushOneRow(tid, i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
int num_rows,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto get_row_fun = *static_cast<std::function<void(int idx, std::vector<std::pair<int, double>>&)>*>(get_row_funptr);
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = num_rows;
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
// local buffer to re-use memory
std::vector<std::pair<int, double>> buffer;
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
get_row_fun(static_cast<int>(idx), buffer);
for (std::pair<int, double>& inner_data : buffer) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
std::vector<std::pair<int, double>> thread_buffer;
#pragma omp parallel for schedule(static) private(thread_buffer)
for (int i = 0; i < num_rows; ++i) {
OMP_LOOP_EX_BEGIN();
{
const int tid = omp_get_thread_num();
get_row_fun(i, thread_buffer);
ret->PushOneRow(tid, i, thread_buffer);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
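// Creates a Dataset from a CSC (column-compressed sparse) matrix; each column
// is scanned with a CSC_RowIterator, both for sampling and for pushing data.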
int LGBM_DatasetCreateFromCSC(const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = static_cast<int32_t>(num_row);
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
std::vector<std::vector<int>> sample_idx(ncol_ptr - 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
OMP_LOOP_EX_BEGIN();
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
for (int j = 0; j < sample_cnt; j++) {
auto val = col_it.Get(sample_indices[j]);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
sample_values[i].emplace_back(val);
sample_idx[i].emplace_back(j);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.CostructFromSampleData(Common::Vector2Ptr<double>(&sample_values).data(),
Common::Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
Common::VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < ncol_ptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
int feature_idx = ret->InnerFeatureIndex(i);
if (feature_idx < 0) { continue; }
int group = ret->Feature2Group(feature_idx);
int sub_feature = ret->Feture2SubFeature(feature_idx);
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
auto bin_mapper = ret->FeatureBinMapper(feature_idx);
if (bin_mapper->GetDefaultBin() == bin_mapper->GetMostFreqBin()) {
int row_idx = 0;
while (row_idx < nrow) {
auto pair = col_it.NextNonZero();
row_idx = pair.first;
// no more data
if (row_idx < 0) { break; }
ret->PushOneData(tid, row_idx, group, sub_feature, pair.second);
}
} else {
for (int row_idx = 0; row_idx < nrow; ++row_idx) {
auto val = col_it.Get(row_idx);
ret->PushOneData(tid, row_idx, group, sub_feature, val);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
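// Builds a new Dataset containing only the given (sorted) row indices while
// reusing the feature bin mappers of the source Dataset.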
int LGBM_DatasetGetSubset(
const DatasetHandle handle,
const int32_t* used_row_indices,
int32_t num_used_row_indices,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
auto full_dataset = reinterpret_cast<const Dataset*>(handle);
CHECK_GT(num_used_row_indices, 0);
const int32_t lower = 0;
const int32_t upper = full_dataset->num_data() - 1;
Common::CheckElementsIntervalClosed(used_row_indices, lower, upper, num_used_row_indices, "Used indices of subset");
if (!std::is_sorted(used_row_indices, used_row_indices + num_used_row_indices)) {
Log::Fatal("used_row_indices should be sorted in Subset");
}
auto ret = std::unique_ptr<Dataset>(new Dataset(num_used_row_indices));
ret->CopyFeatureMapperFrom(full_dataset);
ret->CopySubrow(full_dataset, used_row_indices, num_used_row_indices, true);
*out = ret.release();
API_END();
}
int LGBM_DatasetSetFeatureNames(
DatasetHandle handle,
const char** feature_names,
int num_feature_names) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
std::vector<std::string> feature_names_str;
for (int i = 0; i < num_feature_names; ++i) {
feature_names_str.emplace_back(feature_names[i]);
}
dataset->set_feature_names(feature_names_str);
API_END();
}
int LGBM_DatasetGetFeatureNames(
DatasetHandle handle,
char** feature_names,
int* num_feature_names) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
auto inside_feature_name = dataset->feature_names();
*num_feature_names = static_cast<int>(inside_feature_name.size());
for (int i = 0; i < *num_feature_names; ++i) {
std::memcpy(feature_names[i], inside_feature_name[i].c_str(), inside_feature_name[i].size() + 1);
}
API_END();
}
#pragma warning(disable : 4702)
int LGBM_DatasetFree(DatasetHandle handle) {
API_BEGIN();
delete reinterpret_cast<Dataset*>(handle);
API_END();
}
int LGBM_DatasetSaveBinary(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->SaveBinaryFile(filename);
API_END();
}
int LGBM_DatasetDumpText(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->DumpTextFile(filename);
API_END();
}
int LGBM_DatasetSetField(DatasetHandle handle,
const char* field_name,
const void* field_data,
int num_element,
int type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
if (type == C_API_DTYPE_FLOAT32) {
is_success = dataset->SetFloatField(field_name, reinterpret_cast<const float*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_INT32) {
is_success = dataset->SetIntField(field_name, reinterpret_cast<const int*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_FLOAT64) {
is_success = dataset->SetDoubleField(field_name, reinterpret_cast<const double*>(field_data), static_cast<int32_t>(num_element));
}
if (!is_success) { Log::Fatal("Input data type error or field not found"); }
API_END();
}
int LGBM_DatasetGetField(DatasetHandle handle,
const char* field_name,
int* out_len,
const void** out_ptr,
int* out_type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
if (dataset->GetFloatField(field_name, out_len, reinterpret_cast<const float**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT32;
is_success = true;
} else if (dataset->GetIntField(field_name, out_len, reinterpret_cast<const int**>(out_ptr))) {
*out_type = C_API_DTYPE_INT32;
is_success = true;
} else if (dataset->GetDoubleField(field_name, out_len, reinterpret_cast<const double**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT64;
is_success = true;
}
if (!is_success) { Log::Fatal("Field not found"); }
if (*out_ptr == nullptr) { *out_len = 0; }
API_END();
}
int LGBM_DatasetUpdateParamChecking(const char* old_parameters, const char* new_parameters) {
API_BEGIN();
auto old_param = Config::Str2Map(old_parameters);
Config old_config;
old_config.Set(old_param);
auto new_param = Config::Str2Map(new_parameters);
Booster::CheckDatasetResetConfig(old_config, new_param);
API_END();
}
int LGBM_DatasetGetNumData(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_data();
API_END();
}
int LGBM_DatasetGetNumFeature(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_total_features();
API_END();
}
int LGBM_DatasetAddFeaturesFrom(DatasetHandle target,
DatasetHandle source) {
API_BEGIN();
auto target_d = reinterpret_cast<Dataset*>(target);
auto source_d = reinterpret_cast<Dataset*>(source);
target_d->AddFeaturesFrom(source_d);
API_END();
}
// ---- start of booster
int LGBM_BoosterCreate(const DatasetHandle train_data,
const char* parameters,
BoosterHandle* out) {
API_BEGIN();
const Dataset* p_train_data = reinterpret_cast<const Dataset*>(train_data);
auto ret = std::unique_ptr<Booster>(new Booster(p_train_data, parameters));
*out = ret.release();
API_END();
}
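// A minimal end-to-end sketch of the C API (illustrative; error handling is
// omitted and the variable names are placeholders):
//
//   DatasetHandle train;
//   LGBM_DatasetCreateFromMat(features, C_API_DTYPE_FLOAT64, nrow, ncol,
//                             /* is_row_major */ 1, "max_bin=255", nullptr, &train);
//   LGBM_DatasetSetField(train, "label", labels, nrow, C_API_DTYPE_FLOAT32);
//   BoosterHandle booster;
//   LGBM_BoosterCreate(train, "objective=regression", &booster);
//   int is_finished = 0;
//   for (int iter = 0; iter < 100 && !is_finished; ++iter) {
//     LGBM_BoosterUpdateOneIter(booster, &is_finished);
//   }
//   LGBM_BoosterSaveModel(booster, 0, -1 /* <= 0 is assumed to save all iterations */, "model.txt");
//   LGBM_BoosterFree(booster);
//   LGBM_DatasetFree(train);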
int LGBM_BoosterCreateFromModelfile(
const char* filename,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(filename));
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
int LGBM_BoosterLoadModelFromString(
const char* model_str,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(nullptr));
ret->LoadModelFromString(model_str);
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
#pragma warning(disable : 4702)
int LGBM_BoosterFree(BoosterHandle handle) {
API_BEGIN();
delete reinterpret_cast<Booster*>(handle);
API_END();
}
int LGBM_BoosterShuffleModels(BoosterHandle handle, int start_iter, int end_iter) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ShuffleModels(start_iter, end_iter);
API_END();
}
int LGBM_BoosterMerge(BoosterHandle handle,
BoosterHandle other_handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
Booster* ref_other_booster = reinterpret_cast<Booster*>(other_handle);
ref_booster->MergeFrom(ref_other_booster);
API_END();
}
int LGBM_BoosterAddValidData(BoosterHandle handle,
const DatasetHandle valid_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(valid_data);
ref_booster->AddValidData(p_dataset);
API_END();
}
int LGBM_BoosterResetTrainingData(BoosterHandle handle,
const DatasetHandle train_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(train_data);
ref_booster->ResetTrainingData(p_dataset);
API_END();
}
int LGBM_BoosterResetParameter(BoosterHandle handle, const char* parameters) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ResetConfig(parameters);
API_END();
}
int LGBM_BoosterGetNumClasses(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->NumberOfClasses();
API_END();
}
int LGBM_BoosterRefit(BoosterHandle handle, const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Refit(leaf_preds, nrow, ncol);
API_END();
}
int LGBM_BoosterUpdateOneIter(BoosterHandle handle, int* is_finished) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
if (ref_booster->TrainOneIter()) {
*is_finished = 1;
} else {
*is_finished = 0;
}
API_END();
}
int LGBM_BoosterUpdateOneIterCustom(BoosterHandle handle,
const float* grad,
const float* hess,
int* is_finished) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
#ifdef SCORE_T_USE_DOUBLE
Log::Fatal("Don't support custom loss function when SCORE_T_USE_DOUBLE is enabled");
#else
if (ref_booster->TrainOneIter(grad, hess)) {
*is_finished = 1;
} else {
*is_finished = 0;
}
#endif
API_END();
}
int LGBM_BoosterRollbackOneIter(BoosterHandle handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->RollbackOneIter();
API_END();
}
int LGBM_BoosterGetCurrentIteration(BoosterHandle handle, int* out_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_iteration = ref_booster->GetBoosting()->GetCurrentIteration();
API_END();
}
int LGBM_BoosterNumModelPerIteration(BoosterHandle handle, int* out_tree_per_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_tree_per_iteration = ref_booster->GetBoosting()->NumModelPerIteration();
API_END();
}
int LGBM_BoosterNumberOfTotalModel(BoosterHandle handle, int* out_models) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_models = ref_booster->GetBoosting()->NumberOfTotalModel();
API_END();
}
int LGBM_BoosterGetEvalCounts(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalCounts();
API_END();
}
int LGBM_BoosterGetEvalNames(BoosterHandle handle, int* out_len, char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalNames(out_strs);
API_END();
}
int LGBM_BoosterGetFeatureNames(BoosterHandle handle, int* out_len, char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetFeatureNames(out_strs);
API_END();
}
int LGBM_BoosterGetNumFeature(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->MaxFeatureIdx() + 1;
API_END();
}
int LGBM_BoosterGetEval(BoosterHandle handle,
int data_idx,
int* out_len,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto boosting = ref_booster->GetBoosting();
auto result_buf = boosting->GetEvalAt(data_idx);
*out_len = static_cast<int>(result_buf.size());
for (size_t i = 0; i < result_buf.size(); ++i) {
(out_results)[i] = static_cast<double>(result_buf[i]);
}
API_END();
}
int LGBM_BoosterGetNumPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len) {
API_BEGIN();
auto boosting = reinterpret_cast<Booster*>(handle)->GetBoosting();
*out_len = boosting->GetNumPredictAt(data_idx);
API_END();
}
int LGBM_BoosterGetPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->GetPredictAt(data_idx, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForFile(BoosterHandle handle,
const char* data_filename,
int data_has_header,
int predict_type,
int num_iteration,
const char* parameter,
const char* result_filename) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Predict(num_iteration, predict_type, data_filename, data_has_header,
config, result_filename);
API_END();
}
int LGBM_BoosterCalcNumPredict(BoosterHandle handle,
int num_row,
int predict_type,
int num_iteration,
int64_t* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = static_cast<int64_t>(num_row) * ref_booster->GetBoosting()->NumPredictOneRow(
num_iteration, predict_type == C_API_PREDICT_LEAF_INDEX, predict_type == C_API_PREDICT_CONTRIB);
API_END();
}
int LGBM_BoosterPredictForCSR(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int nrow = static_cast<int>(nindptr - 1);
ref_booster->Predict(num_iteration, predict_type, nrow, static_cast<int>(num_col), get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSRSingleRow(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
ref_booster->PredictSingleRow(num_iteration, predict_type, static_cast<int32_t>(num_col), get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSC(BoosterHandle handle,
const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
int num_threads = OMP_NUM_THREADS();
int ncol = static_cast<int>(ncol_ptr - 1);
std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
for (int i = 0; i < num_threads; ++i) {
for (int j = 0; j < ncol; ++j) {
iterators[i].emplace_back(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, j);
}
}
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun =
[&iterators, ncol](int i) {
std::vector<std::pair<int, double>> one_row;
one_row.reserve(ncol);
const int tid = omp_get_thread_num();
for (int j = 0; j < ncol; ++j) {
auto val = iterators[tid][j].Get(i);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
one_row.emplace_back(j, val);
}
}
return one_row;
};
ref_booster->Predict(num_iteration, predict_type, static_cast<int>(num_row), ncol, get_row_fun, config,
out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMat(BoosterHandle handle,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, nrow, ncol, data_type, is_row_major);
ref_booster->Predict(num_iteration, predict_type, nrow, ncol, get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMatSingleRow(BoosterHandle handle,
const void* data,
int data_type,
int32_t ncol,
int is_row_major,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, 1, ncol, data_type, is_row_major);
ref_booster->PredictSingleRow(num_iteration, predict_type, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMats(BoosterHandle handle,
const void** data,
int data_type,
int32_t nrow,
int32_t ncol,
int predict_type,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseRows(data, ncol, data_type);
ref_booster->Predict(num_iteration, predict_type, nrow, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterSaveModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
const char* filename) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SaveModelToFile(start_iteration, num_iteration, filename);
API_END();
}
int LGBM_BoosterSaveModelToString(BoosterHandle handle,
int start_iteration,
int num_iteration,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->SaveModelToString(start_iteration, num_iteration);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterDumpModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->DumpModel(start_iteration, num_iteration);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterGetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double* out_val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_val = static_cast<double>(ref_booster->GetLeafValue(tree_idx, leaf_idx));
API_END();
}
int LGBM_BoosterSetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SetLeafValue(tree_idx, leaf_idx, val);
API_END();
}
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
int num_iteration,
int importance_type,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::vector<double> feature_importances = ref_booster->FeatureImportance(num_iteration, importance_type);
for (size_t i = 0; i < feature_importances.size(); ++i) {
(out_results)[i] = feature_importances[i];
}
API_END();
}
int LGBM_BoosterGetUpperBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double max_value = ref_booster->UpperBoundValue();
*out_results = max_value;
API_END();
}
int LGBM_BoosterGetLowerBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double min_value = ref_booster->LowerBoundValue();
*out_results = min_value;
API_END();
}
int LGBM_NetworkInit(const char* machines,
int local_listen_port,
int listen_time_out,
int num_machines) {
API_BEGIN();
Config config;
config.machines = Common::RemoveQuotationSymbol(std::string(machines));
config.local_listen_port = local_listen_port;
config.num_machines = num_machines;
config.time_out = listen_time_out;
if (num_machines > 1) {
Network::Init(config);
}
API_END();
}
int LGBM_NetworkFree() {
API_BEGIN();
Network::Dispose();
API_END();
}
int LGBM_NetworkInitWithFunctions(int num_machines, int rank,
void* reduce_scatter_ext_fun,
void* allgather_ext_fun) {
API_BEGIN();
if (num_machines > 1) {
Network::Init(num_machines, rank, (ReduceScatterFunction)reduce_scatter_ext_fun, (AllgatherFunction)allgather_ext_fun);
}
API_END();
}
// ---- start of some helper functions
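// Returns a closure that materializes row `row_idx` of a dense float/double
// matrix as a std::vector<double>, handling both row-major and column-major
// layouts.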
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
if (data_type == C_API_DTYPE_FLOAT32) {
const float* data_ptr = reinterpret_cast<const float*>(data);
if (is_row_major) {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
auto tmp_ptr = data_ptr + static_cast<size_t>(num_col) * row_idx;
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(tmp_ptr + i));
}
return ret;
};
} else {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(data_ptr + static_cast<size_t>(num_row) * i + row_idx));
}
return ret;
};
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
const double* data_ptr = reinterpret_cast<const double*>(data);
if (is_row_major) {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
auto tmp_ptr = data_ptr + static_cast<size_t>(num_col) * row_idx;
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(tmp_ptr + i));
}
return ret;
};
} else {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(data_ptr + static_cast<size_t>(num_row) * i + row_idx));
}
return ret;
};
}
}
Log::Fatal("Unknown data type in RowFunctionFromDenseMatric");
return nullptr;
}
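// Wraps RowFunctionFromDenseMatric and keeps only the non-zero (or NaN)
// entries of a row as (column index, value) pairs.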
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
auto inner_function = RowFunctionFromDenseMatric(data, num_row, num_col, data_type, is_row_major);
if (inner_function != nullptr) {
return [inner_function] (int row_idx) {
auto raw_values = inner_function(row_idx);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
return nullptr;
}
// data is an array of pointers to individual rows
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type) {
return [=](int row_idx) {
auto inner_function = RowFunctionFromDenseMatric(data[row_idx], 1, num_col, data_type, /* is_row_major */ true);
auto raw_values = inner_function(0);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
std::function<std::vector<std::pair<int, double>>(int idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices, const void* data, int data_type, int64_t , int64_t ) {
if (data_type == C_API_DTYPE_FLOAT32) {
const float* data_ptr = reinterpret_cast<const float*>(data);
if (indptr_type == C_API_DTYPE_INT32) {
const int32_t* ptr_indptr = reinterpret_cast<const int32_t*>(indptr);
return [=] (int idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
} else if (indptr_type == C_API_DTYPE_INT64) {
const int64_t* ptr_indptr = reinterpret_cast<const int64_t*>(indptr);
return [=] (int idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
const double* data_ptr = reinterpret_cast<const double*>(data);
if (indptr_type == C_API_DTYPE_INT32) {
const int32_t* ptr_indptr = reinterpret_cast<const int32_t*>(indptr);
return [=] (int idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
} else if (indptr_type == C_API_DTYPE_INT64) {
const int64_t* ptr_indptr = reinterpret_cast<const int64_t*>(indptr);
return [=] (int idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
}
}
Log::Fatal("Unknown data type in RowFunctionFromCSR");
return nullptr;
}
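// Returns a closure over one CSC column: given a non-zero offset, it yields
// the (row index, value) pair at that offset, or (-1, 0.0) once the column is
// exhausted. CSC_RowIterator is built on top of this.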
std::function<std::pair<int, double>(int idx)>
IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indices, const void* data, int data_type, int64_t ncol_ptr, int64_t , int col_idx) {
CHECK(col_idx < ncol_ptr && col_idx >= 0);
if (data_type == C_API_DTYPE_FLOAT32) {
const float* data_ptr = reinterpret_cast<const float*>(data);
if (col_ptr_type == C_API_DTYPE_INT32) {
const int32_t* ptr_col_ptr = reinterpret_cast<const int32_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
} else if (col_ptr_type == C_API_DTYPE_INT64) {
const int64_t* ptr_col_ptr = reinterpret_cast<const int64_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
const double* data_ptr = reinterpret_cast<const double*>(data);
if (col_ptr_type == C_API_DTYPE_INT32) {
const int32_t* ptr_col_ptr = reinterpret_cast<const int32_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
} else if (col_ptr_type == C_API_DTYPE_INT64) {
const int64_t* ptr_col_ptr = reinterpret_cast<const int64_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
}
}
Log::Fatal("Unknown data type in CSC matrix");
return nullptr;
}
CSC_RowIterator::CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx) {
iter_fun_ = IterateFunctionFromCSC(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, col_idx);
}
double CSC_RowIterator::Get(int idx) {
while (idx > cur_idx_ && !is_end_) {
auto ret = iter_fun_(nonzero_idx_);
if (ret.first < 0) {
is_end_ = true;
break;
}
cur_idx_ = ret.first;
cur_val_ = ret.second;
++nonzero_idx_;
}
if (idx == cur_idx_) {
return cur_val_;
} else {
return 0.0f;
}
}
std::pair<int, double> CSC_RowIterator::NextNonZero() {
if (!is_end_) {
auto ret = iter_fun_(nonzero_idx_);
++nonzero_idx_;
if (ret.first < 0) {
is_end_ = true;
}
return ret;
} else {
return std::make_pair(-1, 0.0);
}
}
| 1 | 22,664 | Follow alphabetical order. | microsoft-LightGBM | cpp |
@@ -0,0 +1,16 @@
+// <copyright file="IAutomaticTracer.cs" company="Datadog">
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
+// </copyright>
+
+namespace Datadog.Trace.ClrProfiler
+{
+ internal interface IAutomaticTracer : ICommonTracer
+ {
+ object GetDistributedTrace();
+
+ void SetDistributedTrace(object trace);
+
+ void Register(object manualTracer);
+ }
+} | 1 | 1 | 23,854 | As far as I can tell, these always get and set the context as an `IReadOnlyDictionary<string, string>`. Can we use that here instead of `object` and get rid of the `as IReadOnlyDictionary<string, string>`? | DataDog-dd-trace-dotnet | .cs |
|
@@ -206,7 +206,7 @@ class CompletionItemDelegate(QStyledItemDelegate):
else:
self._doc.setPlainText(self._opt.text)
else:
- self._doc.setHtml('<b>{}</b>'.format(html.escape(self._opt.text)))
+ self._doc.setHtml('{}'.format(html.escape(self._opt.text)))
def _draw_focus_rect(self):
"""Draw the focus rectangle of an ItemViewItem.""" | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion item delegate for CompletionView.
We use this to be able to highlight parts of the text.
"""
import re
import html
from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
from PyQt5.QtCore import QRectF, QSize, Qt
from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,
QAbstractTextDocumentLayout)
from qutebrowser.config import config, configexc, style
from qutebrowser.utils import qtutils
class CompletionItemDelegate(QStyledItemDelegate):
"""Delegate used by CompletionView to draw individual items.
Mainly a cleaned up port of Qt's way to draw a TreeView item, except it
uses a QTextDocument to draw the text and add marking.
Original implementation:
qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153
Attributes:
_opt: The QStyleOptionViewItem which is used.
_style: The style to be used.
_painter: The QPainter to be used.
_doc: The QTextDocument to be used.
"""
# FIXME this is horribly slow when resizing.
# We should probably cache something in _get_textdoc or so, but as soon as
# we implement eliding that cache probably isn't worth much anymore...
# https://github.com/The-Compiler/qutebrowser/issues/121
def __init__(self, parent=None):
self._painter = None
self._opt = None
self._doc = None
self._style = None
super().__init__(parent)
def _draw_background(self):
"""Draw the background of an ItemViewItem."""
self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,
self._painter, self._opt.widget)
def _draw_icon(self):
"""Draw the icon of an ItemViewItem."""
icon_rect = self._style.subElementRect(
self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)
if not icon_rect.isValid():
# The rect seems to be wrong in all kind of ways if no icon should
# be displayed.
return
mode = QIcon.Normal
if not self._opt.state & QStyle.State_Enabled:
mode = QIcon.Disabled
elif self._opt.state & QStyle.State_Selected:
mode = QIcon.Selected
state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off
self._opt.icon.paint(self._painter, icon_rect,
self._opt.decorationAlignment, mode, state)
def _draw_text(self, index):
"""Draw the text of an ItemViewItem.
This is the main part where we differ from the original implementation
in Qt: We use a QTextDocument to draw text.
Args:
index: The QModelIndex of the item to draw.
"""
if not self._opt.text:
return
text_rect_ = self._style.subElementRect(
self._style.SE_ItemViewItemText, self._opt, self._opt.widget)
qtutils.ensure_valid(text_rect_)
margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,
self._opt, self._opt.widget) + 1
# remove width padding
text_rect = text_rect_.adjusted(margin, 0, -margin, 0)
qtutils.ensure_valid(text_rect)
# move text upwards a bit
if index.parent().isValid():
text_rect.adjust(0, -1, 0, -1)
else:
text_rect.adjust(0, -2, 0, -2)
self._painter.save()
state = self._opt.state
if state & QStyle.State_Enabled and state & QStyle.State_Active:
cg = QPalette.Normal
elif state & QStyle.State_Enabled:
cg = QPalette.Inactive
else:
cg = QPalette.Disabled
if state & QStyle.State_Selected:
self._painter.setPen(self._opt.palette.color(
cg, QPalette.HighlightedText))
# This is a dirty fix for the text jumping by one pixel for
# whatever reason.
text_rect.adjust(0, -1, 0, 0)
else:
self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
if state & QStyle.State_Editing:
self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))
self._painter.translate(text_rect.left(), text_rect.top())
self._get_textdoc(index)
self._draw_textdoc(text_rect)
self._painter.restore()
def _draw_textdoc(self, rect):
"""Draw the QTextDocument of an item.
Args:
rect: The QRect to clip the drawing to.
"""
# We can't use drawContents because then the color would be ignored.
clip = QRectF(0, 0, rect.width(), rect.height())
self._painter.save()
if self._opt.state & QStyle.State_Selected:
option = 'completion.item.selected.fg'
elif not self._opt.state & QStyle.State_Enabled:
option = 'completion.category.fg'
else:
option = 'completion.fg'
try:
self._painter.setPen(config.get('colors', option))
except configexc.NoOptionError:
self._painter.setPen(config.get('colors', 'completion.fg'))
ctx = QAbstractTextDocumentLayout.PaintContext()
ctx.palette.setColor(QPalette.Text, self._painter.pen().color())
if clip.isValid():
self._painter.setClipRect(clip)
ctx.clip = clip
self._doc.documentLayout().draw(self._painter, ctx)
self._painter.restore()
def _get_textdoc(self, index):
"""Create the QTextDocument of an item.
Args:
index: The QModelIndex of the item to draw.
"""
# FIXME we probably should do eliding here. See
# qcommonstyle.cpp:viewItemDrawText
# https://github.com/The-Compiler/qutebrowser/issues/118
text_option = QTextOption()
if self._opt.features & QStyleOptionViewItem.WrapText:
text_option.setWrapMode(QTextOption.WordWrap)
else:
text_option.setWrapMode(QTextOption.ManualWrap)
text_option.setTextDirection(self._opt.direction)
text_option.setAlignment(QStyle.visualAlignment(
self._opt.direction, self._opt.displayAlignment))
if self._doc is not None:
self._doc.deleteLater()
self._doc = QTextDocument(self)
self._doc.setDefaultFont(self._opt.font)
self._doc.setDefaultTextOption(text_option)
self._doc.setDefaultStyleSheet(style.get_stylesheet("""
.highlight {
color: {{ color['completion.match.fg'] }};
}
"""))
self._doc.setDocumentMargin(2)
if index.parent().isValid():
pattern = index.model().pattern
columns_to_filter = index.model().srcmodel.columns_to_filter
if index.column() in columns_to_filter and pattern:
repl = r'<span class="highlight">\g<0></span>'
text = re.sub(re.escape(pattern).replace(r'\ ', r'|'),
repl, self._opt.text, flags=re.IGNORECASE)
self._doc.setHtml(text)
else:
self._doc.setPlainText(self._opt.text)
else:
self._doc.setHtml('<b>{}</b>'.format(html.escape(self._opt.text)))
def _draw_focus_rect(self):
"""Draw the focus rectangle of an ItemViewItem."""
state = self._opt.state
if not state & QStyle.State_HasFocus:
return
o = self._opt
o.rect = self._style.subElementRect(
self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)
o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item
qtutils.ensure_valid(o.rect)
if state & QStyle.State_Enabled:
cg = QPalette.Normal
else:
cg = QPalette.Disabled
if state & QStyle.State_Selected:
role = QPalette.Highlight
else:
role = QPalette.Window
o.backgroundColor = self._opt.palette.color(cg, role)
self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,
self._opt.widget)
def sizeHint(self, option, index):
"""Override sizeHint of QStyledItemDelegate.
Return the cell size based on the QTextDocument size, but might not
work correctly yet.
Args:
option: const QStyleOptionViewItem & option
index: const QModelIndex & index
Return:
A QSize with the recommended size.
"""
value = index.data(Qt.SizeHintRole)
if value is not None:
return value
self._opt = QStyleOptionViewItem(option)
self.initStyleOption(self._opt, index)
self._style = self._opt.widget.style()
self._get_textdoc(index)
docsize = self._doc.size().toSize()
size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,
docsize, self._opt.widget)
qtutils.ensure_valid(size)
return size + QSize(10, 3)
def paint(self, painter, option, index):
"""Override the QStyledItemDelegate paint function.
Args:
painter: QPainter * painter
option: const QStyleOptionViewItem & option
index: const QModelIndex & index
"""
self._painter = painter
self._painter.save()
self._opt = QStyleOptionViewItem(option)
self.initStyleOption(self._opt, index)
self._style = self._opt.widget.style()
self._draw_background()
self._draw_icon()
self._draw_text(index)
self._draw_focus_rect()
self._painter.restore()
| 1 | 15,466 | As you only have `{}` as the format string (without anything else in it), this is the same as doing `self._doc.setHtml(html.escape(self._opt.text))` | qutebrowser-qutebrowser | py |
@@ -161,6 +161,8 @@ public class Constants {
// enable Quartz Scheduler if true.
public static final String ENABLE_QUARTZ= "azkaban.server.schedule.enable_quartz";
+
+ public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
}
public static class FlowProperties { | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*/
public class Constants {
// Azkaban Flow Versions
public static final String AZKABAN_FLOW_VERSION_2_0 = "2.0";
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
    // Represents the class name of the Azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
    // Represents the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
    // when these parameters are set, they are used to generate email links.
    // if these parameters are not set, then jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
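    // For example, with the reverse proxy shown above one would typically set
    // azkaban.webserver.external_hostname=myazkabanhost and
    // azkaban.webserver.external_ssl_port=443 (illustrative values).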
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable Quartz Scheduler if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* This parameter replaces EXTRA_HCAT_LOCATION, which could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each semicolon-separated group is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris (hcat servers) within a "cluster" ensure that HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* the setting, defined by the user, indicating whether there are hcat locations other than the
* default one from which the system should pre-fetch hcat tokens. Note: multiple thrift uris are
* supported; use a comma to separate the values. Values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
}
| 1 | 15,268 | Where would this be configured? In azkaban.properties? | azkaban-azkaban | java |
@@ -2101,6 +2101,12 @@ Collection.prototype.findOneAndUpdate = function(filter, update, options, callba
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
+ var err = checkForAtomicOperators(update);
+ if (err) {
+ if (typeof callback === 'function') return callback(err);
+ return this.s.promiseLibrary.reject(err);
+ }
+
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object'); | 1 | 'use strict';
const checkCollectionName = require('./utils').checkCollectionName;
const ObjectID = require('mongodb-core').BSON.ObjectID;
const Long = require('mongodb-core').BSON.Long;
const Code = require('mongodb-core').BSON.Code;
const f = require('util').format;
const AggregationCursor = require('./aggregation_cursor');
const MongoError = require('mongodb-core').MongoError;
const shallowClone = require('./utils').shallowClone;
const isObject = require('./utils').isObject;
const toError = require('./utils').toError;
const normalizeHintField = require('./utils').normalizeHintField;
const handleCallback = require('./utils').handleCallback;
const decorateCommand = require('./utils').decorateCommand;
const formattedOrderClause = require('./utils').formattedOrderClause;
const ReadPreference = require('mongodb-core').ReadPreference;
const CommandCursor = require('./command_cursor');
const unordered = require('./bulk/unordered');
const ordered = require('./bulk/ordered');
const ChangeStream = require('./change_stream');
const executeOperation = require('./utils').executeOperation;
const applyWriteConcern = require('./utils').applyWriteConcern;
/**
* @fileOverview The **Collection** class is an internal class that embodies a MongoDB collection
* allowing for insert/update/remove/find and other command operation on that MongoDB collection.
*
* **COLLECTION Cannot directly be instantiated**
* @example
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
* // Create a collection we want to drop later
* const col = client.db(dbName).collection('createIndexExample1');
* // Show that duplicate records got dropped
* col.find({}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* client.close();
* });
* });
*/
var mergeKeys = ['readPreference', 'ignoreUndefined'];
/**
* Create a new Collection instance (INTERNAL TYPE, do not instantiate directly)
* @class
* @property {string} collectionName Get the collection name.
* @property {string} namespace Get the full collection namespace.
* @property {object} writeConcern The current write concern values.
* @property {object} readConcern The current read concern values.
* @property {object} hint Get current index hint for collection.
* @return {Collection} a Collection instance.
*/
var Collection = function(db, topology, dbName, name, pkFactory, options) {
checkCollectionName(name);
// Unpack variables
var internalHint = null;
var slaveOk = options == null || options.slaveOk == null ? db.slaveOk : options.slaveOk;
var serializeFunctions =
options == null || options.serializeFunctions == null
? db.s.options.serializeFunctions
: options.serializeFunctions;
var raw = options == null || options.raw == null ? db.s.options.raw : options.raw;
var promoteLongs =
options == null || options.promoteLongs == null
? db.s.options.promoteLongs
: options.promoteLongs;
var promoteValues =
options == null || options.promoteValues == null
? db.s.options.promoteValues
: options.promoteValues;
var promoteBuffers =
options == null || options.promoteBuffers == null
? db.s.options.promoteBuffers
: options.promoteBuffers;
var readPreference = null;
var collectionHint = null;
var namespace = f('%s.%s', dbName, name);
// Get the promiseLibrary
var promiseLibrary = options.promiseLibrary || Promise;
// Assign the right collection level readPreference
if (options && options.readPreference) {
readPreference = options.readPreference;
} else if (db.options.readPreference) {
readPreference = db.options.readPreference;
}
// Set custom primary key factory if provided
pkFactory = pkFactory == null ? ObjectID : pkFactory;
// Internal state
this.s = {
// Set custom primary key factory if provided
pkFactory: pkFactory,
// Db
db: db,
// Topology
topology: topology,
// dbName
dbName: dbName,
// Options
options: options,
// Namespace
namespace: namespace,
// Read preference
readPreference: readPreference,
// SlaveOK
slaveOk: slaveOk,
// Serialize functions
serializeFunctions: serializeFunctions,
// Raw
raw: raw,
// promoteLongs
promoteLongs: promoteLongs,
// promoteValues
promoteValues: promoteValues,
// promoteBuffers
promoteBuffers: promoteBuffers,
// internalHint
internalHint: internalHint,
// collectionHint
collectionHint: collectionHint,
// Name
name: name,
// Promise library
promiseLibrary: promiseLibrary,
// Read Concern
readConcern: options.readConcern
};
};
Object.defineProperty(Collection.prototype, 'dbName', {
enumerable: true,
get: function() {
return this.s.dbName;
}
});
Object.defineProperty(Collection.prototype, 'collectionName', {
enumerable: true,
get: function() {
return this.s.name;
}
});
Object.defineProperty(Collection.prototype, 'namespace', {
enumerable: true,
get: function() {
return this.s.namespace;
}
});
Object.defineProperty(Collection.prototype, 'readConcern', {
enumerable: true,
get: function() {
return this.s.readConcern || { level: 'local' };
}
});
Object.defineProperty(Collection.prototype, 'writeConcern', {
enumerable: true,
get: function() {
var ops = {};
if (this.s.options.w != null) ops.w = this.s.options.w;
if (this.s.options.j != null) ops.j = this.s.options.j;
if (this.s.options.fsync != null) ops.fsync = this.s.options.fsync;
if (this.s.options.wtimeout != null) ops.wtimeout = this.s.options.wtimeout;
return ops;
}
});
/**
* @ignore
*/
Object.defineProperty(Collection.prototype, 'hint', {
enumerable: true,
get: function() {
return this.s.collectionHint;
},
set: function(v) {
this.s.collectionHint = normalizeHintField(v);
}
});
const DEPRECATED_FIND_OPTIONS = ['maxScan', 'snapshot'];
/**
* Creates a cursor for a query that can be used to iterate over results from MongoDB
* @method
* @param {object} [query={}] The cursor query object.
* @param {object} [options=null] Optional settings.
* @param {number} [options.limit=0] Sets the limit of documents returned in the query.
* @param {(array|object)} [options.sort=null] Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.
* @param {object} [options.projection=null] The fields to return in the query. Object of fields to include or exclude (not both), {'a':1}
* @param {object} [options.fields=null] **Deprecated** Use `options.projection` instead
* @param {number} [options.skip=0] Set to skip N documents ahead in your query (useful for pagination).
* @param {Object} [options.hint=null] Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}
* @param {boolean} [options.explain=false] Explain the query instead of returning the data.
* @param {boolean} [options.snapshot=false] DEPRECATED: Snapshot query.
* @param {boolean} [options.timeout=false] Specify if the cursor can timeout.
* @param {boolean} [options.tailable=false] Specify if the cursor is tailable.
* @param {number} [options.batchSize=0] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {boolean} [options.returnKey=false] Only return the index key.
* @param {number} [options.maxScan=null] DEPRECATED: Limit the number of items to scan.
* @param {number} [options.min=null] Set index bounds.
* @param {number} [options.max=null] Set index bounds.
* @param {boolean} [options.showDiskLoc=false] Show disk location of results.
* @param {string} [options.comment=null] You can put a $comment field on a query to make looking in the profiler logs simpler.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {boolean} [options.partial=false] Specify if the cursor should return partial results when querying against a sharded system
* @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @throws {MongoError}
* @return {Cursor}
*/
Collection.prototype.find = function(query, options, callback) {
let selector = query;
// figuring out arguments
if (typeof callback !== 'function') {
if (typeof options === 'function') {
callback = options;
options = undefined;
} else if (options == null) {
callback = typeof selector === 'function' ? selector : undefined;
selector = typeof selector === 'object' ? selector : undefined;
}
}
// Ensure selector is not null
selector = selector == null ? {} : selector;
// Validate correctness of the selector
var object = selector;
if (Buffer.isBuffer(object)) {
var object_size = object[0] | (object[1] << 8) | (object[2] << 16) | (object[3] << 24);
if (object_size !== object.length) {
var error = new Error(
'query selector raw message size does not match message header size [' +
object.length +
'] != [' +
object_size +
']'
);
error.name = 'MongoError';
throw error;
}
}
// Check special case where we are using an objectId
if (selector != null && selector._bsontype === 'ObjectID') {
selector = { _id: selector };
}
if (!options) options = {};
let projection = options.projection || options.fields;
if (projection && !Buffer.isBuffer(projection) && Array.isArray(projection)) {
projection = projection.length
? projection.reduce((result, field) => {
result[field] = 1;
return result;
}, {})
: { _id: 1 };
}
var newOptions = {};
// Make a shallow copy of the collection options
for (var key in this.s.options) {
if (mergeKeys.indexOf(key) !== -1) {
newOptions[key] = this.s.options[key];
}
}
// Make a shallow copy of options
for (var optKey in options) {
newOptions[optKey] = options[optKey];
}
// Unpack options
newOptions.skip = options.skip ? options.skip : 0;
newOptions.limit = options.limit ? options.limit : 0;
newOptions.raw = typeof options.raw === 'boolean' ? options.raw : this.s.raw;
newOptions.hint = options.hint != null ? normalizeHintField(options.hint) : this.s.collectionHint;
newOptions.timeout = typeof options.timeout === 'undefined' ? undefined : options.timeout;
// If we have overridden slaveOk, use it; otherwise use the default db setting
newOptions.slaveOk = options.slaveOk != null ? options.slaveOk : this.s.db.slaveOk;
// Add read preference if needed
newOptions = getReadPreference(this, newOptions, this.s.db);
// Set slave ok to true if read preference different from primary
if (
newOptions.readPreference != null &&
(newOptions.readPreference !== 'primary' && newOptions.readPreference.mode !== 'primary')
) {
newOptions.slaveOk = true;
}
// Ensure the query is an object
if (selector != null && typeof selector !== 'object') {
throw MongoError.create({ message: 'query selector must be an object', driver: true });
}
// Build the find command
var findCommand = {
find: this.s.namespace,
limit: newOptions.limit,
skip: newOptions.skip,
query: selector
};
// Ensure we use the right await data option
if (typeof newOptions.awaitdata === 'boolean') {
newOptions.awaitData = newOptions.awaitdata;
}
// Translate to new command option noCursorTimeout
if (typeof newOptions.timeout === 'boolean') newOptions.noCursorTimeout = newOptions.timeout;
// Merge in options to command
for (var name in newOptions) {
if (newOptions[name] != null && name !== 'session') {
findCommand[name] = newOptions[name];
}
}
DEPRECATED_FIND_OPTIONS.forEach(deprecatedOption => {
if (findCommand[deprecatedOption]) {
console.warn(
`Find option ${deprecatedOption} is deprecated, and will be removed in a later version`
);
}
});
if (projection) findCommand.fields = projection;
// Add db object to the new options
newOptions.db = this.s.db;
// Add the promise library
newOptions.promiseLibrary = this.s.promiseLibrary;
// Set raw if available at collection level
if (newOptions.raw == null && typeof this.s.raw === 'boolean') newOptions.raw = this.s.raw;
// Set promoteLongs if available at collection level
if (newOptions.promoteLongs == null && typeof this.s.promoteLongs === 'boolean')
newOptions.promoteLongs = this.s.promoteLongs;
if (newOptions.promoteValues == null && typeof this.s.promoteValues === 'boolean')
newOptions.promoteValues = this.s.promoteValues;
if (newOptions.promoteBuffers == null && typeof this.s.promoteBuffers === 'boolean')
newOptions.promoteBuffers = this.s.promoteBuffers;
// Sort options
if (findCommand.sort) {
findCommand.sort = formattedOrderClause(findCommand.sort);
}
// Set the readConcern
decorateWithReadConcern(findCommand, this, options);
// Decorate find command with collation options
decorateWithCollation(findCommand, this, options);
const cursor = this.s.topology.cursor(this.s.namespace, findCommand, newOptions);
// automatically call map on the cursor if the map option is set
if (typeof this.s.options.map === 'function') {
cursor.map(this.s.options.map);
}
return typeof callback === 'function' ? handleCallback(callback, null, cursor) : cursor;
};
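// Usage sketch (illustrative, not part of the original file): assumes `collection` is a Collection
// instance obtained from a connected MongoClient. The cursor returned by find() is narrowed with
// projection, sort and limit options before being materialized with toArray().
async function exampleFindUsage(collection) {
  const docs = await collection
    .find({ status: 'active' }, { projection: { _id: 0, name: 1 }, sort: [['name', 1]], limit: 10 })
    .toArray();
  return docs;
}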
/**
* Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object} doc Document to insert.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertOneWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.insertOne = function(doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, insertOne, [this, doc, options, callback]);
};
var insertOne = function(self, doc, options, callback) {
if (Array.isArray(doc)) {
return callback(
MongoError.create({ message: 'doc parameter must be an object', driver: true })
);
}
insertDocuments(self, [doc], options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
// Workaround for pre 2.6 servers
if (r == null) return callback(null, { result: { ok: 1 } });
// Add values to top level to ensure crud spec compatibility
r.insertedCount = r.result.n;
r.insertedId = doc._id;
if (callback) callback(null, r);
});
};
var mapInsertManyResults = function(docs, r) {
var finalResult = {
result: { ok: 1, n: r.insertedCount },
ops: docs,
insertedCount: r.insertedCount,
insertedIds: r.insertedIds
};
if (r.getLastOp()) {
finalResult.result.opTime = r.getLastOp();
}
return finalResult;
};
/**
* Inserts an array of documents into MongoDB. If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object[]} docs Documents to insert.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {boolean} [options.ordered=true] If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.insertMany = function(docs, options, callback) {
var self = this;
if (typeof options === 'function') (callback = options), (options = {});
options = options ? shallowClone(options) : { ordered: true };
if (!Array.isArray(docs) && typeof callback === 'function') {
return callback(
MongoError.create({ message: 'docs parameter must be an array of documents', driver: true })
);
} else if (!Array.isArray(docs)) {
return new this.s.promiseLibrary(function(resolve, reject) {
reject(
MongoError.create({ message: 'docs parameter must be an array of documents', driver: true })
);
});
}
// Carry over the serializeFunctions setting from the collection
options['serializeFunctions'] = options['serializeFunctions'] || self.s.serializeFunctions;
docs = prepareDocs(this, docs, options);
// Generate the bulk write operations
var operations = [
{
insertMany: docs
}
];
return executeOperation(this.s.topology, bulkWrite, [this, operations, options, callback], {
resultMutator: result => mapInsertManyResults(docs, result)
});
};
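// Usage sketch (illustrative): demonstrates insertOne and insertMany as documented above. Note that
// the driver mutates the supplied documents by adding an _id when one is missing.
async function exampleInsertUsage(collection) {
  const one = await collection.insertOne({ name: 'alice' });
  console.log(one.insertedCount, one.insertedId);

  const many = await collection.insertMany([{ name: 'bob' }, { name: 'carol' }], { ordered: true });
  console.log(many.insertedCount, many.insertedIds);
}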
/**
* @typedef {Object} Collection~BulkWriteOpResult
* @property {number} insertedCount Number of documents inserted.
* @property {number} matchedCount Number of documents matched for update.
* @property {number} modifiedCount Number of documents modified.
* @property {number} deletedCount Number of documents deleted.
* @property {number} upsertedCount Number of documents upserted.
* @property {object} insertedIds Inserted document generated Id's, hash key is the index of the originating operation
* @property {object} upsertedIds Upserted document generated Id's, hash key is the index of the originating operation
* @property {object} result The command result object.
*/
/**
* The callback format for bulk write operations
* @callback Collection~bulkWriteOpCallback
* @param {BulkWriteError} error An error instance representing the error during the execution.
* @param {Collection~BulkWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Perform a bulkWrite operation without a fluent API
*
* Legal operation types are
*
* { insertOne: { document: { a: 1 } } }
*
* { updateOne: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
*
* { updateMany: { filter: {a:2}, update: {$set: {a:2}}, upsert:true } }
*
* { deleteOne: { filter: {c:1} } }
*
* { deleteMany: { filter: {c:1} } }
*
* { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true}}
*
* If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {object[]} operations Bulk operations to perform.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.ordered=true] Execute write operation in ordered or unordered fashion.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~bulkWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.bulkWrite = function(operations, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || { ordered: true };
if (!Array.isArray(operations)) {
throw MongoError.create({ message: 'operations must be an array of documents', driver: true });
}
return executeOperation(this.s.topology, bulkWrite, [this, operations, options, callback]);
};
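// Usage sketch (illustrative): a mixed bulkWrite call using the operation shapes listed in the
// documentation above, followed by reads of the mapped result counters produced by the helper below.
async function exampleBulkWriteUsage(collection) {
  const r = await collection.bulkWrite(
    [
      { insertOne: { document: { a: 1 } } },
      { updateOne: { filter: { a: 2 }, update: { $set: { a: 2 } }, upsert: true } },
      { deleteMany: { filter: { c: 1 } } }
    ],
    { ordered: true }
  );
  console.log(r.insertedCount, r.upsertedCount, r.deletedCount, r.insertedIds, r.upsertedIds);
}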
var bulkWrite = function(self, operations, options, callback) {
// Add ignoreUndefined
if (self.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = self.s.options.ignoreUndefined;
}
// Create the bulk operation
var bulk =
options.ordered === true || options.ordered == null
? self.initializeOrderedBulkOp(options)
: self.initializeUnorderedBulkOp(options);
// Do we have a collation
var collation = false;
// for each op go through and add to the bulk
try {
for (var i = 0; i < operations.length; i++) {
// Get the operation type
var key = Object.keys(operations[i])[0];
// Check if we have a collation
if (operations[i][key].collation) {
collation = true;
}
// Pass to the raw bulk
bulk.raw(operations[i]);
}
} catch (err) {
return callback(err, null);
}
// Final options for write concern
var finalOptions = applyWriteConcern(
shallowClone(options),
{ db: self.s.db, collection: self },
options
);
var writeCon = finalOptions.writeConcern ? finalOptions.writeConcern : {};
var capabilities = self.s.topology.capabilities();
// Did the user pass in a collation, check if our write server supports it
if (collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError(f('server/primary/mongos does not support collation')));
}
// Execute the bulk
bulk.execute(writeCon, finalOptions, function(err, r) {
// We have connection level error
if (!r && err) {
return callback(err, null);
}
r.insertedCount = r.nInserted;
r.matchedCount = r.nMatched;
r.modifiedCount = r.nModified || 0;
r.deletedCount = r.nRemoved;
r.upsertedCount = r.getUpsertedIds().length;
r.upsertedIds = {};
r.insertedIds = {};
// Update the n
r.n = r.insertedCount;
// Inserted documents
var inserted = r.getInsertedIds();
// Map inserted ids
for (var i = 0; i < inserted.length; i++) {
r.insertedIds[inserted[i].index] = inserted[i]._id;
}
// Upserted documents
var upserted = r.getUpsertedIds();
// Map upserted ids
for (i = 0; i < upserted.length; i++) {
r.upsertedIds[upserted[i].index] = upserted[i]._id;
}
// Return the results
callback(null, r);
});
};
var insertDocuments = function(self, docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Ensure we are operating on an array of docs
docs = Array.isArray(docs) ? docs : [docs];
// Get the write concern options
var finalOptions = applyWriteConcern(
shallowClone(options),
{ db: self.s.db, collection: self },
options
);
// If keep going set unordered
if (finalOptions.keepGoing === true) finalOptions.ordered = false;
finalOptions['serializeFunctions'] = options['serializeFunctions'] || self.s.serializeFunctions;
docs = prepareDocs(self, docs, options);
// File inserts
self.s.topology.insert(self.s.namespace, docs, finalOptions, function(err, result) {
if (callback == null) return;
if (err) return handleCallback(callback, err);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Add docs to the list
result.ops = docs;
// Return the results
handleCallback(callback, null, result);
});
};
/**
* @typedef {Object} Collection~WriteOpResult
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {object} connection The connection object used for the operation.
* @property {object} result The command result object.
*/
/**
* The callback format for write operations
* @callback Collection~writeOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~WriteOpResult} result The result object if the command was executed successfully.
*/
/**
* @typedef {Object} Collection~insertWriteOpResult
* @property {Number} insertedCount The total amount of documents inserted.
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {Object.<Number, ObjectId>} insertedIds Map of the index of the inserted document to the id of the inserted document.
* @property {object} connection The connection object used for the operation.
* @property {object} result The raw command result object returned from MongoDB (content might vary by server version).
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents inserted.
*/
/**
* @typedef {Object} Collection~insertOneWriteOpResult
* @property {Number} insertedCount The total amount of documents inserted.
* @property {object[]} ops All the documents inserted using insertOne/insertMany/replaceOne. Documents contain the _id field if forceServerObjectId == false for insertOne/insertMany
* @property {ObjectId} insertedId The driver generated ObjectId for the insert operation.
* @property {object} connection The connection object used for the operation.
* @property {object} result The raw command result object returned from MongoDB (content might vary by server version).
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents inserted.
*/
/**
* The callback format for inserts
* @callback Collection~insertWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~insertWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* The callback format for inserts
* @callback Collection~insertOneWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~insertOneWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Inserts a single document or an array of documents into MongoDB. If documents passed in do not contain the **_id** field,
* one will be added to each of the documents missing it by the driver, mutating the document. This behavior
* can be overridden by setting the **forceServerObjectId** flag.
*
* @method
* @param {(object|object[])} docs Documents to insert.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object.
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~insertWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated Use insertOne, insertMany or bulkWrite
*/
Collection.prototype.insert = function(docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || { ordered: false };
docs = !Array.isArray(docs) ? [docs] : docs;
if (options.keepGoing === true) {
options.ordered = false;
}
return this.insertMany(docs, options, callback);
};
/**
* @typedef {Object} Collection~updateWriteOpResult
* @property {Object} result The raw result returned from MongoDB, field will vary depending on server version.
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents scanned.
* @property {Number} result.nModified The total count of documents modified.
* @property {Object} connection The connection object used for the operation.
* @property {Number} matchedCount The number of documents that matched the filter.
* @property {Number} modifiedCount The number of documents that were modified.
* @property {Number} upsertedCount The number of documents upserted.
* @property {Object} upsertedId The upserted id.
* @property {ObjectId} upsertedId._id The upserted _id returned from the server.
*/
/**
* The callback format for updates
* @callback Collection~updateWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~updateWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Update a single document on MongoDB
* @method
* @param {object} filter The Filter used to select the document to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {Array} [options.arrayFilters=null] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.updateOne = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
var err = checkForAtomicOperators(update);
if (err) {
if (typeof callback === 'function') return callback(err);
return this.s.promiseLibrary.reject(err);
}
options = shallowClone(options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateOne, [this, filter, update, options, callback]);
};
var checkForAtomicOperators = function(update) {
var keys = Object.keys(update);
// same errors as the server would give for update doc lacking atomic operators
if (keys.length === 0) {
return toError('The update operation document must contain at least one atomic operator.');
}
if (keys[0][0] !== '$') {
return toError('the update operation document must contain atomic operators.');
}
};
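// Illustrative sketch (not part of the original file): checkForAtomicOperators rejects update
// documents that do not start with an atomic operator. updateOne (above) and updateMany (below)
// run it before executing, and the accompanying patch applies the same check to findOneAndUpdate.
async function exampleAtomicOperatorCheck(collection) {
  // Passes: the update document consists of atomic operators.
  await collection.updateOne({ name: 'alice' }, { $set: { age: 30 } });

  // Rejected: a plain replacement document has no atomic operators
  // (use replaceOne for full-document replacements instead).
  await collection
    .updateOne({ name: 'alice' }, { age: 30 })
    .catch(err => console.error(err.message));
}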
var updateOne = function(self, filter, update, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(self, filter, update, options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
if (callback) callback(null, r);
});
};
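// Usage sketch (illustrative): reading the mapped result fields populated by the updateOne helper above.
async function exampleUpdateOneResult(collection) {
  const r = await collection.updateOne(
    { sku: 'abc-123' },
    { $inc: { quantity: 5 } },
    { upsert: true }
  );
  // matchedCount/modifiedCount describe existing documents; upsertedCount/upsertedId are set
  // when the upsert inserted a new document instead.
  console.log(r.matchedCount, r.modifiedCount, r.upsertedCount, r.upsertedId);
}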
/**
* Replace a document on MongoDB
* @method
* @param {object} filter The Filter used to select the document to update
* @param {object} doc The Document that replaces the matching document
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.replaceOne = function(filter, doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = shallowClone(options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
if (typeof this.s.options.unmap === 'function') {
doc = this.s.options.unmap(doc);
}
return executeOperation(this.s.topology, replaceOne, [this, filter, doc, options, callback]);
};
var replaceOne = function(self, filter, doc, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(self, filter, doc, options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
r.ops = [doc];
if (callback) callback(null, r);
});
};
/**
* Update multiple documents on MongoDB
* @method
* @param {object} filter The Filter used to select the documents to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {Array} [options.arrayFilters=null] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.updateMany = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
var err = checkForAtomicOperators(update);
if (err) {
if (typeof callback === 'function') return callback(err);
return this.s.promiseLibrary.reject(err);
}
options = shallowClone(options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateMany, [this, filter, update, options, callback]);
};
var updateMany = function(self, filter, update, options, callback) {
// Set single document update
options.multi = true;
// Execute update
updateDocuments(self, filter, update, options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
if (callback) callback(null, r);
});
};
var updateDocuments = function(self, selector, document, options, callback) {
if ('function' === typeof options) (callback = options), (options = null);
if (options == null) options = {};
if (!('function' === typeof callback)) callback = null;
// If we are not providing a selector or document throw
if (selector == null || typeof selector !== 'object')
return callback(toError('selector must be a valid JavaScript object'));
if (document == null || typeof document !== 'object')
return callback(toError('document must be a valid JavaScript object'));
// Get the write concern options
var finalOptions = applyWriteConcern(
shallowClone(options),
{ db: self.s.db, collection: self },
options
);
// Do we return the actual result document
// Either use override on the function, or go back to default on either the collection
// level or db
finalOptions['serializeFunctions'] = options['serializeFunctions'] || self.s.serializeFunctions;
// Execute the operation
var op = { q: selector, u: document };
op.upsert = options.upsert !== void 0 ? !!options.upsert : false;
op.multi = options.multi !== void 0 ? !!options.multi : false;
if (finalOptions.arrayFilters) {
op.arrayFilters = finalOptions.arrayFilters;
delete finalOptions.arrayFilters;
}
// Have we specified collation
decorateWithCollation(finalOptions, self, options);
// Update options
self.s.topology.update(self.s.namespace, [op], finalOptions, function(err, result) {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
};
/**
* Updates documents.
* @method
* @param {object} selector The selector for the update operation.
* @param {object} document The update document.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.upsert=false] Update operation is an upsert.
* @param {boolean} [options.multi=false] Update one/all documents with operation.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {Array} [options.arrayFilters=null] optional list of array filters referenced in filtered positional operators
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
* @deprecated use updateOne, updateMany or bulkWrite
*/
Collection.prototype.update = function(selector, document, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, updateDocuments, [
this,
selector,
document,
options,
callback
]);
};
/**
* @typedef {Object} Collection~deleteWriteOpResult
* @property {Object} result The raw result returned from MongoDB, field will vary depending on server version.
* @property {Number} result.ok Is 1 if the command executed correctly.
* @property {Number} result.n The total count of documents deleted.
* @property {Object} connection The connection object used for the operation.
* @property {Number} deletedCount The number of documents deleted.
*/
/**
* The callback format for deletes
* @callback Collection~deleteWriteOpCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~deleteWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Delete a document on MongoDB
* @method
* @param {object} filter The Filter used to select the document to remove
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.deleteOne = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = shallowClone(options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, deleteOne, [this, filter, options, callback]);
};
var deleteOne = function(self, filter, options, callback) {
options.single = true;
removeDocuments(self, filter, options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.deletedCount = r.result.n;
if (callback) callback(null, r);
});
};
Collection.prototype.removeOne = Collection.prototype.deleteOne;
/**
* Delete multiple documents on MongoDB
* @method
* @param {object} filter The Filter used to select the documents to remove
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.deleteMany = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = shallowClone(options);
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, deleteMany, [this, filter, options, callback]);
};
var deleteMany = function(self, filter, options, callback) {
options.single = false;
removeDocuments(self, filter, options, function(err, r) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.deletedCount = r.result.n;
if (callback) callback(null, r);
});
};
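// Usage sketch (illustrative): deleteOne removes at most one matching document, deleteMany removes
// all matches; both expose the server's n as deletedCount on the result, as mapped above.
async function exampleDeleteUsage(collection) {
  const one = await collection.deleteOne({ status: 'stale' });
  const many = await collection.deleteMany({ status: 'stale' });
  console.log(one.deletedCount, many.deletedCount);
}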
var removeDocuments = function(self, selector, options, callback) {
if (typeof options === 'function') {
(callback = options), (options = {});
} else if (typeof selector === 'function') {
callback = selector;
options = {};
selector = {};
}
// Create an empty options object if the provided one is null
options = options || {};
// Get the write concern options
var finalOptions = applyWriteConcern(
shallowClone(options),
{ db: self.s.db, collection: self },
options
);
// If selector is null set empty
if (selector == null) selector = {};
// Build the op
var op = { q: selector, limit: 0 };
if (options.single) op.limit = 1;
// Have we specified collation
decorateWithCollation(finalOptions, self, options);
// Execute the remove
self.s.topology.remove(self.s.namespace, [op], finalOptions, function(err, result) {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
};
Collection.prototype.removeMany = Collection.prototype.deleteMany;
/**
* Remove documents.
* @method
* @param {object} selector The selector for the update operation.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.single=false] Removes the first document found.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use deleteOne, deleteMany or bulkWrite
*/
Collection.prototype.remove = function(selector, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, removeDocuments, [this, selector, options, callback]);
};
/**
* Save a document. Simple full document replacement function. Not recommended for efficiency, use atomic
* operators and update instead for more efficient operations.
* @method
* @param {object} doc Document to save
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~writeOpCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use insertOne, insertMany, updateOne or updateMany
*/
Collection.prototype.save = function(doc, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Add ignoreUndefined
if (this.s.options.ignoreUndefined) {
options = shallowClone(options);
options.ignoreUndefined = this.s.options.ignoreUndefined;
}
return executeOperation(this.s.topology, save, [this, doc, options, callback]);
};
var save = function(self, doc, options, callback) {
// Get the write concern options
var finalOptions = applyWriteConcern(
shallowClone(options),
{ db: self.s.db, collection: self },
options
);
// Establish if we need to perform an insert or update
if (doc._id != null) {
finalOptions.upsert = true;
return updateDocuments(self, { _id: doc._id }, doc, finalOptions, callback);
}
// Insert the document
insertDocuments(self, [doc], finalOptions, function(err, r) {
if (callback == null) return;
if (doc == null) return handleCallback(callback, null, null);
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, r);
});
};
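// Illustrative sketch: save() (deprecated) branches on the presence of _id as shown above; a
// document with an _id becomes an upserted replacement, while a document without one is inserted.
async function exampleSaveUsage(collection, existingId) {
  await collection.save({ name: 'dave' }); // insert path
  await collection.save({ _id: existingId, name: 'dave' }); // upsert/replace path
}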
/**
* The callback format for results
* @callback Collection~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object} result The result object if the command was executed successfully.
*/
/**
* The callback format for an aggregation call
* @callback Collection~aggregationCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {AggregationCursor} cursor The cursor if the aggregation command was executed successfully.
*/
/**
* Fetches the first document that matches the query
* @method
* @param {object} query Query for find Operation
* @param {object} [options=null] Optional settings.
* @param {number} [options.limit=0] Sets the limit of documents returned in the query.
* @param {(array|object)} [options.sort=null] Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.
* @param {object} [options.projection=null] The fields to return in the query. Object of fields to include or exclude (not both), {'a':1}
* @param {object} [options.fields=null] **Deprecated** Use `options.projection` instead
* @param {number} [options.skip=0] Set to skip N documents ahead in your query (useful for pagination).
* @param {Object} [options.hint=null] Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}
* @param {boolean} [options.explain=false] Explain the query instead of returning the data.
* @param {boolean} [options.snapshot=false] DEPRECATED: Snapshot query.
* @param {boolean} [options.timeout=false] Specify if the cursor can timeout.
* @param {boolean} [options.tailable=false] Specify if the cursor is tailable.
* @param {number} [options.batchSize=0] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {boolean} [options.returnKey=false] Only return the index key.
* @param {number} [options.maxScan=null] DEPRECATED: Limit the number of items to scan.
* @param {number} [options.min=null] Set index bounds.
* @param {number} [options.max=null] Set index bounds.
* @param {boolean} [options.showDiskLoc=false] Show disk location of results.
* @param {string} [options.comment=null] You can put a $comment field on a query to make looking in the profiler logs simpler.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {boolean} [options.partial=false] Specify if the cursor should return partial results when querying against a sharded system
* @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.findOne = function(query, options, callback) {
if (typeof query === 'function') (callback = query), (query = {}), (options = {});
if (typeof options === 'function') (callback = options), (options = {});
query = query || {};
options = options || {};
return executeOperation(this.s.topology, findOne, [this, query, options, callback]);
};
var findOne = function(self, query, options, callback) {
const cursor = self
.find(query, options)
.limit(-1)
.batchSize(1);
// Return the item
cursor.next(function(err, item) {
if (err != null) return handleCallback(callback, toError(err), null);
handleCallback(callback, null, item);
});
};
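// Usage sketch (illustrative): findOne delegates to find() with limit(-1) and batchSize(1) as
// shown above and resolves with the first matching document, or null when nothing matches.
async function exampleFindOneUsage(collection) {
  const doc = await collection.findOne({ email: '[email protected]' }, { projection: { _id: 0 } });
  return doc;
}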
/**
* The callback format for the collection method, must be used if strict is specified
* @callback Collection~collectionResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection} collection The collection instance.
*/
/**
* Rename the collection.
*
* @method
* @param {string} newName New name of the collection.
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.dropTarget=false] Drop the target name collection if it previously exists.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~collectionResultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.rename = function(newName, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY });
return executeOperation(this.s.topology, rename, [this, newName, options, callback]);
};
var rename = function(self, newName, options, callback) {
// Check the collection name
checkCollectionName(newName);
// Build the command
var renameCollection = f('%s.%s', self.s.dbName, self.s.name);
var toCollection = f('%s.%s', self.s.dbName, newName);
var dropTarget = typeof options.dropTarget === 'boolean' ? options.dropTarget : false;
var cmd = { renameCollection: renameCollection, to: toCollection, dropTarget: dropTarget };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: self.s.db, collection: self }, options);
// Execute against admin
self.s.db.admin().command(cmd, options, function(err, doc) {
if (err) return handleCallback(callback, err, null);
// We have an error
if (doc.errmsg) return handleCallback(callback, toError(doc), null);
try {
return handleCallback(
callback,
null,
new Collection(
self.s.db,
self.s.topology,
self.s.dbName,
newName,
self.s.pkFactory,
self.s.options
)
);
} catch (err) {
return handleCallback(callback, toError(err), null);
}
});
};
/**
* Drop the collection from the database, removing it permanently. New accesses will create a new collection.
*
* @method
* @param {object} [options=null] Optional settings.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.drop = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, this.s.db.dropCollection.bind(this.s.db), [
this.s.name,
options,
callback
]);
};
/**
* Returns the options of the collection.
*
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.options = function(opts, callback) {
if (typeof opts === 'function') (callback = opts), (opts = {});
opts = opts || {};
return executeOperation(this.s.topology, options, [this, opts, callback]);
};
var options = function(self, opts, callback) {
self.s.db.listCollections({ name: self.s.name }, opts).toArray(function(err, collections) {
if (err) return handleCallback(callback, err);
if (collections.length === 0) {
return handleCallback(
callback,
MongoError.create({ message: f('collection %s not found', self.s.namespace), driver: true })
);
}
handleCallback(callback, err, collections[0].options || null);
});
};
/**
 * Returns whether the collection is a capped collection
*
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The results callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.isCapped = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, isCapped, [this, options, callback]);
};
var isCapped = function(self, options, callback) {
self.options(options, function(err, document) {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, document && document.capped);
});
};
/**
* Creates an index on the db and collection collection.
* @method
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.unique=false] Creates a unique index.
* @param {boolean} [options.sparse=false] Creates a sparse index.
* @param {boolean} [options.background=false] Creates the index in the background, yielding whenever possible.
 * @param {boolean} [options.dropDups=false] A unique index cannot be created on a key that has pre-existing duplicate values. If you would like to create the index anyway, keeping the first document the database indexes and deleting all subsequent documents that have duplicate values, set this option to true.
* @param {number} [options.min=null] For geospatial indexes set the lower bound for the co-ordinates.
* @param {number} [options.max=null] For geospatial indexes set the high bound for the co-ordinates.
* @param {number} [options.v=null] Specify the format version of the indexes.
 * @param {number} [options.expireAfterSeconds=null] Allows you to expire data on indexes applied to a collection (MongoDB 2.2 or higher)
* @param {string} [options.name=null] Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
* @param {object} [options.partialFilterExpression=null] Creates a partial index based on the given filter object (MongoDB 3.2 or higher)
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
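 * @example
 * // Illustrative sketch (not from the original source); `collection` and the `email`
 * // field are assumptions for demonstration only.
 * collection.createIndex({ email: 1 }, { unique: true, background: true }, function(err, indexName) {
 *   if (err) return console.error(err);
 *   console.log('created index', indexName);
 * });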
*/
Collection.prototype.createIndex = function(fieldOrSpec, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, createIndex, [this, fieldOrSpec, options, callback]);
};
var createIndex = function(self, fieldOrSpec, options, callback) {
self.s.db.createIndex(self.s.name, fieldOrSpec, options, callback);
};
/**
* Creates multiple indexes in the collection, this method is only supported for
 * MongoDB 2.6 or higher. Earlier versions of MongoDB will throw a command not supported
* error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/.
* @method
* @param {array} indexSpecs An array of index specifications to be created
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
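 * @example
 * // Illustrative sketch (not from the original source); the key fields are hypothetical.
 * // Index names are generated from the keys when not supplied, as described above.
 * collection.createIndexes([
 *   { key: { email: 1 }, unique: true },
 *   { key: { createdAt: -1 }, name: 'created_at_desc' }
 * ]).then(function(result) {
 *   console.log(result); // raw createIndexes command response
 * });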
*/
Collection.prototype.createIndexes = function(indexSpecs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options ? shallowClone(options) : {};
if (typeof options.maxTimeMS !== 'number') delete options.maxTimeMS;
return executeOperation(this.s.topology, createIndexes, [this, indexSpecs, options, callback]);
};
var createIndexes = function(self, indexSpecs, options, callback) {
var capabilities = self.s.topology.capabilities();
// Ensure we generate the correct name if the parameter is not set
for (var i = 0; i < indexSpecs.length; i++) {
if (indexSpecs[i].name == null) {
var keys = [];
// Did the user pass in a collation, check if our write server supports it
if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError(f('server/primary/mongos does not support collation')));
}
for (var name in indexSpecs[i].key) {
keys.push(f('%s_%s', name, indexSpecs[i].key[name]));
}
// Set the name
indexSpecs[i].name = keys.join('_');
}
}
options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY });
  // Execute the createIndexes command
self.s.db.command(
{
createIndexes: self.s.name,
indexes: indexSpecs
},
options,
callback
);
};
/**
* Drops an index from this collection.
* @method
* @param {string} indexName Name of the index to drop.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
 * @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
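 * @example
 * // Illustrative sketch (not from the original source); assumes an index named 'email_1' exists.
 * collection.dropIndex('email_1', function(err, result) {
 *   if (err) return console.error(err);
 *   console.log(result.ok); // 1 on success
 * });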
*/
Collection.prototype.dropIndex = function(indexName, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
// Run only against primary
options.readPreference = ReadPreference.PRIMARY;
return executeOperation(this.s.topology, dropIndex, [this, indexName, options, callback]);
};
var dropIndex = function(self, indexName, options, callback) {
// Delete index command
var cmd = { dropIndexes: self.s.name, index: indexName };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: self.s.db, collection: self }, options);
// Execute command
self.s.db.command(cmd, options, function(err, result) {
if (typeof callback !== 'function') return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result);
});
};
/**
* Drops all indexes from this collection.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
 * @param {number} [options.maxTimeMS] Number of milliseconds to wait before aborting the query.
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.dropIndexes = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options ? shallowClone(options) : {};
if (typeof options.maxTimeMS !== 'number') delete options.maxTimeMS;
return executeOperation(this.s.topology, dropIndexes, [this, options, callback]);
};
var dropIndexes = function(self, options, callback) {
self.dropIndex('*', options, function(err) {
if (err) return handleCallback(callback, err, false);
handleCallback(callback, null, true);
});
};
/**
* Drops all indexes from this collection.
* @method
* @deprecated use dropIndexes
* @param {Collection~resultCallback} callback The command result callback
* @return {Promise} returns Promise if no [callback] passed
*/
Collection.prototype.dropAllIndexes = Collection.prototype.dropIndexes;
/**
* Reindex all indexes on the collection
* Warning: reIndex is a blocking operation (indexes are rebuilt in the foreground) and will be slow for large collections.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.reIndex = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, reIndex, [this, options, callback]);
};
var reIndex = function(self, options, callback) {
// Reindex
var cmd = { reIndex: self.s.name };
// Execute the command
self.s.db.command(cmd, options, function(err, result) {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.ok ? true : false);
});
};
/**
* Get the list of all indexes information for the collection.
*
* @method
* @param {object} [options=null] Optional settings.
 * @param {number} [options.batchSize=null] The batchSize for the returned command cursor or, if pre 2.8, the system's batch collection
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {CommandCursor}
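 * @example
 * // Illustrative sketch (not from the original source); listIndexes returns a cursor,
 * // so the results are materialized here with toArray().
 * collection.listIndexes().toArray(function(err, indexes) {
 *   if (err) return console.error(err);
 *   indexes.forEach(function(index) { console.log(index.name, index.key); });
 * });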
*/
Collection.prototype.listIndexes = function(options) {
options = options || {};
// Clone the options
options = shallowClone(options);
// Determine the read preference in the options.
options = getReadPreference(this, options, this.s.db, this);
// Set the CommandCursor constructor
options.cursorFactory = CommandCursor;
// Set the promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
if (!this.s.topology.capabilities()) {
throw new MongoError('cannot connect to server');
}
// We have a list collections command
if (this.s.topology.capabilities().hasListIndexesCommand) {
// Cursor options
var cursor = options.batchSize ? { batchSize: options.batchSize } : {};
// Build the command
var command = { listIndexes: this.s.name, cursor: cursor };
// Execute the cursor
cursor = this.s.topology.cursor(f('%s.$cmd', this.s.dbName), command, options);
// Do we have a readPreference, apply it
if (options.readPreference) cursor.setReadPreference(options.readPreference);
// Return the cursor
return cursor;
}
// Get the namespace
var ns = f('%s.system.indexes', this.s.dbName);
// Get the query
cursor = this.s.topology.cursor(ns, { find: ns, query: { ns: this.s.namespace } }, options);
// Do we have a readPreference, apply it
if (options.readPreference) cursor.setReadPreference(options.readPreference);
// Set the passed in batch size if one was provided
if (options.batchSize) cursor = cursor.batchSize(options.batchSize);
// Return the cursor
return cursor;
};
/**
* Ensures that an index exists, if it does not it creates it
* @method
* @deprecated use createIndexes instead
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.unique=false] Creates a unique index.
* @param {boolean} [options.sparse=false] Creates a sparse index.
* @param {boolean} [options.background=false] Creates the index in the background, yielding whenever possible.
 * @param {boolean} [options.dropDups=false] A unique index cannot be created on a key that has pre-existing duplicate values. If you would like to create the index anyway, keeping the first document the database indexes and deleting all subsequent documents that have duplicate values, set this option to true.
* @param {number} [options.min=null] For geospatial indexes set the lower bound for the co-ordinates.
* @param {number} [options.max=null] For geospatial indexes set the high bound for the co-ordinates.
* @param {number} [options.v=null] Specify the format version of the indexes.
 * @param {number} [options.expireAfterSeconds=null] Allows you to expire data on indexes applied to a collection (MongoDB 2.2 or higher)
 * @param {string} [options.name=null] Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.ensureIndex = function(fieldOrSpec, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, ensureIndex, [this, fieldOrSpec, options, callback]);
};
var ensureIndex = function(self, fieldOrSpec, options, callback) {
self.s.db.ensureIndex(self.s.name, fieldOrSpec, options, callback);
};
/**
* Checks if one or more indexes exist on the collection, fails on first non-existing index
* @method
* @param {(string|array)} indexes One or more index names to check.
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexExists = function(indexes, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, indexExists, [this, indexes, options, callback]);
};
var indexExists = function(self, indexes, options, callback) {
self.indexInformation(options, function(err, indexInformation) {
// If we have an error return
if (err != null) return handleCallback(callback, err, null);
// Let's check for the index names
if (!Array.isArray(indexes))
return handleCallback(callback, null, indexInformation[indexes] != null);
// Check in list of indexes
for (var i = 0; i < indexes.length; i++) {
if (indexInformation[indexes[i]] == null) {
return handleCallback(callback, null, false);
}
}
// All keys found return true
return handleCallback(callback, null, true);
});
};
/**
* Retrieves this collections index info.
* @method
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.full=false] Returns the full raw index information.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexInformation = function(options, callback) {
var args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, indexInformation, [this, options, callback]);
};
var indexInformation = function(self, options, callback) {
self.s.db.indexInformation(self.s.name, options, callback);
};
/**
* The callback format for results
* @callback Collection~countCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {number} result The count of documents that matched the query.
*/
/**
* Count number of matching documents in the db to a query.
* @method
* @param {object} query The query for the count.
* @param {object} [options=null] Optional settings.
 * @param {number} [options.limit=null] The limit of documents to count.
 * @param {number} [options.skip=null] The number of documents to skip for the count.
* @param {string} [options.hint=null] An index name hint for the query.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~countCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
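 * @example
 * // Illustrative sketch (not from the original source); the query fields are hypothetical.
 * collection.count({ status: 'active' }, { maxTimeMS: 5000 }).then(function(n) {
 *   console.log('matching documents:', n);
 * });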
*/
Collection.prototype.count = function(query, options, callback) {
var args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
query = args.length ? args.shift() || {} : {};
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, count, [this, query, options, callback]);
};
var count = function(self, query, options, callback) {
var skip = options.skip;
var limit = options.limit;
var hint = options.hint;
var maxTimeMS = options.maxTimeMS;
// Final query
var cmd = {
count: self.s.name,
query: query
};
// Add limit, skip and maxTimeMS if defined
if (typeof skip === 'number') cmd.skip = skip;
if (typeof limit === 'number') cmd.limit = limit;
if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS;
if (hint) cmd.hint = hint;
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db);
// Do we have a readConcern specified
decorateWithReadConcern(cmd, self, options);
// Have we specified collation
decorateWithCollation(cmd, self, options);
// Execute command
self.s.db.command(cmd, options, function(err, result) {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, result.n);
});
};
/**
 * The distinct command returns a list of distinct values for the given key across a collection.
* @method
* @param {string} key Field of the document to find distinct values for.
* @param {object} query The query for filtering the set of documents to which we apply the distinct filter.
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
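 * @example
 * // Illustrative sketch (not from the original source); 'city' and the query are hypothetical.
 * collection.distinct('city', { country: 'US' }, function(err, values) {
 *   if (err) return console.error(err);
 *   console.log(values); // array of distinct city values
 * });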
*/
Collection.prototype.distinct = function(key, query, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
var queryOption = args.length ? args.shift() || {} : {};
var optionsOption = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, distinct, [
this,
key,
queryOption,
optionsOption,
callback
]);
};
var distinct = function(self, key, query, options, callback) {
// maxTimeMS option
var maxTimeMS = options.maxTimeMS;
// Distinct command
var cmd = {
distinct: self.s.name,
key: key,
query: query
};
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db, self);
// Add maxTimeMS if defined
if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS;
// Do we have a readConcern specified
decorateWithReadConcern(cmd, self, options);
// Have we specified collation
decorateWithCollation(cmd, self, options);
// Execute the command
self.s.db.command(cmd, options, function(err, result) {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, result.values);
});
};
/**
* Retrieve all the indexes on the collection.
* @method
* @param {Object} [options] Optional settings
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.indexes = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
return executeOperation(this.s.topology, indexes, [this, options, callback]);
};
var indexes = function(self, options, callback) {
options = Object.assign({}, { full: true }, options);
self.s.db.indexInformation(self.s.name, options, callback);
};
/**
* Get all the collection statistics.
*
* @method
* @param {object} [options=null] Optional settings.
* @param {number} [options.scale=null] Divide the returned sizes by scale value.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The collection result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.stats = function(options, callback) {
var args = Array.prototype.slice.call(arguments, 0);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, stats, [this, options, callback]);
};
var stats = function(self, options, callback) {
// Build command object
var commandObject = {
collStats: self.s.name
};
// Check if we have the scale value
if (options['scale'] != null) commandObject['scale'] = options['scale'];
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db, self);
// Execute the command
self.s.db.command(commandObject, options, callback);
};
/**
* @typedef {Object} Collection~findAndModifyWriteOpResult
* @property {object} value Document returned from findAndModify command.
* @property {object} lastErrorObject The raw lastErrorObject returned from the command.
* @property {Number} ok Is 1 if the command executed correctly.
*/
/**
* The callback format for inserts
* @callback Collection~findAndModifyCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Collection~findAndModifyWriteOpResult} result The result object if the command was executed successfully.
*/
/**
* Find a document and delete it in one atomic operation, requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter Document selection filter.
* @param {object} [options=null] Optional settings.
* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
* @param {object} [options.sort=null] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
* @return {Promise} returns Promise if no callback passed
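 * @example
 * // Illustrative sketch (not from the original source); the filter and field names are hypothetical.
 * collection.findOneAndDelete({ sku: 'ABC-123' }, { projection: { name: 1 } }).then(function(result) {
 *   console.log(result.value); // the deleted document, or null if nothing matched
 * });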
*/
Collection.prototype.findOneAndDelete = function(filter, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
return executeOperation(this.s.topology, findOneAndDelete, [this, filter, options, callback]);
};
var findOneAndDelete = function(self, filter, options, callback) {
// Final options
var finalOptions = shallowClone(options);
finalOptions['fields'] = options.projection;
finalOptions['remove'] = true;
// Execute find and Modify
self.findAndModify(filter, options.sort, null, finalOptions, callback);
};
/**
* Find a document and replace it in one atomic operation, requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter Document selection filter.
* @param {object} replacement Document replacing the matching document.
* @param {object} [options=null] Optional settings.
* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
* @param {object} [options.sort=null] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
* @param {boolean} [options.returnOriginal=true] When false, returns the updated document rather than the original. The default is true.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.findOneAndReplace = function(filter, replacement, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
if (replacement == null || typeof replacement !== 'object')
throw toError('replacement parameter must be an object');
return executeOperation(this.s.topology, findOneAndReplace, [
this,
filter,
replacement,
options,
callback
]);
};
var findOneAndReplace = function(self, filter, replacement, options, callback) {
// Final options
var finalOptions = shallowClone(options);
finalOptions['fields'] = options.projection;
finalOptions['update'] = true;
finalOptions['new'] = options.returnOriginal !== void 0 ? !options.returnOriginal : false;
finalOptions['upsert'] = options.upsert !== void 0 ? !!options.upsert : false;
// Execute findAndModify
self.findAndModify(filter, options.sort, replacement, finalOptions, callback);
};
/**
* Find a document and update it in one atomic operation, requires a write lock for the duration of the operation.
*
* @method
* @param {object} filter Document selection filter.
* @param {object} update Update operations to be performed on the document
* @param {object} [options=null] Optional settings.
* @param {object} [options.projection=null] Limits the fields to return for all matching documents.
* @param {object} [options.sort=null] Determines which document the operation modifies if the query selects multiple documents.
* @param {number} [options.maxTimeMS=null] The maximum amount of time to allow the query to run.
* @param {boolean} [options.upsert=false] Upsert the document if it does not exist.
* @param {boolean} [options.returnOriginal=true] When false, returns the updated document rather than the original. The default is true.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
* @return {Promise} returns Promise if no callback passed
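 * @example
 * // Illustrative sketch (not from the original source); field names are hypothetical.
 * // returnOriginal: false asks for the updated document rather than the original.
 * collection.findOneAndUpdate(
 *   { username: 'alice' },
 *   { $inc: { logins: 1 } },
 *   { returnOriginal: false, upsert: false }
 * ).then(function(result) {
 *   console.log(result.value); // the updated document, or null if nothing matched
 * });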
*/
Collection.prototype.findOneAndUpdate = function(filter, update, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Basic validation
if (filter == null || typeof filter !== 'object')
throw toError('filter parameter must be an object');
if (update == null || typeof update !== 'object')
throw toError('update parameter must be an object');
return executeOperation(this.s.topology, findOneAndUpdate, [
this,
filter,
update,
options,
callback
]);
};
var findOneAndUpdate = function(self, filter, update, options, callback) {
// Final options
var finalOptions = shallowClone(options);
finalOptions['fields'] = options.projection;
finalOptions['update'] = true;
finalOptions['new'] =
typeof options.returnOriginal === 'boolean' ? !options.returnOriginal : false;
finalOptions['upsert'] = typeof options.upsert === 'boolean' ? options.upsert : false;
// Execute findAndModify
self.findAndModify(filter, options.sort, update, finalOptions, callback);
};
/**
* Find and update a document.
* @method
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} doc The fields/vals to be updated.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {boolean} [options.remove=false] Set to true to remove the object before returning.
* @param {boolean} [options.upsert=false] Perform an upsert operation.
* @param {boolean} [options.new=false] Set to true if you want to return the modified object rather than the original. Ignored for remove.
* @param {object} [options.projection=null] Object containing the field projection for the result returned from the operation.
* @param {object} [options.fields=null] **Deprecated** Use `options.projection` instead
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~findAndModifyCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead
*/
Collection.prototype.findAndModify = function(query, sort, doc, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
sort = args.length ? args.shift() || [] : [];
doc = args.length ? args.shift() : null;
options = args.length ? args.shift() || {} : {};
// Clone options
options = shallowClone(options);
// Force read preference primary
options.readPreference = ReadPreference.PRIMARY;
return executeOperation(this.s.topology, findAndModify, [
this,
query,
sort,
doc,
options,
callback
]);
};
var findAndModify = function(self, query, sort, doc, options, callback) {
// Create findAndModify command object
var queryObject = {
findAndModify: self.s.name,
query: query
};
sort = formattedOrderClause(sort);
if (sort) {
queryObject.sort = sort;
}
queryObject.new = options.new ? true : false;
queryObject.remove = options.remove ? true : false;
queryObject.upsert = options.upsert ? true : false;
const projection = options.projection || options.fields;
if (projection) {
queryObject.fields = projection;
}
if (options.arrayFilters) {
queryObject.arrayFilters = options.arrayFilters;
delete options.arrayFilters;
}
if (doc && !options.remove) {
queryObject.update = doc;
}
if (options.maxTimeMS) queryObject.maxTimeMS = options.maxTimeMS;
// Either use override on the function, or go back to default on either the collection
// level or db
  if (options['serializeFunctions'] == null) {
    options['serializeFunctions'] = self.s.serializeFunctions;
  }
// No check on the documents
options.checkKeys = false;
// Get the write concern settings
var finalOptions = applyWriteConcern(options, { db: self.s.db, collection: self }, options);
// Decorate the findAndModify command with the write Concern
if (finalOptions.writeConcern) {
queryObject.writeConcern = finalOptions.writeConcern;
}
// Have we specified bypassDocumentValidation
if (typeof finalOptions.bypassDocumentValidation === 'boolean') {
queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation;
}
// Have we specified collation
decorateWithCollation(queryObject, self, finalOptions);
// Execute the command
self.s.db.command(queryObject, finalOptions, function(err, result) {
if (err) return handleCallback(callback, err, null);
if (result && result.value && typeof self.s.options.map === 'function') {
result.value = self.s.options.map(result.value);
}
return handleCallback(callback, null, result);
});
};
/**
* Find and remove a document.
* @method
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated use findOneAndDelete instead
*/
Collection.prototype.findAndRemove = function(query, sort, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
sort = args.length ? args.shift() || [] : [];
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, findAndRemove, [this, query, sort, options, callback]);
};
var findAndRemove = function(self, query, sort, options, callback) {
// Add the remove option
options['remove'] = true;
  // Execute findAndModify with the remove option
self.findAndModify(query, sort, null, options, callback);
};
function decorateWithCollation(command, self, options) {
// Do we support collation 3.4 and higher
var capabilities = self.s.topology.capabilities();
// Do we support write concerns 3.4 and higher
if (capabilities && capabilities.commandsTakeCollation) {
if (options.collation && typeof options.collation === 'object') {
command.collation = options.collation;
}
}
}
function decorateWithReadConcern(command, self, options) {
let readConcern = Object.assign({}, command.readConcern || {});
if (self.s.readConcern) {
Object.assign(readConcern, self.s.readConcern);
}
if (
options.session &&
options.session.supports.causalConsistency &&
options.session.operationTime
) {
Object.assign(readConcern, { afterClusterTime: options.session.operationTime });
}
if (Object.keys(readConcern).length > 0) {
Object.assign(command, { readConcern: readConcern });
}
}
/**
* Execute an aggregation framework pipeline against the collection, needs MongoDB >= 2.2
* @method
* @param {object} pipeline Array containing all the aggregation framework commands for the execution.
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
 * @param {object} [options.cursor=null] Return the query as a cursor; on MongoDB 2.6 or higher a real cursor is returned, on pre-2.6 an emulated cursor is returned.
* @param {number} [options.cursor.batchSize=null] The batchSize for the cursor
 * @param {boolean} [options.explain=false] Explain returns the aggregation execution plan (requires MongoDB 2.6 or higher).
 * @param {boolean} [options.allowDiskUse=false] allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher).
* @param {number} [options.maxTimeMS=null] maxTimeMS specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers.
* @param {boolean} [options.promoteLongs=true] Promotes Long values to number if they fit inside the 53 bits resolution.
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {object} [options.collation=null] Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @param {string} [options.comment] Add a comment to an aggregation command
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~aggregationCallback} callback The command result callback
* @return {(null|AggregationCursor)}
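 * @example
 * // Illustrative sketch (not from the original source); the pipeline fields are hypothetical.
 * // With no callback an AggregationCursor is returned, so toArray() drives the iteration.
 * collection.aggregate(
 *   [{ $match: { status: 'active' } }, { $group: { _id: '$city', total: { $sum: 1 } } }],
 *   { allowDiskUse: true }
 * ).toArray(function(err, docs) {
 *   if (err) return console.error(err);
 *   console.log(docs);
 * });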
*/
Collection.prototype.aggregate = function(pipeline, options, callback) {
var self = this;
if (Array.isArray(pipeline)) {
// Set up callback if one is provided
if (typeof options === 'function') {
callback = options;
options = {};
}
// If we have no options or callback we are doing
// a cursor based aggregation
if (options == null && callback == null) {
options = {};
}
} else {
// Aggregation pipeline passed as arguments on the method
var args = Array.prototype.slice.call(arguments, 0);
// Get the callback
callback = args.pop();
// Get the possible options object
var opts = args[args.length - 1];
    // If it contains any of the admissible options, pop it off the args
options =
opts &&
(opts.readPreference ||
opts.explain ||
opts.cursor ||
opts.out ||
opts.maxTimeMS ||
opts.hint ||
opts.allowDiskUse)
? args.pop()
: {};
// Left over arguments is the pipeline
pipeline = args;
}
// Ignore readConcern option
var ignoreReadConcern = false;
// Build the command
var command = { aggregate: this.s.name, pipeline: pipeline };
// If out was specified
if (typeof options.out === 'string') {
pipeline.push({ $out: options.out });
// Ignore read concern
ignoreReadConcern = true;
} else if (pipeline.length > 0 && pipeline[pipeline.length - 1]['$out']) {
ignoreReadConcern = true;
}
// Decorate command with writeConcern if out has been specified
if (
pipeline.length > 0 &&
pipeline[pipeline.length - 1]['$out'] &&
self.s.topology.capabilities().commandsTakeWriteConcern
) {
applyWriteConcern(command, { db: self.s.db, collection: self }, options);
}
// Have we specified collation
decorateWithCollation(command, self, options);
// If we have bypassDocumentValidation set
if (typeof options.bypassDocumentValidation === 'boolean') {
command.bypassDocumentValidation = options.bypassDocumentValidation;
}
// Do we have a readConcern specified
if (!ignoreReadConcern) {
decorateWithReadConcern(command, self, options);
}
// If we have allowDiskUse defined
if (options.allowDiskUse) command.allowDiskUse = options.allowDiskUse;
if (typeof options.maxTimeMS === 'number') command.maxTimeMS = options.maxTimeMS;
// If we are giving a hint
if (options.hint) command.hint = options.hint;
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(this, options, this.s.db, this);
// If explain has been specified add it
if (options.explain) {
if (command.readConcern || command.writeConcern) {
throw toError('"explain" cannot be used on an aggregate call with readConcern/writeConcern');
}
command.explain = options.explain;
}
if (typeof options.comment === 'string') command.comment = options.comment;
// Validate that cursor options is valid
if (options.cursor != null && typeof options.cursor !== 'object') {
throw toError('cursor options must be an object');
}
options.cursor = options.cursor || {};
if (options.batchSize) options.cursor.batchSize = options.batchSize;
command.cursor = options.cursor;
// promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
// Set the AggregationCursor constructor
options.cursorFactory = AggregationCursor;
if (typeof callback !== 'function') {
if (!this.s.topology.capabilities()) {
throw new MongoError('cannot connect to server');
}
// Allow disk usage command
if (typeof options.allowDiskUse === 'boolean') command.allowDiskUse = options.allowDiskUse;
if (typeof options.maxTimeMS === 'number') command.maxTimeMS = options.maxTimeMS;
// Execute the cursor
return this.s.topology.cursor(this.s.namespace, command, options);
}
return handleCallback(callback, null, this.s.topology.cursor(this.s.namespace, command, options));
};
/**
* Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this collection.
* @method
* @since 3.0.0
* @param {Array} [pipeline=null] An array of {@link https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.
* @param {object} [options=null] Optional settings
* @param {string} [options.fullDocument='default'] Allowed values: ‘default’, ‘updateLookup’. When set to ‘updateLookup’, the change stream will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
* @param {object} [options.resumeAfter=null] Specifies the logical starting point for the new change stream. This should be the _id field from a previously returned change stream document.
* @param {number} [options.maxAwaitTimeMS] The maximum amount of time for the server to wait on new documents to satisfy a change stream query
* @param {number} [options.batchSize=null] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {object} [options.collation=null] Specify collation settings for operation. See {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
* @param {ReadPreference} [options.readPreference=null] The read preference. Defaults to the read preference of the database or collection. See {@link https://docs.mongodb.com/manual/reference/read-preference|read preference documentation}.
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {ChangeStream} a ChangeStream instance.
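 * @example
 * // Illustrative sketch (not from the original source); the $match stage is hypothetical.
 * var changeStream = collection.watch([{ $match: { 'fullDocument.status': 'active' } }]);
 * changeStream.on('change', function(change) {
 *   console.log(change.operationType, change.documentKey);
 * });
 * // call changeStream.close() when the stream is no longer needed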
*/
Collection.prototype.watch = function(pipeline, options) {
pipeline = pipeline || [];
options = options || {};
// Allow optionally not specifying a pipeline
if (!Array.isArray(pipeline)) {
options = pipeline;
pipeline = [];
}
return new ChangeStream(this, pipeline, options);
};
/**
* The callback format for results
* @callback Collection~parallelCollectionScanCallback
* @param {MongoError} error An error instance representing the error during the execution.
 * @param {Cursor[]} cursors A list of cursors returned allowing for parallel reading of the collection.
*/
/**
 * Return N parallel cursors for a collection, allowing parallel reading of the entire collection. There are
 * no ordering guarantees for returned results.
* @method
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.batchSize=null] Set the batchSize for the getMoreCommand when iterating over the query results.
* @param {number} [options.numCursors=1] The maximum number of parallel command cursors to return (the number of returned cursors will be in the range 1:numCursors)
* @param {boolean} [options.raw=false] Return all BSON documents as Raw Buffer documents.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~parallelCollectionScanCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.parallelCollectionScan = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = { numCursors: 1 });
  // Default the number of cursors to 1
options.numCursors = options.numCursors || 1;
options.batchSize = options.batchSize || 1000;
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(this, options, this.s.db, this);
// Add a promiseLibrary
options.promiseLibrary = this.s.promiseLibrary;
return executeOperation(this.s.topology, parallelCollectionScan, [this, options, callback], {
skipSessions: true
});
};
var parallelCollectionScan = function(self, options, callback) {
// Create command object
var commandObject = {
parallelCollectionScan: self.s.name,
numCursors: options.numCursors
};
// Do we have a readConcern specified
decorateWithReadConcern(commandObject, self, options);
// Store the raw value
var raw = options.raw;
delete options['raw'];
// Execute the command
self.s.db.command(commandObject, options, function(err, result) {
if (err) return handleCallback(callback, err, null);
if (result == null)
return handleCallback(
callback,
new Error('no result returned for parallelCollectionScan'),
null
);
var cursors = [];
// Add the raw back to the option
if (raw) options.raw = raw;
// Create command cursors for each item
for (var i = 0; i < result.cursors.length; i++) {
var rawId = result.cursors[i].cursor.id;
// Convert cursorId to Long if needed
var cursorId = typeof rawId === 'number' ? Long.fromNumber(rawId) : rawId;
// Add a command cursor
cursors.push(self.s.topology.cursor(self.s.namespace, cursorId, options));
}
handleCallback(callback, null, cursors);
});
};
/**
* Execute a geo search using a geo haystack index on a collection.
*
* @method
* @param {number} x Point to search on the x axis, ensure the indexes are ordered in the same order.
* @param {number} y Point to search on the y axis, ensure the indexes are ordered in the same order.
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {number} [options.maxDistance=null] Include results up to maxDistance from the point.
* @param {object} [options.search=null] Filter the results by a query.
 * @param {number} [options.limit=null] Max number of results to return.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.geoHaystackSearch = function(x, y, options, callback) {
var args = Array.prototype.slice.call(arguments, 2);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() || {} : {};
return executeOperation(this.s.topology, geoHaystackSearch, [this, x, y, options, callback]);
};
var geoHaystackSearch = function(self, x, y, options, callback) {
// Build command object
var commandObject = {
geoSearch: self.s.name,
near: [x, y]
};
// Remove read preference from hash if it exists
commandObject = decorateCommand(commandObject, options, { readPreference: true, session: true });
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db, self);
// Do we have a readConcern specified
decorateWithReadConcern(commandObject, self, options);
// Execute the command
self.s.db.command(commandObject, options, function(err, res) {
if (err) return handleCallback(callback, err);
    if (res.err || res.errmsg) return handleCallback(callback, toError(res));
// should we only be returning res.results here? Not sure if the user
// should see the other return information
handleCallback(callback, null, res);
});
};
/**
* Group function helper
* @ignore
*/
// var groupFunction = function () {
// var c = db[ns].find(condition);
// var map = new Map();
// var reduce_function = reduce;
//
// while (c.hasNext()) {
// var obj = c.next();
// var key = {};
//
// for (var i = 0, len = keys.length; i < len; ++i) {
// var k = keys[i];
// key[k] = obj[k];
// }
//
// var aggObj = map.get(key);
//
// if (aggObj == null) {
// var newObj = Object.extend({}, key);
// aggObj = Object.extend(newObj, initial);
// map.put(key, aggObj);
// }
//
// reduce_function(obj, aggObj);
// }
//
// return { "result": map.values() };
// }.toString();
var groupFunction =
'function () {\nvar c = db[ns].find(condition);\nvar map = new Map();\nvar reduce_function = reduce;\n\nwhile (c.hasNext()) {\nvar obj = c.next();\nvar key = {};\n\nfor (var i = 0, len = keys.length; i < len; ++i) {\nvar k = keys[i];\nkey[k] = obj[k];\n}\n\nvar aggObj = map.get(key);\n\nif (aggObj == null) {\nvar newObj = Object.extend({}, key);\naggObj = Object.extend(newObj, initial);\nmap.put(key, aggObj);\n}\n\nreduce_function(obj, aggObj);\n}\n\nreturn { "result": map.values() };\n}';
/**
* Run a group command across a collection
*
* @method
* @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by.
* @param {object} condition An optional condition that must be true for a row to be considered.
* @param {object} initial Initial value of the aggregation counter object.
 * @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated over.
* @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned.
* @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true.
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated MongoDB 3.6 or higher will no longer support the group command. We recommend rewriting using the aggregation framework.
*/
Collection.prototype.group = function(
keys,
condition,
initial,
reduce,
finalize,
command,
options,
callback
) {
var args = Array.prototype.slice.call(arguments, 3);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
reduce = args.length ? args.shift() : null;
finalize = args.length ? args.shift() : null;
command = args.length ? args.shift() : null;
options = args.length ? args.shift() || {} : {};
// Make sure we are backward compatible
if (!(typeof finalize === 'function')) {
command = finalize;
finalize = null;
}
if (
!Array.isArray(keys) &&
keys instanceof Object &&
typeof keys !== 'function' &&
!(keys._bsontype === 'Code')
) {
keys = Object.keys(keys);
}
if (typeof reduce === 'function') {
reduce = reduce.toString();
}
if (typeof finalize === 'function') {
finalize = finalize.toString();
}
// Set up the command as default
command = command == null ? true : command;
return executeOperation(this.s.topology, group, [
this,
keys,
condition,
initial,
reduce,
finalize,
command,
options,
callback
]);
};
var group = function(self, keys, condition, initial, reduce, finalize, command, options, callback) {
// Execute using the command
if (command) {
var reduceFunction = reduce && reduce._bsontype === 'Code' ? reduce : new Code(reduce);
var selector = {
group: {
ns: self.s.name,
$reduce: reduceFunction,
cond: condition,
initial: initial,
out: 'inline'
}
};
// if finalize is defined
if (finalize != null) selector.group['finalize'] = finalize;
// Set up group selector
if ('function' === typeof keys || (keys && keys._bsontype === 'Code')) {
selector.group.$keyf = keys && keys._bsontype === 'Code' ? keys : new Code(keys);
} else {
var hash = {};
keys.forEach(function(key) {
hash[key] = 1;
});
selector.group.key = hash;
}
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db, self);
// Do we have a readConcern specified
decorateWithReadConcern(selector, self, options);
// Have we specified collation
decorateWithCollation(selector, self, options);
// Execute command
self.s.db.command(selector, options, function(err, result) {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.retval);
});
} else {
// Create execution scope
var scope = reduce != null && reduce._bsontype === 'Code' ? reduce.scope : {};
scope.ns = self.s.name;
scope.keys = keys;
scope.condition = condition;
scope.initial = initial;
// Pass in the function text to execute within mongodb.
var groupfn = groupFunction.replace(/ reduce;/, reduce.toString() + ';');
self.s.db.eval(new Code(groupfn, scope), null, options, function(err, results) {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, results.result || results);
});
}
};
/**
* Functions that are passed as scope args must
* be converted to Code instances.
* @ignore
*/
function processScope(scope) {
if (!isObject(scope) || scope._bsontype === 'ObjectID') {
return scope;
}
var keys = Object.keys(scope);
var i = keys.length;
var key;
var new_scope = {};
while (i--) {
key = keys[i];
if ('function' === typeof scope[key]) {
new_scope[key] = new Code(String(scope[key]));
} else {
new_scope[key] = processScope(scope[key]);
}
}
return new_scope;
}
/**
 * Run Map Reduce across a collection. Be aware that the inline option for out will return an array of results, not a collection.
*
* @method
* @param {(function|string)} map The mapping function.
* @param {(function|string)} reduce The reduce function.
* @param {object} [options=null] Optional settings.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {object} [options.out=null] Sets the output target for the map reduce job. *{inline:1} | {replace:'collectionName'} | {merge:'collectionName'} | {reduce:'collectionName'}*
* @param {object} [options.query=null] Query filter object.
* @param {object} [options.sort=null] Sorts the input objects using this key. Useful for optimization, like sorting by the emit key for fewer reduces.
* @param {number} [options.limit=null] Number of objects to return from collection.
* @param {boolean} [options.keeptemp=false] Keep temporary data.
* @param {(function|string)} [options.finalize=null] Finalize function.
 * @param {object} [options.scope=null] Can pass in variables that can be accessed from map/reduce/finalize.
* @param {boolean} [options.jsMode=false] It is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X.
* @param {boolean} [options.verbose=false] Provide statistics on job execution time.
* @param {boolean} [options.bypassDocumentValidation=false] Allow driver to bypass schema validation in MongoDB 3.2 or higher.
* @param {ClientSession} [options.session] optional session to use for this operation
* @param {Collection~resultCallback} [callback] The command result callback
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
Collection.prototype.mapReduce = function(map, reduce, options, callback) {
if ('function' === typeof options) (callback = options), (options = {});
// Out must always be defined (make sure we don't break weirdly on pre 1.8+ servers)
if (null == options.out) {
throw new Error(
'the out option parameter must be defined, see mongodb docs for possible values'
);
}
if ('function' === typeof map) {
map = map.toString();
}
if ('function' === typeof reduce) {
reduce = reduce.toString();
}
if ('function' === typeof options.finalize) {
options.finalize = options.finalize.toString();
}
return executeOperation(this.s.topology, mapReduce, [this, map, reduce, options, callback]);
};
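// Illustrative usage sketch (not part of the original file): assuming an already
// connected `db` handle and an `orders` collection, the mapReduce API documented
// above might be invoked roughly like this; with `out: { inline: 1 }` the reduced
// documents come back as an array rather than a collection:
//
//   db.collection('orders').mapReduce(
//     function() { emit(this.custId, this.amount); },
//     function(key, values) { return values.reduce(function(a, b) { return a + b; }, 0); },
//     { query: { status: 'A' }, out: { inline: 1 } },
//     function(err, results) {
//       // `results` is an array of { _id, value } documents when out is inline
//     }
//   );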
var mapReduce = function(self, map, reduce, options, callback) {
var mapCommandHash = {
mapreduce: self.s.name,
map: map,
reduce: reduce
};
// Exclusion list
var exclusionList = ['readPreference', 'session'];
// Add any other options passed in
for (var n in options) {
if ('scope' === n) {
mapCommandHash[n] = processScope(options[n]);
} else {
// Only include if not in exclusion list
if (exclusionList.indexOf(n) === -1) {
mapCommandHash[n] = options[n];
}
}
}
options = shallowClone(options);
// Ensure we have the right read preference inheritance
options = getReadPreference(self, options, self.s.db, self);
// If we have a read preference and inline is not set as output fail hard
if (
options.readPreference !== false &&
options.readPreference !== 'primary' &&
options['out'] &&
(options['out'].inline !== 1 && options['out'] !== 'inline')
) {
// Force readPreference to primary
options.readPreference = 'primary';
// Decorate command with writeConcern if supported
applyWriteConcern(mapCommandHash, { db: self.s.db, collection: self }, options);
} else {
decorateWithReadConcern(mapCommandHash, self, options);
}
// Is bypassDocumentValidation specified
if (typeof options.bypassDocumentValidation === 'boolean') {
mapCommandHash.bypassDocumentValidation = options.bypassDocumentValidation;
}
// Have we specified collation
decorateWithCollation(mapCommandHash, self, options);
// Execute command
self.s.db.command(mapCommandHash, options, function(err, result) {
if (err) return handleCallback(callback, err);
// Check if we have an error
if (1 !== result.ok || result.err || result.errmsg) {
return handleCallback(callback, toError(result));
}
// Create statistics value
var stats = {};
if (result.timeMillis) stats['processtime'] = result.timeMillis;
if (result.counts) stats['counts'] = result.counts;
if (result.timing) stats['timing'] = result.timing;
// invoked with inline?
if (result.results) {
// If we wish for no verbosity
if (options['verbose'] == null || !options['verbose']) {
return handleCallback(callback, null, result.results);
}
return handleCallback(callback, null, { results: result.results, stats: stats });
}
// The returned collection
var collection = null;
// If we have an object it's a different db
if (result.result != null && typeof result.result === 'object') {
var doc = result.result;
// Return a collection from another db
var Db = require('./db');
collection = new Db(doc.db, self.s.db.s.topology, self.s.db.s.options).collection(
doc.collection
);
} else {
// Create a collection object that wraps the result collection
collection = self.s.db.collection(result.result);
}
// If we wish for no verbosity
if (options['verbose'] == null || !options['verbose']) {
return handleCallback(callback, err, collection);
}
// Return stats as third set of values
handleCallback(callback, err, { collection: collection, stats: stats });
});
};
/**
 * Initiate an out-of-order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.
*
* @method
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
* @return {UnorderedBulkOperation}
*/
Collection.prototype.initializeUnorderedBulkOp = function(options) {
options = options || {};
options.promiseLibrary = this.s.promiseLibrary;
return unordered(this.s.topology, this, options);
};
/**
 * Initiate an in-order bulk write operation; operations will be executed serially in the order they are added, creating a new operation for each switch in types.
*
* @method
* @param {object} [options=null] Optional settings.
* @param {(number|string)} [options.w=null] The write concern.
* @param {number} [options.wtimeout=null] The write concern timeout.
* @param {boolean} [options.j=false] Specify a journal write concern.
* @param {ClientSession} [options.session] optional session to use for this operation
 * @return {OrderedBulkOperation}
*/
Collection.prototype.initializeOrderedBulkOp = function(options) {
options = options || {};
options.promiseLibrary = this.s.promiseLibrary;
return ordered(this.s.topology, this, options);
};
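// Illustrative usage sketch (not part of the original file): a typical ordered bulk
// write built on the API above might look roughly like this; operations are executed
// in the order they were added, with a new command started on each switch in type:
//
//   var bulk = collection.initializeOrderedBulkOp();
//   bulk.insert({ name: 'a' });
//   bulk.find({ name: 'a' }).updateOne({ $set: { touched: true } });
//   bulk.find({ name: 'b' }).remove();
//   bulk.execute(function(err, result) {
//     // `result` summarizes inserted/matched/modified/removed counts
//   });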
// Figure out the read preference
var getReadPreference = function(self, options, db) {
let r = null;
if (options.readPreference) {
r = options.readPreference;
} else if (self.s.readPreference) {
r = self.s.readPreference;
} else if (db.s.readPreference) {
r = db.s.readPreference;
} else {
return options;
}
if (typeof r === 'string') {
options.readPreference = new ReadPreference(r);
} else if (r && !(r instanceof ReadPreference) && typeof r === 'object') {
const mode = r.mode || r.preference;
if (mode && typeof mode === 'string') {
options.readPreference = new ReadPreference(mode, r.tags, {
maxStalenessSeconds: r.maxStalenessSeconds
});
}
} else if (!(r instanceof ReadPreference)) {
throw new TypeError('Invalid read preference: ' + r);
}
return options;
};
// modifies documents before being inserted or updated
const prepareDocs = function(self, docs, options) {
const forceServerObjectId =
typeof options.forceServerObjectId === 'boolean'
? options.forceServerObjectId
: self.s.db.options.forceServerObjectId;
const unmap = typeof self.s.options.unmap === 'function' ? self.s.options.unmap : false;
// no need to modify the docs if server sets the ObjectId
// and unmap collection option is unset
if (forceServerObjectId === true && !unmap) {
return docs;
}
return docs.map(function(doc) {
if (forceServerObjectId !== true && doc._id == null) {
doc._id = self.s.pkFactory.createPk();
}
return unmap ? unmap(doc) : doc;
});
};
module.exports = Collection;
| 1 | 14,340 | 1. Use `const` instead of `var` here. 2. Move the check to after the `if (update == null || typeof update !== 'object')`. | mongodb-node-mongodb-native | js |
@@ -29,6 +29,13 @@ struct st_h2o_multithread_queue_t {
#if H2O_USE_LIBUV
uv_async_t async;
#else
+#if defined(__linux__) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+/**
+ * The kernel overhead of an eventfd file descriptor is
+ * much lower than that of a pipe, and only one file descriptor is required
+ */
+#define H2O_ASYNC_NOTIFY_USING_EVENTFD
+#endif
struct {
int write;
h2o_socket_t *read;
 | 1 |
/*
* Copyright (c) 2015-2016 DeNA Co., Ltd., Kazuho Oku, Tatsuhiko Kubo,
* Chul-Woong Yang
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <pthread.h>
#include "cloexec.h"
#include "h2o/multithread.h"
struct st_h2o_multithread_queue_t {
#if H2O_USE_LIBUV
uv_async_t async;
#else
struct {
int write;
h2o_socket_t *read;
} async;
#endif
pthread_mutex_t mutex;
struct {
h2o_linklist_t active;
h2o_linklist_t inactive;
} receivers;
};
static void queue_cb(h2o_multithread_queue_t *queue)
{
pthread_mutex_lock(&queue->mutex);
while (!h2o_linklist_is_empty(&queue->receivers.active)) {
h2o_multithread_receiver_t *receiver =
H2O_STRUCT_FROM_MEMBER(h2o_multithread_receiver_t, _link, queue->receivers.active.next);
/* detach all the messages from the receiver */
h2o_linklist_t messages;
h2o_linklist_init_anchor(&messages);
h2o_linklist_insert_list(&messages, &receiver->_messages);
/* relink the receiver to the inactive list */
h2o_linklist_unlink(&receiver->_link);
h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);
/* dispatch the messages */
pthread_mutex_unlock(&queue->mutex);
receiver->cb(receiver, &messages);
assert(h2o_linklist_is_empty(&messages));
pthread_mutex_lock(&queue->mutex);
}
pthread_mutex_unlock(&queue->mutex);
}
#ifdef H2O_NO_64BIT_ATOMICS
pthread_mutex_t h2o_conn_id_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
#if H2O_USE_LIBUV
#else
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
static void on_read(h2o_socket_t *sock, const char *err)
{
if (err != NULL) {
fprintf(stderr, "pipe error\n");
abort();
}
h2o_buffer_consume(&sock->input, sock->input->size);
queue_cb(sock->data);
}
static void init_async(h2o_multithread_queue_t *queue, h2o_loop_t *loop)
{
int fds[2];
if (cloexec_pipe(fds) != 0) {
perror("pipe");
abort();
}
fcntl(fds[1], F_SETFL, O_NONBLOCK);
queue->async.write = fds[1];
queue->async.read = h2o_evloop_socket_create(loop, fds[0], 0);
queue->async.read->data = queue;
h2o_socket_read_start(queue->async.read, on_read);
}
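/*
 * Illustrative sketch (not part of the original file): the patch at the top of this
 * entry switches Linux builds to eventfd-based notification. Under that assumption,
 * an eventfd counterpart of init_async could look roughly like the following (it
 * needs <sys/eventfd.h>, and the notify path would then write a 64-bit counter
 * value rather than a single byte), reusing one descriptor for both ends:
 *
 *   static void init_async(h2o_multithread_queue_t *queue, h2o_loop_t *loop)
 *   {
 *       int fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 *       if (fd == -1) {
 *           perror("eventfd");
 *           abort();
 *       }
 *       queue->async.write = fd;
 *       queue->async.read = h2o_evloop_socket_create(loop, fd, 0);
 *       queue->async.read->data = queue;
 *       h2o_socket_read_start(queue->async.read, on_read);
 *   }
 */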
#endif
h2o_multithread_queue_t *h2o_multithread_create_queue(h2o_loop_t *loop)
{
h2o_multithread_queue_t *queue = h2o_mem_alloc(sizeof(*queue));
memset(queue, 0, sizeof(*queue));
#if H2O_USE_LIBUV
uv_async_init(loop, &queue->async, (uv_async_cb)queue_cb);
#else
init_async(queue, loop);
#endif
pthread_mutex_init(&queue->mutex, NULL);
h2o_linklist_init_anchor(&queue->receivers.active);
h2o_linklist_init_anchor(&queue->receivers.inactive);
return queue;
}
void h2o_multithread_destroy_queue(h2o_multithread_queue_t *queue)
{
assert(h2o_linklist_is_empty(&queue->receivers.active));
assert(h2o_linklist_is_empty(&queue->receivers.inactive));
#if H2O_USE_LIBUV
uv_close((uv_handle_t *)&queue->async, (uv_close_cb)free);
#else
h2o_socket_read_stop(queue->async.read);
h2o_socket_close(queue->async.read);
close(queue->async.write);
#endif
pthread_mutex_destroy(&queue->mutex);
}
void h2o_multithread_register_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver,
h2o_multithread_receiver_cb cb)
{
receiver->queue = queue;
receiver->_link = (h2o_linklist_t){NULL};
h2o_linklist_init_anchor(&receiver->_messages);
receiver->cb = cb;
pthread_mutex_lock(&queue->mutex);
h2o_linklist_insert(&queue->receivers.inactive, &receiver->_link);
pthread_mutex_unlock(&queue->mutex);
}
void h2o_multithread_unregister_receiver(h2o_multithread_queue_t *queue, h2o_multithread_receiver_t *receiver)
{
assert(queue == receiver->queue);
assert(h2o_linklist_is_empty(&receiver->_messages));
pthread_mutex_lock(&queue->mutex);
h2o_linklist_unlink(&receiver->_link);
pthread_mutex_unlock(&queue->mutex);
}
void h2o_multithread_send_message(h2o_multithread_receiver_t *receiver, h2o_multithread_message_t *message)
{
int do_send = 0;
pthread_mutex_lock(&receiver->queue->mutex);
if (message != NULL) {
assert(!h2o_linklist_is_linked(&message->link));
if (h2o_linklist_is_empty(&receiver->_messages)) {
h2o_linklist_unlink(&receiver->_link);
h2o_linklist_insert(&receiver->queue->receivers.active, &receiver->_link);
do_send = 1;
}
h2o_linklist_insert(&receiver->_messages, &message->link);
} else {
if (h2o_linklist_is_empty(&receiver->_messages))
do_send = 1;
}
pthread_mutex_unlock(&receiver->queue->mutex);
if (do_send) {
#if H2O_USE_LIBUV
uv_async_send(&receiver->queue->async);
#else
while (write(receiver->queue->async.write, "", 1) == -1 && errno == EINTR)
;
#endif
}
}
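/*
 * Illustrative usage sketch (not part of the original file): a loop thread that wants
 * to receive cross-thread messages would typically wire the queue/receiver API up
 * roughly as follows; the callback runs on the thread that owns the queue's loop and
 * must drain the message list before returning:
 *
 *   static void on_message(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
 *   {
 *       while (!h2o_linklist_is_empty(messages)) {
 *           h2o_multithread_message_t *msg =
 *               H2O_STRUCT_FROM_MEMBER(h2o_multithread_message_t, link, messages->next);
 *           h2o_linklist_unlink(&msg->link);
 *           // process and release msg here
 *       }
 *   }
 *
 *   h2o_multithread_queue_t *queue = h2o_multithread_create_queue(loop);
 *   h2o_multithread_receiver_t receiver;
 *   h2o_multithread_register_receiver(queue, &receiver, on_message);
 *   // from any other thread:
 *   h2o_multithread_send_message(&receiver, message);
 */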
void h2o_multithread_create_thread(pthread_t *tid, const pthread_attr_t *attr, void *(*func)(void *), void *arg)
{
if (pthread_create(tid, attr, func, arg) != 0) {
perror("pthread_create");
abort();
}
}
void h2o_sem_init(h2o_sem_t *sem, ssize_t capacity)
{
pthread_mutex_init(&sem->_mutex, NULL);
pthread_cond_init(&sem->_cond, NULL);
sem->_cur = capacity;
sem->_capacity = capacity;
}
void h2o_sem_destroy(h2o_sem_t *sem)
{
assert(sem->_cur == sem->_capacity);
pthread_cond_destroy(&sem->_cond);
pthread_mutex_destroy(&sem->_mutex);
}
void h2o_sem_wait(h2o_sem_t *sem)
{
pthread_mutex_lock(&sem->_mutex);
while (sem->_cur <= 0)
pthread_cond_wait(&sem->_cond, &sem->_mutex);
--sem->_cur;
pthread_mutex_unlock(&sem->_mutex);
}
void h2o_sem_post(h2o_sem_t *sem)
{
pthread_mutex_lock(&sem->_mutex);
++sem->_cur;
pthread_cond_signal(&sem->_cond);
pthread_mutex_unlock(&sem->_mutex);
}
void h2o_sem_set_capacity(h2o_sem_t *sem, ssize_t new_capacity)
{
pthread_mutex_lock(&sem->_mutex);
sem->_cur += new_capacity - sem->_capacity;
sem->_capacity = new_capacity;
pthread_cond_broadcast(&sem->_cond);
pthread_mutex_unlock(&sem->_mutex);
}
/* barrier */
void h2o_barrier_init(h2o_barrier_t *barrier, size_t count)
{
pthread_mutex_init(&barrier->_mutex, NULL);
pthread_cond_init(&barrier->_cond, NULL);
barrier->_count = count;
barrier->_out_of_wait = count;
}
int h2o_barrier_wait(h2o_barrier_t *barrier)
{
int ret;
pthread_mutex_lock(&barrier->_mutex);
barrier->_count--;
if (barrier->_count == 0) {
pthread_cond_broadcast(&barrier->_cond);
ret = 1;
} else {
while (barrier->_count)
pthread_cond_wait(&barrier->_cond, &barrier->_mutex);
ret = 0;
}
pthread_mutex_unlock(&barrier->_mutex);
/*
* this is needed to synchronize h2o_barrier_destroy with the
* exit of this function, so make sure that we can't destroy the
* mutex or the condition before all threads have exited wait()
*/
__sync_sub_and_fetch(&barrier->_out_of_wait, 1);
return ret;
}
int h2o_barrier_done(h2o_barrier_t *barrier)
{
return __sync_add_and_fetch(&barrier->_count, 0) == 0;
}
void h2o_barrier_destroy(h2o_barrier_t *barrier)
{
while (__sync_add_and_fetch(&barrier->_out_of_wait, 0) != 0) {
sched_yield();
}
pthread_mutex_destroy(&barrier->_mutex);
pthread_cond_destroy(&barrier->_cond);
}
| 1 | 12,703 | Let's assume that on Linux `eventfd` is always available as a wrapper of the eventfd2 system call. It is supported since linux 2.6.27 + glibc 2.9, as well as from the ancient versions of bionic. In other words, it would be fair to argue that the pair would be available on practically all platforms that provide `pipe2`. Therefore switching from using `pipe2` to `eventfd` on linux unconditionally would not cause issues on our users. | h2o-h2o | c |
@@ -61,6 +61,7 @@ public class DatabaseTransferConsumer implements IDataTransferConsumer<DatabaseC
IDataTransferNodePrimary, DBPReferentialIntegrityController {
private static final Log log = Log.getLog(DatabaseTransferConsumer.class);
+ private DBCStatistics statistics = new DBCStatistics();
private DatabaseConsumerSettings settings;
private DatabaseMappingContainer containerMapping;
private ColumnMapping[] columnMappings; | 1 | /*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2021 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.tools.transfer.database;
import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.model.*;
import org.jkiss.dbeaver.model.data.DBDAttributeBinding;
import org.jkiss.dbeaver.model.data.DBDAttributeBindingCustom;
import org.jkiss.dbeaver.model.data.DBDInsertReplaceMethod;
import org.jkiss.dbeaver.model.data.DBDValueHandler;
import org.jkiss.dbeaver.model.edit.DBEPersistAction;
import org.jkiss.dbeaver.model.exec.*;
import org.jkiss.dbeaver.model.impl.AbstractExecutionSource;
import org.jkiss.dbeaver.model.impl.struct.AbstractAttribute;
import org.jkiss.dbeaver.model.meta.DBSerializable;
import org.jkiss.dbeaver.model.navigator.DBNDatabaseNode;
import org.jkiss.dbeaver.model.navigator.DBNEvent;
import org.jkiss.dbeaver.model.navigator.DBNUtils;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.runtime.VoidProgressMonitor;
import org.jkiss.dbeaver.model.sql.registry.SQLInsertReplaceMethodDescriptor;
import org.jkiss.dbeaver.model.sql.registry.SQLInsertReplaceMethodRegistry;
import org.jkiss.dbeaver.model.struct.*;
import org.jkiss.dbeaver.model.struct.rdb.DBSCatalog;
import org.jkiss.dbeaver.model.struct.rdb.DBSManipulationType;
import org.jkiss.dbeaver.model.struct.rdb.DBSSchema;
import org.jkiss.dbeaver.runtime.DBWorkbench;
import org.jkiss.dbeaver.runtime.ui.DBPPlatformUI;
import org.jkiss.dbeaver.tools.transfer.IDataTransferAttributeTransformer;
import org.jkiss.dbeaver.tools.transfer.IDataTransferConsumer;
import org.jkiss.dbeaver.tools.transfer.IDataTransferNodePrimary;
import org.jkiss.dbeaver.tools.transfer.IDataTransferProcessor;
import org.jkiss.dbeaver.tools.transfer.internal.DTMessages;
import org.jkiss.utils.CommonUtils;
import java.lang.reflect.InvocationTargetException;
import java.util.*;
/**
* Stream transfer consumer
*/
@DBSerializable("databaseTransferConsumer")
public class DatabaseTransferConsumer implements IDataTransferConsumer<DatabaseConsumerSettings, IDataTransferProcessor>,
IDataTransferNodePrimary, DBPReferentialIntegrityController {
private static final Log log = Log.getLog(DatabaseTransferConsumer.class);
private DatabaseConsumerSettings settings;
private DatabaseMappingContainer containerMapping;
private ColumnMapping[] columnMappings;
private DBDAttributeBinding[] sourceBindings;
private DBCExecutionContext targetContext;
private DBCSession targetSession;
private DBSDataManipulator.ExecuteBatch executeBatch;
private DBSDataBulkLoader.BulkLoadManager bulkLoadManager;
private long rowsExported = 0;
private boolean ignoreErrors = false;
private List<DBSAttributeBase> targetAttributes;
private boolean useIsolatedConnection;
private Boolean oldAutoCommit;
// Used only for non-explicit import
// In this case consumer will be replaced with explicit consumers during configuration
private DBSObjectContainer targetObjectContainer;
// Used in deserialized or directly instantiated consumers
private DBSDataManipulator localTargetObject;
private boolean isPreview;
private List<Object[]> previewRows;
private DBDAttributeBinding[] rsAttributes;
public static class ColumnMapping {
public DBDAttributeBinding sourceAttr;
public DatabaseMappingAttribute targetAttr;
public DBDValueHandler sourceValueHandler;
public DBDValueHandler targetValueHandler;
public int targetIndex = -1;
public IDataTransferAttributeTransformer valueTransformer;
public Map<String, Object> valueTransformerProperties;
private ColumnMapping(DBDAttributeBinding sourceAttr) {
this.sourceAttr = sourceAttr;
}
@Override
public String toString() {
return sourceAttr + "->" + targetAttr;
}
}
public DatabaseTransferConsumer() {
}
public DatabaseTransferConsumer(DBSDataManipulator targetObject) {
this.localTargetObject = targetObject;
}
public DatabaseTransferConsumer(DBSObjectContainer targetObjectContainer) {
this.targetObjectContainer = targetObjectContainer;
}
public DBSObjectContainer getTargetObjectContainer() {
return targetObjectContainer;
}
public ColumnMapping[] getColumnMappings() {
return columnMappings;
}
@Override
public DBSObject getDatabaseObject() {
if (targetObjectContainer != null) {
return targetObjectContainer;
}
return containerMapping == null ? localTargetObject : containerMapping.getTarget();
}
protected boolean isPreview() {
return isPreview;
}
protected void setPreview(boolean preview) {
isPreview = preview;
}
protected List<Object[]> getPreviewRows() {
return previewRows;
}
@Override
public void fetchStart(DBCSession session, DBCResultSet resultSet, long offset, long maxRows) throws DBCException {
try {
initExporter(session.getProgressMonitor());
} catch (DBException e) {
throw new DBCException("Error initializing exporter", e);
}
if (containerMapping == null) {
throw new DBCException("Internal error: consumer mappings not set");
}
AbstractExecutionSource executionSource = new AbstractExecutionSource(containerMapping.getSource(), targetContext, this);
DBSDataManipulator targetObject = getTargetObject();
if (!isPreview && offset <= 0 && settings.isTruncateBeforeLoad() && (containerMapping == null || containerMapping.getMappingType() == DatabaseMappingType.existing)) {
// Truncate target tables
if ((targetObject.getSupportedFeatures() & DBSDataManipulator.DATA_TRUNCATE) != 0) {
targetObject.truncateData(
targetSession,
executionSource);
} else {
log.error("Table '" + targetObject.getName() + "' doesn't support truncate operation");
}
}
boolean dynamicTarget = targetContext.getDataSource().getInfo().isDynamicMetadata();
DBSDataContainer sourceObject = getSourceObject();
if (dynamicTarget) {
// Document-based datasource
rsAttributes = DBUtils.getAttributeBindings(session, sourceObject, resultSet.getMeta());
} else {
rsAttributes = DBUtils.makeLeafAttributeBindings(session, sourceObject, resultSet);
}
columnMappings = new ColumnMapping[rsAttributes.length];
sourceBindings = rsAttributes;
targetAttributes = new ArrayList<>(columnMappings.length);
for (int i = 0; i < rsAttributes.length; i++) {
if (isSkipColumn(rsAttributes[i])) {
continue;
}
ColumnMapping columnMapping = new ColumnMapping(rsAttributes[i]);
if (containerMapping == null) {
// No explicit mappings. Mapping must be provided by data producer
// Map all attributes directly.
if (targetObject instanceof DBSEntity) {
try {
DBSEntityAttribute attribute = ((DBSEntity) targetObject).getAttribute(session.getProgressMonitor(), columnMapping.sourceAttr.getName());
if (attribute != null) {
columnMapping.targetAttr = new DatabaseMappingAttribute(null, columnMapping.sourceAttr);
columnMapping.targetAttr.setTarget(attribute);
columnMapping.targetAttr.setMappingType(DatabaseMappingType.existing);
}
} catch (DBException e) {
log.error("Error getting target attribute");
}
}
if (columnMapping.targetAttr == null) {
throw new DBCException("Can't resolve target attribute for [" + columnMapping.sourceAttr.getName() + "]");
}
} else if (sourceObject instanceof DBSDocumentContainer && dynamicTarget) {
try {
DBSDocumentContainer docContainer = (DBSDocumentContainer) (targetObject instanceof DBSDocumentContainer ? targetObject : sourceObject);
DBSEntityAttribute docAttribute = docContainer.getDocumentAttribute(session.getProgressMonitor());
if (docAttribute != null) {
columnMapping.targetAttr = new DatabaseMappingAttribute(containerMapping, columnMapping.sourceAttr);
columnMapping.targetAttr.setTarget(docAttribute);
columnMapping.targetAttr.setMappingType(DatabaseMappingType.existing);
}
} catch (DBException e) {
throw new DBCException("Error getting document attribute", e);
}
} else {
columnMapping.targetAttr = containerMapping.getAttributeMapping(columnMapping.sourceAttr);
if (columnMapping.targetAttr == null) {
throw new DBCException("Can't find target attribute [" + columnMapping.sourceAttr.getName() + "]");
}
}
if (columnMapping.targetAttr.getMappingType() == DatabaseMappingType.skip) {
continue;
}
if (columnMapping.targetAttr.getTransformer() != null) {
try {
columnMapping.valueTransformer = columnMapping.targetAttr.getTransformer().createTransformer();
columnMapping.valueTransformerProperties = columnMapping.targetAttr.getTransformerProperties();
} catch (DBException e) {
throw new DBCException("Can't create attribute transformer", e);
}
}
DBSAttributeBase targetAttr = columnMapping.targetAttr.getTarget();
if (targetAttr == null) {
if (isPreview) {
targetAttr = new PreviewColumnInfo(null, columnMapping.sourceAttr, columnMapping.targetIndex);
} else if (columnMapping.targetAttr.getSource() instanceof DBSEntityAttribute || targetObject instanceof DBSDocumentContainer) {
// Use source attr. Some data sources (e.g. document-oriented ones) do not have a strict set of attributes
targetAttr = columnMapping.targetAttr.getSource();
} else {
throw new DBCException("Target attribute for [" + columnMapping.sourceAttr.getName() + "] wasn't resolved");
}
}
columnMapping.sourceValueHandler = columnMapping.sourceAttr.getValueHandler();
columnMapping.targetValueHandler = DBUtils.findValueHandler(targetContext.getDataSource(), targetAttr);
columnMapping.targetIndex = targetAttributes.size();
columnMappings[i] = columnMapping;
targetAttributes.add(targetAttr);
}
DBSAttributeBase[] attributes = targetAttributes.toArray(new DBSAttributeBase[0]);
Map<String, Object> options = new HashMap<>();
options.put(DBSDataManipulator.OPTION_USE_MULTI_INSERT, settings.isUseMultiRowInsert());
options.put(DBSDataManipulator.OPTION_SKIP_BIND_VALUES, settings.isSkipBindValues());
if (!isPreview) {
if (settings.isUseBulkLoad()) {
DBSDataBulkLoader bulkLoader = DBUtils.getAdapter(DBSDataBulkLoader.class, targetContext.getDataSource());
if (targetObject != null && bulkLoader != null) {
try {
bulkLoadManager = bulkLoader.createBulkLoad(
targetSession, targetObject, attributes, executionSource, settings.getCommitAfterRows(), options);
} catch (Exception e) {
throw new DBCException("Error creating bulk loader", e);
}
}
}
if (bulkLoadManager == null) {
if (targetObject instanceof DBSDataManipulatorExt) {
((DBSDataManipulatorExt) targetObject).beforeDataChange(targetSession, DBSManipulationType.INSERT, attributes, executionSource);
}
executeBatch = targetObject.insertData(
targetSession,
attributes,
null,
executionSource,
options);
}
} else {
previewRows = new ArrayList<>();
executeBatch = new PreviewBatch();
}
}
private boolean isSkipColumn(DBDAttributeBinding attr) {
return attr.isPseudoAttribute() ||
(!settings.isTransferAutoGeneratedColumns() && attr.isAutoGenerated()) ||
attr instanceof DBDAttributeBindingCustom;
}
@Override
public void fetchRow(DBCSession session, DBCResultSet resultSet) throws DBCException {
Object[] rowValues = new Object[targetAttributes.size()];
for (int i = 0; i < columnMappings.length; i++) {
ColumnMapping column = columnMappings[i];
if (column == null || column.targetIndex < 0) {
continue;
}
Object attrValue;
if (column.sourceValueHandler != null) {
if (column.sourceAttr instanceof DBDAttributeBindingCustom) {
attrValue = DBUtils.getAttributeValue(column.sourceAttr, sourceBindings, rowValues);
} else {
attrValue = column.sourceValueHandler.fetchValueObject(session, resultSet, column.sourceAttr, i);
}
} else {
// No value handler - get raw value
attrValue = resultSet.getAttributeValue(i);
}
if (containerMapping != null && containerMapping.getTarget() instanceof DBSDocumentContainer) {
rowValues[column.targetIndex] = attrValue;
} else {
DatabaseMappingAttribute targetAttr = column.targetAttr;
rowValues[column.targetIndex] = column.targetValueHandler.getValueFromObject(
targetSession,
targetAttr.getTarget() == null ? targetAttr.getSource() : targetAttr.getTarget(),
attrValue,
false, false);
}
}
// Transform value
for (ColumnMapping column : columnMappings) {
if (column == null || column.targetIndex < 0) {
continue;
}
if (column.valueTransformer != null) {
Object attrValue = rowValues[column.targetIndex];
try {
rowValues[column.targetIndex] = column.valueTransformer.transformAttribute(
session,
rsAttributes,
rowValues,
column.sourceAttr,
attrValue,
column.valueTransformerProperties);
} catch (DBException e) {
throw new DBCException(
"Error transforming attribute '" + column.sourceAttr.getName() +
"' value with transformer '" + column.targetAttr.getTransformer().getName() + "'", e);
}
}
}
if (bulkLoadManager != null) {
bulkLoadManager.addRow(targetSession, rowValues);
} else {
executeBatch.add(rowValues);
}
rowsExported++;
// No need. monitor is incremented in data reader
//session.getProgressMonitor().worked(1);
insertBatch(false);
}
private void insertBatch(boolean force) throws DBCException {
if (isPreview) {
return;
}
boolean needCommit = force || ((rowsExported % settings.getCommitAfterRows()) == 0);
if (bulkLoadManager != null) {
if (needCommit) {
bulkLoadManager.flushRows(targetSession);
}
return;
} else {
boolean disableUsingBatches = settings.isDisableUsingBatches();
if ((needCommit || disableUsingBatches) && executeBatch != null) {
if (DBFetchProgress.monitorFetchProgress(rowsExported)) {
targetSession.getProgressMonitor().subTask("Insert rows (" + rowsExported + ")");
}
Map<String, Object> options = new HashMap<>();
options.put(DBSDataManipulator.OPTION_DISABLE_BATCHES, disableUsingBatches);
options.put(DBSDataManipulator.OPTION_MULTI_INSERT_BATCH_SIZE, settings.getMultiRowInsertBatch());
options.put(DBSDataManipulator.OPTION_SKIP_BIND_VALUES, settings.isSkipBindValues());
boolean onDuplicateKeyCaseOn = settings.getOnDuplicateKeyInsertMethodId() != null &&
!settings.getOnDuplicateKeyInsertMethodId().equals(DBSDataManipulator.INSERT_NONE_METHOD);
if (onDuplicateKeyCaseOn) {
String insertMethodId = settings.getOnDuplicateKeyInsertMethodId();
if (!CommonUtils.isEmpty(insertMethodId)) {
SQLInsertReplaceMethodDescriptor insertReplaceMethod = SQLInsertReplaceMethodRegistry.getInstance().getInsertMethod(insertMethodId);
if (insertReplaceMethod != null) {
try {
DBDInsertReplaceMethod insertMethod = insertReplaceMethod.createInsertMethod();
options.put(DBSDataManipulator.OPTION_INSERT_REPLACE_METHOD, insertMethod);
} catch (DBException e) {
log.debug("Can't get insert replace method", e);
}
}
}
}
boolean retryInsert;
do {
retryInsert = false;
try {
DBExecUtils.tryExecuteRecover(targetSession, targetSession.getDataSource(), param -> {
try {
executeBatch.execute(targetSession, options);
} catch (Throwable e) {
throw new InvocationTargetException(e);
}
});
} catch (Throwable e) {
log.error("Error inserting row", e);
if (ignoreErrors) {
break;
}
String message;
if (disableUsingBatches) {
message = DTMessages.database_transfer_consumer_task_error_occurred_during_data_load;
} else {
message = DTMessages.database_transfer_consumer_task_error_occurred_during_batch_insert;
}
DBPPlatformUI.UserResponse response = DBWorkbench.getPlatformUI().showErrorStopRetryIgnore(message, e, true);
switch (response) {
case STOP:
throw new DBCException("Can't insert row", e);
case RETRY:
retryInsert = true;
break;
case IGNORE:
retryInsert = false;
break;
case IGNORE_ALL:
ignoreErrors = true;
retryInsert = false;
break;
}
}
} while (retryInsert);
}
}
if (settings.isUseTransactions() && needCommit && !targetSession.getProgressMonitor().isCanceled()) {
DBCTransactionManager txnManager = DBUtils.getTransactionManager(targetSession.getExecutionContext());
if (txnManager != null && txnManager.isSupportsTransactions() && !txnManager.isAutoCommit()) {
targetSession.getProgressMonitor().subTask("Commit changes");
txnManager.commit(targetSession);
}
}
}
@Override
public void fetchEnd(DBCSession session, DBCResultSet resultSet) throws DBCException {
try {
if (rowsExported > 0) {
insertBatch(true);
}
if (bulkLoadManager != null) {
bulkLoadManager.finishBulkLoad(targetSession);
} else if (executeBatch != null) {
executeBatch.close();
executeBatch = null;
}
} finally {
DBSDataManipulator targetObject = getTargetObject();
if (!isPreview && targetObject instanceof DBSDataManipulatorExt) {
((DBSDataManipulatorExt) targetObject).afterDataChange(
targetSession,
DBSManipulationType.INSERT,
targetAttributes.toArray(new DBSAttributeBase[0]),
new AbstractExecutionSource(getSourceObject(), targetContext, this));
}
}
}
@Override
public void close() {
closeExporter();
}
private void initExporter(DBRProgressMonitor monitor) throws DBException {
DBSObject targetDB = checkTargetContainer(monitor);
DBPDataSourceContainer dataSourceContainer = targetDB.getDataSource().getContainer();
if (!dataSourceContainer.hasModifyPermission(DBPDataSourcePermission.PERMISSION_IMPORT_DATA)) {
throw new DBCException("Data transfer to database [" + dataSourceContainer.getName() + "] restricted by connection configuration");
}
try {
useIsolatedConnection = !isPreview && settings.isOpenNewConnections() && !dataSourceContainer.getDriver().isEmbedded();
targetContext = useIsolatedConnection ?
DBUtils.getObjectOwnerInstance(targetDB).openIsolatedContext(monitor, "Data transfer consumer", null) : DBUtils.getDefaultContext(targetDB, false);
} catch (DBException e) {
throw new DBCException("Error opening new connection", e);
}
targetSession = targetContext.openSession(monitor, DBCExecutionPurpose.UTIL, "Data load");
targetSession.enableLogging(false);
if (!isPreview) {
DBCTransactionManager txnManager = DBUtils.getTransactionManager(targetSession.getExecutionContext());
if (txnManager != null && txnManager.isSupportsTransactions()) {
oldAutoCommit = txnManager.isAutoCommit();
if (settings.isUseTransactions()) {
if (oldAutoCommit) {
txnManager.setAutoCommit(monitor, false);
}
} else {
if (!oldAutoCommit) {
txnManager.setAutoCommit(monitor, true);
}
}
}
}
}
private DBSObject checkTargetContainer(DBRProgressMonitor monitor) throws DBException {
DBSDataManipulator targetObject = getTargetObject();
if (targetObject == null) {
if (settings.getContainerNode() != null && settings.getContainerNode().getDataSource() == null) {
// Init connection
settings.getContainerNode().initializeNode(monitor, null);
}
if (settings.getContainer() == null) {
throw new DBCException("Can't initialize database consumer. No target object and no target container");
}
}
return targetObject == null ? settings.getContainer() : targetObject;
}
private void closeExporter() {
if (!isPreview && targetSession != null && oldAutoCommit != null) {
try {
DBCTransactionManager txnManager = DBUtils.getTransactionManager(targetSession.getExecutionContext());
if (txnManager != null) {
txnManager.setAutoCommit(targetSession.getProgressMonitor(), oldAutoCommit);
}
} catch (Exception e) {
log.debug("Error reverting auto-commit mode", e);
}
}
try {
if (targetSession != null) {
targetSession.close();
targetSession = null;
}
} catch (Throwable e) {
log.debug(e);
}
if (targetContext != null && useIsolatedConnection) {
targetContext.close();
targetContext = null;
}
if (bulkLoadManager != null) {
bulkLoadManager.close();
bulkLoadManager = null;
}
}
@Override
public void initTransfer(DBSObject sourceObject, DatabaseConsumerSettings settings, TransferParameters parameters, IDataTransferProcessor processor, Map<String, Object> processorProperties) {
this.settings = settings;
this.containerMapping = settings.getDataMapping((DBSDataContainer) sourceObject);
}
@Override
public void startTransfer(DBRProgressMonitor monitor) throws DBException {
// Create all necessary database objects
monitor.beginTask("Create necessary database objects", 1);
try {
DBSObject dbObject = checkTargetContainer(monitor);
if (!isPreview && containerMapping != null) {
DBSObjectContainer container = settings.getContainer();
if (container == null) {
throw new DBException("No target datasource - can't create target objects");
}
boolean hasNewObjects = createTargetDatabaseObjects(monitor, dbObject);
if (hasNewObjects) {
DatabaseTransferUtils.refreshDatabaseModel(monitor, settings, containerMapping);
}
}
} finally {
monitor.done();
}
}
private boolean createTargetDatabaseObjects(DBRProgressMonitor monitor, DBSObject dbObject) throws DBException {
try (DBCSession session = DBUtils.openMetaSession(monitor, dbObject, "Create target metadata")) {
// We may need to change active catalog to create target object in the proper location
DBSCatalog oldCatalog = null;
DBSSchema oldSchema = null;
DBSCatalog catalog = dbObject instanceof DBSSchema ? DBUtils.getParentOfType(DBSCatalog.class, dbObject) : null;
if (catalog != null) {
DBCExecutionContextDefaults contextDefaults = session.getExecutionContext().getContextDefaults();
if (contextDefaults != null && contextDefaults.supportsCatalogChange() && contextDefaults.getDefaultCatalog() != catalog) {
oldCatalog = contextDefaults.getDefaultCatalog();
try {
contextDefaults.setDefaultCatalog(monitor, catalog, (DBSSchema) dbObject);
} catch (DBCException e) {
log.debug(e);
}
}
}
try {
switch (containerMapping.getMappingType()) {
case create:
case existing:
return createTargetTable(session, containerMapping);
default:
return false;
}
} finally {
if (oldCatalog != null) {
// Revert to old catalog
try {
session.getExecutionContext().getContextDefaults().setDefaultCatalog(monitor, oldCatalog, oldSchema);
} catch (DBCException e) {
log.debug(e);
}
}
}
}
}
private boolean createTargetTable(DBCSession session, DatabaseMappingContainer containerMapping) throws DBException {
DBPDataSourceContainer dataSourceContainer = session.getDataSource().getContainer();
if (!dataSourceContainer.hasModifyPermission(DBPDataSourcePermission.PERMISSION_EDIT_METADATA)) {
throw new DBCException("New table creation in database [" + dataSourceContainer.getName() + "] restricted by connection configuration");
}
DBSObjectContainer schema = settings.getContainer();
if (schema == null) {
throw new DBException("No target container selected");
}
if (session.getDataSource().getInfo().isDynamicMetadata()) {
DatabaseTransferUtils.createTargetDynamicTable(session.getProgressMonitor(), session.getExecutionContext(), schema, containerMapping);
return true;
} else {
DBEPersistAction[] actions = DatabaseTransferUtils.generateTargetTableDDL(session.getProgressMonitor(), session.getExecutionContext(), schema, containerMapping);
try {
DatabaseTransferUtils.executeDDL(session, actions);
} catch (DBCException e) {
throw new DBCException("Can't create or update target table:\n" + Arrays.toString(actions), e);
}
return actions.length > 0;
}
}
@Override
public void finishTransfer(DBRProgressMonitor monitor, boolean last) {
if (last) {
// Refresh navigator
monitor.subTask("Refresh navigator model");
try {
settings.getContainerNode().refreshNode(monitor, this);
} catch (Exception e) {
log.debug("Error refreshing navigator model after data consumer", e);
}
}
if (!last && settings.isOpenTableOnFinish()) {
DBSDataManipulator targetObject = getTargetObject();
if (targetObject != null) {
// Refresh node first (this will refresh table data as well)
try {
DBNDatabaseNode objectNode = DBNUtils.getNodeByObject(targetObject);
if (objectNode != null) {
objectNode.refreshNode(monitor, DBNEvent.FORCE_REFRESH);
}
} catch (Exception e) {
log.error("Error refreshing object '" + targetObject.getName() + "'", e);
}
DBWorkbench.getPlatformUI().openEntityEditor(targetObject);
}
}
}
public DBSDataContainer getSourceObject() {
return containerMapping == null ? null : containerMapping.getSource();
}
public DBSDataManipulator getTargetObject() {
return containerMapping == null ? localTargetObject : containerMapping.getTarget();
}
public void setTargetObject(DBSDataManipulator targetObject) {
this.localTargetObject = targetObject;
}
@Override
public String getObjectName() {
if (targetObjectContainer != null) {
return targetObjectContainer.getName();
}
DBSDataManipulator targetObject = getTargetObject();
String targetName = null;
if (targetObject != null) {
targetName = DBUtils.getObjectFullName(targetObject, DBPEvaluationContext.UI);
}
if (targetName != null) {
return targetName + " [Existing]";
}
if (containerMapping == null) {
return "?";
}
targetName = containerMapping.getTargetFullName();
switch (containerMapping.getMappingType()) {
case create:
return targetName + " [Create]";
case existing:
for (DatabaseMappingAttribute attr : containerMapping.getAttributeMappings(new VoidProgressMonitor())) {
if (attr.getMappingType() == DatabaseMappingType.create) {
return targetName + " [Alter]";
}
}
return targetName;// + " [No changes]";
case skip:
return "[Skip]";
default:
return targetName + " [Existing]";
}
}
@Override
public DBPImage getObjectIcon() {
if (targetObjectContainer != null) {
return DBIcon.TREE_FOLDER_TABLE;
}
DBSDataManipulator targetObject = getTargetObject();
if (targetObject instanceof DBPImageProvider) {
return DBValueFormatting.getObjectImage(targetObject);
}
return DBIcon.TREE_TABLE;
}
@Override
public String getObjectContainerName() {
if (targetObjectContainer != null) {
return DBUtils.getObjectFullName(targetObjectContainer, DBPEvaluationContext.UI);
}
DBPDataSourceContainer container = getDataSourceContainer();
return container != null ? container.getName() : "?";
}
@Override
public DBPImage getObjectContainerIcon() {
if (targetObjectContainer != null) {
return DBIcon.TREE_FOLDER_TABLE;
}
DBPDataSourceContainer container = getDataSourceContainer();
return container != null ? container.getDriver().getIcon() : null;
}
@Override
public boolean isConfigurationComplete() {
if (localTargetObject != null) {
return true;
}
return containerMapping != null &&
(containerMapping.getTarget() != null || !CommonUtils.isEmpty(containerMapping.getTargetName()));
}
DBPDataSourceContainer getDataSourceContainer() {
if (targetObjectContainer != null) {
return targetObjectContainer.getDataSource().getContainer();
}
DBSDataManipulator targetObject = getTargetObject();
if (targetObject != null) {
return targetObject.getDataSource().getContainer();
}
DBSObjectContainer container = settings.getContainer();
if (container != null) {
return container.getDataSource().getContainer();
}
return null;
}
@Override
public boolean equals(Object obj) {
return obj instanceof DatabaseTransferConsumer &&
CommonUtils.equalObjects(getTargetObject(), ((DatabaseTransferConsumer) obj).getTargetObject());
}
@Override
public boolean supportsChangingReferentialIntegrity(@NotNull DBRProgressMonitor monitor) throws DBException {
return checkTargetContainer(monitor) instanceof DBPReferentialIntegrityController;
}
@Override
public void enableReferentialIntegrity(@NotNull DBRProgressMonitor monitor, boolean enable) throws DBException {
DBSObject dbsObject = checkTargetContainer(monitor);
if (!(dbsObject instanceof DBPReferentialIntegrityController)) {
throw new DBException("Changing referential integrity is unsupported!");
}
DBPReferentialIntegrityController controller = (DBPReferentialIntegrityController) dbsObject;
controller.enableReferentialIntegrity(monitor, enable);
}
@Nullable
@Override
public String getChangeReferentialIntegrityStatement(@NotNull DBRProgressMonitor monitor, boolean enable) throws DBException {
DBSObject dbsObject = checkTargetContainer(monitor);
if (dbsObject instanceof DBPReferentialIntegrityController) {
return ((DBPReferentialIntegrityController) dbsObject).getChangeReferentialIntegrityStatement(monitor, enable);
}
return null;
}
public DatabaseConsumerSettings getSettings() {
return settings;
}
private class PreviewBatch implements DBSDataManipulator.ExecuteBatch {
@Override
public void add(@NotNull Object[] attributeValues) throws DBCException {
previewRows.add(attributeValues);
}
@NotNull
@Override
public DBCStatistics execute(@NotNull DBCSession session, Map<String, Object> options) throws DBCException {
return new DBCStatistics();
}
@Override
public void generatePersistActions(@NotNull DBCSession session, @NotNull List<DBEPersistAction> actions, Map<String, Object> options) throws DBCException {
}
@Override
public void close() {
}
}
/*
* This class is only suitable for data transfer preview.
*/
private static class PreviewColumnInfo extends AbstractAttribute implements DBSEntityAttribute {
private final DBSEntity entity;
private final DBDAttributeBinding binding;
public PreviewColumnInfo(DBSEntity entity, DBDAttributeBinding binding, int index) {
super(binding.getName(), binding.getTypeName(), -1, index, binding.getMaxLength(), null, null, false, false);
this.entity = entity;
this.binding = binding;
}
@Nullable
@Override
public String getDefaultValue() {
return null;
}
@NotNull
@Override
public DBSEntity getParentObject() {
return entity;
}
@NotNull
@Override
public DBPDataSource getDataSource() {
return this.binding.getDataSource();
}
@Override
public DBPDataKind getDataKind() {
return this.binding.getDataKind();
}
}
}
| 1 | 11,682 | This field could be `final`. | dbeaver-dbeaver | java |