patch (string) | y (int64) | oldf (string) | idx (int64) | id (int64) | msg (string) | proj (string, 212 classes) | lang (string, 9 classes) |
---|---|---|---|---|---|---|---|
@@ -30,8 +30,11 @@ abstract class BaseAction<R> implements Action<R> {
String tableName = table().toString();
if (tableName.contains("/")) {
return tableName + "#" + type;
- } else if (tableName.startsWith("hadoop.") || tableName.startsWith("hive.")) {
- // HiveCatalog and HadoopCatalog prepend a logical name which we need to drop for Spark 2.4
+ } else if (tableName.startsWith("hadoop.")) {
+ // Load a path by HadoopCatalog or HadoopTables
+ return table().location() + "#" + type;
+ } else if (tableName.startsWith("hive.")) {
+ // HiveCatalog prepends a logical name which we need to drop for Spark 2.4
return tableName.replaceFirst("(hadoop\\.)|(hive\\.)", "") + "." + type;
} else {
return tableName + "." + type; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.actions;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.Table;
abstract class BaseAction<R> implements Action<R> {
protected abstract Table table();
protected String metadataTableName(MetadataTableType type) {
String tableName = table().toString();
if (tableName.contains("/")) {
return tableName + "#" + type;
} else if (tableName.startsWith("hadoop.") || tableName.startsWith("hive.")) {
// HiveCatalog and HadoopCatalog prepend a logical name which we need to drop for Spark 2.4
return tableName.replaceFirst("(hadoop\\.)|(hive\\.)", "") + "." + type;
} else {
return tableName + "." + type;
}
}
}
| 1 | 21,175 | Looks like this needs to be updated. There is no need to remove `hadoop.` if Hadoop tables don't use this code path. | apache-iceberg | java |
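
The review comment argues that the combined `(hadoop\.)|(hive\.)` regex is broader than needed once Hadoop tables are loaded by location. A minimal sketch of what the helper might look like after addressing the comment, stripping only the `hive.` prefix (this is an illustration of the reviewer's suggestion, not the actual follow-up commit):

```java
protected String metadataTableName(MetadataTableType type) {
  String tableName = table().toString();
  if (tableName.contains("/")) {
    // Path-based tables already resolve by location
    return tableName + "#" + type;
  } else if (tableName.startsWith("hadoop.")) {
    // Load a path by HadoopCatalog or HadoopTables
    return table().location() + "#" + type;
  } else if (tableName.startsWith("hive.")) {
    // HiveCatalog prepends a logical name which we need to drop for Spark 2.4
    return tableName.replaceFirst("hive\\.", "") + "." + type;
  } else {
    return tableName + "." + type;
  }
}
```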
@@ -1,4 +1,5 @@
require 'fileutils'
+require 'digest'
module RSpec
module Core | 1 | require 'fileutils'
module RSpec
module Core
# Stores runtime configuration information.
#
# @example Standard settings
# RSpec.configure do |c|
# c.drb = true
# c.drb_port = 1234
# c.default_path = 'behavior'
# end
#
# @example Hooks
# RSpec.configure do |c|
# c.before(:suite) { establish_connection }
# c.before(:each) { log_in_as :authorized }
# c.around(:each) { |ex| Database.transaction(&ex) }
# end
#
# @see RSpec.configure
# @see Hooks
class Configuration
include RSpec::Core::Hooks
class MustBeConfiguredBeforeExampleGroupsError < StandardError; end
# @private
def self.define_reader(name)
eval <<-CODE
def #{name}
value_for(#{name.inspect}, defined?(@#{name}) ? @#{name} : nil)
end
CODE
end
# @private
def self.deprecate_alias_key
RSpec.warn_deprecation <<-MESSAGE
The :alias option to add_setting is deprecated. Use :alias_with on the original setting instead.
Called from #{caller(0)[5]}
MESSAGE
end
# @private
def self.define_aliases(name, alias_name)
alias_method alias_name, name
alias_method "#{alias_name}=", "#{name}="
define_predicate_for alias_name
end
# @private
def self.define_predicate_for(*names)
names.each {|name| alias_method "#{name}?", name}
end
# @private
#
# Invoked by the `add_setting` instance method. Use that method on a
# `Configuration` instance rather than this class method.
def self.add_setting(name, opts={})
raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default)
if opts[:alias]
deprecate_alias_key
define_aliases(opts[:alias], name)
else
attr_writer name
define_reader name
define_predicate_for name
end
[opts[:alias_with]].flatten.compact.each do |alias_name|
define_aliases(name, alias_name)
end
end
# @macro [attach] add_setting
# @attribute $1
# Patterns to match against lines in backtraces presented in failure
# messages in order to filter them out (default:
# DEFAULT_BACKTRACE_PATTERNS). You can either replace this list using
# the setter or modify it using the getter.
#
# To override this behavior and display a full backtrace, use
# `--backtrace` on the command line, in a `.rspec` file, or in the
# `rspec_options` attribute of RSpec's rake task.
add_setting :backtrace_clean_patterns
# Path to use if no path is provided to the `rspec` command (default:
# `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to
# run all the examples in the `spec` directory.
add_setting :default_path
# Run examples over DRb (default: `false`). RSpec doesn't supply the DRb
# server, but you can use tools like spork.
add_setting :drb
# The drb_port (default: `8989`).
add_setting :drb_port
# Default: `$stderr`.
add_setting :error_stream
# Clean up and exit after the first failure (default: `false`).
add_setting :fail_fast
# The exit code to return if there are any failures (default: 1).
add_setting :failure_exit_code
# Determines the order in which examples are run (default: OS standard
# load order for files, declaration order for groups and examples).
define_reader :order
# Default: `$stdout`.
# Also known as `output` and `out`
add_setting :output_stream, :alias_with => [:output, :out]
# Load files matching this pattern (default: `'**/*_spec.rb'`)
add_setting :pattern, :alias_with => :filename_pattern
# Report the times for the 10 slowest examples (default: `false`).
add_setting :profile_examples
# Run all examples if none match the configured filters (default: `false`).
add_setting :run_all_when_everything_filtered
# Seed for random ordering (default: generated randomly each run).
#
# When you run specs with `--order random`, RSpec generates a random seed
# for the randomization and prints it to the `output_stream` (assuming
# you're using RSpec's built-in formatters). If you discover an ordering
# dependency (i.e. examples fail intermittently depending on order), set
# this (on Configuration or on the command line with `--seed`) to run
# using the same seed while you debug the issue.
#
# We recommend, actually, that you use the command line approach so you
# don't accidentally leave the seed encoded.
define_reader :seed
# When a block passed to pending fails (as expected), display the failure
# without reporting it as a failure (default: false).
add_setting :show_failures_in_pending_blocks
# Convert symbols to hashes with the symbol as a key with a value of
# `true` (default: false).
#
# This allows you to tag a group or example like this:
#
# describe "something slow", :slow do
# # ...
# end
#
# ... instead of having to type:
#
# describe "something slow", :slow => true do
# # ...
# end
add_setting :treat_symbols_as_metadata_keys_with_true_values
# @private
add_setting :tty
# @private
add_setting :include_or_extend_modules
# @private
add_setting :files_to_run
# @private
add_setting :expecting_with_rspec
# @private
attr_accessor :filter_manager
DEFAULT_BACKTRACE_PATTERNS = [
/\/lib\d*\/ruby\//,
/org\/jruby\//,
/bin\//,
/gems/,
/spec\/spec_helper\.rb/,
/lib\/rspec\/(core|expectations|matchers|mocks)/
]
def initialize
@expectation_frameworks = []
@include_or_extend_modules = []
@mock_framework = nil
@files_to_run = []
@formatters = []
@color = false
@pattern = '**/*_spec.rb'
@failure_exit_code = 1
@backtrace_clean_patterns = DEFAULT_BACKTRACE_PATTERNS.dup
@default_path = 'spec'
@filter_manager = FilterManager.new
@preferred_options = {}
@seed = srand % 0xFFFF
end
# @private
#
# Used to set higher priority option values from the command line.
def force(hash)
if hash.has_key?(:seed)
hash[:order], hash[:seed] = order_and_seed_from_seed(hash[:seed])
elsif hash.has_key?(:order)
set_order_and_seed(hash)
end
@preferred_options.merge!(hash)
end
# @private
def reset
@reporter = nil
@formatters.clear
end
# @overload add_setting(name)
# @overload add_setting(name, opts)
# @option opts [Symbol] :default
#
# set a default value for the generated getter and predicate methods:
#
# add_setting(:foo, :default => "default value")
#
# @option opts [Symbol] :alias_with
#
# Use `:alias_with` to alias the setter, getter, and predicate to another
# name, or names:
#
# add_setting(:foo, :alias_with => :bar)
# add_setting(:foo, :alias_with => [:bar, :baz])
#
# Adds a custom setting to the RSpec.configuration object.
#
# RSpec.configuration.add_setting :foo
#
# Used internally and by extension frameworks like rspec-rails, so they
# can add config settings that are domain specific. For example:
#
# RSpec.configure do |c|
# c.add_setting :use_transactional_fixtures,
# :default => true,
# :alias_with => :use_transactional_examples
# end
#
# `add_setting` creates three methods on the configuration object, a
# setter, a getter, and a predicate:
#
# RSpec.configuration.foo=(value)
# RSpec.configuration.foo
# RSpec.configuration.foo? # returns true if foo returns anything but nil or false
def add_setting(name, opts={})
default = opts.delete(:default)
(class << self; self; end).class_eval do
add_setting(name, opts)
end
send("#{name}=", default) if default
end
# Used by formatters to ask whether a backtrace line should be displayed
# or not, based on the line matching any `backtrace_clean_patterns`.
def cleaned_from_backtrace?(line)
# TODO (David 2011-12-25) why are we asking the configuration to do
# stuff? Either use the patterns directly or encapsulate the filtering
# in a BacktraceCleaner object.
backtrace_clean_patterns.any? { |regex| line =~ regex }
end
# Returns the configured mock framework adapter module
def mock_framework
mock_with :rspec unless @mock_framework
@mock_framework
end
# Delegates to mock_framework=(framework)
def mock_framework=(framework)
mock_with framework
end
# Sets the mock framework adapter module.
#
# `framework` can be a Symbol or a Module.
#
# Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the
# named framework.
#
# Given `:nothing`, configures no framework. Use this if you don't use
# any mocking framework to save a little bit of overhead.
#
# Given a Module, includes that module in every example group. The module
# should adhere to RSpec's mock framework adapter API:
#
# setup_mocks_for_rspec
# - called before each example
#
# verify_mocks_for_rspec
# - called after each example. Framework should raise an exception
# when expectations fail
#
# teardown_mocks_for_rspec
# - called after verify_mocks_for_rspec (even if there are errors)
#
# If the module responds to `configuration` and `mock_with` receives a block,
# it will yield the configuration object to the block e.g.
#
# config.mock_with OtherMockFrameworkAdapter do |mod_config|
# mod_config.custom_setting = true
# end
def mock_with(framework)
framework_module = case framework
when Module
framework
when String, Symbol
require case framework.to_s
when /rspec/i
'rspec/core/mocking/with_rspec'
when /mocha/i
'rspec/core/mocking/with_mocha'
when /rr/i
'rspec/core/mocking/with_rr'
when /flexmock/i
'rspec/core/mocking/with_flexmock'
else
'rspec/core/mocking/with_absolutely_nothing'
end
RSpec::Core::MockFrameworkAdapter
end
new_name, old_name = [framework_module, @mock_framework].map do |mod|
mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed
end
unless new_name == old_name
assert_no_example_groups_defined(:mock_framework)
end
if block_given?
raise "#{framework_module} must respond to `configuration` so that mock_with can yield it." unless framework_module.respond_to?(:configuration)
yield framework_module.configuration
end
@mock_framework = framework_module
end
# Returns the configured expectation framework adapter module(s)
def expectation_frameworks
expect_with :rspec if @expectation_frameworks.empty?
@expectation_frameworks
end
# Delegates to expect_with(framework)
def expectation_framework=(framework)
expect_with(framework)
end
# Sets the expectation framework module(s) to be included in each example
# group.
#
# `frameworks` can be `:rspec`, `:stdlib`, a custom module, or any
# combination thereof:
#
# config.expect_with :rspec
# config.expect_with :stdlib
# config.expect_with :rspec, :stdlib
# config.expect_with OtherExpectationFramework
#
# RSpec will translate `:rspec` and `:stdlib` into the appropriate
# modules.
#
# ## Configuration
#
# If the module responds to `configuration`, `expect_with` will
# yield the `configuration` object if given a block:
#
# config.expect_with OtherExpectationFramework do |custom_config|
# custom_config.custom_setting = true
# end
def expect_with(*frameworks)
modules = frameworks.map do |framework|
case framework
when Module
framework
when :rspec
require 'rspec/expectations'
self.expecting_with_rspec = true
::RSpec::Matchers
when :stdlib
require 'test/unit/assertions'
::Test::Unit::Assertions
else
raise ArgumentError, "#{framework.inspect} is not supported"
end
end
if (modules - @expectation_frameworks).any?
assert_no_example_groups_defined(:expect_with)
end
if block_given?
raise "expect_with only accepts a block with a single argument. Call expect_with #{modules.length} times, once with each argument, instead." if modules.length > 1
raise "#{modules.first} must respond to `configuration` so that expect_with can yield it." unless modules.first.respond_to?(:configuration)
yield modules.first.configuration
end
@expectation_frameworks.push(*modules)
end
def full_backtrace=(true_or_false)
@backtrace_clean_patterns = true_or_false ? [] : DEFAULT_BACKTRACE_PATTERNS
end
def color(output=output_stream)
# rspec's built-in formatters all call this with the output argument,
# but defaulting to output_stream for backward compatibility with
# formatters in extension libs
return false unless output_to_tty?(output)
value_for(:color, @color)
end
def color=(bool)
if bool
if RSpec.windows_os? and not ENV['ANSICON']
warn "You must use ANSICON 1.31 or later (http://adoxa.3eeweb.com/ansicon/) to use colour on Windows"
@color = false
else
@color = true
end
end
end
# TODO - deprecate color_enabled - probably not until the last 2.x
# release before 3.0
alias_method :color_enabled, :color
alias_method :color_enabled=, :color=
define_predicate_for :color_enabled, :color
def libs=(libs)
libs.map {|lib| $LOAD_PATH.unshift lib}
end
def requires=(paths)
paths.map {|path| require path}
end
def debug=(bool)
return unless bool
begin
require 'ruby-debug'
Debugger.start
rescue LoadError => e
raise <<-EOM
#{'*'*50}
#{e.message}
If you have it installed as a ruby gem, then you need to either require
'rubygems' or configure the RUBYOPT environment variable with the value
'rubygems'.
#{e.backtrace.join("\n")}
#{'*'*50}
EOM
end
end
# Run examples defined on `line_numbers` in all files to run.
def line_numbers=(line_numbers)
filter_run :line_numbers => line_numbers.map{|l| l.to_i}
end
def full_description=(description)
filter_run :full_description => Regexp.union(*Array(description).map {|d| Regexp.new(d) })
end
# @overload add_formatter(formatter)
#
# Adds a formatter to the formatters collection. `formatter` can be a
# string representing any of the built-in formatters (see
# `built_in_formatter`), or a custom formatter class.
#
# ### Note
#
# For internal purposes, `add_formatter` also accepts the name of a class
# and path to a file that contains that class definition, but you should
# consider that a private api that may change at any time without notice.
def add_formatter(formatter_to_use, path=nil)
formatter_class =
built_in_formatter(formatter_to_use) ||
custom_formatter(formatter_to_use) ||
(raise ArgumentError, "Formatter '#{formatter_to_use}' unknown - maybe you meant 'documentation' or 'progress'?.")
formatters << formatter_class.new(path ? file_at(path) : output)
end
alias_method :formatter=, :add_formatter
def formatters
@formatters ||= []
end
def reporter
@reporter ||= begin
add_formatter('progress') if formatters.empty?
Reporter.new(*formatters)
end
end
# @private
def files_or_directories_to_run=(*files)
files = files.flatten
files << default_path if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty?
self.files_to_run = get_files_to_run(files)
end
# Creates a method that delegates to `example` including the submitted
# `args`. Used internally to add variants of `example` like `pending`:
#
# @example
# alias_example_to :pending, :pending => true
#
# # This lets you do this:
#
# describe Thing do
# pending "does something" do
# thing = Thing.new
# end
# end
#
# # ... which is the equivalent of
#
# describe Thing do
# it "does something", :pending => true do
# thing = Thing.new
# end
# end
def alias_example_to(new_name, *args)
extra_options = build_metadata_hash_from(args)
RSpec::Core::ExampleGroup.alias_example_to(new_name, extra_options)
end
# Define an alias for it_should_behave_like that allows different
# language (like "it_has_behavior" or "it_behaves_like") to be
# employed when including shared examples.
#
# Example:
#
# alias_it_behaves_like_to(:it_has_behavior, 'has behavior:')
#
# allows the user to include a shared example group like:
#
# describe Entity do
# it_has_behavior 'sortability' do
# let(:sortable) { Entity.new }
# end
# end
#
# which is reported in the output as:
#
# Entity
# has behavior: sortability
# # sortability examples here
def alias_it_behaves_like_to(new_name, report_label = '')
RSpec::Core::ExampleGroup.alias_it_behaves_like_to(new_name, report_label)
end
alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to
# Adds key/value pairs to the `inclusion_filter`. If the
# `treat_symbols_as_metadata_keys_with_true_values` config option is set
# to true and `args` includes any symbols that are not part of a hash,
# each symbol is treated as a key in the hash with the value `true`.
#
# ### Note
#
# Filters set using this method can be overridden from the command line
# or config files (e.g. `.rspec`).
#
# @example
# # given this declaration
# describe "something", :foo => 'bar' do
# # ...
# end
#
# # any of the following will include that group
# config.filter_run_including :foo => 'bar'
# config.filter_run_including :foo => /^ba/
# config.filter_run_including :foo => lambda {|v| v == 'bar'}
# config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g.
# config.filter_run_including :foo => lambda {|v| v == 'bar'}
#
# # given a proc with an arity of 2, the lambda is passed the value related to the key,
# # and the metadata itself e.g.
# config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # with treat_symbols_as_metadata_keys_with_true_values = true
# filter_run_including :foo # same as filter_run_including :foo => true
def filter_run_including(*args)
filter_manager.include_with_low_priority build_metadata_hash_from(args)
end
alias_method :filter_run, :filter_run_including
# Clears and reassigns the `inclusion_filter`. Set to `nil` if you don't
# want any inclusion filter at all.
#
# ### Warning
#
# This overrides any inclusion filters/tags set on the command line or in
# configuration files.
def inclusion_filter=(filter)
filter_manager.include! build_metadata_hash_from([filter])
end
alias_method :filter=, :inclusion_filter=
# Returns the `inclusion_filter`. If none has been set, returns an empty
# hash.
def inclusion_filter
filter_manager.inclusions
end
alias_method :filter, :inclusion_filter
# Adds key/value pairs to the `exclusion_filter`. If the
# `treat_symbols_as_metadata_keys_with_true_values` config option is set
# to true and `args` excludes any symbols that are not part of a hash,
# each symbol is treated as a key in the hash with the value `true`.
#
# ### Note
#
# Filters set using this method can be overridden from the command line
# or config files (e.g. `.rspec`).
#
# @example
# # given this declaration
# describe "something", :foo => 'bar' do
# # ...
# end
#
# # any of the following will exclude that group
# config.filter_run_excluding :foo => 'bar'
# config.filter_run_excluding :foo => /^ba/
# config.filter_run_excluding :foo => lambda {|v| v == 'bar'}
# config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g.
# config.filter_run_excluding :foo => lambda {|v| v == 'bar'}
#
# # given a proc with an arity of 2, the lambda is passed the value related to the key,
# # and the metadata itself e.g.
# config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'}
#
# # with treat_symbols_as_metadata_keys_with_true_values = true
# filter_run_excluding :foo # same as filter_run_excluding :foo => true
def filter_run_excluding(*args)
filter_manager.exclude_with_low_priority build_metadata_hash_from(args)
end
# Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't
# want any exclusion filter at all.
#
# ### Warning
#
# This overrides any exclusion filters/tags set on the command line or in
# configuration files.
def exclusion_filter=(filter)
filter_manager.exclude! build_metadata_hash_from([filter])
end
# Returns the `exclusion_filter`. If none has been set, returns an empty
# hash.
def exclusion_filter
filter_manager.exclusions
end
# Tells RSpec to include `mod` in example groups. Methods defined in
# `mod` are exposed to examples (not example groups). Use `filters` to
# constrain the groups in which to include the module.
#
# @example
#
# module AuthenticationHelpers
# def login_as(user)
# # ...
# end
# end
#
# module UserHelpers
# def users(username)
# # ...
# end
# end
#
# RSpec.configure do |config|
# config.include(UserHelpers) # included in all modules
# config.include(AuthenticationHelpers, :type => :request)
# end
#
# describe "edit profile", :type => :request do
# it "can be viewed by owning user" do
# login_as users(:jdoe)
# get "/profiles/jdoe"
# assert_select ".username", :text => 'jdoe'
# end
# end
#
# @see #extend
def include(mod, *filters)
include_or_extend_modules << [:include, mod, build_metadata_hash_from(filters)]
end
# Tells RSpec to extend example groups with `mod`. Methods defined in
# `mod` are exposed to example groups (not examples). Use `filters` to
# constrain the groups to extend.
#
# Similar to `include`, but behavior is added to example groups, which
# are classes, rather than the examples, which are instances of those
# classes.
#
# @example
#
# module UiHelpers
# def run_in_browser
# # ...
# end
# end
#
# RSpec.configure do |config|
# config.extend(UiHelpers, :type => :request)
# end
#
# describe "edit profile", :type => :request do
# run_in_browser
#
# it "does stuff in the client" do
# # ...
# end
# end
#
# @see #include
def extend(mod, *filters)
include_or_extend_modules << [:extend, mod, build_metadata_hash_from(filters)]
end
# @private
#
# Used internally to extend a group with modules using `include` and/or
# `extend`.
def configure_group(group)
include_or_extend_modules.each do |include_or_extend, mod, filters|
next unless filters.empty? || group.any_apply?(filters)
send("safe_#{include_or_extend}", mod, group)
end
end
# @private
def safe_include(mod, host)
host.send(:include,mod) unless host < mod
end
# @private
if RUBY_VERSION.to_f >= 1.9
def safe_extend(mod, host)
host.extend(mod) unless (class << host; self; end) < mod
end
else
def safe_extend(mod, host)
host.extend(mod) unless (class << host; self; end).included_modules.include?(mod)
end
end
# @private
def configure_mock_framework
RSpec::Core::ExampleGroup.send(:include, mock_framework)
end
# @private
def configure_expectation_framework
expectation_frameworks.each do |framework|
RSpec::Core::ExampleGroup.send(:include, framework)
end
end
# @private
def load_spec_files
files_to_run.uniq.each {|f| load File.expand_path(f) }
raise_if_rspec_1_is_loaded
end
# @private
DEFAULT_FORMATTER = lambda { |string| string }
# Formats the docstring output using the block provided.
#
# @example
# # This will strip the descriptions of both examples and example groups.
# RSpec.configure do |config|
# config.format_docstrings { |s| s.strip }
# end
def format_docstrings(&block)
@format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER
end
# @private
def format_docstrings_block
@format_docstrings_block ||= DEFAULT_FORMATTER
end
# @api
#
# Sets the seed value and sets `order='rand'`
def seed=(seed)
order_and_seed_from_seed(seed)
end
# @api
#
# Sets the order and, if order is `'rand:<seed>'`, also sets the seed.
def order=(type)
order_and_seed_from_order(type)
end
def randomize?
order.to_s.match(/rand/)
end
# @private
DEFAULT_ORDERING = lambda { |list| list }
# @private
RANDOM_ORDERING = lambda do |list|
Kernel.srand RSpec.configuration.seed
list.sort_by { Kernel.rand(list.size) }
end
# Sets a strategy by which to order examples.
#
# @example
# RSpec.configure do |config|
# config.order_examples do |examples|
# examples.reverse
# end
# end
#
# @see #order_groups
# @see #order_groups_and_examples
# @see #order=
# @see #seed=
def order_examples(&block)
@example_ordering_block = block
@order = "custom" unless built_in_orderer?(block)
end
# @private
def example_ordering_block
@example_ordering_block ||= DEFAULT_ORDERING
end
# Sets a strategy by which to order groups.
#
# @example
# RSpec.configure do |config|
# config.order_groups do |groups|
# groups.reverse
# end
# end
#
# @see #order_examples
# @see #order_groups_and_examples
# @see #order=
# @see #seed=
def order_groups(&block)
@group_ordering_block = block
@order = "custom" unless built_in_orderer?(block)
end
# @private
def group_ordering_block
@group_ordering_block ||= DEFAULT_ORDERING
end
# Sets a strategy by which to order groups and examples.
#
# @example
# RSpec.configure do |config|
# config.order_groups_and_examples do |groups_or_examples|
# groups_or_examples.reverse
# end
# end
#
# @see #order_groups
# @see #order_examples
# @see #order=
# @see #seed=
def order_groups_and_examples(&block)
order_groups(&block)
order_examples(&block)
end
private
def get_files_to_run(paths)
patterns = pattern.split(",")
paths.map do |path|
path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR
File.directory?(path) ? gather_directories(path, patterns) : extract_location(path)
end.flatten
end
def gather_directories(path, patterns)
patterns.map do |pattern|
pattern =~ /^#{path}/ ? Dir[pattern.strip].sort : Dir["#{path}/{#{pattern.strip}}"].sort
end
end
def extract_location(path)
if path =~ /^(.*?)((?:\:\d+)+)$/
path, lines = $1, $2[1..-1].split(":").map{|n| n.to_i}
filter_manager.add_location path, lines
end
path
end
def command
$0.split(File::SEPARATOR).last
end
def value_for(key, default=nil)
@preferred_options.has_key?(key) ? @preferred_options[key] : default
end
def assert_no_example_groups_defined(config_option)
if RSpec.world.example_groups.any?
raise MustBeConfiguredBeforeExampleGroupsError.new(
"RSpec's #{config_option} configuration option must be configured before " +
"any example groups are defined, but you have already defined a group."
)
end
end
def raise_if_rspec_1_is_loaded
if defined?(Spec) && defined?(Spec::VERSION::MAJOR) && Spec::VERSION::MAJOR == 1
raise <<-MESSAGE
#{'*'*80}
You are running rspec-2, but it seems as though rspec-1 has been loaded as
well. This is likely due to a statement like this somewhere in the specs:
require 'spec'
Please locate that statement, remove it, and try again.
#{'*'*80}
MESSAGE
end
end
def output_to_tty?(output=output_stream)
tty? || (output.respond_to?(:tty?) && output.tty?)
end
def built_in_formatter(key)
case key.to_s
when 'd', 'doc', 'documentation', 's', 'n', 'spec', 'nested'
require 'rspec/core/formatters/documentation_formatter'
RSpec::Core::Formatters::DocumentationFormatter
when 'h', 'html'
require 'rspec/core/formatters/html_formatter'
RSpec::Core::Formatters::HtmlFormatter
when 't', 'textmate'
require 'rspec/core/formatters/text_mate_formatter'
RSpec::Core::Formatters::TextMateFormatter
when 'p', 'progress'
require 'rspec/core/formatters/progress_formatter'
RSpec::Core::Formatters::ProgressFormatter
when 'j', 'json'
require 'rspec/core/formatters/json_formatter'
RSpec::Core::Formatters::JsonFormatter
end
end
def custom_formatter(formatter_ref)
if Class === formatter_ref
formatter_ref
elsif string_const?(formatter_ref)
begin
eval(formatter_ref)
rescue NameError
require path_for(formatter_ref)
eval(formatter_ref)
end
end
end
def string_const?(str)
str.is_a?(String) && /\A[A-Z][a-zA-Z0-9_:]*\z/ =~ str
end
def path_for(const_ref)
underscore_with_fix_for_non_standard_rspec_naming(const_ref)
end
def underscore_with_fix_for_non_standard_rspec_naming(string)
underscore(string).sub(%r{(^|/)r_spec($|/)}, '\\1rspec\\2')
end
# activesupport/lib/active_support/inflector/methods.rb, line 48
def underscore(camel_cased_word)
word = camel_cased_word.to_s.dup
word.gsub!(/::/, '/')
word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2')
word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
word.tr!("-", "_")
word.downcase!
word
end
def file_at(path)
FileUtils.mkdir_p(File.dirname(path))
File.new(path, 'w')
end
def order_and_seed_from_seed(value)
order_groups_and_examples(&RANDOM_ORDERING)
@order, @seed = 'rand', value.to_i
[@order, @seed]
end
def set_order_and_seed(hash)
hash[:order], seed = order_and_seed_from_order(hash[:order])
hash[:seed] = seed if seed
end
def order_and_seed_from_order(type)
order, seed = type.to_s.split(':')
@order = order
@seed = seed = seed.to_i if seed
if randomize?
order_groups_and_examples(&RANDOM_ORDERING)
elsif order == 'default'
@order, @seed = nil, nil
order_groups_and_examples(&DEFAULT_ORDERING)
end
return order, seed
end
def built_in_orderer?(block)
[DEFAULT_ORDERING, RANDOM_ORDERING].include?(block)
end
end
end
end
| 1 | 8,280 | This require isn't needed anymore, right? | rspec-rspec-core | rb
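
The added `require 'digest'` only pays off if something in the library still references `Digest`. A quick, hypothetical way to sanity-check the reviewer's question before dropping the require (the `lib/rspec/core` path is assumed from the gem layout):

```ruby
require 'find'

# Scan the gem's lib tree for any remaining Digest usage; if nothing matches,
# the added `require 'digest'` can simply be removed again.
needs_digest = false
Find.find('lib/rspec/core') do |path|
  next unless path.end_with?('.rb')
  needs_digest ||= !!(File.read(path) =~ /\bDigest\b/)
end

puts(needs_digest ? "keep require 'digest'" : "require 'digest' looks unused")
```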
@@ -32,6 +32,7 @@ import (
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
"github.com/mysteriumnetwork/node/session/promise"
+ "github.com/mysteriumnetwork/payments/crypto"
"github.com/pkg/errors"
)
| 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package connection
import (
"context"
"sync"
"time"
"github.com/mysteriumnetwork/node/communication"
"github.com/mysteriumnetwork/node/consumer"
"github.com/mysteriumnetwork/node/firewall"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
"github.com/mysteriumnetwork/node/session/promise"
"github.com/pkg/errors"
)
var (
// ErrNoConnection error indicates that action applied to manager expects active connection (i.e. disconnect)
ErrNoConnection = errors.New("no connection exists")
// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
// ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrConnectionFailed indicates that Connect method didn't reach "Connected" phase due to connection error
ErrConnectionFailed = errors.New("connection has failed")
// ErrUnsupportedServiceType indicates that target proposal contains unsupported service type
ErrUnsupportedServiceType = errors.New("unsupported service type in proposal")
)
// Creator creates new connection by given options and uses state channel to report state changes
type Creator func(serviceType string, stateChannel StateChannel, statisticsChannel StatisticsChannel) (Connection, error)
// SessionInfo contains all the relevant info of the current session
type SessionInfo struct {
SessionID session.ID
ConsumerID identity.Identity
Proposal market.ServiceProposal
acknowledge func()
}
// Publisher is responsible for publishing given events
type Publisher interface {
Publish(topic string, data interface{})
}
// PaymentIssuer handles the payments for service
type PaymentIssuer interface {
Start() error
Stop()
}
// PaymentIssuerFactory creates a new payment issuer from the given params
type PaymentIssuerFactory func(
initialState promise.PaymentInfo,
paymentDefinition dto.PaymentPerTime,
messageChan chan balance.Message,
dialog communication.Dialog,
consumer, provider identity.Identity) (PaymentIssuer, error)
type connectionManager struct {
//these are passed on creation
newDialog DialogCreator
paymentIssuerFactory PaymentIssuerFactory
newConnection Creator
eventPublisher Publisher
//these are populated by Connect at runtime
ctx context.Context
status Status
statusLock sync.RWMutex
sessionInfo SessionInfo
cleanup []func() error
cancel func()
discoLock sync.Mutex
}
// NewManager creates connection manager with given dependencies
func NewManager(
dialogCreator DialogCreator,
paymentIssuerFactory PaymentIssuerFactory,
connectionCreator Creator,
eventPublisher Publisher,
) *connectionManager {
return &connectionManager{
newDialog: dialogCreator,
paymentIssuerFactory: paymentIssuerFactory,
newConnection: connectionCreator,
status: statusNotConnected(),
eventPublisher: eventPublisher,
cleanup: make([]func() error, 0),
}
}
func (manager *connectionManager) Connect(consumerID identity.Identity, proposal market.ServiceProposal, params ConnectParams) (err error) {
if manager.Status().State != NotConnected {
return ErrAlreadyExists
}
manager.ctx, manager.cancel = context.WithCancel(context.Background())
manager.setStatus(statusConnecting())
defer func() {
if err != nil {
manager.setStatus(statusNotConnected())
}
}()
providerID := identity.FromAddress(proposal.ProviderID)
dialog, err := manager.createDialog(consumerID, providerID, proposal.ProviderContacts[0])
if err != nil {
return err
}
stateChannel := make(chan State, 10)
statisticsChannel := make(chan consumer.SessionStatistics, 10)
connection, err := manager.newConnection(proposal.ServiceType, stateChannel, statisticsChannel)
if err != nil {
return err
}
sessionDTO, paymentInfo, err := manager.createSession(connection, dialog, consumerID, proposal)
if err != nil {
return err
}
err = manager.launchPayments(paymentInfo, dialog, consumerID, providerID)
if err != nil {
return err
}
err = manager.startConnection(connection, consumerID, proposal, params, sessionDTO, stateChannel, statisticsChannel)
if err == context.Canceled {
return ErrConnectionCancelled
}
return err
}
func (manager *connectionManager) launchPayments(paymentInfo *promise.PaymentInfo, dialog communication.Dialog, consumerID, providerID identity.Identity) error {
var promiseState promise.PaymentInfo
if paymentInfo != nil {
promiseState.FreeCredit = paymentInfo.FreeCredit
promiseState.LastPromise = paymentInfo.LastPromise
}
messageChan := make(chan balance.Message, 1)
// TODO: set the time and proper payment info
payment := dto.PaymentPerTime{
Price: money.Money{
Currency: money.CurrencyMyst,
Amount: uint64(0),
},
Duration: time.Minute,
}
payments, err := manager.paymentIssuerFactory(promiseState, payment, messageChan, dialog, consumerID, providerID)
if err != nil {
return err
}
manager.cleanup = append(manager.cleanup, func() error {
payments.Stop()
return nil
})
go manager.payForService(payments)
return nil
}
func (manager *connectionManager) cleanConnection() {
manager.cancel()
for i := len(manager.cleanup) - 1; i >= 0; i-- {
err := manager.cleanup[i]()
if err != nil {
log.Warn("cleanup error:", err)
}
}
manager.cleanup = make([]func() error, 0)
}
func (manager *connectionManager) createDialog(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) {
dialog, err := manager.newDialog(consumerID, providerID, contact)
if err != nil {
return nil, err
}
manager.cleanup = append(manager.cleanup, dialog.Close)
return dialog, err
}
func (manager *connectionManager) createSession(c Connection, dialog communication.Dialog, consumerID identity.Identity, proposal market.ServiceProposal) (session.SessionDto, *promise.PaymentInfo, error) {
sessionCreateConfig, err := c.GetConfig()
if err != nil {
return session.SessionDto{}, nil, err
}
consumerInfo := session.ConsumerInfo{
// TODO: once we're supporting payments from another identity make the changes accordingly
IssuerID: consumerID,
}
s, paymentInfo, err := session.RequestSessionCreate(dialog, proposal.ID, sessionCreateConfig, consumerInfo)
if err != nil {
return session.SessionDto{}, nil, err
}
manager.cleanup = append(manager.cleanup, func() error { return session.RequestSessionDestroy(dialog, s.ID) })
// set the session info for future use
manager.sessionInfo = SessionInfo{
SessionID: s.ID,
ConsumerID: consumerID,
Proposal: proposal,
acknowledge: func() {
err := session.AcknowledgeSession(dialog, string(s.ID))
if err != nil {
log.Warn("acknowledge failed", err)
}
},
}
manager.eventPublisher.Publish(SessionEventTopic, SessionEvent{
Status: SessionCreatedStatus,
SessionInfo: manager.sessionInfo,
})
manager.cleanup = append(manager.cleanup, func() error {
manager.eventPublisher.Publish(SessionEventTopic, SessionEvent{
Status: SessionEndedStatus,
SessionInfo: manager.sessionInfo,
})
return nil
})
return s, paymentInfo, nil
}
func (manager *connectionManager) startConnection(
connection Connection,
consumerID identity.Identity,
proposal market.ServiceProposal,
params ConnectParams,
sessionDTO session.SessionDto,
stateChannel chan State,
statisticsChannel chan consumer.SessionStatistics) (err error) {
defer func() {
if err != nil {
log.Info("cancelling connection initiation: ", err)
manager.Cancel()
}
}()
connectOptions := ConnectOptions{
SessionID: sessionDTO.ID,
SessionConfig: sessionDTO.Config,
EnableDNS: params.EnableDNS,
ConsumerID: consumerID,
ProviderID: identity.FromAddress(proposal.ProviderID),
Proposal: proposal,
}
if err = connection.Start(connectOptions); err != nil {
return err
}
manager.cleanup = append(manager.cleanup, func() error {
connection.Stop()
return nil
})
err = manager.setupTrafficBlock(params.DisableKillSwitch)
if err != nil {
return err
}
//consume statistics right after start - openvpn3 will publish them even before connected state
go manager.consumeStats(statisticsChannel)
err = manager.waitForConnectedState(stateChannel, sessionDTO.ID)
if err != nil {
return err
}
go manager.consumeConnectionStates(stateChannel)
go manager.connectionWaiter(connection)
return nil
}
func (manager *connectionManager) Status() Status {
manager.statusLock.RLock()
defer manager.statusLock.RUnlock()
return manager.status
}
func (manager *connectionManager) setStatus(cs Status) {
manager.statusLock.Lock()
manager.status = cs
manager.statusLock.Unlock()
}
func (manager *connectionManager) Cancel() {
status := statusCanceled()
manager.setStatus(status)
manager.onStateChanged(status.State)
logDisconnectError(manager.Disconnect())
}
func (manager *connectionManager) Disconnect() error {
manager.discoLock.Lock()
defer manager.discoLock.Unlock()
if manager.Status().State == NotConnected {
return ErrNoConnection
}
manager.setStatus(statusDisconnecting())
manager.cleanConnection()
manager.setStatus(statusNotConnected())
manager.eventPublisher.Publish(StateEventTopic, StateEvent{
State: NotConnected,
SessionInfo: manager.sessionInfo,
})
return nil
}
func (manager *connectionManager) payForService(payments PaymentIssuer) {
err := payments.Start()
if err != nil {
log.Error("payment error: ", err)
err = manager.Disconnect()
if err != nil {
log.Error("could not disconnect gracefully:", err)
}
}
}
func (manager *connectionManager) connectionWaiter(connection Connection) {
err := connection.Wait()
if err != nil {
log.Warn("connection exited with error: ", err)
} else {
log.Info("connection exited")
}
logDisconnectError(manager.Disconnect())
}
func (manager *connectionManager) waitForConnectedState(stateChannel <-chan State, sessionID session.ID) error {
log.Trace("waiting for connected state")
for {
select {
case state, more := <-stateChannel:
if !more {
return ErrConnectionFailed
}
switch state {
case Connected:
log.Trace("connected started event received")
go manager.sessionInfo.acknowledge()
manager.onStateChanged(state)
return nil
default:
manager.onStateChanged(state)
}
case <-manager.ctx.Done():
return manager.ctx.Err()
}
}
}
func (manager *connectionManager) consumeConnectionStates(stateChannel <-chan State) {
for state := range stateChannel {
manager.onStateChanged(state)
}
log.Debug("state updater stopCalled")
logDisconnectError(manager.Disconnect())
}
func (manager *connectionManager) consumeStats(statisticsChannel <-chan consumer.SessionStatistics) {
for stats := range statisticsChannel {
manager.eventPublisher.Publish(StatisticsEventTopic, stats)
}
}
func (manager *connectionManager) onStateChanged(state State) {
log.Trace("onStateChanged called")
manager.eventPublisher.Publish(StateEventTopic, StateEvent{
State: state,
SessionInfo: manager.sessionInfo,
})
switch state {
case Connected:
log.Trace("connected state issued")
manager.setStatus(statusConnected(manager.sessionInfo.SessionID, manager.sessionInfo.Proposal))
case Reconnecting:
manager.setStatus(statusReconnecting())
}
}
func (manager *connectionManager) setupTrafficBlock(disableKillSwitch bool) error {
if disableKillSwitch {
return nil
}
removeRule, err := firewall.BlockNonTunnelTraffic(firewall.Session)
if err != nil {
return err
}
manager.cleanup = append(manager.cleanup, func() error {
removeRule()
return nil
})
return nil
}
func logDisconnectError(err error) {
if err != nil && err != ErrNoConnection {
log.Error("disconnect error", err)
}
}
| 1 | 14,950 | It's a bit weird to see the payments crypto package inside the connection manager. There will probably be services with no relation to payments that use the connection manager. That's why ping-pong by itself can probably be useful without any relation to payments. | mysteriumnetwork-node | go
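
The concern is architectural: the manager already hides payment details behind the `PaymentIssuer` interface, so payment-specific packages such as `payments/crypto` can stay on the implementation side of `PaymentIssuerFactory`. A hypothetical sketch of how a payment-free service could be wired without that dependency (`noopPayments` and `NoopPaymentIssuerFactory` are made-up names, not part of the codebase):

```go
// connection/noop_payments.go (hypothetical file)
package connection

import (
	"github.com/mysteriumnetwork/node/communication"
	"github.com/mysteriumnetwork/node/identity"
	"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
	"github.com/mysteriumnetwork/node/session/balance"
	"github.com/mysteriumnetwork/node/session/promise"
)

// noopPayments satisfies PaymentIssuer for services with no payment flow.
type noopPayments struct{}

func (noopPayments) Start() error { return nil } // nothing to charge
func (noopPayments) Stop()        {}             // nothing to tear down

// NoopPaymentIssuerFactory matches the existing PaymentIssuerFactory signature
// and never touches payment-specific packages such as payments/crypto.
func NoopPaymentIssuerFactory(
	_ promise.PaymentInfo,
	_ dto.PaymentPerTime,
	_ chan balance.Message,
	_ communication.Dialog,
	_, _ identity.Identity,
) (PaymentIssuer, error) {
	return noopPayments{}, nil
}
```

A service that never charges could pass this factory to `NewManager`, keeping the connection package free of payment imports while payment-aware services wire in the real issuer.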
@@ -889,7 +889,7 @@ trait MarcAdvancedTrait
'http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd',
'http://www.w3.org/2001/XMLSchema-instance'
);
- $xml->record->addAttribute('type', 'Bibliographic');
+ $xml->record->addAttribute('type', $this->xmlType);
return $xml->record->asXML();
}
| 1 | <?php
/**
* Functions to add advanced MARC-driven functionality to a record driver already
* powered by the standard index spec. Depends upon MarcReaderTrait.
*
* PHP version 7
*
* Copyright (C) Villanova University 2017.
* Copyright (C) The National Library of Finland 2020.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
namespace VuFind\RecordDriver\Feature;
use VuFind\View\Helper\Root\RecordLinker;
use VuFind\XSLT\Processor as XSLTProcessor;
/**
* Functions to add advanced MARC-driven functionality to a record driver already
* powered by the standard index spec. Depends upon MarcReaderTrait.
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
trait MarcAdvancedTrait
{
/**
* Fields that may contain subject headings, and their descriptions
*
* @var array
*/
protected $subjectFields = [
'600' => 'personal name',
'610' => 'corporate name',
'611' => 'meeting name',
'630' => 'uniform title',
'648' => 'chronological',
'650' => 'topic',
'651' => 'geographic',
'653' => '',
'655' => 'genre/form',
'656' => 'occupation'
];
/**
* Mappings from subject source indicators (2nd indicator of subject fields in
* MARC 21) to their codes.
*
* @var array
* @link https://www.loc.gov/marc/bibliographic/bd6xx.html Subject field docs
* @link https://www.loc.gov/standards/sourcelist/subject.html Code list
*/
protected $subjectSources = [
'0' => 'lcsh',
'1' => 'lcshac',
'2' => 'mesh',
'3' => 'nal',
'4' => 'unknown',
'5' => 'cash',
'6' => 'rvm'
];
/**
* Get access restriction notes for the record.
*
* @return array
*/
public function getAccessRestrictions()
{
return $this->getFieldArray('506');
}
/**
* Get all subject headings associated with this record. Each heading is
* returned as an array of chunks, increasing from least specific to most
* specific.
*
* @param bool $extended Whether to return a keyed array with the following
* keys:
* - heading: the actual subject heading chunks
* - type: heading type
* - source: source vocabulary
*
* @return array
*/
public function getAllSubjectHeadings($extended = false)
{
// This is all the collected data:
$retval = [];
// Try each MARC field one at a time:
foreach ($this->subjectFields as $field => $fieldType) {
// Do we have any results for the current field? If not, try the next.
$results = $this->getMarcReader()->getFields($field);
if (!$results) {
continue;
}
// If we got here, we found results -- let's loop through them.
foreach ($results as $result) {
// Start an array for holding the chunks of the current heading:
$current = [];
// Get all the chunks and collect them together:
foreach ($result['subfields'] as $subfield) {
// Numeric subfields are for control purposes and should not
// be displayed:
if (!is_numeric($subfield['code'])) {
$current[] = $subfield['data'];
}
}
// If we found at least one chunk, add a heading to our result:
if (!empty($current)) {
if ($extended) {
$sourceIndicator = $result['i2'];
$source = '';
if (isset($this->subjectSources[$sourceIndicator])) {
$source = $this->subjectSources[$sourceIndicator] ?? '';
} else {
$source = $this->getSubfield($result, '2');
}
$retval[] = [
'heading' => $current,
'type' => $fieldType,
'source' => $source,
'id' => $this->getSubfield($result, '0')
];
} else {
$retval[] = $current;
}
}
}
}
// Remove duplicates and then send back everything we collected:
return array_map(
'unserialize',
array_unique(array_map('serialize', $retval))
);
}
/**
* Get award notes for the record.
*
* @return array
*/
public function getAwards()
{
return $this->getFieldArray('586');
}
/**
* Get the bibliographic level of the current record.
*
* @return string
*/
public function getBibliographicLevel()
{
$leader = $this->getMarcReader()->getLeader();
$biblioLevel = strtoupper($leader[7]);
switch ($biblioLevel) {
case 'M': // Monograph
return "Monograph";
case 'S': // Serial
return "Serial";
case 'A': // Monograph Part
return "MonographPart";
case 'B': // Serial Part
return "SerialPart";
case 'C': // Collection
return "Collection";
case 'D': // Collection Part
return "CollectionPart";
case 'I': // Integrating Resource
return "IntegratingResource";
default:
return "Unknown";
}
}
/**
* Get notes on bibliography content.
*
* @return array
*/
public function getBibliographyNotes()
{
return $this->getFieldArray('504');
}
/**
* Return full record as filtered XML for public APIs.
*
* @return string
*/
public function getFilteredXML()
{
$record = clone $this->getMarcReader();
// The default implementation does not filter out any fields
// $marc = new \File_MARCXML(
// $record->toFormat('MARCXML'), \File_MARCXML::SOURCE_STRING
//);
// $marc->deleteFields('9', true);
// return $marc->toXML();
//
return $record->toFormat('MARCXML');
}
/**
* Get notes on finding aids related to the record.
*
* @return array
*/
public function getFindingAids()
{
return $this->getFieldArray('555');
}
/**
* Get general notes on the record.
*
* @return array
*/
public function getGeneralNotes()
{
return $this->getFieldArray('500');
}
/**
* Get human readable publication dates for display purposes (may not be suitable
* for computer processing -- use getPublicationDates() for that).
*
* @return array
*/
public function getHumanReadablePublicationDates()
{
return $this->getPublicationInfo('c');
}
/**
* Get an array of newer titles for the record.
*
* @return array
*/
public function getNewerTitles()
{
// If the MARC links are being used, return blank array
$fieldsNames = isset($this->mainConfig->Record->marc_links)
? array_map('trim', explode(',', $this->mainConfig->Record->marc_links))
: [];
return in_array('785', $fieldsNames) ? [] : parent::getNewerTitles();
}
/**
* Get the item's places of publication.
*
* @return array
*/
public function getPlacesOfPublication()
{
return $this->getPublicationInfo();
}
/**
* Get an array of playing times for the record (if applicable).
*
* @return array
*/
public function getPlayingTimes()
{
$times = $this->getFieldArray('306', ['a'], false);
// Format the times to include colons ("HH:MM:SS" format).
foreach ($times as $x => $time) {
if (!preg_match('/\d\d:\d\d:\d\d/', $time)) {
$times[$x] = substr($time, 0, 2) . ':' .
substr($time, 2, 2) . ':' .
substr($time, 4, 2);
}
}
return $times;
}
/**
* Get an array of previous titles for the record.
*
* @return array
*/
public function getPreviousTitles()
{
// If the MARC links are being used, return blank array
$fieldsNames = isset($this->mainConfig->Record->marc_links)
? array_map('trim', explode(',', $this->mainConfig->Record->marc_links))
: [];
return in_array('780', $fieldsNames) ? [] : parent::getPreviousTitles();
}
/**
* Get credits of people involved in production of the item.
*
* @return array
*/
public function getProductionCredits()
{
return $this->getFieldArray('508');
}
/**
* Get an array of publication frequency information.
*
* @return array
*/
public function getPublicationFrequency()
{
return $this->getFieldArray('310', ['a', 'b']);
}
/**
* Get an array of strings describing relationships to other items.
*
* @return array
*/
public function getRelationshipNotes()
{
return $this->getFieldArray('580');
}
/**
* Get an array of all series names containing the record. Array entries may
* be either the name string, or an associative array with 'name' and 'number'
* keys.
*
* @return array
*/
public function getSeries()
{
$matches = [];
// First check the 440, 800 and 830 fields for series information:
$primaryFields = [
'440' => ['a', 'p'],
'800' => ['a', 'b', 'c', 'd', 'f', 'p', 'q', 't'],
'830' => ['a', 'p']];
$matches = $this->getSeriesFromMARC($primaryFields);
if (!empty($matches)) {
return $matches;
}
// Now check 490 and display it only if 440/800/830 were empty:
$secondaryFields = ['490' => ['a']];
$matches = $this->getSeriesFromMARC($secondaryFields);
if (!empty($matches)) {
return $matches;
}
// Still no results found? Resort to the Solr-based method just in case!
return parent::getSeries();
}
/**
* Support method for getSeries() -- given a field specification, look for
* series information in the MARC record.
*
* @param array $fieldInfo Associative array of field => subfield information
* (used to find series name)
*
* @return array
*/
protected function getSeriesFromMARC($fieldInfo)
{
$matches = [];
// Loop through the field specification....
foreach ($fieldInfo as $field => $subfields) {
// Did we find any matching fields?
$series = $this->getMarcReader()->getFields($field);
foreach ($series as $currentField) {
// Can we find a name using the specified subfield list?
$name = $this->getSubfieldArray($currentField, $subfields);
if (isset($name[0])) {
$currentArray = ['name' => $name[0]];
// Can we find a number in subfield v? (Note that number is
// always in subfield v regardless of whether we are dealing
// with 440, 490, 800 or 830 -- hence the hard-coded array
// rather than another parameter in $fieldInfo).
$number = $this->getSubfieldArray($currentField, ['v']);
if (isset($number[0])) {
$currentArray['number'] = $number[0];
}
// Save the current match:
$matches[] = $currentArray;
}
}
}
return $matches;
}
/**
* Get an array of summary strings for the record.
*
* @return array
*/
public function getSummary()
{
return $this->getFieldArray('520');
}
/**
* Get an array of technical details on the item represented by the record.
*
* @return array
*/
public function getSystemDetails()
{
return $this->getFieldArray('538');
}
/**
* Get an array of notes about the record's target audience.
*
* @return array
*/
public function getTargetAudienceNotes()
{
return $this->getFieldArray('521');
}
/**
* Get the text of the part/section portion of the title.
*
* @return string
*/
public function getTitleSection()
{
return $this->getFirstFieldValue('245', ['n', 'p']);
}
/**
* Get the statement of responsibility that goes with the title (i.e. "by John
* Smith").
*
* @return string
*/
public function getTitleStatement()
{
return $this->getFirstFieldValue('245', ['c']);
}
/**
* Get an array of lines from the table of contents.
*
* @return array
*/
public function getTOC()
{
$toc = [];
if ($fields = $this->getMarcReader()->getFields('505')) {
foreach ($fields as $field) {
// Implode all the subfields into a single string, then explode
// on the -- separators (filtering out empty chunks). Due to
// inconsistent application of subfield codes, this is the most
// reliable way to split up a table of contents.
$str = '';
foreach ($field['subfields'] as $subfield) {
$str .= trim($subfield['data']) . ' ';
}
$toc = array_merge(
$toc,
array_filter(array_map('trim', preg_split('/[.\s]--/', $str)))
);
}
}
return $toc;
}
/**
* Get hierarchical place names (MARC field 752)
*
* Returns an array of formatted hierarchical place names, consisting of all
* alpha-subfields, concatenated for display
*
* @return array
*/
public function getHierarchicalPlaceNames()
{
$placeNames = [];
if ($fields = $this->getMarcReader()->getFields('752')) {
foreach ($fields as $field) {
$current = [];
foreach ($field['subfields'] as $subfield) {
if (!is_numeric($subfield['code'])) {
$current[] = $subfield['data'];
}
}
$placeNames[] = implode(' -- ', $current);
}
}
return $placeNames;
}
/**
* Return an array of associative URL arrays with one or more of the following
* keys:
*
* <li>
* <ul>desc: URL description text to display (optional)</ul>
* <ul>url: fully-formed URL (required if 'route' is absent)</ul>
* <ul>route: VuFind route to build URL with (required if 'url' is absent)</ul>
* <ul>routeParams: Parameters for route (optional)</ul>
* <ul>queryString: Query params to append after building route (optional)</ul>
* </li>
*
* @return array
*/
public function getURLs()
{
$retVal = [];
// Which fields/subfields should we check for URLs?
$fieldsToCheck = [
'856' => ['y', 'z', '3'], // Standard URL
'555' => ['a'] // Cumulative index/finding aids
];
foreach ($fieldsToCheck as $field => $subfields) {
$urls = $this->getMarcReader()->getFields($field);
foreach ($urls as $url) {
// Is there an address in the current field?
$address = $this->getSubfield($url, 'u');
if ($address) {
// Is there a description? If not, just use the URL itself.
foreach ($subfields as $current) {
$desc = $this->getSubfield($url, $current);
if ($desc) {
break;
}
}
$retVal[] = ['url' => $address, 'desc' => $desc ?: $address];
}
}
}
return $retVal;
}
/**
* Get all record links related to the current record. Each link is returned as
* array.
* Format:
* array(
* array(
* 'title' => label_for_title
* 'value' => link_name
* 'link' => link_URI
* ),
* ...
* )
*
* @return null|array
*/
public function getAllRecordLinks()
{
// Load configurations:
$fieldsNames = isset($this->mainConfig->Record->marc_links)
? explode(',', $this->mainConfig->Record->marc_links) : [];
$useVisibilityIndicator
= $this->mainConfig->Record->marc_links_use_visibility_indicator ?? true;
$retVal = [];
foreach ($fieldsNames as $value) {
$value = trim($value);
$fields = $this->getMarcReader()->getFields($value);
foreach ($fields as $field) {
// Check to see if we should display at all
if ($useVisibilityIndicator) {
$visibilityIndicator = $field['i1'];
if ($visibilityIndicator == '1') {
continue;
}
}
// Get data for field
$tmp = $this->getFieldData($field);
if (is_array($tmp)) {
$retVal[] = $tmp;
}
}
}
return empty($retVal) ? null : $retVal;
}
/**
* Support method for getFieldData() -- factor the relationship indicator
* into the field number where relevant to generate a note to associate
* with a record link.
*
* @param array $field Field to examine
*
* @return string
*/
protected function getRecordLinkNote($field)
{
// If set, use relationship information from subfield i
if ($subfieldI = $this->getSubfield($field, 'i')) {
// VuFind will add a colon to the label, so prevent double colons:
$data = rtrim($subfieldI, ':');
if (!empty($data)) {
return $data;
}
}
// Normalize blank relationship indicator to 0:
$relationshipIndicator = $field['i2'];
if ($relationshipIndicator == ' ') {
$relationshipIndicator = '0';
}
// Assign notes based on the relationship type
$value = $field['tag'];
switch ($value) {
case '780':
if (in_array($relationshipIndicator, range('0', '7'))) {
$value .= '_' . $relationshipIndicator;
}
break;
case '785':
if (in_array($relationshipIndicator, range('0', '8'))) {
$value .= '_' . $relationshipIndicator;
}
break;
}
return 'note_' . $value;
}
/**
* Returns the array element for the 'getAllRecordLinks' method
*
* @param array $field Field to examine
*
* @return array|bool Array on success, boolean false if no valid link could be
* found in the data.
*/
protected function getFieldData($field)
{
// Make sure that there is a t field to be displayed:
if (!($title = $this->getSubfield($field, 't'))) {
return false;
}
$linkTypeSetting = $this->mainConfig->Record->marc_links_link_types
?? 'id,oclc,dlc,isbn,issn,title';
$linkTypes = explode(',', $linkTypeSetting);
$linkFields = $this->getSubfields($field, 'w');
// Run through the link types specified in the config.
// For each type, check field for reference
// If reference found, exit loop and go straight to end
// If no reference found, check the next link type instead
foreach ($linkTypes as $linkType) {
switch (trim($linkType)) {
case 'oclc':
foreach ($linkFields as $current) {
if ($oclc = $this->getIdFromLinkingField($current, 'OCoLC')) {
$link = ['type' => 'oclc', 'value' => $oclc];
}
}
break;
case 'dlc':
foreach ($linkFields as $current) {
if ($dlc = $this->getIdFromLinkingField($current, 'DLC', true)) {
$link = ['type' => 'dlc', 'value' => $dlc];
}
}
break;
case 'id':
foreach ($linkFields as $current) {
if ($bibLink = $this->getIdFromLinkingField($current)) {
$link = ['type' => 'bib', 'value' => $bibLink];
}
}
break;
case 'isbn':
if ($isbn = $this->getSubfield($field, 'z')) {
$link = [
'type' => 'isn', 'value' => $isbn,
'exclude' => $this->getUniqueId()
];
}
break;
case 'issn':
if ($issn = $this->getSubfield($field, 'x')) {
$link = [
'type' => 'isn', 'value' => $issn,
'exclude' => $this->getUniqueId()
];
}
break;
case 'title':
$link = ['type' => 'title', 'value' => $title];
break;
}
// Exit loop if we have a link
if (isset($link)) {
break;
}
}
// Make sure we have something to display:
return !isset($link) ? false : [
'title' => $this->getRecordLinkNote($field),
'value' => $title,
'link' => $link
];
}
/**
* Returns an id extracted from the identifier subfield passed in
*
* @param string $idField MARC subfield containing id information
* @param string $prefix Prefix to search for in id field
* @param bool $raw Return raw match, or normalize?
*
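     * Illustrative (input value assumed, not from real data):
     * getIdFromLinkingField('(OCoLC)ocm12345678', 'OCoLC') returns '12345678'
     * (alphabetic characters stripped); with $raw = true it would return
     * 'ocm12345678'.
     *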
* @return string|bool ID on success, false on failure
*/
protected function getIdFromLinkingField($idField, $prefix = null, $raw = false)
{
if (preg_match('/\(([^)]+)\)(.+)/', $idField, $matches)) {
// If prefix matches, return ID:
if ($matches[1] == $prefix) {
// Special case -- LCCN should not be stripped:
return $raw
? $matches[2]
: trim(str_replace(range('a', 'z'), '', ($matches[2])));
}
} elseif ($prefix == null) {
// If no prefix was given or found, we presume it is a raw bib record
return $idField;
}
return false;
}
/**
* Support method for getFormattedMarcDetails() -- extract a single result
*
* @param array $currentField Result from MarcReader::getFields
* @param array $details Parsed instructions from getFormattedMarcDetails()
*
* @return string|bool
*/
protected function extractSingleMarcDetail($currentField, $details)
{
// Simplest case -- "msg" mode (just return a configured message):
if ($details['mode'] === 'msg') {
// Map 'true' and 'false' to boolean equivalents:
$msgMap = ['true' => true, 'false' => false];
return $msgMap[$details['params']] ?? $details['params'];
}
// Standard case -- "marc" mode (extract subfield data):
$result = $this->getSubfieldArray(
$currentField,
// Default to subfield a if nothing is specified:
str_split($details['params'] ?? 'a'),
true
);
return count($result) > 0 ? (string)$result[0] : '';
}
/**
* Get Status/Holdings Information from the internally stored MARC Record
* (support method used by the NoILS driver).
*
* @param string $defaultField The MARC Field to retrieve if $data commands do
* not request something more specific
* @param array $data The type of data to retrieve from the MARC field;
* an array of pipe-delimited commands where the first part determines the data
* retrieval mode, the second part provides further instructions, and the
* optional third part provides a field to override $defaultField; supported
* modes: "msg" (for a hard-coded message) and "marc" (for fetching subfield
* data)
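     *
     * Illustrative call (field tags and keys are assumed, not from the source):
     *   $this->getFormattedMarcDetails('852', [
     *       'availability' => 'msg|true',     // hard-coded boolean message
     *       'location'     => 'marc|b',       // subfield b of the default field 852
     *       'callnumber'   => 'marc|hj|952',  // subfields h and j of field 952
     *   ]);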
*
* @return array
*/
public function getFormattedMarcDetails($defaultField, $data)
{
// First, parse the instructions into a more useful format, so we know
// which fields we're going to have to look up.
$instructions = [];
foreach ($data as $key => $rawInstruction) {
$instructionParts = explode('|', $rawInstruction);
$instructions[$key] = [
'mode' => $instructionParts[0],
'params' => $instructionParts[1] ?? null,
'field' => $instructionParts[2] ?? $defaultField
];
}
// Now fetch all of the MARC data that we need.
$getTagCallback = function ($instruction) {
return $instruction['field'];
};
$fields = [];
foreach (array_unique(array_map($getTagCallback, $instructions)) as $field) {
$fields[$field] = $this->getMarcReader()->getFields($field);
}
// Initialize return array
$matches = [];
// Process the instructions on the requested data.
foreach ($instructions as $key => $details) {
foreach ($fields[$details['field']] as $i => $currentField) {
if (!isset($matches[$i])) {
$matches[$i] = ['id' => $this->getUniqueId()];
}
$matches[$i][$key] = $this->extractSingleMarcDetail(
$currentField,
$details
);
}
}
return $matches;
}
/**
* Return an XML representation of the record using the specified format.
* Return false if the format is unsupported.
*
* @param string $format Name of format to use (corresponds with
* OAI-PMH metadataPrefix parameter).
* @param string $baseUrl Base URL of host containing VuFind (optional;
* may be used to inject record URLs into XML when appropriate).
* @param RecordLinker $linker Record linker helper (optional; may be used to
* inject record URLs into XML when appropriate).
*
* @return mixed XML, or false if format unsupported.
*/
public function getXML($format, $baseUrl = null, $linker = null)
{
// Special case for MARC:
if ($format == 'marc21') {
$sanitizeXmlRegEx
= '[^\x{0009}\x{000a}\x{000d}\x{0020}-\x{D7FF}\x{E000}-\x{FFFD}]+';
$xml = simplexml_load_string(
trim(
preg_replace(
"/$sanitizeXmlRegEx/u",
' ',
$this->getMarcReader()->toFormat('MARCXML')
)
)
);
if (!$xml || !isset($xml->record)) {
return false;
}
// Set up proper namespacing and extract just the <record> tag:
$xml->record->addAttribute('xmlns', "http://www.loc.gov/MARC21/slim");
$xml->record->addAttribute(
'xsi:schemaLocation',
'http://www.loc.gov/MARC21/slim ' .
'http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd',
'http://www.w3.org/2001/XMLSchema-instance'
);
$xml->record->addAttribute('type', 'Bibliographic');
return $xml->record->asXML();
}
// Try the parent method:
return parent::getXML($format, $baseUrl, $linker);
}
/**
* Get an XML RDF representation of the data in this record.
*
* @return mixed XML RDF data (empty if unsupported or error).
*/
public function getRDFXML()
{
return XSLTProcessor::process(
'record-rdf-mods.xsl',
trim($this->getMarcReader()->toFormat('MARCXML'))
);
}
/**
* Return the list of "source records" for this consortial record.
*
* @return array
*/
public function getConsortialIDs()
{
return $this->getFieldArray('035');
}
/**
     * Return the first ISMN found for this record, or false if none is found
*
* @return mixed
*/
public function getCleanISMN()
{
$fields024 = $this->getMarcReader()->getFields('024');
foreach ($fields024 as $field) {
if ($field['i1'] == 2
&& $subfield = $this->getSubfield($field, 'a')
) {
return $subfield;
}
}
return false;
}
/**
* Return first national bibliography number found, or false if not found
*
* @return mixed
*/
public function getCleanNBN()
{
$field = $this->getMarcReader()->getField('015');
if ($field && $nbn = $this->getSubfield($field, 'a')) {
$result = compact('nbn');
if ($source = $this->getSubfield($field, '7')) {
$result['source'] = $source;
}
return $result;
}
return false;
}
/**
* Get the full titles of the record in alternative scripts.
*
* @return array
*/
public function getTitlesAltScript(): array
{
return $this->getMarcReader()
->getLinkedFieldsSubfields('880', '245', ['a', 'b']);
}
/**
* Get the full titles of the record including section and part information in
* alternative scripts.
*
* @return array
*/
public function getFullTitlesAltScript(): array
{
return $this->getMarcReader()
->getLinkedFieldsSubfields('880', '245', ['a', 'b', 'n', 'p']);
}
/**
* Get the short (pre-subtitle) title of the record in alternative scripts.
*
* @return array
*/
public function getShortTitlesAltScript(): array
{
return $this->getMarcReader()->getLinkedFieldsSubfields('880', '245', ['a']);
}
/**
     * Get the subtitle of the record in alternative scripts.
*
* @return array
*/
public function getSubtitlesAltScript(): array
{
        return $this->getMarcReader()->getLinkedFieldsSubfields('880', '245', ['b']);
}
/**
* Get the text of the part/section portion of the title in alternative scripts.
*
* @return array
*/
public function getTitleSectionsAltScript(): array
{
return $this->getMarcReader()
->getLinkedFieldsSubfields('880', '245', ['n', 'p']);
}
}
| 1 | 32,625 | I like the overall idea here, but I wonder if it would be more clear to define the property in the trait, and then set it in the constructors of the classes using the trait. Alternatively, at a bare minimum, it may be a good idea to add `?? 'Bibliographic'` here, to account for the possibility of the trait being used in a class that does not define the property at all. | vufind-org-vufind | php |
@@ -2,6 +2,10 @@ package protocol
import "time"
+// NonForwardSecurePacketSizeReduction is the number of bytes a non forward-secure packet has to be smaller than a forward-secure packet
+// This makes sure that those packets can always be retransmitted without splitting the contained StreamFrames
+const NonForwardSecurePacketSizeReduction = 50
+
// DefaultMaxCongestionWindow is the default for the max congestion window
const DefaultMaxCongestionWindow = 1000
| 1 | package protocol
import "time"
// DefaultMaxCongestionWindow is the default for the max congestion window
const DefaultMaxCongestionWindow = 1000
// InitialCongestionWindow is the initial congestion window in QUIC packets
const InitialCongestionWindow = 32
// MaxUndecryptablePackets limits the number of undecryptable packets that a
// session queues for later until it sends a public reset.
const MaxUndecryptablePackets = 10
// AckSendDelay is the maximum delay that can be applied to an ACK for a retransmittable packet
// This is the value Chromium is using
const AckSendDelay = 25 * time.Millisecond
// ReceiveStreamFlowControlWindow is the stream-level flow control window for receiving data
// This is the value that Google servers are using
const ReceiveStreamFlowControlWindow ByteCount = (1 << 10) * 32 // 32 kB
// ReceiveConnectionFlowControlWindow is the connection-level flow control window for receiving data
// This is the value that Google servers are using
const ReceiveConnectionFlowControlWindow ByteCount = (1 << 10) * 48 // 48 kB
// MaxReceiveStreamFlowControlWindowServer is the maximum stream-level flow control window for receiving data
// This is the value that Google servers are using
const MaxReceiveStreamFlowControlWindowServer ByteCount = 1 * (1 << 20) // 1 MB
// MaxReceiveConnectionFlowControlWindowServer is the connection-level flow control window for receiving data
// This is the value that Google servers are using
const MaxReceiveConnectionFlowControlWindowServer ByteCount = 1.5 * (1 << 20) // 1.5 MB
// MaxReceiveStreamFlowControlWindowClient is the maximum stream-level flow control window for receiving data, for the client
// This is the value that Chromium is using
const MaxReceiveStreamFlowControlWindowClient ByteCount = 6 * (1 << 20) // 6 MB
// MaxReceiveConnectionFlowControlWindowClient is the connection-level flow control window for receiving data, for the client
// This is the value that Google servers are using
const MaxReceiveConnectionFlowControlWindowClient ByteCount = 15 * (1 << 20) // 15 MB
// ConnectionFlowControlMultiplier determines how much larger the connection flow control window needs to be relative to any stream's flow control window
// This is the value that Chromium is using
const ConnectionFlowControlMultiplier = 1.5
// MaxStreamsPerConnection is the maximum value accepted for the number of streams per connection
const MaxStreamsPerConnection = 100
// MaxIncomingDynamicStreamsPerConnection is the maximum value accepted for the incoming number of dynamic streams per connection
const MaxIncomingDynamicStreamsPerConnection = 100
// MaxStreamsMultiplier is the slack the client is allowed for the maximum number of streams per connection, needed e.g. when packets are out of order or dropped. The minimum of this percentage increase and the absolute increment specified by MaxStreamsMinimumIncrement is used.
const MaxStreamsMultiplier = 1.1
// MaxStreamsMinimumIncrement is the slack the client is allowed for the maximum number of streams per connection, needed e.g. when packets are out of order or dropped. The minimum of this absolute increment and the percentage increase specified by MaxStreamsMultiplier is used.
const MaxStreamsMinimumIncrement = 10
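// Illustrative arithmetic for the two values above: with MaxStreamsPerConnection = 100,
// the multiplier allows 1.1 * 100 = 110 streams and the minimum increment allows
// 100 + 10 = 110, so the effective slack works out to 110 streams either way.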
// MaxNewStreamIDDelta is the maximum difference between a newly opened Stream and the highest StreamID that a client has ever opened
// note that the number of streams is half this value, since the client can only open streams with odd StreamIDs
const MaxNewStreamIDDelta = 4 * MaxStreamsPerConnection
// MaxSessionUnprocessedPackets is the max number of packets stored in each session that are not yet processed.
const MaxSessionUnprocessedPackets = DefaultMaxCongestionWindow
// RetransmissionThreshold + 1 is the number of times a packet has to be NACKed so that it gets retransmitted
const RetransmissionThreshold = 3
// SkipPacketAveragePeriodLength is the average period length in which one packet number is skipped to prevent an Optimistic ACK attack
const SkipPacketAveragePeriodLength PacketNumber = 500
// MaxTrackedSkippedPackets is the maximum number of skipped packet numbers the SentPacketHandler keeps track of for Optimistic ACK attack mitigation
const MaxTrackedSkippedPackets = 10
// STKExpiryTimeSec is the valid time of a source address token in seconds
const STKExpiryTimeSec = 24 * 60 * 60
// MaxTrackedSentPackets is the maximum number of sent packets saved for either later retransmission or entropy calculation
const MaxTrackedSentPackets = 2 * DefaultMaxCongestionWindow
// MaxTrackedReceivedPackets is the maximum number of received packets saved for doing the entropy calculations
const MaxTrackedReceivedPackets = 2 * DefaultMaxCongestionWindow
// MaxTrackedReceivedAckRanges is the maximum number of ACK ranges tracked
const MaxTrackedReceivedAckRanges = DefaultMaxCongestionWindow
// MaxPacketsReceivedBeforeAckSend is the number of packets that can be received before an ACK frame is sent
const MaxPacketsReceivedBeforeAckSend = 20
// RetransmittablePacketsBeforeAck is the number of retransmittable packets that an ACK is sent for
const RetransmittablePacketsBeforeAck = 2
// MaxStreamFrameSorterGaps is the maximum number of gaps between received StreamFrames
// prevents DoS attacks against the streamFrameSorter
const MaxStreamFrameSorterGaps = 1000
// CryptoMaxParams is the upper limit for the number of parameters in a crypto message.
// Value taken from Chrome.
const CryptoMaxParams = 128
// CryptoParameterMaxLength is the upper limit for the length of a parameter in a crypto message.
const CryptoParameterMaxLength = 4000
// EphermalKeyLifetime is the lifetime of the ephemeral key during the handshake, see handshake.getEphermalKEX.
const EphermalKeyLifetime = time.Minute
// InitialIdleTimeout is the idle timeout used before the handshake succeeds.
const InitialIdleTimeout = 5 * time.Second
// DefaultIdleTimeout is the default idle timeout, for the server
const DefaultIdleTimeout = 30 * time.Second
// MaxIdleTimeoutServer is the maximum idle timeout that can be negotiated, for the server
const MaxIdleTimeoutServer = 1 * time.Minute
// MaxIdleTimeoutClient is the idle timeout that the client suggests to the server
const MaxIdleTimeoutClient = 2 * time.Minute
// MaxTimeForCryptoHandshake is the default timeout for a connection until the crypto handshake succeeds.
const MaxTimeForCryptoHandshake = 10 * time.Second
// ClosedSessionDeleteTimeout is the time for which the server ignores packets arriving on a connection that is already closed
// after this time, all information about the old connection will be deleted
const ClosedSessionDeleteTimeout = time.Minute
// NumCachedCertificates is the number of cached compressed certificate chains, each taking ~1K space
const NumCachedCertificates = 128
| 1 | 5,718 | I would prefer to not have a magic number, but construct this value out of other constants we already have. | lucas-clemente-quic-go | go |
@@ -101,6 +101,15 @@ def setup_authentication(config):
from configuration.
"""
config.include('pyramid_multiauth')
+ settings = config.get_settings()
+
+ policies = aslist(settings['multiauth.policies'])
+ if 'basicauth' in policies:
+ config.add_api_capability(
+ "basicauth",
+ description="Very basic authentication sessions. Not for production use.",
+ url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
+ )
# Track policy used, for prefixing user_id and for logging.
def on_policy_selected(event): | 1 | import re
import warnings
from datetime import datetime
from dateutil import parser as dateparser
import structlog
from pyramid.events import NewRequest, NewResponse
from pyramid.exceptions import ConfigurationError
from pyramid.httpexceptions import (HTTPTemporaryRedirect, HTTPGone,
HTTPBadRequest)
from pyramid.renderers import JSON as JSONRenderer
from pyramid.response import Response
from pyramid.security import NO_PERMISSION_REQUIRED
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.settings import asbool, aslist
from pyramid_multiauth import (MultiAuthenticationPolicy,
MultiAuthPolicySelected)
try:
import newrelic.agent
except ImportError: # pragma: no cover
newrelic = None
try:
from werkzeug.contrib.profiler import ProfilerMiddleware
except ImportError: # pragma: no cover
pass
from kinto.core import errors
from kinto.core import utils
from kinto.core import cache
from kinto.core import storage
from kinto.core import permission
from kinto.core.logs import logger
from kinto.core.events import ResourceRead, ResourceChanged, ACTIONS
def setup_request_bound_data(config):
"""Attach custom data on request object, and share it with parent
requests during batch."""
def attach_bound_data(request):
parent = getattr(request, 'parent', None)
return parent.bound_data if parent else {}
config.add_request_method(attach_bound_data, name='bound_data', reify=True)
def setup_json_serializer(config):
import requests
import webob
# Monkey patch to use ujson
webob.request.json = utils.json
requests.models.json = utils.json
# Override json renderer using ujson
renderer = JSONRenderer(serializer=utils.json_serializer)
config.add_renderer('json', renderer)
def setup_version_redirection(config):
"""Add a view which redirects to the current version of the API.
"""
settings = config.get_settings()
redirect_enabled = settings['version_prefix_redirect_enabled']
version_prefix_redirection_enabled = asbool(redirect_enabled)
route_prefix = config.route_prefix
config.registry.route_prefix = route_prefix
# Redirect to the current version of the API if the prefix isn't used.
# Do not redirect if kinto.version_prefix_redirect_enabled is set to
# False.
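    # Illustrative: with route_prefix "v1", a GET on /buckets/default is answered
    # with a 307 redirect to /v1/buckets/default (the querystring is preserved).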
if not version_prefix_redirection_enabled:
return
def _redirect_to_version_view(request):
if request.method.lower() == 'options':
# CORS responses should always have status 200.
return utils.reapply_cors(request, Response())
path = request.matchdict['path']
querystring = request.url[(request.url.rindex(request.path) +
len(request.path)):]
redirect = '/%s/%s%s' % (route_prefix, path, querystring)
raise HTTPTemporaryRedirect(redirect)
# Disable the route prefix passed by the app.
config.route_prefix = None
config.add_route(name='redirect_to_version',
pattern=r'/{path:(?!v[0-9]+)[^\r]*}')
config.add_view(view=_redirect_to_version_view,
route_name='redirect_to_version',
permission=NO_PERMISSION_REQUIRED)
config.route_prefix = route_prefix
def setup_authentication(config):
"""Let pyramid_multiauth manage authentication and authorization
from configuration.
"""
config.include('pyramid_multiauth')
# Track policy used, for prefixing user_id and for logging.
def on_policy_selected(event):
authn_type = event.policy_name.lower()
event.request.authn_type = authn_type
event.request.selected_userid = event.userid
# Add authentication info to context.
logger.bind(uid=event.userid, authn_type=authn_type)
config.add_subscriber(on_policy_selected, MultiAuthPolicySelected)
def setup_backoff(config):
"""Attach HTTP requests/responses objects.
This is useful to attach objects to the request object for easier
access, and to pre-process responses.
"""
def on_new_response(event):
# Add backoff in response headers.
backoff = config.registry.settings['backoff']
if backoff is not None:
backoff = utils.encode_header('%s' % backoff)
event.response.headers['Backoff'] = backoff
config.add_subscriber(on_new_response, NewResponse)
def setup_requests_scheme(config):
"""Force server scheme, host and port at the application level."""
settings = config.get_settings()
http_scheme = settings['http_scheme']
http_host = settings['http_host']
def on_new_request(event):
if http_scheme:
event.request.scheme = http_scheme
if http_host:
event.request.host = http_host
if http_scheme or http_host:
config.add_subscriber(on_new_request, NewRequest)
def setup_deprecation(config):
config.add_tween("kinto.core.initialization._end_of_life_tween_factory")
def _end_of_life_tween_factory(handler, registry):
"""Pyramid tween to handle service end of life."""
deprecation_msg = ("The service you are trying to connect no longer exists"
" at this location.")
def eos_tween(request):
eos_date = registry.settings['eos']
eos_url = registry.settings['eos_url']
eos_message = registry.settings['eos_message']
if not eos_date:
return handler(request)
eos_date = dateparser.parse(eos_date)
if eos_date > datetime.now():
code = "soft-eol"
request.response = handler(request)
else:
code = "hard-eol"
request.response = errors.http_error(
HTTPGone(),
errno=errors.ERRORS.SERVICE_DEPRECATED,
message=deprecation_msg)
errors.send_alert(request, eos_message, url=eos_url, code=code)
return request.response
return eos_tween
def setup_storage(config):
settings = config.get_settings()
# Id generators by resource name.
config.registry.id_generators = {}
for key, value in settings.items():
m = re.match(r'^([^_]*)_?id_generator', key)
if m is None:
continue
resource_name = m.group(1)
id_generator = config.maybe_dotted(value)
config.registry.id_generators[resource_name] = id_generator()
storage_mod = settings['storage_backend']
if not storage_mod:
return
storage_mod = config.maybe_dotted(storage_mod)
backend = storage_mod.load_from_config(config)
if not isinstance(backend, storage.StorageBase):
raise ConfigurationError("Invalid storage backend: %s" % backend)
config.registry.storage = backend
heartbeat = storage.heartbeat(backend)
config.registry.heartbeats['storage'] = heartbeat
def setup_permission(config):
settings = config.get_settings()
permission_mod = settings['permission_backend']
if not permission_mod:
return
permission_mod = config.maybe_dotted(permission_mod)
backend = permission_mod.load_from_config(config)
if not isinstance(backend, permission.PermissionBase):
raise ConfigurationError("Invalid permission backend: %s" % backend)
config.registry.permission = backend
heartbeat = permission.heartbeat(backend)
config.registry.heartbeats['permission'] = heartbeat
def setup_cache(config):
settings = config.get_settings()
cache_mod = settings['cache_backend']
if not cache_mod:
return
cache_mod = config.maybe_dotted(cache_mod)
backend = cache_mod.load_from_config(config)
if not isinstance(backend, cache.CacheBase):
raise ConfigurationError("Invalid cache backend: %s" % backend)
config.registry.cache = backend
heartbeat = cache.heartbeat(backend)
config.registry.heartbeats['cache'] = heartbeat
def setup_statsd(config):
settings = config.get_settings()
config.registry.statsd = None
if settings['statsd_url']:
statsd_mod = settings['statsd_backend']
statsd_mod = config.maybe_dotted(statsd_mod)
client = statsd_mod.load_from_config(config)
config.registry.statsd = client
client.watch_execution_time(config.registry.cache, prefix='backend')
client.watch_execution_time(config.registry.storage, prefix='backend')
client.watch_execution_time(config.registry.permission, prefix='backend')
# Commit so that configured policy can be queried.
config.commit()
policy = config.registry.queryUtility(IAuthenticationPolicy)
if isinstance(policy, MultiAuthenticationPolicy):
for name, subpolicy in policy.get_policies():
client.watch_execution_time(subpolicy,
prefix='authentication',
classname=name)
else:
client.watch_execution_time(policy, prefix='authentication')
def on_new_response(event):
request = event.request
# Count unique users.
user_id = request.prefixed_userid
if user_id:
client.count('users', unique=user_id)
# Count authentication verifications.
if hasattr(request, 'authn_type'):
client.count('authn_type.%s' % request.authn_type)
# Count view calls.
service = request.current_service
if service:
client.count('view.%s.%s' % (service.name, request.method))
config.add_subscriber(on_new_response, NewResponse)
return client
def install_middlewares(app, settings):
"Install a set of middlewares defined in the ini file on the given app."
# Setup new-relic.
if settings.get('newrelic_config'):
ini_file = settings['newrelic_config']
env = settings['newrelic_env']
newrelic.agent.initialize(ini_file, env)
app = newrelic.agent.WSGIApplicationWrapper(app)
# Adds the Werkzeug profiler.
if asbool(settings.get('profiler_enabled')):
profile_dir = settings['profiler_dir']
app = ProfilerMiddleware(app, profile_dir=profile_dir,
restrictions=('*kinto.core*'))
return app
def setup_logging(config):
"""Setup structured logging, and emit `request.summary` event on each
    request, as recommended by the Mozilla Services standard:
* https://mana.mozilla.org/wiki/display/CLOUDSERVICES/Logging+Standard
* http://12factor.net/logs
"""
settings = config.get_settings()
renderer_klass = config.maybe_dotted(settings['logging_renderer'])
renderer = renderer_klass(settings)
structlog.configure(
# Share the logger context by thread.
context_class=structlog.threadlocal.wrap_dict(dict),
# Integrate with Pyramid logging facilities.
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
# Setup logger output format.
processors=[
structlog.stdlib.filter_by_level,
structlog.processors.format_exc_info,
renderer,
])
def on_new_request(event):
request = event.request
# Save the time the request was received by the server.
event.request._received_at = utils.msec_time()
try:
# Pyramid fails if the URL contains invalid UTF-8 characters.
request_path = event.request.path
except UnicodeDecodeError:
raise errors.http_error(
HTTPBadRequest(),
errno=errors.ERRORS.INVALID_PARAMETERS,
message="Invalid URL path.")
# New logger context, with infos for request summary logger.
logger.new(agent=request.headers.get('User-Agent'),
path=request_path,
method=request.method,
querystring=dict(request.GET),
lang=request.headers.get('Accept-Language'),
uid=None,
authn_type=None,
errno=None)
config.add_subscriber(on_new_request, NewRequest)
def on_new_response(event):
response = event.response
request = event.request
# Compute the request processing time in msec (-1 if unknown)
current = utils.msec_time()
duration = current - getattr(request, '_received_at', current - 1)
isotimestamp = datetime.fromtimestamp(current/1000).isoformat()
# Bind infos for request summary logger.
logger.bind(time=isotimestamp,
code=response.status_code,
t=duration)
        # Output application request summary.
if not hasattr(request, 'parent'):
logger.info('request.summary')
config.add_subscriber(on_new_response, NewResponse)
class EventActionFilter(object):
def __init__(self, actions, config):
actions = ACTIONS.from_string_list(actions)
self.actions = [action.value for action in actions]
def phash(self):
return 'for_actions = %s' % (','.join(self.actions))
def __call__(self, event):
action = event.payload.get('action')
return not action or action in self.actions
class EventResourceFilter(object):
def __init__(self, resources, config):
self.resources = resources
def phash(self):
return 'for_resources = %s' % (','.join(self.resources))
def __call__(self, event):
resource = event.payload.get('resource_name')
return not resource or not self.resources or resource in self.resources
def setup_listeners(config):
# Register basic subscriber predicates, to filter events.
config.add_subscriber_predicate('for_actions', EventActionFilter)
config.add_subscriber_predicate('for_resources', EventResourceFilter)
write_actions = (ACTIONS.CREATE, ACTIONS.UPDATE, ACTIONS.DELETE)
settings = config.get_settings()
project_name = settings.get('project_name', '')
listeners = aslist(settings['event_listeners'])
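    # Illustrative settings (listener name and module are assumed):
    #   event_listeners = journal
    #   event_listeners.journal.use = myproject.listeners
    #   event_listeners.journal.actions = create update
    # would load the "journal" listener and restrict it to create/update events.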
for name in listeners:
logger.info('Setting up %r listener' % name)
prefix = 'event_listeners.%s.' % name
try:
listener_mod = config.maybe_dotted(name)
prefix = 'event_listeners.%s.' % name.split('.')[-1]
listener = listener_mod.load_from_config(config, prefix)
except (ImportError, AttributeError):
module_setting = prefix + "use"
# Read from ENV or settings.
module_value = utils.read_env(project_name + "." + module_setting,
settings.get(module_setting))
listener_mod = config.maybe_dotted(module_value)
listener = listener_mod.load_from_config(config, prefix)
# If StatsD is enabled, monitor execution time of listeners.
if getattr(config.registry, "statsd", None):
statsd_client = config.registry.statsd
key = 'listeners.%s' % name
listener = statsd_client.timer(key)(listener.__call__)
# Optional filter by event action.
actions_setting = prefix + "actions"
# Read from ENV or settings.
actions_value = utils.read_env(project_name + "." + actions_setting,
settings.get(actions_setting, ""))
actions = aslist(actions_value)
if len(actions) > 0:
actions = ACTIONS.from_string_list(actions)
else:
actions = write_actions
# Optional filter by event resource name.
resource_setting = prefix + "resources"
# Read from ENV or settings.
resource_value = utils.read_env(project_name + "." + resource_setting,
settings.get(resource_setting, ""))
resource_names = aslist(resource_value)
# Pyramid event predicates.
options = dict(for_actions=actions, for_resources=resource_names)
if ACTIONS.READ in actions:
config.add_subscriber(listener, ResourceRead, **options)
if len(actions) == 1:
return
config.add_subscriber(listener, ResourceChanged, **options)
def load_default_settings(config, default_settings):
"""Read settings provided in Paste ini file, set default values and
replace if defined as environment variable.
"""
settings = config.get_settings()
project_name = settings['project_name']
def _prefixed_keys(key):
unprefixed = key
if key.startswith('kinto.') or key.startswith(project_name + '.'):
unprefixed = key.split('.', 1)[1]
project_prefix = project_name + '.' + unprefixed
kinto_prefix = 'kinto.' + unprefixed
return unprefixed, project_prefix, kinto_prefix
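    # Illustrative: with project_name = 'myapp', _prefixed_keys('kinto.http_port')
    # returns ('http_port', 'myapp.http_port', 'kinto.http_port').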
# Fill settings with default values if not defined.
for key, default_value in sorted(default_settings.items()):
unprefixed, project_prefix, kinto_prefix = keys = _prefixed_keys(key)
is_defined = len(set(settings.keys()).intersection(set(keys))) > 0
if not is_defined:
settings[unprefixed] = default_value
for key, value in sorted(settings.items()):
unprefixed, project_prefix, kinto_prefix = keys = _prefixed_keys(key)
# Fail if not only one is defined.
defined = set(settings.keys()).intersection(set(keys))
distinct_values = set([str(settings[d]) for d in defined])
if len(defined) > 1 and len(distinct_values) > 1:
names = "', '".join(defined)
raise ValueError("Settings '%s' are in conflict." % names)
# Maintain backwards compatibility with old settings files that
# have backend settings like cliquet.foo (which is now
# kinto.core.foo).
unprefixed, _, _ = _prefixed_keys(key)
CONTAIN_CLIQUET_MODULE_NAMES = [
'storage_backend',
'cache_backend',
'permission_backend',
'logging_renderer',
]
if unprefixed in CONTAIN_CLIQUET_MODULE_NAMES and \
value.startswith('cliquet.'):
new_value = value.replace('cliquet.', 'kinto.core.')
logger.warn(
"Backend settings referring to cliquet are DEPRECATED. "
"Please update your {} setting to {} (was: {}).".format(
key, new_value, value))
value = new_value
# Override settings from OS env values.
# e.g. HTTP_PORT, READINGLIST_HTTP_PORT, KINTO_HTTP_PORT
from_env = utils.read_env(unprefixed, value)
from_env = utils.read_env(project_prefix, from_env)
from_env = utils.read_env(kinto_prefix, from_env)
settings[unprefixed] = from_env
config.add_settings(settings)
def initialize(config, version=None, project_name='', default_settings=None):
"""Initialize kinto.core with the given configuration, version and project
name.
This will basically include kinto.core in Pyramid and set route prefix
based on the specified version.
:param config: Pyramid configuration
:type config: ~pyramid:pyramid.config.Configurator
:param str version: Current project version (e.g. '0.0.1') if not defined
in application settings.
:param str project_name: Project name if not defined
in application settings.
:param dict default_settings: Override kinto.core default settings values.
"""
from kinto.core import DEFAULT_SETTINGS
settings = config.get_settings()
project_name = settings.pop('kinto.project_name',
settings.get('project_name')) or project_name
settings['project_name'] = project_name
if not project_name:
warnings.warn('No value specified for `project_name`')
kinto_core_defaults = DEFAULT_SETTINGS.copy()
if default_settings:
kinto_core_defaults.update(default_settings)
load_default_settings(config, kinto_core_defaults)
http_scheme = settings['http_scheme']
if http_scheme != 'https':
warnings.warn('HTTPS is not enabled')
# Override project version from settings.
project_version = settings.get('project_version') or version
if not project_version:
error_msg = "Invalid project version: %s" % project_version
raise ConfigurationError(error_msg)
settings['project_version'] = project_version = str(project_version)
# HTTP API version.
http_api_version = settings.get('http_api_version')
if http_api_version is None:
        # The API version is derived from the module version if not provided.
http_api_version = '.'.join(project_version.split('.')[0:2])
settings['http_api_version'] = http_api_version = str(http_api_version)
api_version = 'v%s' % http_api_version.split('.')[0]
# Include kinto.core views with the correct api version prefix.
config.include("kinto.core", route_prefix=api_version)
config.route_prefix = api_version
| 1 | 10,236 | The limitation is if somebody configure another policy with the same name, but it is an edge case we can ignore I guess. | Kinto-kinto | py |
@@ -31,8 +31,8 @@ import (
_ "gocloud.dev/secrets/awskms"
_ "gocloud.dev/secrets/azurekeyvault"
_ "gocloud.dev/secrets/gcpkms"
- _ "gocloud.dev/secrets/hashivault"
_ "gocloud.dev/secrets/localsecrets"
+ _ "gocloud.dev/secrets/vault"
)
const helpSuffix = ` | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// gocdk-secrets demonstrates the use of the Go CDK secrets package in a
// simple command-line application.
package main
import (
"context"
"encoding/base64"
"flag"
"fmt"
"log"
"os"
"github.com/google/subcommands"
"gocloud.dev/secrets"
// Import the secrets driver packages we want to be able to open.
_ "gocloud.dev/secrets/awskms"
_ "gocloud.dev/secrets/azurekeyvault"
_ "gocloud.dev/secrets/gcpkms"
_ "gocloud.dev/secrets/hashivault"
_ "gocloud.dev/secrets/localsecrets"
)
const helpSuffix = `
See https://gocloud.dev/concepts/urls/ for more background on
Go CDK URLs, and sub-packages under gocloud.dev/secrets
(https://godoc.org/gocloud.dev/secrets#pkg-subdirectories)
for details on the secrets.Keeper URL format.
`
func main() {
subcommands.Register(subcommands.HelpCommand(), "")
subcommands.Register(&decryptCmd{}, "")
subcommands.Register(&encryptCmd{}, "")
log.SetFlags(0)
log.SetPrefix("gocdk-secrets: ")
flag.Parse()
os.Exit(int(subcommands.Execute(context.Background())))
}
type decryptCmd struct {
base64in bool
base64out bool
}
func (*decryptCmd) Name() string { return "decrypt" }
func (*decryptCmd) Synopsis() string { return "Decrypt data" }
func (*decryptCmd) Usage() string {
return `decrypt [-base64in] [-base64out] <keeper URL> <ciphertext>
Decrypt the ciphertext using <keeper URL> and print the result to stdout.
Example:
gocdk-secrets decrypt stringkey://mykey nzam9AJHqH1sqeEr1ZLMbWOf4pp5NRHKYBx/h8loARL83+CBc0WPh8dYzHfccQYFUQ==` + helpSuffix
}
func (cmd *decryptCmd) SetFlags(f *flag.FlagSet) {
f.BoolVar(&cmd.base64in, "base64in", true, "the ciphertext is base64 encoded")
f.BoolVar(&cmd.base64out, "base64out", false, "the resulting plaintext should be base64 encoded before printing it out")
}
func (cmd *decryptCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
if f.NArg() != 2 {
f.Usage()
return subcommands.ExitUsageError
}
keeperURL := f.Arg(0)
ciphertext := f.Arg(1)
cipher := []byte(ciphertext)
if cmd.base64in {
var err error
cipher, err = base64.StdEncoding.DecodeString(ciphertext)
if err != nil {
log.Printf("Failed to base64 decode ciphertext: %v\n", err)
return subcommands.ExitFailure
}
}
// Open a *secrets.Keeper using the keeperURL.
keeper, err := secrets.OpenKeeper(ctx, keeperURL)
if err != nil {
log.Printf("Failed to open keeper: %v\n", err)
return subcommands.ExitFailure
}
defer keeper.Close()
plain, err := keeper.Decrypt(ctx, cipher)
if err != nil {
log.Printf("Failed to decrypt: %v\n", err)
return subcommands.ExitFailure
}
plaintext := string(plain)
if cmd.base64out {
plaintext = base64.StdEncoding.EncodeToString(plain)
}
fmt.Println(plaintext)
return subcommands.ExitSuccess
}
type encryptCmd struct {
base64in bool
base64out bool
}
func (*encryptCmd) Name() string { return "encrypt" }
func (*encryptCmd) Synopsis() string { return "Encrypt data" }
func (*encryptCmd) Usage() string {
return `encrypt [-base64in] [-base64out] <keeper URL> <plaintext>
Encrypt the plaintext using <keeper URL> and print the result to stdout.
Example:
gocdk-secrets encrypt --base64out stringkey://mykey my-plaintext` + helpSuffix
}
func (cmd *encryptCmd) SetFlags(f *flag.FlagSet) {
f.BoolVar(&cmd.base64in, "base64in", false, "the plaintext is base64 encoded")
f.BoolVar(&cmd.base64out, "base64out", true, "the resulting ciphertext should be base64 encoded before printing it out")
}
func (cmd *encryptCmd) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
if f.NArg() != 2 {
f.Usage()
return subcommands.ExitUsageError
}
keeperURL := f.Arg(0)
plaintext := f.Arg(1)
plain := []byte(plaintext)
if cmd.base64in {
var err error
plain, err = base64.StdEncoding.DecodeString(plaintext)
if err != nil {
log.Printf("Failed to base64 decode plaintext: %v\n", err)
return subcommands.ExitFailure
}
}
// Open a *secrets.Keeper using the keeperURL.
keeper, err := secrets.OpenKeeper(ctx, keeperURL)
if err != nil {
log.Printf("Failed to open keeper: %v\n", err)
return subcommands.ExitFailure
}
defer keeper.Close()
cipher, err := keeper.Encrypt(ctx, plain)
if err != nil {
log.Printf("Failed to encrypt: %v\n", err)
return subcommands.ExitFailure
}
ciphertext := string(cipher)
if cmd.base64out {
ciphertext = base64.StdEncoding.EncodeToString(cipher)
}
fmt.Println(ciphertext)
return subcommands.ExitSuccess
}
| 1 | 19,073 | ? The package is named `hashivault`. | google-go-cloud | go |
@@ -240,6 +240,9 @@ static int config_ini_handler(void *user, const char *section, const char *name,
if (strcmp(name, "xwayland") == 0) {
if (strcasecmp(value, "true") == 0) {
config->xwayland = true;
+ } else if (strcasecmp(value, "lazy") == 0) {
+ config->xwayland = true;
+ config->xwayland_lazy = true;
} else if (strcasecmp(value, "false") == 0) {
config->xwayland = false;
} else { | 1 | #ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L
#endif
#include <assert.h>
#include <getopt.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/param.h>
#include <unistd.h>
#include <wlr/config.h>
#include <wlr/types/wlr_box.h>
#include <wlr/util/log.h>
#include "rootston/config.h"
#include "rootston/ini.h"
#include "rootston/input.h"
#include "rootston/keyboard.h"
static void usage(const char *name, int ret) {
fprintf(stderr,
"usage: %s [-C <FILE>] [-E <COMMAND>]\n"
"\n"
" -C <FILE> Path to the configuration file\n"
" (default: rootston.ini).\n"
" See `rootston.ini.example` for config\n"
" file documentation.\n"
" -E <COMMAND> Command that will be ran at startup.\n"
" -D Enable damage tracking debugging.\n",
name);
exit(ret);
}
static struct wlr_box *parse_geometry(const char *str) {
// format: {width}x{height}+{x}+{y}
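	// e.g. "800x600+50+100" yields width=800, height=600, x=50, y=100 (illustrative)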
if (strlen(str) > 255) {
wlr_log(L_ERROR, "cannot parse geometry string, too long");
return NULL;
}
char *buf = strdup(str);
struct wlr_box *box = calloc(1, sizeof(struct wlr_box));
bool has_width = false;
bool has_height = false;
bool has_x = false;
bool has_y = false;
char *pch = strtok(buf, "x+");
while (pch != NULL) {
errno = 0;
char *endptr;
long val = strtol(pch, &endptr, 0);
if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) ||
(errno != 0 && val == 0)) {
goto invalid_input;
}
if (endptr == pch) {
goto invalid_input;
}
if (!has_width) {
box->width = val;
has_width = true;
} else if (!has_height) {
box->height = val;
has_height = true;
} else if (!has_x) {
box->x = val;
has_x = true;
} else if (!has_y) {
box->y = val;
has_y = true;
} else {
break;
}
pch = strtok(NULL, "x+");
}
if (!has_width || !has_height) {
goto invalid_input;
}
free(buf);
return box;
invalid_input:
wlr_log(L_ERROR, "could not parse geometry string: %s", str);
free(buf);
free(box);
return NULL;
}
static uint32_t parse_modifier(const char *symname) {
if (strcmp(symname, "Shift") == 0) {
return WLR_MODIFIER_SHIFT;
} else if (strcmp(symname, "Caps") == 0) {
return WLR_MODIFIER_CAPS;
} else if (strcmp(symname, "Ctrl") == 0) {
return WLR_MODIFIER_CTRL;
} else if (strcmp(symname, "Alt") == 0) {
return WLR_MODIFIER_ALT;
} else if (strcmp(symname, "Mod2") == 0) {
return WLR_MODIFIER_MOD2;
} else if (strcmp(symname, "Mod3") == 0) {
return WLR_MODIFIER_MOD3;
} else if (strcmp(symname, "Logo") == 0) {
return WLR_MODIFIER_LOGO;
} else if (strcmp(symname, "Mod5") == 0) {
return WLR_MODIFIER_MOD5;
} else {
return 0;
}
}
void add_binding_config(struct wl_list *bindings, const char* combination,
const char* command) {
struct roots_binding_config *bc =
calloc(1, sizeof(struct roots_binding_config));
xkb_keysym_t keysyms[ROOTS_KEYBOARD_PRESSED_KEYSYMS_CAP];
char *symnames = strdup(combination);
char *symname = strtok(symnames, "+");
while (symname) {
uint32_t modifier = parse_modifier(symname);
if (modifier != 0) {
bc->modifiers |= modifier;
} else {
xkb_keysym_t sym = xkb_keysym_from_name(symname,
XKB_KEYSYM_NO_FLAGS);
if (sym == XKB_KEY_NoSymbol) {
wlr_log(L_ERROR, "got unknown key binding symbol: %s",
symname);
free(bc);
bc = NULL;
break;
}
keysyms[bc->keysyms_len] = sym;
bc->keysyms_len++;
}
symname = strtok(NULL, "+");
}
free(symnames);
if (bc) {
wl_list_insert(bindings, &bc->link);
bc->command = strdup(command);
bc->keysyms = malloc(bc->keysyms_len * sizeof(xkb_keysym_t));
memcpy(bc->keysyms, keysyms, bc->keysyms_len * sizeof(xkb_keysym_t));
}
}
static void config_handle_cursor(struct roots_config *config,
const char *seat_name, const char *name, const char *value) {
struct roots_cursor_config *cc;
bool found = false;
wl_list_for_each(cc, &config->cursors, link) {
if (strcmp(cc->seat, seat_name) == 0) {
found = true;
break;
}
}
if (!found) {
cc = calloc(1, sizeof(struct roots_cursor_config));
cc->seat = strdup(seat_name);
wl_list_insert(&config->cursors, &cc->link);
}
if (strcmp(name, "map-to-output") == 0) {
free(cc->mapped_output);
cc->mapped_output = strdup(value);
} else if (strcmp(name, "geometry") == 0) {
free(cc->mapped_box);
cc->mapped_box = parse_geometry(value);
} else if (strcmp(name, "theme") == 0) {
free(cc->theme);
cc->theme = strdup(value);
} else if (strcmp(name, "default-image") == 0) {
free(cc->default_image);
cc->default_image = strdup(value);
} else {
wlr_log(L_ERROR, "got unknown cursor config: %s", name);
}
}
static void config_handle_keyboard(struct roots_config *config,
const char *device_name, const char *name, const char *value) {
struct roots_keyboard_config *kc;
bool found = false;
wl_list_for_each(kc, &config->keyboards, link) {
if (strcmp(kc->name, device_name) == 0) {
found = true;
break;
}
}
if (!found) {
kc = calloc(1, sizeof(struct roots_keyboard_config));
kc->name = strdup(device_name);
wl_list_insert(&config->keyboards, &kc->link);
}
if (strcmp(name, "meta-key") == 0) {
kc->meta_key = parse_modifier(value);
if (kc->meta_key == 0) {
wlr_log(L_ERROR, "got unknown meta key: %s", name);
}
} else if (strcmp(name, "rules") == 0) {
kc->rules = strdup(value);
} else if (strcmp(name, "model") == 0) {
kc->model = strdup(value);
} else if (strcmp(name, "layout") == 0) {
kc->layout = strdup(value);
} else if (strcmp(name, "variant") == 0) {
kc->variant = strdup(value);
} else if (strcmp(name, "options") == 0) {
kc->options = strdup(value);
} else if (strcmp(name, "repeat-rate") == 0) {
kc->repeat_rate = strtol(value, NULL, 10);
} else if (strcmp(name, "repeat-delay") == 0) {
kc->repeat_delay = strtol(value, NULL, 10);
} else {
wlr_log(L_ERROR, "got unknown keyboard config: %s", name);
}
}
static const char *output_prefix = "output:";
static const char *device_prefix = "device:";
static const char *keyboard_prefix = "keyboard:";
static const char *cursor_prefix = "cursor:";
static int config_ini_handler(void *user, const char *section, const char *name,
const char *value) {
struct roots_config *config = user;
if (strcmp(section, "core") == 0) {
if (strcmp(name, "xwayland") == 0) {
if (strcasecmp(value, "true") == 0) {
config->xwayland = true;
} else if (strcasecmp(value, "false") == 0) {
config->xwayland = false;
} else {
wlr_log(L_ERROR, "got unknown xwayland value: %s", value);
}
} else {
wlr_log(L_ERROR, "got unknown core config: %s", name);
}
} else if (strncmp(output_prefix, section, strlen(output_prefix)) == 0) {
const char *output_name = section + strlen(output_prefix);
struct roots_output_config *oc;
bool found = false;
wl_list_for_each(oc, &config->outputs, link) {
if (strcmp(oc->name, output_name) == 0) {
found = true;
break;
}
}
if (!found) {
oc = calloc(1, sizeof(struct roots_output_config));
oc->name = strdup(output_name);
oc->transform = WL_OUTPUT_TRANSFORM_NORMAL;
oc->scale = 1;
oc->enable = true;
wl_list_insert(&config->outputs, &oc->link);
}
if (strcmp(name, "enable") == 0) {
if (strcasecmp(value, "true") == 0) {
oc->enable = true;
} else if (strcasecmp(value, "false") == 0) {
oc->enable = false;
} else {
wlr_log(L_ERROR, "got invalid output enable value: %s", value);
}
} else if (strcmp(name, "x") == 0) {
oc->x = strtol(value, NULL, 10);
} else if (strcmp(name, "y") == 0) {
oc->y = strtol(value, NULL, 10);
} else if (strcmp(name, "scale") == 0) {
oc->scale = strtof(value, NULL);
assert(oc->scale > 0);
} else if (strcmp(name, "rotate") == 0) {
if (strcmp(value, "normal") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_NORMAL;
} else if (strcmp(value, "90") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_90;
} else if (strcmp(value, "180") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_180;
} else if (strcmp(value, "270") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_270;
} else if (strcmp(value, "flipped") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_FLIPPED;
} else if (strcmp(value, "flipped-90") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_FLIPPED_90;
} else if (strcmp(value, "flipped-180") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_FLIPPED_180;
} else if (strcmp(value, "flipped-270") == 0) {
oc->transform = WL_OUTPUT_TRANSFORM_FLIPPED_270;
} else {
wlr_log(L_ERROR, "got unknown transform value: %s", value);
}
} else if (strcmp(name, "mode") == 0) {
char *end;
oc->mode.width = strtol(value, &end, 10);
assert(*end == 'x');
++end;
oc->mode.height = strtol(end, &end, 10);
if (*end) {
assert(*end == '@');
++end;
oc->mode.refresh_rate = strtof(end, &end);
assert(strcmp("Hz", end) == 0);
}
wlr_log(L_DEBUG, "Configured output %s with mode %dx%d@%f",
oc->name, oc->mode.width, oc->mode.height,
oc->mode.refresh_rate);
}
} else if (strncmp(cursor_prefix, section, strlen(cursor_prefix)) == 0) {
const char *seat_name = section + strlen(cursor_prefix);
config_handle_cursor(config, seat_name, name, value);
} else if (strcmp(section, "cursor") == 0) {
config_handle_cursor(config, ROOTS_CONFIG_DEFAULT_SEAT_NAME, name,
value);
} else if (strncmp(device_prefix, section, strlen(device_prefix)) == 0) {
const char *device_name = section + strlen(device_prefix);
struct roots_device_config *dc;
bool found = false;
wl_list_for_each(dc, &config->devices, link) {
if (strcmp(dc->name, device_name) == 0) {
found = true;
break;
}
}
if (!found) {
dc = calloc(1, sizeof(struct roots_device_config));
dc->name = strdup(device_name);
dc->seat = strdup(ROOTS_CONFIG_DEFAULT_SEAT_NAME);
wl_list_insert(&config->devices, &dc->link);
}
if (strcmp(name, "map-to-output") == 0) {
free(dc->mapped_output);
dc->mapped_output = strdup(value);
} else if (strcmp(name, "geometry") == 0) {
free(dc->mapped_box);
dc->mapped_box = parse_geometry(value);
} else if (strcmp(name, "seat") == 0) {
free(dc->seat);
dc->seat = strdup(value);
} else if (strcmp(name, "tap_enabled") == 0) {
if (strcasecmp(value, "true") == 0) {
dc->tap_enabled = true;
} else if (strcasecmp(value, "false") == 0) {
dc->tap_enabled = false;
} else {
wlr_log(L_ERROR,
"got unknown tap_enabled value: %s",
value);
}
} else {
wlr_log(L_ERROR, "got unknown device config: %s", name);
}
} else if (strcmp(section, "keyboard") == 0) {
config_handle_keyboard(config, "", name, value);
} else if (strncmp(keyboard_prefix,
section, strlen(keyboard_prefix)) == 0) {
const char *device_name = section + strlen(keyboard_prefix);
config_handle_keyboard(config, device_name, name, value);
} else if (strcmp(section, "bindings") == 0) {
add_binding_config(&config->bindings, name, value);
} else {
wlr_log(L_ERROR, "got unknown config section: %s", section);
}
return 1;
}
struct roots_config *roots_config_create_from_args(int argc, char *argv[]) {
struct roots_config *config = calloc(1, sizeof(struct roots_config));
if (config == NULL) {
return NULL;
}
config->xwayland = true;
wl_list_init(&config->outputs);
wl_list_init(&config->devices);
wl_list_init(&config->keyboards);
wl_list_init(&config->cursors);
wl_list_init(&config->bindings);
int c;
while ((c = getopt(argc, argv, "C:E:hD")) != -1) {
switch (c) {
case 'C':
config->config_path = strdup(optarg);
break;
case 'E':
config->startup_cmd = strdup(optarg);
break;
case 'D':
config->debug_damage_tracking = true;
break;
case 'h':
case '?':
usage(argv[0], c != 'h');
}
}
if (!config->config_path) {
// get the config path from the current directory
char cwd[MAXPATHLEN];
if (getcwd(cwd, sizeof(cwd)) != NULL) {
char buf[MAXPATHLEN];
if (snprintf(buf, MAXPATHLEN, "%s/%s", cwd, "rootston.ini") >= MAXPATHLEN) {
wlr_log(L_ERROR, "config path too long");
exit(1);
}
config->config_path = strdup(buf);
} else {
wlr_log(L_ERROR, "could not get cwd");
exit(1);
}
}
int result = ini_parse(config->config_path, config_ini_handler, config);
if (result == -1) {
wlr_log(L_DEBUG, "No config file found. Using sensible defaults.");
add_binding_config(&config->bindings, "Logo+Shift+E", "exit");
add_binding_config(&config->bindings, "Ctrl+q", "close");
add_binding_config(&config->bindings, "Alt+Tab", "next_window");
struct roots_keyboard_config *kc =
calloc(1, sizeof(struct roots_keyboard_config));
kc->meta_key = WLR_MODIFIER_LOGO;
kc->name = strdup("");
wl_list_insert(&config->keyboards, &kc->link);
} else if (result == -2) {
wlr_log(L_ERROR, "Could not allocate memory to parse config file");
exit(1);
} else if (result != 0) {
wlr_log(L_ERROR, "Could not parse config file");
exit(1);
}
return config;
}
void roots_config_destroy(struct roots_config *config) {
struct roots_output_config *oc, *otmp = NULL;
wl_list_for_each_safe(oc, otmp, &config->outputs, link) {
free(oc->name);
free(oc);
}
struct roots_device_config *dc, *dtmp = NULL;
wl_list_for_each_safe(dc, dtmp, &config->devices, link) {
free(dc->name);
free(dc->seat);
free(dc->mapped_output);
free(dc->mapped_box);
free(dc);
}
struct roots_keyboard_config *kc, *ktmp = NULL;
wl_list_for_each_safe(kc, ktmp, &config->keyboards, link) {
free(kc->name);
free(kc->rules);
free(kc->model);
free(kc->layout);
free(kc->variant);
free(kc->options);
free(kc);
}
struct roots_cursor_config *cc, *ctmp = NULL;
wl_list_for_each_safe(cc, ctmp, &config->cursors, link) {
free(cc->seat);
free(cc->mapped_output);
free(cc->mapped_box);
free(cc->theme);
free(cc->default_image);
free(cc);
}
struct roots_binding_config *bc, *btmp = NULL;
wl_list_for_each_safe(bc, btmp, &config->bindings, link) {
free(bc->keysyms);
free(bc->command);
free(bc);
}
free(config->config_path);
free(config);
}
struct roots_output_config *roots_config_get_output(struct roots_config *config,
struct wlr_output *output) {
char name[83];
snprintf(name, sizeof(name), "%s %s %s", output->make, output->model,
output->serial);
struct roots_output_config *oc;
wl_list_for_each(oc, &config->outputs, link) {
if (strcmp(oc->name, output->name) == 0 ||
strcmp(oc->name, name) == 0) {
return oc;
}
}
return NULL;
}
struct roots_device_config *roots_config_get_device(struct roots_config *config,
struct wlr_input_device *device) {
struct roots_device_config *d_config;
wl_list_for_each(d_config, &config->devices, link) {
if (strcmp(d_config->name, device->name) == 0) {
return d_config;
}
}
return NULL;
}
struct roots_keyboard_config *roots_config_get_keyboard(
struct roots_config *config, struct wlr_input_device *device) {
const char *device_name = "";
if (device != NULL) {
device_name = device->name;
}
struct roots_keyboard_config *kc;
wl_list_for_each(kc, &config->keyboards, link) {
if (strcmp(kc->name, device_name) == 0) {
return kc;
}
}
return NULL;
}
struct roots_cursor_config *roots_config_get_cursor(struct roots_config *config,
const char *seat_name) {
if (seat_name == NULL) {
seat_name = ROOTS_CONFIG_DEFAULT_SEAT_NAME;
}
struct roots_cursor_config *cc;
wl_list_for_each(cc, &config->cursors, link) {
if (strcmp(cc->seat, seat_name) == 0) {
return cc;
}
}
return NULL;
}
| 1 | 11,276 | I don't think this should enable xwayland, because it's convenient to toggle xwayland just by setting `xwayland` to `false` | swaywm-wlroots | c |
@@ -28,7 +28,6 @@ import (
fakeclientset "k8s.io/client-go/kubernetes/fake"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
-
fakeaggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
)
| 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certificate
import (
"bytes"
"io/ioutil"
"os"
"path"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/wait"
genericoptions "k8s.io/apiserver/pkg/server/options"
fakeclientset "k8s.io/client-go/kubernetes/fake"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
fakeaggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
)
const (
fakeTLSCert = `-----BEGIN CERTIFICATE-----
MIICBDCCAW2gAwIBAgIJAPgVBh+4xbGoMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNV
BAMMEHdlYmhvb2tfdGVzdHNfY2EwIBcNMTcwNzI4MjMxNTI4WhgPMjI5MTA1MTMy
MzE1MjhaMB8xHTAbBgNVBAMMFHdlYmhvb2tfdGVzdHNfY2xpZW50MIGfMA0GCSqG
SIb3DQEBAQUAA4GNADCBiQKBgQDkGXXSm6Yun5o3Jlmx45rItcQ2pmnoDk4eZfl0
rmPa674s2pfYo3KywkXQ1Fp3BC8GUgzPLSfJ8xXya9Lg1Wo8sHrDln0iRg5HXxGu
uFNhRBvj2S0sIff0ZG/IatB9I6WXVOUYuQj6+A0CdULNj1vBqH9+7uWbLZ6lrD4b
a44x/wIDAQABo0owSDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAU
BggrBgEFBQcDAgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkqhkiG9w0B
AQsFAAOBgQCpN27uh/LjUVCaBK7Noko25iih/JSSoWzlvc8CaipvSPofNWyGx3Vu
OdcSwNGYX/pp4ZoAzFij/Y5u0vKTVLkWXATeTMVmlPvhmpYjj9gPkCSY6j/SiKlY
kGy0xr+0M5UQkMBcfIh9oAp9um1fZHVWAJAGP/ikZgkcUey0LmBn8w==
-----END CERTIFICATE-----`
fakeTLSKey = `-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQDkGXXSm6Yun5o3Jlmx45rItcQ2pmnoDk4eZfl0rmPa674s2pfY
o3KywkXQ1Fp3BC8GUgzPLSfJ8xXya9Lg1Wo8sHrDln0iRg5HXxGuuFNhRBvj2S0s
Iff0ZG/IatB9I6WXVOUYuQj6+A0CdULNj1vBqH9+7uWbLZ6lrD4ba44x/wIDAQAB
AoGAZbWwowvCq1GBq4vPPRI3h739Uz0bRl1ymf1woYXNguXRtCB4yyH+2BTmmrrF
6AIWkePuUEdbUaKyK5nGu3iOWM+/i6NP3kopQANtbAYJ2ray3kwvFlhqyn1bxX4n
gl/Cbdw1If4zrDrB66y8mYDsjzK7n/gFaDNcY4GArjvOXKkCQQD9Lgv+WD73y4RP
yS+cRarlEeLLWVsX/pg2oEBLM50jsdUnrLSW071MjBgP37oOXzqynF9SoDbP2Y5C
x+aGux9LAkEA5qPlQPv0cv8Wc3qTI+LixZ/86PPHKWnOnwaHm3b9vQjZAkuVQg3n
Wgg9YDmPM87t3UFH7ZbDihUreUxwr9ZjnQJAZ9Z95shMsxbOYmbSVxafu6m1Sc+R
M+sghK7/D5jQpzYlhUspGf8n0YBX0hLhXUmjamQGGH5LXL4Owcb4/mM6twJAEVio
SF/qva9jv+GrKVrKFXT374lOJFY53Qn/rvifEtWUhLCslCA5kzLlctRBafMZPrfH
Mh5RrJP1BhVysDbenQJASGcc+DiF7rB6K++ZGyC11E2AP29DcZ0pgPESSV7npOGg
+NqPRZNVCSZOiVmNuejZqmwKhZNGZnBFx1Y+ChAAgw==
-----END RSA PRIVATE KEY-----`
fakeCACert = `-----BEGIN CERTIFICATE-----
MIICBDCCAW2gAwIBAgIJAPgVBh+4xbGoMA0GCSqGSIb3DQEBCwUAMBsxGTAXBgNV
BAMMEHdlYmhvb2tfdGVzdHNfY2EwIBcNMTcwNzI4MjMxNTI4WhgPMjI5MTA1MTMy
MzE1MjhaMB8xHTAbBgNVBAMMFHdlYmhvb2tfdGVzdHNfY2xpZW50MIGfMA0GCSqG
SIb3DQEBAQUAA4GNADCBiQKBgQDkGXXSm6Yun5o3Jlmx45rItcQ2pmnoDk4eZfl0
rmPa674s2pfYo3KywkXQ1Fp3BC8GUgzPLSfJ8xXya9Lg1Wo8sHrDln0iRg5HXxGu
uFNhRBvj2S0sIff0ZG/IatB9I6WXVOUYuQj6+A0CdULNj1vBqH9+7uWbLZ6lrD4b
a44x/wIDAQABo0owSDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAU
BggrBgEFBQcDAgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkqhkiG9w0B
AQsFAAOBgQCpN27uh/LjUVCaBK7Noko25iih/JSSoWzlvc8CaipvSPofNWyGx3Vu
OdcSwNGYX/pp4ZoAzFij/Y5u0vKTVLkWXATeTMVmlPvhmpYjj9gPkCSY6j/SiKlY
kGy0xr+0M5UQkMBcfIh9oAp9um1fZHVWAJAGP/ikZgkcUey0LmBn8w==
-----END CERTIFICATE-----`
)
func TestApplyServerCert(t *testing.T) {
tests := []struct {
name string
selfSignedCert bool
tlsCert []byte
tlsKey []byte
caCert []byte
wantErr bool
wantCertKey bool
wantGeneratedCert bool
wantCACert []byte
testRotate bool
}{
{
name: "self-signed",
selfSignedCert: true,
tlsCert: nil,
tlsKey: nil,
caCert: nil,
wantErr: false,
wantCertKey: false,
wantGeneratedCert: true,
wantCACert: nil,
testRotate: false,
},
{
name: "user-provided",
selfSignedCert: false,
tlsCert: []byte(fakeTLSCert),
tlsKey: []byte(fakeTLSKey),
caCert: []byte(fakeCACert),
wantErr: false,
wantCertKey: true,
wantGeneratedCert: false,
wantCACert: []byte(fakeCACert),
testRotate: false,
},
{
name: "user-provided-missing-tls-crt",
selfSignedCert: false,
tlsCert: nil,
tlsKey: []byte(fakeTLSKey),
caCert: []byte(fakeCACert),
wantErr: true,
testRotate: false,
},
{
name: "user-provided-missing-tls-key",
selfSignedCert: false,
tlsCert: []byte(fakeTLSCert),
tlsKey: nil,
caCert: []byte(fakeCACert),
wantErr: true,
testRotate: false,
},
{
name: "user-provided-missing-ca-crt",
selfSignedCert: false,
tlsCert: []byte(fakeTLSCert),
tlsKey: []byte(fakeTLSKey),
caCert: nil,
wantErr: true,
testRotate: false,
},
{
name: "self-signed-rotate",
selfSignedCert: true,
tlsCert: nil,
tlsKey: nil,
caCert: nil,
wantErr: false,
wantCertKey: false,
wantGeneratedCert: true,
wantCACert: nil,
testRotate: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var err error
certDir, err = ioutil.TempDir("", "antrea-tls-test")
if err != nil {
t.Fatalf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(certDir)
selfSignedCertDir, err = ioutil.TempDir("", "antrea-self-signed")
if err != nil {
t.Fatalf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(selfSignedCertDir)
certReadyTimeout = 100 * time.Millisecond
secureServing := genericoptions.NewSecureServingOptions().WithLoopback()
if tt.tlsCert != nil {
certutil.WriteCert(path.Join(certDir, TLSCertFile), tt.tlsCert)
}
if tt.tlsKey != nil {
keyutil.WriteKey(path.Join(certDir, TLSKeyFile), tt.tlsKey)
}
if tt.caCert != nil {
certutil.WriteCert(path.Join(certDir, CACertFile), tt.caCert)
}
if tt.testRotate {
maxRotateDuration = 2 * time.Second
}
clientset := fakeclientset.NewSimpleClientset()
aggregatorClientset := fakeaggregatorclientset.NewSimpleClientset()
got, err := ApplyServerCert(tt.selfSignedCert, clientset, aggregatorClientset, secureServing)
if err != nil || tt.wantErr {
if (err != nil) != tt.wantErr {
t.Errorf("ApplyServerCert() error = %v, wantErr %v", err, tt.wantErr)
}
return
}
if tt.selfSignedCert && tt.testRotate {
oldCertKeyContent := got.getCertificate()
err := wait.Poll(time.Second, 8*time.Second, func() (bool, error) {
newCertKeyContent := got.getCertificate()
equal := bytes.Equal(oldCertKeyContent, newCertKeyContent)
return !equal, nil
})
assert.Nil(t, err, "CA cert not updated")
}
if tt.wantCertKey {
assert.Equal(t, genericoptions.CertKey{CertFile: certDir + "/tls.crt", KeyFile: certDir + "/tls.key"}, secureServing.ServerCert.CertKey, "CertKey doesn't match")
}
if tt.wantGeneratedCert {
assert.Equal(t, genericoptions.CertKey{CertFile: selfSignedCertDir + "/antrea-controller.crt", KeyFile: selfSignedCertDir + "/antrea-controller.key"}, secureServing.ServerCert.CertKey, "SelfSigned certs not generated")
} else {
assert.NotEqual(t, genericoptions.CertKey{CertFile: selfSignedCertDir + "/antrea-controller.crt", KeyFile: selfSignedCertDir + "/antrea-controller.key"}, secureServing.ServerCert.CertKey, "SelfSigned certs generated erroneously")
}
if tt.wantCACert != nil {
assert.Equal(t, tt.wantCACert, got.caContentProvider.CurrentCABundleContent(), "CA cert doesn't match")
} else {
assert.NotEmpty(t, got.caContentProvider.CurrentCABundleContent(), "CA cert is empty")
}
})
}
}
| 1 | 33,415 | Remove this line by accident? | antrea-io-antrea | go |
@@ -16,12 +16,13 @@
package com.palantir.baseline.plugins;
+import java.util.Collections;
import java.util.Objects;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Dependency;
-import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.SourceSet;
+import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.compile.JavaCompile;
public final class BaselineImmutables implements Plugin<Project> { | 1 | /*
* (c) Copyright 2021 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.plugins;
import java.util.Objects;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Dependency;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.compile.JavaCompile;
public final class BaselineImmutables implements Plugin<Project> {
@Override
public void apply(Project project) {
project.getPluginManager().withPlugin("java", unused -> {
project.afterEvaluate(proj -> {
proj.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().stream()
.filter(sourceSet -> hasImmutablesProcessor(project, sourceSet))
.forEach(sourceSet -> addImmutablesIncrementalCompilerArg(project, sourceSet));
});
});
}
private static boolean hasImmutablesProcessor(Project project, SourceSet sourceSet) {
return project
.getConfigurations()
.getByName(sourceSet.getAnnotationProcessorConfigurationName())
.getDependencies()
.stream()
.anyMatch(BaselineImmutables::isImmutablesValue);
}
private static boolean isImmutablesValue(Dependency dep) {
return Objects.equals(dep.getGroup(), "org.immutables") && Objects.equals(dep.getName(), "value");
}
private static void addImmutablesIncrementalCompilerArg(Project project, SourceSet sourceSet) {
project.getTasks()
.named(sourceSet.getCompileJavaTaskName(), JavaCompile.class)
.get()
.getOptions()
.getCompilerArgs()
.add("-Aimmutables.gradle.incremental");
}
}
| 1 | 9,063 | Calling `.stream()` on a `DomainObjectCollection` is pretty much always a bug, as it doesn't include objects added later (and encourages people to use afterEvaluate). I wonder if we should make this an error prone check? | palantir-gradle-baseline | java |
@@ -201,7 +201,10 @@ func CreateGitIgnore(targetDir string, ignores ...string) error {
if fileutil.FileExists(gitIgnoreFilePath) {
sigFound, err := fileutil.FgrepStringInFile(gitIgnoreFilePath, DdevFileSignature)
- util.CheckErr(err)
+ if err != nil {
+ return err
+ }
+
 		// If the file exists but we did not find the ddev signature in it, warn about it.
if !sigFound {
util.Warning("User-managed %s will not be managed/overwritten by ddev", gitIgnoreFilePath) | 1 | package ddevapp
import (
"fmt"
"path"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/fsouza/go-dockerclient"
"github.com/gosuri/uitable"
"errors"
"os"
"text/template"
"github.com/Masterminds/sprig"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
gohomedir "github.com/mitchellh/go-homedir"
)
// GetApps returns an array of ddev applications.
func GetApps() []*DdevApp {
apps := make([]*DdevApp, 0)
labels := map[string]string{
"com.ddev.platform": "ddev",
"com.docker.compose.service": "web",
}
containers, err := dockerutil.FindContainersByLabels(labels)
if err == nil {
for _, siteContainer := range containers {
app := &DdevApp{}
approot, ok := siteContainer.Labels["com.ddev.approot"]
if !ok {
break
}
err = app.Init(approot)
// Artificially populate sitename and apptype based on labels
// if app.Init() failed.
if err != nil {
app.Name = siteContainer.Labels["com.ddev.site-name"]
app.Type = siteContainer.Labels["com.ddev.app-type"]
app.AppRoot = siteContainer.Labels["com.ddev.approot"]
}
apps = append(apps, app)
}
}
return apps
}
// CreateAppTable will create a new app table for describe and list output
func CreateAppTable() *uitable.Table {
table := uitable.New()
table.MaxColWidth = 140
table.Separator = " "
table.Wrap = true
table.AddRow("NAME", "TYPE", "LOCATION", "URL(s)", "STATUS")
return table
}
// RenderHomeRootedDir shortens a directory name to replace homedir with ~
func RenderHomeRootedDir(path string) string {
userDir, err := gohomedir.Dir()
util.CheckErr(err)
result := strings.Replace(path, userDir, "~", 1)
result = strings.Replace(result, "\\", "/", -1)
return result
}
// RenderAppRow will add an application row to an existing table for describe and list output.
func RenderAppRow(table *uitable.Table, row map[string]interface{}) {
status := fmt.Sprint(row["status"])
switch {
case strings.Contains(status, SiteStopped):
status = color.YellowString(status)
case strings.Contains(status, SiteNotFound):
status = color.RedString(status)
case strings.Contains(status, SiteDirMissing):
status = color.RedString(status)
case strings.Contains(status, SiteConfigMissing):
status = color.RedString(status)
default:
status = color.CyanString(status)
}
urls := row["httpurl"].(string)
if row["httpsurl"] != "" {
urls = urls + "\n" + row["httpsurl"].(string)
}
table.AddRow(
row["name"],
row["type"],
row["shortroot"],
urls,
status,
)
}
// Cleanup will remove ddev containers and volumes even if docker-compose.yml
// has been deleted.
func Cleanup(app *DdevApp) error {
client := dockerutil.GetDockerClient()
// Find all containers which match the current site name.
labels := map[string]string{
"com.ddev.site-name": app.GetName(),
}
containers, err := dockerutil.FindContainersByLabels(labels)
if err != nil {
return err
}
// First, try stopping the listed containers if they are running.
for i := range containers {
containerName := containers[i].Names[0][1:len(containers[i].Names[0])]
removeOpts := docker.RemoveContainerOptions{
ID: containers[i].ID,
RemoveVolumes: true,
Force: true,
}
output.UserOut.Printf("Removing container: %s", containerName)
if err = client.RemoveContainer(removeOpts); err != nil {
return fmt.Errorf("could not remove container %s: %v", containerName, err)
}
}
err = StopRouterIfNoContainers()
return err
}
// CheckForConf checks for a config.yaml at the cwd or parent dirs.
func CheckForConf(confPath string) (string, error) {
if fileutil.FileExists(confPath + "/.ddev/config.yaml") {
return confPath, nil
}
pathList := strings.Split(confPath, "/")
for range pathList {
confPath = filepath.Dir(confPath)
if fileutil.FileExists(confPath + "/.ddev/config.yaml") {
return confPath, nil
}
}
return "", errors.New("no .ddev/config.yaml file was found in this directory or any parent")
}
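For illustration, CheckForConf walks upward from the starting directory until a .ddev/config.yaml is found; a hypothetical wrapper showing the intended call pattern (the path is made up):

// findAppRoot resolves the app root from a nested docroot directory.
func findAppRoot() (string, error) {
	// Returns the first parent directory containing .ddev/config.yaml.
	return CheckForConf("/home/user/myproject/web/sites/default")
}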
// ddevContainersRunning determines if any ddev-controlled containers are currently running.
func ddevContainersRunning() (bool, error) {
containers, err := dockerutil.GetDockerContainers(false)
if err != nil {
return false, err
}
for _, container := range containers {
if _, ok := container.Labels["com.ddev.platform"]; ok {
return true, nil
}
}
return false, nil
}
// getTemplateFuncMap will return a map of useful template functions.
func getTemplateFuncMap() map[string]interface{} {
// Use sprig's template function map as a base
m := sprig.FuncMap()
// Add helpful utilities on top of it
m["joinPath"] = path.Join
return m
}
// gitIgnoreTemplate will write a .gitignore file.
// This template expects string slice to be provided, with each string corresponding to
// a line in the resulting .gitignore.
const gitIgnoreTemplate = `{{.Signature}}: Automatically generated ddev .gitignore.
{{range .IgnoredItems}}
/{{.}}{{end}}
`
type ignoreTemplateContents struct {
Signature string
IgnoredItems []string
}
// CreateGitIgnore will create a .gitignore file in the target directory if one does not exist.
// Each value in ignores will be added as a new line to the .gitignore.
func CreateGitIgnore(targetDir string, ignores ...string) error {
gitIgnoreFilePath := filepath.Join(targetDir, ".gitignore")
if fileutil.FileExists(gitIgnoreFilePath) {
sigFound, err := fileutil.FgrepStringInFile(gitIgnoreFilePath, DdevFileSignature)
util.CheckErr(err)
		// If the file exists but we did not find the ddev signature in it, warn about it.
if !sigFound {
util.Warning("User-managed %s will not be managed/overwritten by ddev", gitIgnoreFilePath)
return nil
}
// Otherwise, remove the existing file to prevent surprising template results
err = os.Remove(gitIgnoreFilePath)
if err != nil {
return err
}
}
tmpl, err := template.New("gitignore").Funcs(getTemplateFuncMap()).Parse(gitIgnoreTemplate)
if err != nil {
return err
}
file, err := os.OpenFile(gitIgnoreFilePath, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err
}
defer util.CheckClose(file)
parms := ignoreTemplateContents{
Signature: DdevFileSignature,
IgnoredItems: ignores,
}
if err = tmpl.Execute(file, parms); err != nil {
return err
}
return nil
}
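The review note on the patch above recommends returning errors to the caller instead of util.CheckErr, which panics via log.Panic and can make "can't happen" failures look like something else. A small sketch of that style for the signature check, using the same helpers as above (the wrapper name is hypothetical):

// hasDdevSignature reports whether the file at path contains the ddev
// signature, propagating any read error instead of panicking.
func hasDdevSignature(path string) (bool, error) {
	sigFound, err := fileutil.FgrepStringInFile(path, DdevFileSignature)
	if err != nil {
		return false, err // no util.CheckErr/log.Panic here
	}
	return sigFound, nil
}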
// isTar determines whether the object at the filepath is a .tar archive.
func isTar(filepath string) bool {
if strings.HasSuffix(filepath, ".tar") {
return true
}
if strings.HasSuffix(filepath, ".tar.gz") {
return true
}
if strings.HasSuffix(filepath, ".tgz") {
return true
}
return false
}
// isZip determines if the object at the filepath is a .zip.
func isZip(filepath string) bool {
if strings.HasSuffix(filepath, ".zip") {
return true
}
return false
}
| 1 | 13,134 | Thanks for paying attention to other places this might happen. This one is particularly important; I probably never should have gotten in the habit of CheckErr(), since it does a log.Panic() explicitly, which looks like something else until you look closely. It's supposed to be used places where "can't happen" but Things Can Happen. | drud-ddev | php |
@@ -275,6 +275,15 @@ func (b *ofFlowBuilder) MatchARPOp(op uint16) FlowBuilder {
return b
}
+// MatchIPDscp adds a match condition for the DSCP field in the IP header. Note that OVS uses the TOS field to carry
+// DSCP: the field name is shown as "nw_tos" by the OVS command line, and the value is calculated by shifting the
+// given DSCP value left by 2 bits.
+func (b *ofFlowBuilder) MatchIPDscp(dscp uint8) FlowBuilder {
+ b.matchers = append(b.matchers, fmt.Sprintf("nw_tos=%d", dscp<<2))
+ b.Match.IpDscp = dscp
+ return b
+}
+
// MatchConjID adds match condition for matching conj_id.
func (b *ofFlowBuilder) MatchConjID(value uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("conj_id=%d", value)) | 1 | package openflow
import (
"fmt"
"net"
"strings"
"github.com/contiv/libOpenflow/openflow13"
"github.com/contiv/ofnet/ofctrl"
)
type ofFlowBuilder struct {
ofFlow
}
func (b *ofFlowBuilder) MatchTunMetadata(index int, data uint32) FlowBuilder {
rng := openflow13.NewNXRange(0, 31)
tm := &ofctrl.NXTunMetadata{
ID: index,
Data: data,
Range: rng,
}
b.ofFlow.Match.TunMetadatas = append(b.ofFlow.Match.TunMetadatas, tm)
return b
}
func (b *ofFlowBuilder) SetHardTimeout(timeout uint16) FlowBuilder {
	b.ofFlow.HardTimeout = timeout
return b
}
func (b *ofFlowBuilder) SetIdleTimeout(timeout uint16) FlowBuilder {
b.ofFlow.IdleTimeout = timeout
return b
}
func (b *ofFlowBuilder) Done() Flow {
if b.ctStates != nil {
b.Flow.Match.CtStates = b.ctStates
b.ctStates = nil
}
if b.ctStateString != "" {
b.matchers = append(b.matchers, b.ctStateString)
b.ctStateString = ""
}
return &b.ofFlow
}
// MatchReg adds match condition for matching data in the target register.
func (b *ofFlowBuilder) MatchReg(regID int, data uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("reg%d=0x%x", regID, data))
reg := &ofctrl.NXRegister{
ID: regID,
Data: data,
}
b.Match.NxRegs = append(b.Match.NxRegs, reg)
return b
}
// MatchRegRange adds match condition for matching data in the target register at specified range.
func (b *ofFlowBuilder) MatchRegRange(regID int, data uint32, rng Range) FlowBuilder {
if rng[0] > 0 {
data <<= rng[0]
}
reg := &ofctrl.NXRegister{
ID: regID,
Data: data,
Range: rng.ToNXRange(),
}
b.Match.NxRegs = append(b.Match.NxRegs, reg)
return b
}
func (b *ofFlowBuilder) addCTStateString(value string) {
if b.ctStateString == "" {
b.ctStateString = fmt.Sprintf("ct_state=%s", value)
} else {
b.ctStateString += value
}
}
func (b *ofFlowBuilder) MatchCTStateNew(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetNew()
b.addCTStateString("+new")
} else {
b.ctStates.UnsetNew()
		b.addCTStateString("-new")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateRel(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetRel()
b.addCTStateString("+rel")
} else {
b.ctStates.UnsetRel()
b.addCTStateString("-rel")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateRpl(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetRpl()
b.addCTStateString("+rpl")
} else {
b.ctStates.UnsetRpl()
b.addCTStateString("-rpl")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateEst(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetEst()
b.addCTStateString("+est")
} else {
b.ctStates.UnsetEst()
b.addCTStateString("-est")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateTrk(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetTrk()
b.addCTStateString("+trk")
} else {
b.ctStates.UnsetTrk()
b.addCTStateString("-trk")
}
return b
}
func (b *ofFlowBuilder) MatchCTStateInv(set bool) FlowBuilder {
if b.ctStates == nil {
b.ctStates = openflow13.NewCTStates()
}
if set {
b.ctStates.SetInv()
b.addCTStateString("+inv")
} else {
b.ctStates.UnsetInv()
b.addCTStateString("-inv")
}
return b
}
// MatchCTMark adds match condition for matching ct_mark.
func (b *ofFlowBuilder) MatchCTMark(value uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("ct_mark=%d", value))
b.ofFlow.Match.CtMark = value
return b
}
// MatchCTMarkMask sets the mask of ct_mark. The mask is used only if ct_mark is set.
func (b *ofFlowBuilder) MatchCTMarkMask(mask uint32) FlowBuilder {
if b.Flow.Match.CtMark > 0 {
b.ofFlow.Match.CtMarkMask = &mask
for i, data := range b.matchers {
if strings.HasPrefix(data, "ct_mark=") {
b.matchers[i] = fmt.Sprintf("%s/0x%x", data, mask)
break
}
}
}
return b
}
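For example (values are illustrative), the mask only takes effect when a ct_mark match was already added, and it rewrites the matcher string appended by MatchCTMark:

// Yields the matcher "ct_mark=1/0xf" on the resulting flow; calling
// MatchCTMarkMask without a prior MatchCTMark leaves the flow unchanged.
// `fb` stands for any FlowBuilder obtained from a pipeline table (elided).
fb.MatchCTMark(0x1).MatchCTMarkMask(0xf)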
// MatchInPort adds match condition for matching in_port.
func (b *ofFlowBuilder) MatchInPort(inPort uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("in_port=%d", inPort))
b.Match.InputPort = inPort
return b
}
// MatchDstIP adds match condition for matching destination IP address.
func (b *ofFlowBuilder) MatchDstIP(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_dst=%s", ip.String()))
b.Match.IpDa = &ip
return b
}
// MatchDstIPNet adds match condition for matching destination IP CIDR.
func (b *ofFlowBuilder) MatchDstIPNet(ipnet net.IPNet) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_dst=%s", ipnet.String()))
b.Match.IpDa = &ipnet.IP
b.Match.IpDaMask = maskToIPv4(ipnet.Mask)
return b
}
func maskToIPv4(mask net.IPMask) *net.IP {
ip := net.IPv4(mask[0], mask[1], mask[2], mask[3])
return &ip
}
// MatchSrcIP adds match condition for matching source IP address.
func (b *ofFlowBuilder) MatchSrcIP(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_src=%s", ip.String()))
b.Match.IpSa = &ip
return b
}
// MatchSrcIPNet adds match condition for matching source IP CIDR.
func (b *ofFlowBuilder) MatchSrcIPNet(ipnet net.IPNet) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("nw_src=%s", ipnet.String()))
b.Match.IpSa = &ipnet.IP
b.Match.IpSaMask = maskToIPv4(ipnet.Mask)
return b
}
// MatchDstMAC adds match condition for matching destination MAC address.
func (b *ofFlowBuilder) MatchDstMAC(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("dl_dst=%s", mac.String()))
b.Match.MacDa = &mac
return b
}
// MatchSrcMAC adds match condition for matching source MAC address.
func (b *ofFlowBuilder) MatchSrcMAC(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("dl_src=%s", mac.String()))
b.Match.MacSa = &mac
return b
}
// MatchARPSha adds match condition for matching ARP source host address.
func (b *ofFlowBuilder) MatchARPSha(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_sha=%s", mac.String()))
b.Match.ArpSha = &mac
return b
}
// MatchARPTha adds match condition for matching ARP target host address.
func (b *ofFlowBuilder) MatchARPTha(mac net.HardwareAddr) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_tha=%s", mac.String()))
b.Match.ArpTha = &mac
return b
}
// MatchARPSpa adds match condition for matching ARP source protocol address.
func (b *ofFlowBuilder) MatchARPSpa(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_spa=%s", ip.String()))
b.Match.ArpSpa = &ip
return b
}
// MatchARPTpa adds match condition for matching ARP target protocol address.
func (b *ofFlowBuilder) MatchARPTpa(ip net.IP) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_tpa=%s", ip.String()))
b.Match.ArpTpa = &ip
return b
}
// MatchARPOp adds match condition for matching ARP operator.
func (b *ofFlowBuilder) MatchARPOp(op uint16) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("arp_op=%d", op))
b.Match.ArpOper = op
return b
}
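The patch above inserts a MatchIPDscp helper right after this function; the review question about nw_tos versus ip_dscp reduces to the TOS byte layout: DSCP occupies the upper 6 bits and ECN the lower 2, so a DSCP value is shifted left by 2 bits before being written as nw_tos. A small illustration (the constants are example values only):

// DSCP 46 (Expedited Forwarding) shows up as nw_tos=184 in OVS output,
// because the 6 DSCP bits sit above the 2 ECN bits in the TOS byte.
const (
	exampleDscp uint8 = 46
	exampleTos  uint8 = exampleDscp << 2 // 184
)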
// MatchConjID adds match condition for matching conj_id.
func (b *ofFlowBuilder) MatchConjID(value uint32) FlowBuilder {
b.matchers = append(b.matchers, fmt.Sprintf("conj_id=%d", value))
b.Match.ConjunctionID = &value
return b
}
func (b *ofFlowBuilder) MatchPriority(priority uint16) FlowBuilder {
b.Match.Priority = priority
return b
}
// MatchProtocol adds match condition for matching protocol type.
func (b *ofFlowBuilder) MatchProtocol(protocol Protocol) FlowBuilder {
switch protocol {
case ProtocolIP:
b.Match.Ethertype = 0x0800
case ProtocolARP:
b.Match.Ethertype = 0x0806
case ProtocolTCP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 6
case ProtocolUDP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 17
case ProtocolSCTP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 132
case ProtocolICMP:
b.Match.Ethertype = 0x0800
b.Match.IpProto = 1
}
b.protocol = protocol
return b
}
// MatchTCPDstPort adds match condition for matching TCP destination port.
func (b *ofFlowBuilder) MatchTCPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolTCP)
b.Match.TcpDstPort = port
// According to ovs-ofctl(8) man page, "tp_dst" is deprecated and "tcp_dst",
// "udp_dst", "sctp_dst" should be used for the destination port of TCP, UDP,
// SCTP respectively. However, OVS command line tools like ovs-ofctl and
// ovs-appctl still print flows with "tp_dst", so we also use "tp_dst" in flow
// matching string, as flow matching string can be used to look up matched
// flows from these tools' outputs.
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchUDPDstPort adds match condition for matching UDP destination port.
func (b *ofFlowBuilder) MatchUDPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolUDP)
b.Match.UdpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchSCTPDstPort adds match condition for matching SCTP destination port.
func (b *ofFlowBuilder) MatchSCTPDstPort(port uint16) FlowBuilder {
b.MatchProtocol(ProtocolSCTP)
b.Match.SctpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("tp_dst=%d", port))
return b
}
// MatchCTSrcIP matches the source IPv4 address of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTSrcIP(ip net.IP) FlowBuilder {
b.Match.CtIpSa = &ip
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_src=%s", ip.String()))
return b
}
// MatchCTSrcIPNet is the same as MatchCTSrcIP but supports IP masking.
func (b *ofFlowBuilder) MatchCTSrcIPNet(ipNet net.IPNet) FlowBuilder {
	b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_src=%s", ipNet.String()))
b.Match.CtIpSa = &ipNet.IP
b.Match.CtIpSaMask = maskToIPv4(ipNet.Mask)
return b
}
// MatchCTDstIP matches the destination IPv4 address of the connection tracker original direction tuple. This match
// requires a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches
// include "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTDstIP(ip net.IP) FlowBuilder {
b.Match.CtIpDa = &ip
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_dst=%s", ip.String()))
return b
}
// MatchCTDstIPNet is the same as MatchCTDstIP but supports IP masking.
func (b *ofFlowBuilder) MatchCTDstIPNet(ipNet net.IPNet) FlowBuilder {
b.Match.CtIpDa = &ipNet.IP
b.Match.CtIpDaMask = maskToIPv4(ipNet.Mask)
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_dst=%s", ipNet.String()))
return b
}
// MatchCTSrcPort matches the transport source port of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTSrcPort(port uint16) FlowBuilder {
b.Match.CtTpSrcPort = port
b.matchers = append(b.matchers, fmt.Sprintf("ct_tp_src=%d", port))
return b
}
// MatchCTDstPort matches the transport destination port of the connection tracker original direction tuple. This match
// requires a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches
// include "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTDstPort(port uint16) FlowBuilder {
b.Match.CtTpDstPort = port
b.matchers = append(b.matchers, fmt.Sprintf("ct_tp_dst=%d", port))
return b
}
// MatchCTProtocol matches the IP protocol type of the connection tracker original direction tuple. This match requires
// a match to valid connection tracking state as a prerequisite, and valid connection tracking state matches include
// "+new", "+est", "+rel" and "+trk-inv".
func (b *ofFlowBuilder) MatchCTProtocol(proto Protocol) FlowBuilder {
switch proto {
case ProtocolTCP:
b.Match.CtIpProto = 6
case ProtocolUDP:
b.Match.CtIpProto = 17
case ProtocolSCTP:
b.Match.CtIpProto = 132
case ProtocolICMP:
b.Match.CtIpProto = 1
}
b.matchers = append(b.matchers, fmt.Sprintf("ct_nw_proto=%d", b.Match.CtIpProto))
return b
}
// Cookie sets cookie ID for the flow entry.
func (b *ofFlowBuilder) Cookie(cookieID uint64) FlowBuilder {
b.Flow.CookieID = cookieID
return b
}
// CookieMask sets cookie mask for the flow entry.
func (b *ofFlowBuilder) CookieMask(cookieMask uint64) FlowBuilder {
b.Flow.CookieMask = cookieMask
return b
}
func (b *ofFlowBuilder) Action() Action {
return &ofFlowAction{b}
}
| 1 | 21,861 | What is the different between nw_tos and ip_dscp? Only high 6 bits vs low 6 bits and supported version? | antrea-io-antrea | go |
@@ -329,12 +329,17 @@ extern "C" bool isValidMolBlob(char *data, int len) {
return res;
}
-extern "C" char *makeMolText(CROMol data, int *len, bool asSmarts) {
+extern "C" char *makeMolText(CROMol data, int *len, bool asSmarts,
+ bool cxSmiles) {
ROMol *mol = (ROMol *)data;
try {
if (!asSmarts) {
- StringData = MolToSmiles(*mol, true);
+ if (!cxSmiles) {
+ StringData = MolToSmiles(*mol);
+ } else {
+ StringData = MolToCXSmiles(*mol);
+ }
} else {
StringData = MolToSmarts(*mol, false);
} | 1 | //
// Copyright (c) 2010-2013, Novartis Institutes for BioMedical Research Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Novartis Institutes for BioMedical Research Inc.
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <GraphMol/RDKitBase.h>
#include <GraphMol/MolPickler.h>
#include <GraphMol/ChemReactions/ReactionPickler.h>
#include <GraphMol/ChemReactions/ReactionParser.h>
#include <GraphMol/ChemReactions/Reaction.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/SmilesParse/SmartsWrite.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/Fingerprints/Fingerprints.h>
#include <GraphMol/FileParsers/FileParsers.h>
#include <GraphMol/Depictor/RDDepictor.h>
#include <GraphMol/Fingerprints/AtomPairs.h>
#include <GraphMol/Fingerprints/MorganFingerprints.h>
#include <GraphMol/Fingerprints/MACCS.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <GraphMol/Descriptors/MolDescriptors.h>
#include <GraphMol/ChemTransforms/ChemTransforms.h>
#include <GraphMol/MolHash/MolHash.h>
#include <GraphMol/FMCS/FMCS.h>
#include <DataStructs/BitOps.h>
#include <DataStructs/SparseIntVect.h>
#include <GraphMol/MolDraw2D/MolDraw2D.h>
#include <GraphMol/MolDraw2D/MolDraw2DSVG.h>
#include <GraphMol/MolDraw2D/MolDraw2DUtils.h>
#include <RDGeneral/BoostStartInclude.h>
#include <boost/integer_traits.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/tokenizer.hpp>
#include <RDGeneral/BoostEndInclude.h>
#ifdef RDK_BUILD_INCHI_SUPPORT
#include <INCHI-API/inchi.h>
#endif
#ifdef RDK_BUILD_AVALON_SUPPORT
#include <AvalonTools/AvalonTools.h>
#endif
#include <GraphMol/ChemReactions/ReactionFingerprints.h>
#include <GraphMol/ChemReactions/ReactionUtils.h>
#include "rdkit.h"
#include "guc.h"
#include "bitstring.h"
using namespace std;
using namespace RDKit;
class ByteA : public std::string {
public:
ByteA() : string(){};
ByteA(bytea *b) : string(VARDATA(b), VARSIZE(b) - VARHDRSZ){};
ByteA(string &s) : string(s){};
/*
   * Convert string to bytea. Conversion is in pgsql's memory
*/
bytea *toByteA() {
bytea *res;
int len;
len = this->size();
res = (bytea *)palloc(VARHDRSZ + len);
memcpy(VARDATA(res), this->data(), len);
SET_VARSIZE(res, VARHDRSZ + len);
return res;
};
/* Just the copy of string's method */
ByteA &operator=(const string &__str) {
return (ByteA &)this->assign(__str);
};
};
/*
* Constant io
*/
static string StringData;
/*
* Real sparse vector
*/
typedef SparseIntVect<std::uint32_t> SparseFP;
/*******************************************
* ROMol transformation *
*******************************************/
extern "C" void freeCROMol(CROMol data) {
ROMol *mol = (ROMol *)data;
delete mol;
}
extern "C" CROMol constructROMol(Mol *data) {
auto *mol = new ROMol();
try {
ByteA b(data);
MolPickler::molFromPickle(b, mol);
} catch (MolPicklerException &e) {
elog(ERROR, "molFromPickle: %s", e.message());
} catch (...) {
elog(ERROR, "constructROMol: Unknown exception");
}
return (CROMol)mol;
}
extern "C" Mol *deconstructROMol(CROMol data) {
ROMol *mol = (ROMol *)data;
ByteA b;
try {
MolPickler::pickleMol(mol, b);
} catch (MolPicklerException &e) {
elog(ERROR, "pickleMol: %s", e.message());
} catch (...) {
elog(ERROR, "deconstructROMol: Unknown exception");
}
return (Mol *)b.toByteA();
}
extern "C" CROMol parseMolText(char *data, bool asSmarts, bool warnOnFail,
bool asQuery) {
RWMol *mol = nullptr;
try {
if (!asSmarts) {
if (!asQuery) {
mol = SmilesToMol(data);
} else {
mol = SmilesToMol(data, 0, false);
MolOps::sanitizeMol(*mol);
MolOps::mergeQueryHs(*mol);
}
} else {
mol = SmartsToMol(data, 0, false);
}
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
if (warnOnFail) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("could not create molecule from SMILES '%s'", data)));
} else {
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("could not create molecule from SMILES '%s'", data)));
}
}
return (CROMol)mol;
}
extern "C" CROMol parseMolBlob(char *data, int len) {
ROMol *mol = nullptr;
try {
string binStr(data, len);
mol = new ROMol(binStr);
} catch (...) {
ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION),
errmsg("problem generating molecule from blob data")));
}
if (mol == nullptr) {
ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION),
errmsg("blob data could not be parsed")));
}
return (CROMol)mol;
}
extern "C" CROMol parseMolCTAB(char *data, bool keepConformer, bool warnOnFail,
bool asQuery) {
RWMol *mol = nullptr;
try {
if (!asQuery) {
mol = MolBlockToMol(data);
} else {
mol = MolBlockToMol(data, true, false);
MolOps::mergeQueryHs(*mol);
}
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
if (warnOnFail) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("could not create molecule from CTAB '%s'", data)));
} else {
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("could not create molecule from CTAB '%s'", data)));
}
} else {
if (!keepConformer) mol->clearConformers();
}
return (CROMol)mol;
}
extern "C" bool isValidSmiles(char *data) {
RWMol *mol = nullptr;
bool res;
try {
string str(data);
if (str.empty()) {
// Pass the test - No-Structure input is allowed. No cleanup necessary.
return true;
}
mol = SmilesToMol(str, 0, 0);
if (mol) {
MolOps::cleanUp(*mol);
mol->updatePropertyCache();
MolOps::Kekulize(*mol);
MolOps::assignRadicals(*mol);
MolOps::setAromaticity(*mol);
MolOps::adjustHs(*mol);
}
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
res = false;
} else {
res = true;
delete mol;
}
return res;
}
extern "C" bool isValidSmarts(char *data) {
ROMol *mol = nullptr;
bool res;
try {
mol = SmartsToMol(data);
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
res = false;
} else {
res = true;
delete mol;
}
return res;
}
extern "C" bool isValidCTAB(char *data) {
RWMol *mol = nullptr;
bool res;
try {
mol = MolBlockToMol(data, false, false);
if (mol) {
MolOps::cleanUp(*mol);
mol->updatePropertyCache();
MolOps::Kekulize(*mol);
MolOps::assignRadicals(*mol);
MolOps::setAromaticity(*mol);
MolOps::adjustHs(*mol);
}
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
res = false;
} else {
res = true;
delete mol;
}
return res;
}
extern "C" bool isValidMolBlob(char *data, int len) {
ROMol *mol = nullptr;
bool res = false;
try {
string binStr(data, len);
mol = new ROMol(binStr);
} catch (...) {
mol = nullptr;
}
if (mol == nullptr) {
res = false;
} else {
delete mol;
res = true;
}
return res;
}
extern "C" char *makeMolText(CROMol data, int *len, bool asSmarts) {
ROMol *mol = (ROMol *)data;
try {
if (!asSmarts) {
StringData = MolToSmiles(*mol, true);
} else {
StringData = MolToSmarts(*mol, false);
}
} catch (...) {
ereport(
WARNING,
(errcode(ERRCODE_WARNING),
errmsg("makeMolText: problems converting molecule to SMILES/SMARTS")));
StringData = "";
}
*len = StringData.size();
return (char *)StringData.c_str();
}
extern "C" char *makeCtabText(CROMol data, int *len,
bool createDepictionIfMissing) {
ROMol *mol = (ROMol *)data;
try {
if (createDepictionIfMissing && mol->getNumConformers() == 0) {
RDDepict::compute2DCoords(*mol);
}
StringData = MolToMolBlock(*mol);
} catch (...) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("makeCtabText: problems converting molecule to CTAB")));
StringData = "";
}
*len = StringData.size();
return (char *)StringData.c_str();
}
extern "C" char *makeMolBlob(CROMol data, int *len) {
ROMol *mol = (ROMol *)data;
StringData.clear();
try {
MolPickler::pickleMol(*mol, StringData);
} catch (...) {
elog(ERROR, "makeMolBlob: Unknown exception");
}
*len = StringData.size();
return (char *)StringData.data();
}
extern "C" bytea *makeMolSignature(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
bytea *ret = nullptr;
try {
res = RDKit::PatternFingerprintMol(*mol, getSubstructFpSize());
// res =
// RDKit::LayeredFingerprintMol(*mol,RDKit::substructLayers,1,5,SSS_FP_SIZE);
if (res) {
std::string sres = BitVectToBinaryText(*res);
unsigned int varsize = VARHDRSZ + sres.size();
ret = (bytea *)palloc0(varsize);
memcpy(VARDATA(ret), sres.data(), sres.size());
SET_VARSIZE(ret, varsize);
delete res;
res = nullptr;
}
} catch (...) {
elog(ERROR, "makeMolSignature: Unknown exception");
if (res) delete res;
}
return ret;
}
extern "C" int molcmp(CROMol i, CROMol a) {
ROMol *im = (ROMol *)i;
ROMol *am = (ROMol *)a;
if (!im) {
if (!am) return 0;
return -1;
}
if (!am) return 1;
int res = im->getNumAtoms() - am->getNumAtoms();
if (res) return res;
res = im->getNumBonds() - am->getNumBonds();
if (res) return res;
res = int(RDKit::Descriptors::calcAMW(*im, false)) -
int(RDKit::Descriptors::calcAMW(*am, false));
if (res) return res;
res = im->getRingInfo()->numRings() - am->getRingInfo()->numRings();
if (res) return res;
RDKit::MatchVectType matchVect;
bool ss1 = RDKit::SubstructMatch(*im, *am, matchVect, false, false);
bool ss2 = RDKit::SubstructMatch(*am, *im, matchVect, false, false);
if (ss1 != ss2) return ss1;
// the above can still fail in some chirality cases
std::string smi1 = MolToSmiles(*im, true);
std::string smi2 = MolToSmiles(*am, true);
return smi1 == smi2 ? 0 : (smi1 < smi2 ? -1 : 1);
}
extern "C" int MolSubstruct(CROMol i, CROMol a) {
ROMol *im = (ROMol *)i;
ROMol *am = (ROMol *)a;
RDKit::MatchVectType matchVect;
return RDKit::SubstructMatch(*im, *am, matchVect, true, getDoChiralSSS());
}
extern "C" int MolSubstructCount(CROMol i, CROMol a, bool uniquify) {
ROMol *im = (ROMol *)i;
ROMol *am = (ROMol *)a;
std::vector<RDKit::MatchVectType> matchVect;
return static_cast<int>(RDKit::SubstructMatch(*im, *am, matchVect, uniquify,
true, getDoChiralSSS()));
}
/*******************************************
* Molecule operations *
*******************************************/
#define MOLDESCR(name, func, ret) \
extern "C" ret Mol##name(CROMol i) { \
const ROMol *im = (ROMol *)i; \
return func(*im); \
}
MOLDESCR(FractionCSP3, RDKit::Descriptors::calcFractionCSP3, double)
MOLDESCR(TPSA, RDKit::Descriptors::calcTPSA, double)
MOLDESCR(AMW, RDKit::Descriptors::calcAMW, double)
MOLDESCR(HBA, RDKit::Descriptors::calcLipinskiHBA, int)
MOLDESCR(HBD, RDKit::Descriptors::calcLipinskiHBD, int)
MOLDESCR(NumHeteroatoms, RDKit::Descriptors::calcNumHeteroatoms, int)
MOLDESCR(NumRings, RDKit::Descriptors::calcNumRings, int)
MOLDESCR(NumAromaticRings, RDKit::Descriptors::calcNumAromaticRings, int)
MOLDESCR(NumAliphaticRings, RDKit::Descriptors::calcNumAliphaticRings, int)
MOLDESCR(NumSaturatedRings, RDKit::Descriptors::calcNumSaturatedRings, int)
MOLDESCR(NumAromaticHeterocycles,
RDKit::Descriptors::calcNumAromaticHeterocycles, int)
MOLDESCR(NumAliphaticHeterocycles,
RDKit::Descriptors::calcNumAliphaticHeterocycles, int)
MOLDESCR(NumSaturatedHeterocycles,
RDKit::Descriptors::calcNumSaturatedHeterocycles, int)
MOLDESCR(NumAromaticCarbocycles, RDKit::Descriptors::calcNumAromaticCarbocycles,
int)
MOLDESCR(NumAliphaticCarbocycles,
RDKit::Descriptors::calcNumAliphaticCarbocycles, int)
MOLDESCR(NumSaturatedCarbocycles,
RDKit::Descriptors::calcNumSaturatedCarbocycles, int)
MOLDESCR(NumHeterocycles, RDKit::Descriptors::calcNumHeterocycles, int)
MOLDESCR(NumRotatableBonds, RDKit::Descriptors::calcNumRotatableBonds, int)
MOLDESCR(Chi0v, RDKit::Descriptors::calcChi0v, double)
MOLDESCR(Chi1v, RDKit::Descriptors::calcChi1v, double)
MOLDESCR(Chi2v, RDKit::Descriptors::calcChi2v, double)
MOLDESCR(Chi3v, RDKit::Descriptors::calcChi3v, double)
MOLDESCR(Chi4v, RDKit::Descriptors::calcChi4v, double)
MOLDESCR(Chi0n, RDKit::Descriptors::calcChi0n, double)
MOLDESCR(Chi1n, RDKit::Descriptors::calcChi1n, double)
MOLDESCR(Chi2n, RDKit::Descriptors::calcChi2n, double)
MOLDESCR(Chi3n, RDKit::Descriptors::calcChi3n, double)
MOLDESCR(Chi4n, RDKit::Descriptors::calcChi4n, double)
MOLDESCR(Kappa1, RDKit::Descriptors::calcKappa1, double)
MOLDESCR(Kappa2, RDKit::Descriptors::calcKappa2, double)
MOLDESCR(Kappa3, RDKit::Descriptors::calcKappa3, double)
MOLDESCR(NumSpiroAtoms, RDKit::Descriptors::calcNumSpiroAtoms, int)
MOLDESCR(NumBridgeheadAtoms, RDKit::Descriptors::calcNumBridgeheadAtoms, int)
extern "C" double MolLogP(CROMol i) {
double logp, mr;
RDKit::Descriptors::calcCrippenDescriptors(*(ROMol *)i, logp, mr);
return logp;
}
extern "C" int MolNumAtoms(CROMol i) {
const ROMol *im = (ROMol *)i;
return im->getNumAtoms(false);
}
extern "C" int MolNumHeavyAtoms(CROMol i) {
const ROMol *im = (ROMol *)i;
return im->getNumHeavyAtoms();
}
extern "C" char *makeMolFormulaText(CROMol data, int *len,
bool separateIsotopes,
bool abbreviateHIsotopes) {
ROMol *mol = (ROMol *)data;
try {
StringData = RDKit::Descriptors::calcMolFormula(*mol, separateIsotopes,
abbreviateHIsotopes);
} catch (...) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("makeMolFormulaText: problems converting molecule to "
"sum formula")));
StringData = "";
}
*len = StringData.size();
return (char *)StringData.c_str();
}
extern "C" const char *MolInchi(CROMol i, const char *opts) {
std::string inchi = "InChI not available";
#ifdef RDK_BUILD_INCHI_SUPPORT
const ROMol *im = (ROMol *)i;
ExtraInchiReturnValues rv;
try {
std::string sopts = "/AuxNone /WarnOnEmptyStructure";
if (strlen(opts)) {
sopts += std::string(" ") + std::string(opts);
}
inchi = MolToInchi(*im, rv, sopts.c_str());
} catch (MolSanitizeException &e) {
inchi = "";
elog(ERROR, "MolInchi: cannot kekulize molecule");
} catch (...) {
inchi = "";
elog(ERROR, "MolInchi: Unknown exception");
}
#endif
return strdup(inchi.c_str());
}
extern "C" const char *MolInchiKey(CROMol i, const char *opts) {
std::string key = "InChI not available";
#ifdef RDK_BUILD_INCHI_SUPPORT
const ROMol *im = (ROMol *)i;
ExtraInchiReturnValues rv;
try {
std::string sopts = "/AuxNone /WarnOnEmptyStructure";
if (strlen(opts)) {
sopts += std::string(" ") + std::string(opts);
}
std::string inchi = MolToInchi(*im, rv, sopts.c_str());
key = InchiToInchiKey(inchi);
} catch (MolSanitizeException &e) {
key = "";
elog(ERROR, "MolInchiKey: cannot kekulize molecule");
} catch (...) {
key = "";
elog(ERROR, "MolInchiKey: Unknown exception");
}
#endif
return strdup(key.c_str());
}
extern "C" CROMol MolMurckoScaffold(CROMol i) {
const ROMol *im = (ROMol *)i;
ROMol *mol = MurckoDecompose(*im);
if (mol && !mol->getNumAtoms()) {
delete mol;
mol = nullptr;
} else {
try {
MolOps::sanitizeMol(*(RWMol *)mol);
} catch (...) {
delete mol;
mol = nullptr;
}
}
return (CROMol)mol;
}
namespace {
typedef boost::tokenizer<boost::char_separator<char>> tokenizer;
unsigned int parseWhichString(const std::string &txt) {
unsigned int res = MolOps::ADJUST_IGNORENONE;
boost::char_separator<char> sep("|");
tokenizer tokens(txt, sep);
tokenizer::iterator token = tokens.begin();
while (token != tokens.end()) {
std::string v = *token;
++token;
if (v == "IGNORENONE") {
res |= MolOps::ADJUST_IGNORENONE;
} else if (v == "IGNORERINGS") {
res |= MolOps::ADJUST_IGNORERINGS;
} else if (v == "IGNORECHAINS") {
res |= MolOps::ADJUST_IGNORECHAINS;
} else if (v == "IGNOREDUMMIES") {
res |= MolOps::ADJUST_IGNOREDUMMIES;
} else if (v == "IGNORENONDUMMIES") {
res |= MolOps::ADJUST_IGNORENONDUMMIES;
} else if (v == "IGNOREALL") {
res |= MolOps::ADJUST_IGNOREALL;
} else {
elog(ERROR, "bad which string provided '%s'", v.c_str());
}
}
return res;
}
void parseAdjustQueryParameters(MolOps::AdjustQueryParameters &p,
const char *json) {
PRECONDITION(json && strlen(json), "empty json");
std::istringstream ss;
ss.str(json);
boost::property_tree::ptree pt;
boost::property_tree::read_json(ss, pt);
p.adjustDegree = pt.get("adjustDegree", p.adjustDegree);
p.adjustHeavyDegree = pt.get("adjustHeavyDegree", p.adjustHeavyDegree);
p.adjustRingCount = pt.get("adjustRingCount", p.adjustRingCount);
p.makeDummiesQueries = pt.get("makeDummiesQueries", p.makeDummiesQueries);
p.aromatizeIfPossible = pt.get("aromatizeIfPossible", p.aromatizeIfPossible);
p.makeAtomsGeneric = pt.get("makeAtomsGeneric", p.makeAtomsGeneric);
p.makeBondsGeneric = pt.get("makeBondsGeneric", p.makeBondsGeneric);
std::string which;
which = boost::to_upper_copy<std::string>(pt.get("adjustDegreeFlags", ""));
if (which != "") p.adjustDegreeFlags = parseWhichString(which);
which =
boost::to_upper_copy<std::string>(pt.get("adjustHeavyDegreeFlags", ""));
if (which != "") p.adjustHeavyDegreeFlags = parseWhichString(which);
which = boost::to_upper_copy<std::string>(pt.get("adjustRingCountFlags", ""));
if (which != "") p.adjustRingCountFlags = parseWhichString(which);
which =
boost::to_upper_copy<std::string>(pt.get("makeBondsGenericFlags", ""));
if (which != "") p.makeBondsGenericFlags = parseWhichString(which);
which =
boost::to_upper_copy<std::string>(pt.get("makeAtomsGenericFlags", ""));
if (which != "") p.makeAtomsGenericFlags = parseWhichString(which);
}
} // namespace
extern "C" CROMol MolAdjustQueryProperties(CROMol i, const char *params) {
const ROMol *im = (ROMol *)i;
MolOps::AdjustQueryParameters p;
if (params && strlen(params)) {
try {
parseAdjustQueryParameters(p, params);
} catch (...) {
elog(WARNING,
"adjustQueryProperties: Invalid argument \'params\' ignored");
}
}
ROMol *mol = MolOps::adjustQueryProperties(*im, &p);
return (CROMol)mol;
}
extern "C" char *MolGetSVG(CROMol i, unsigned int w, unsigned int h,
const char *legend, const char *params) {
// SVG routines need an RWMol since they change the
// molecule as they prepare it for drawing. We don't
// want a plain SQL function (mol_to_svg) to have
// unexpected side effects, so take a copy and render
// (and change) that.
RWMol input_copy(*(ROMol *)i);
MolDraw2DUtils::prepareMolForDrawing(input_copy);
std::string slegend(legend ? legend : "");
MolDraw2DSVG drawer(w, h);
if (params && strlen(params)) {
try {
MolDraw2DUtils::updateDrawerParamsFromJSON(drawer, params);
} catch (...) {
elog(WARNING,
"adjustQueryProperties: Invalid argument \'params\' ignored");
}
}
  drawer.drawMolecule(input_copy, slegend);
drawer.finishDrawing();
std::string txt = drawer.getDrawingText();
return strdup(txt.c_str());
}
extern "C" char *ReactionGetSVG(CChemicalReaction i, unsigned int w,
unsigned int h, bool highlightByReactant,
const char *params) {
ChemicalReaction *rxn = (ChemicalReaction *)i;
MolDraw2DSVG drawer(w, h);
if (params && strlen(params)) {
try {
MolDraw2DUtils::updateDrawerParamsFromJSON(drawer, params);
} catch (...) {
elog(WARNING,
"adjustQueryProperties: Invalid argument \'params\' ignored");
}
}
drawer.drawReaction(*rxn, highlightByReactant);
drawer.finishDrawing();
std::string txt = drawer.getDrawingText();
return strdup(txt.c_str());
}
/*******************************************
* CBfp transformation *
*******************************************/
extern "C" void freeCBfp(CBfp data) {
std::string *fp = (std::string *)data;
delete fp;
}
extern "C" CBfp constructCBfp(Bfp *data) {
std::string *ebv = nullptr;
try {
ebv = new std::string(VARDATA(data), VARSIZE(data) - VARHDRSZ);
} catch (...) {
elog(ERROR, "constructMolFingerPrint: Unknown exception");
}
return (CBfp)ebv;
}
extern "C" Bfp *deconstructCBfp(CBfp data) {
std::string *ebv = (std::string *)data;
ByteA b;
try {
b = *ebv;
} catch (...) {
elog(ERROR, "deconstructMolFingerPrint: Unknown exception");
}
return b.toByteA();
}
extern "C" BfpSignature *makeBfpSignature(CBfp data) {
std::string *ebv = (std::string *)data;
int siglen = ebv->size();
unsigned int varsize = sizeof(BfpSignature) + siglen;
BfpSignature *res = (BfpSignature *)palloc0(varsize);
SET_VARSIZE(res, varsize);
res->weight = bitstringWeight(siglen, (uint8 *)ebv->data());
memcpy(res->fp, ebv->data(), siglen);
return res;
}
extern "C" int CBfpSize(CBfp a) {
std::string *ebv = (std::string *)a;
int numBits = ebv->size() * 8;
return numBits;
}
extern "C" double calcBitmapTanimotoSml(CBfp a, CBfp b) {
std::string *abv = (std::string *)a;
std::string *bbv = (std::string *)b;
const unsigned char *afp = (const unsigned char *)abv->c_str();
const unsigned char *bfp = (const unsigned char *)bbv->c_str();
/* return CalcBitmapTanimoto(afp, bfp, abv->size()); */
return bitstringTanimotoSimilarity(abv->size(), (uint8 *)afp, (uint8 *)bfp);
}
extern "C" double calcBitmapDiceSml(CBfp a, CBfp b) {
std::string *abv = (std::string *)a;
std::string *bbv = (std::string *)b;
const unsigned char *afp = (const unsigned char *)abv->c_str();
const unsigned char *bfp = (const unsigned char *)bbv->c_str();
return CalcBitmapDice(afp, bfp, abv->size());
}
double calcBitmapTverskySml(CBfp a, CBfp b, float ca, float cb) {
std::string *abv = (std::string *)a;
std::string *bbv = (std::string *)b;
const unsigned char *afp = (const unsigned char *)abv->c_str();
const unsigned char *bfp = (const unsigned char *)bbv->c_str();
return CalcBitmapTversky(afp, bfp, abv->size(), ca, cb);
}
/*******************************************
* CSfp transformation *
*******************************************/
extern "C" void freeCSfp(CSfp data) {
SparseFP *fp = (SparseFP *)data;
delete fp;
}
extern "C" CSfp constructCSfp(Sfp *data) {
SparseFP *ebv = nullptr;
try {
ebv = new SparseFP(VARDATA(data), VARSIZE(data) - VARHDRSZ);
} catch (...) {
elog(ERROR, "constructMolFingerPrint: Unknown exception");
}
return (CSfp)ebv;
}
extern "C" Sfp *deconstructCSfp(CSfp data) {
SparseFP *ebv = (SparseFP *)data;
ByteA b;
try {
b = ebv->toString();
} catch (...) {
elog(ERROR, "deconstructMolFingerPrint: Unknown exception");
}
return b.toByteA();
}
extern "C" bytea *makeSfpSignature(CSfp data, int numBits) {
SparseFP *v = (SparseFP *)data;
int n, numBytes;
bytea *res;
unsigned char *s;
SparseFP::StorageType::const_iterator iter;
numBytes = VARHDRSZ + (numBits / 8);
if ((numBits % 8) != 0) numBytes++;
res = (bytea *)palloc0(numBytes);
SET_VARSIZE(res, numBytes);
s = (unsigned char *)VARDATA(res);
for (iter = v->getNonzeroElements().begin();
iter != v->getNonzeroElements().end(); iter++) {
n = iter->first % numBits;
s[n / 8] |= 1 << (n % 8);
}
return res;
}
extern "C" bytea *makeLowSparseFingerPrint(CSfp data, int numInts) {
SparseFP *v = (SparseFP *)data;
int numBytes;
bytea *res;
IntRange *s;
int n;
SparseFP::StorageType::const_iterator iter;
numBytes = VARHDRSZ + (numInts * sizeof(IntRange));
res = (bytea *)palloc0(numBytes);
SET_VARSIZE(res, numBytes);
s = (IntRange *)VARDATA(res);
for (iter = v->getNonzeroElements().begin();
iter != v->getNonzeroElements().end(); iter++) {
uint32 iterV = (uint32)iter->second;
n = iter->first % numInts;
if (iterV > INTRANGEMAX) {
#if 0
elog(ERROR, "sparse fingerprint is too big, increase INTRANGEMAX in rdkit.h");
#else
iterV = INTRANGEMAX;
#endif
}
if (s[n].low == 0 || s[n].low > iterV) s[n].low = iterV;
if (s[n].high < iterV) s[n].high = iterV;
}
return res;
}
extern "C" void countOverlapValues(bytea *sign, CSfp data, int numBits,
int *sum, int *overlapSum, int *overlapN) {
SparseFP *v = (SparseFP *)data;
SparseFP::StorageType::const_iterator iter;
*sum = *overlapSum = *overlapN = 0;
if (sign) {
unsigned char *s = (unsigned char *)VARDATA(sign);
int n;
for (iter = v->getNonzeroElements().begin();
iter != v->getNonzeroElements().end(); iter++) {
*sum += iter->second;
n = iter->first % numBits;
if (s[n / 8] & (1 << (n % 8))) {
*overlapSum += iter->second;
*overlapN += 1;
}
}
} else {
/* Assume, sign has only true bits */
for (iter = v->getNonzeroElements().begin();
iter != v->getNonzeroElements().end(); iter++)
*sum += iter->second;
*overlapSum = *sum;
*overlapN = v->getNonzeroElements().size();
}
}
extern "C" void countLowOverlapValues(bytea *sign, CSfp data, int numInts,
int *querySum, int *keySum,
int *overlapUp, int *overlapDown) {
SparseFP *v = (SparseFP *)data;
SparseFP::StorageType::const_iterator iter;
IntRange *s = (IntRange *)VARDATA(sign);
int n;
*querySum = *keySum = *overlapUp = *overlapDown = 0;
for (iter = v->getNonzeroElements().begin();
iter != v->getNonzeroElements().end(); iter++) {
*querySum += iter->second;
n = iter->first % numInts;
if (s[n].low == 0) {
Assert(s[n].high == 0);
continue;
}
*overlapDown += Min(s[n].low, (uint32)iter->second);
*overlapUp += Min(s[n].high, (uint32)iter->second);
}
Assert(*overlapDown <= *overlapUp);
for (n = 0; n < numInts; n++) {
*keySum += s[n].low;
if (s[n].low != s[n].high)
*keySum += s[n].high; /* there is at least two key mapped into current
backet */
}
Assert(*overlapUp <= *keySum);
}
extern "C" double calcSparseTanimotoSml(CSfp a, CSfp b) {
double res = -1.0;
/*
* Nsame / (Na + Nb - Nsame)
*/
try {
res = TanimotoSimilarity(*(SparseFP *)a, *(SparseFP *)b);
} catch (ValueErrorException &e) {
elog(ERROR, "TanimotoSimilarity: %s", e.message().c_str());
} catch (...) {
elog(ERROR, "calcSparseTanimotoSml: Unknown exception");
}
return res;
}
extern "C" double calcSparseDiceSml(CSfp a, CSfp b) {
double res = -1.0;
/*
* 2 * Nsame / (Na + Nb)
*/
try {
res = DiceSimilarity(*(SparseFP *)a, *(SparseFP *)b);
} catch (ValueErrorException &e) {
elog(ERROR, "DiceSimilarity: %s", e.message().c_str());
} catch (...) {
elog(ERROR, "calcSparseDiceSml: Unknown exception");
}
return res;
}
extern "C" double calcSparseStringDiceSml(const char *a, unsigned int sza,
const char *b, unsigned int szb) {
const unsigned char *t1 = (const unsigned char *)a;
const unsigned char *t2 = (const unsigned char *)b;
std::uint32_t tmp;
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != (std::uint32_t)ci_SPARSEINTVECT_VERSION) {
elog(ERROR, "calcSparseStringDiceSml: could not convert argument 1");
}
tmp = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
if (tmp != (std::uint32_t)ci_SPARSEINTVECT_VERSION) {
elog(ERROR, "calcSparseStringDiceSml: could not convert argument 2");
}
// check the element size:
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != sizeof(std::uint32_t)) {
elog(ERROR,
"calcSparseStringDiceSml: could not convert argument 1 -> uint32_t");
}
tmp = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
if (tmp != sizeof(std::uint32_t)) {
elog(ERROR,
"calcSparseStringDiceSml: could not convert argument 2 -> uint32_t");
}
double res = 0.;
// start reading:
std::uint32_t len1, len2;
len1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
len2 = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
if (len1 != len2) {
elog(ERROR, "attempt to compare fingerprints of different length");
}
std::uint32_t nElem1, nElem2;
nElem1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
nElem2 = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
if (!nElem1 || !nElem2) {
return 0.0;
}
double v1Sum = 0, v2Sum = 0, numer = 0;
std::uint32_t idx1 = 0;
std::int32_t v1;
std::uint32_t idx2 = 0;
std::int32_t v2;
idx1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
v1 = *(reinterpret_cast<const std::int32_t *>(t1));
t1 += sizeof(std::int32_t);
nElem1--;
v1Sum += v1;
idx2 = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
v2 = *(reinterpret_cast<const std::int32_t *>(t2));
t2 += sizeof(std::int32_t);
nElem2--;
v2Sum += v2;
while (1) {
while (nElem2 && idx2 < idx1) {
idx2 = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
v2 = *(reinterpret_cast<const std::int32_t *>(t2));
t2 += sizeof(std::int32_t);
nElem2--;
v2Sum += v2;
}
if (idx2 == idx1) {
// std::cerr<<" --- "<<idx1<<" "<<v1<<" - "<<idx2<<" "<<v2<<std::endl;
numer += std::min(v1, v2);
}
if (nElem1) {
idx1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
v1 = *(reinterpret_cast<const std::int32_t *>(t1));
t1 += sizeof(std::int32_t);
nElem1--;
v1Sum += v1;
} else {
break;
}
}
while (nElem2) {
idx2 = *(reinterpret_cast<const std::uint32_t *>(t2));
t2 += sizeof(std::uint32_t);
v2 = *(reinterpret_cast<const std::int32_t *>(t2));
t2 += sizeof(std::int32_t);
nElem2--;
v2Sum += v2;
}
double denom = v1Sum + v2Sum;
if (fabs(denom) < 1e-6) {
res = 0.0;
} else {
res = 2. * numer / denom;
}
return res;
}
extern "C" bool calcSparseStringAllValsGT(const char *a, unsigned int sza,
int tgt) {
const unsigned char *t1 = (const unsigned char *)a;
std::uint32_t tmp;
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != (std::uint32_t)ci_SPARSEINTVECT_VERSION) {
elog(ERROR, "calcSparseStringAllValsGT: could not convert argument 1");
}
// check the element size:
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != sizeof(std::uint32_t)) {
elog(ERROR,
"calcSparseStringAllValsGT: could not convert argument 1 -> "
"uint32_t");
}
// std::uint32_t len1;
// len1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
std::uint32_t nElem1;
nElem1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
while (nElem1) {
--nElem1;
// skip the index:
t1 += sizeof(std::uint32_t);
std::int32_t v1 = *(reinterpret_cast<const std::int32_t *>(t1));
t1 += sizeof(std::int32_t);
if (v1 <= tgt) return false;
}
return true;
}
extern "C" bool calcSparseStringAllValsLT(const char *a, unsigned int sza,
int tgt) {
const unsigned char *t1 = (const unsigned char *)a;
std::uint32_t tmp;
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != (std::uint32_t)ci_SPARSEINTVECT_VERSION) {
elog(ERROR, "calcSparseStringAllValsGT: could not convert argument 1");
}
// check the element size:
tmp = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
if (tmp != sizeof(std::uint32_t)) {
elog(ERROR,
"calcSparseStringAllValsGT: could not convert argument 1 -> "
"uint32_t");
}
// std::uint32_t len1;
// len1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
std::uint32_t nElem1;
nElem1 = *(reinterpret_cast<const std::uint32_t *>(t1));
t1 += sizeof(std::uint32_t);
while (nElem1) {
--nElem1;
// skip the index:
t1 += sizeof(std::uint32_t);
std::int32_t v1 = *(reinterpret_cast<const std::int32_t *>(t1));
t1 += sizeof(std::int32_t);
if (v1 >= tgt) return false;
}
return true;
}
extern "C" CSfp addSFP(CSfp a, CSfp b) {
SparseFP *res = nullptr;
try {
SparseFP tmp = (*(SparseFP *)a + *(SparseFP *)b);
res = (SparseFP *)new SparseFP(tmp);
} catch (...) {
elog(ERROR, "addSFP: Unknown exception");
}
return (CSfp)res;
}
extern "C" CSfp subtractSFP(CSfp a, CSfp b) {
SparseFP *res = nullptr;
try {
SparseFP tmp = (*(SparseFP *)a - *(SparseFP *)b);
res = (SparseFP *)new SparseFP(tmp);
} catch (...) {
elog(ERROR, "addSFP: Unknown exception");
}
return (CSfp)res;
}
/*
* Mol -> fp
*/
extern "C" CBfp makeLayeredBFP(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = RDKit::LayeredFingerprintMol(*mol, 0xFFFFFFFF, 1, 7,
getLayeredFpSize());
} catch (...) {
elog(ERROR, "makeLayeredBFP: Unknown exception");
if (res) delete res;
res = nullptr;
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CBfp makeRDKitBFP(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = RDKit::RDKFingerprintMol(*mol, 1, 6, getRDKitFpSize(), 2);
} catch (...) {
elog(ERROR, "makeRDKitBFP: Unknown exception");
if (res) delete res;
res = nullptr;
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CSfp makeMorganSFP(CROMol data, int radius) {
ROMol *mol = (ROMol *)data;
SparseFP *res = nullptr;
std::vector<std::uint32_t> invars(mol->getNumAtoms());
try {
RDKit::MorganFingerprints::getConnectivityInvariants(*mol, invars, true);
res = (SparseFP *)RDKit::MorganFingerprints::getFingerprint(*mol, radius,
&invars);
} catch (...) {
elog(ERROR, "makeMorganSFP: Unknown exception");
}
return (CSfp)res;
}
extern "C" CBfp makeMorganBFP(CROMol data, int radius) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
std::vector<std::uint32_t> invars(mol->getNumAtoms());
try {
RDKit::MorganFingerprints::getConnectivityInvariants(*mol, invars, true);
res = RDKit::MorganFingerprints::getFingerprintAsBitVect(
*mol, radius, getMorganFpSize(), &invars);
} catch (...) {
elog(ERROR, "makeMorganBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CSfp makeFeatMorganSFP(CROMol data, int radius) {
ROMol *mol = (ROMol *)data;
SparseFP *res = nullptr;
std::vector<std::uint32_t> invars(mol->getNumAtoms());
try {
RDKit::MorganFingerprints::getFeatureInvariants(*mol, invars);
res = (SparseFP *)RDKit::MorganFingerprints::getFingerprint(*mol, radius,
&invars);
} catch (...) {
elog(ERROR, "makeMorganSFP: Unknown exception");
}
return (CSfp)res;
}
extern "C" CBfp makeFeatMorganBFP(CROMol data, int radius) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
std::vector<std::uint32_t> invars(mol->getNumAtoms());
try {
RDKit::MorganFingerprints::getFeatureInvariants(*mol, invars);
res = RDKit::MorganFingerprints::getFingerprintAsBitVect(
*mol, radius, getFeatMorganFpSize(), &invars);
} catch (...) {
elog(ERROR, "makeMorganBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CSfp makeAtomPairSFP(CROMol data) {
ROMol *mol = (ROMol *)data;
SparseFP *res = nullptr;
#ifdef UNHASHED_PAIR_FPS
try {
SparseIntVect<std::int32_t> *afp =
RDKit::AtomPairs::getAtomPairFingerprint(*mol);
res = new SparseFP(1 << RDKit::AtomPairs::numAtomPairFingerprintBits);
for (SparseIntVect<std::int32_t>::StorageType::const_iterator iter =
afp->getNonzeroElements().begin();
iter != afp->getNonzeroElements().end(); ++iter) {
res->setVal(iter->first, iter->second);
}
delete afp;
} catch (...) {
elog(ERROR, "makeAtomPairSFP: Unknown exception");
}
#else
try {
SparseIntVect<std::int32_t> *afp =
RDKit::AtomPairs::getHashedAtomPairFingerprint(
*mol, getHashedAtomPairFpSize());
res = new SparseFP(getHashedAtomPairFpSize());
for (const auto &iter : afp->getNonzeroElements()) {
res->setVal(iter.first, iter.second);
}
delete afp;
} catch (...) {
elog(ERROR, "makeAtomPairSFP: Unknown exception");
}
#endif
return (CSfp)res;
}
extern "C" CSfp makeTopologicalTorsionSFP(CROMol data) {
ROMol *mol = (ROMol *)data;
SparseFP *res = nullptr;
#ifdef UNHASHED_PAIR_FPS
try {
SparseIntVect<boost::int64_t> *afp =
RDKit::AtomPairs::getHashedTopologicalTorsionFingerprint(
*mol, boost::integer_traits<std::uint32_t>::const_max);
res = new SparseFP(boost::integer_traits<std::uint32_t>::const_max);
for (SparseIntVect<boost::int64_t>::StorageType::const_iterator iter =
afp->getNonzeroElements().begin();
iter != afp->getNonzeroElements().end(); ++iter) {
res->setVal(iter->first, iter->second);
}
delete afp;
} catch (...) {
elog(ERROR, "makeTopologicalTorsionSFP: Unknown exception");
}
#else
try {
SparseIntVect<boost::int64_t> *afp =
RDKit::AtomPairs::getHashedTopologicalTorsionFingerprint(
*mol, getHashedTorsionFpSize());
res = new SparseFP(getHashedTorsionFpSize());
for (const auto &iter : afp->getNonzeroElements()) {
res->setVal(iter.first, iter.second);
}
delete afp;
} catch (...) {
elog(ERROR, "makeTopologicalTorsionSFP: Unknown exception");
}
#endif
return (CSfp)res;
}
extern "C" CBfp makeAtomPairBFP(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = RDKit::AtomPairs::getHashedAtomPairFingerprintAsBitVect(
*mol, getHashedAtomPairFpSize());
} catch (...) {
elog(ERROR, "makeAtomPairBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CBfp makeTopologicalTorsionBFP(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = RDKit::AtomPairs::getHashedTopologicalTorsionFingerprintAsBitVect(
*mol, getHashedTorsionFpSize());
} catch (...) {
elog(ERROR, "makeTopologicalTorsionBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CBfp makeMACCSBFP(CROMol data) {
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = RDKit::MACCSFingerprints::getFingerprintAsBitVect(*mol);
} catch (...) {
elog(ERROR, "makeMACCSBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" CBfp makeAvalonBFP(CROMol data, bool isQuery,
unsigned int bitFlags) {
#ifdef RDK_BUILD_AVALON_SUPPORT
ROMol *mol = (ROMol *)data;
ExplicitBitVect *res = nullptr;
try {
res = new ExplicitBitVect(getAvalonFpSize());
AvalonTools::getAvalonFP(*mol, *res, getAvalonFpSize(), isQuery, true,
bitFlags);
} catch (...) {
elog(ERROR, "makeAvalonBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
#else
elog(ERROR, "Avalon support not enabled");
return NULL;
#endif
}
/* chemical reactions */
extern "C" void freeChemReaction(CChemicalReaction data) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
delete rxn;
}
extern "C" CChemicalReaction constructChemReact(Reaction *data) {
auto *rxn = new ChemicalReaction();
try {
ByteA b(data);
ReactionPickler::reactionFromPickle(b, rxn);
} catch (ReactionPicklerException &e) {
elog(ERROR, "reactionFromPickle: %s", e.message());
} catch (...) {
elog(ERROR, "constructChemReact: Unknown exception");
}
return (CChemicalReaction)rxn;
}
extern "C" Reaction *deconstructChemReact(CChemicalReaction data) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
ByteA b;
try {
ReactionPickler::pickleReaction(rxn, b);
} catch (ReactionPicklerException &e) {
elog(ERROR, "pickleReaction: %s", e.message());
} catch (...) {
elog(ERROR, "deconstructChemReact: Unknown exception");
}
return (Reaction *)b.toByteA();
}
extern "C" CChemicalReaction parseChemReactText(char *data, bool asSmarts,
bool warnOnFail) {
ChemicalReaction *rxn = nullptr;
try {
if (asSmarts) {
rxn = RxnSmartsToChemicalReaction(data);
} else {
rxn = RxnSmartsToChemicalReaction(data, nullptr, true);
}
if (getInitReaction()) {
rxn->initReactantMatchers();
}
if (getMoveUnmappedReactantsToAgents() && hasReactionAtomMapping(*rxn)) {
rxn->removeUnmappedReactantTemplates(getThresholdUnmappedReactantAtoms());
}
} catch (...) {
rxn = nullptr;
}
if (rxn == nullptr) {
if (warnOnFail) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("could not create chemical reaction from SMILES '%s'",
data)));
} else {
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("could not create chemical reaction from SMILES '%s'",
data)));
}
}
return (CChemicalReaction)rxn;
}
extern "C" CChemicalReaction parseChemReactBlob(char *data, int len) {
ChemicalReaction *rxn = nullptr;
try {
string binStr(data, len);
rxn = new ChemicalReaction(binStr);
if (getInitReaction()) {
rxn->initReactantMatchers();
}
if (getMoveUnmappedReactantsToAgents() && hasReactionAtomMapping(*rxn)) {
rxn->removeUnmappedReactantTemplates(getThresholdUnmappedReactantAtoms());
}
} catch (...) {
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("problem generating chemical reaction from blob data")));
}
if (rxn == nullptr) {
ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION),
errmsg("blob data could not be parsed")));
}
return (CChemicalReaction)rxn;
}
extern "C" char *makeChemReactText(CChemicalReaction data, int *len,
bool asSmarts) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
try {
if (!asSmarts) {
StringData = ChemicalReactionToRxnSmiles(*rxn);
} else {
StringData = ChemicalReactionToRxnSmarts(*rxn);
}
} catch (...) {
ereport(WARNING, (errcode(ERRCODE_WARNING),
errmsg("makeChemReactText: problems converting chemical "
"reaction to SMILES/SMARTS")));
StringData = "";
}
*len = StringData.size();
return (char *)StringData.c_str();
}
extern "C" char *makeChemReactBlob(CChemicalReaction data, int *len) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
StringData.clear();
try {
ReactionPickler::pickleReaction(*rxn, StringData);
} catch (...) {
elog(ERROR, "makeChemReactBlob: Unknown exception");
}
*len = StringData.size();
return (char *)StringData.data();
}
extern "C" CChemicalReaction parseChemReactCTAB(char *data, bool warnOnFail) {
ChemicalReaction *rxn = nullptr;
try {
rxn = RxnBlockToChemicalReaction(data);
if (getInitReaction()) {
rxn->initReactantMatchers();
}
if (getMoveUnmappedReactantsToAgents() && hasReactionAtomMapping(*rxn)) {
rxn->removeUnmappedReactantTemplates(getThresholdUnmappedReactantAtoms());
}
} catch (...) {
rxn = nullptr;
}
if (rxn == nullptr) {
if (warnOnFail) {
ereport(WARNING,
(errcode(ERRCODE_WARNING),
errmsg("could not create reaction from CTAB '%s'", data)));
} else {
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("could not create reaction from CTAB '%s'", data)));
}
}
return (CChemicalReaction)rxn;
}
extern "C" char *makeCTABChemReact(CChemicalReaction data, int *len) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
try {
StringData = ChemicalReactionToRxnBlock(*rxn);
} catch (...) {
ereport(
WARNING,
(errcode(ERRCODE_WARNING),
errmsg("makeCTABChemReact: problems converting reaction to CTAB")));
StringData = "";
}
*len = StringData.size();
return (char *)StringData.c_str();
}
extern "C" int ChemReactNumReactants(CChemicalReaction crxn) {
const ChemicalReaction *rxn = (ChemicalReaction *)crxn;
return rxn->getNumReactantTemplates();
}
extern "C" int ChemReactNumProducts(CChemicalReaction crxn) {
const ChemicalReaction *rxn = (ChemicalReaction *)crxn;
return rxn->getNumProductTemplates();
}
extern "C" int ChemReactNumAgents(CChemicalReaction crxn) {
const ChemicalReaction *rxn = (ChemicalReaction *)crxn;
return rxn->getNumAgentTemplates();
}
extern "C" bytea *makeReactionSign(CChemicalReaction data) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
ExplicitBitVect *res = nullptr;
bytea *ret = nullptr;
try {
RDKit::ReactionFingerprintParams params;
params.fpType = static_cast<FingerprintType>(getReactionSubstructFpType());
params.fpSize = getReactionSubstructFpSize();
params.includeAgents = (!getIgnoreReactionAgents());
params.bitRatioAgents = getReactionStructuralFPAgentBitRatio();
res = RDKit::StructuralFingerprintChemReaction(*rxn, params);
if (res) {
std::string sres = BitVectToBinaryText(*res);
unsigned int varsize = VARHDRSZ + sres.size();
ret = (bytea *)palloc0(varsize);
memcpy(VARDATA(ret), sres.data(), sres.size());
SET_VARSIZE(ret, varsize);
delete res;
res = nullptr;
}
} catch (...) {
elog(ERROR, "makeReactionSign: Unknown exception");
if (res) delete res;
}
return ret;
}
extern "C" int ReactionSubstruct(CChemicalReaction rxn,
CChemicalReaction rxn2) {
ChemicalReaction *rxnm = (ChemicalReaction *)rxn;
ChemicalReaction *rxn2m = (ChemicalReaction *)rxn2;
/* Reaction search */
if (rxn2m->getNumReactantTemplates() != 0 &&
rxn2m->getNumProductTemplates() != 0) {
return hasReactionSubstructMatch(*rxnm, *rxn2m,
(!getIgnoreReactionAgents()));
}
/* Product search */
if (rxn2m->getNumReactantTemplates() == 0 &&
rxn2m->getNumProductTemplates() != 0) {
if (rxn2m->getNumAgentTemplates() != 0 && !getIgnoreReactionAgents()) {
return (hasProductTemplateSubstructMatch(*rxnm, *rxn2m) &&
hasAgentTemplateSubstructMatch(*rxnm, *rxn2m));
}
return hasProductTemplateSubstructMatch(*rxnm, *rxn2m);
}
/* Reactant search */
if (rxn2m->getNumReactantTemplates() != 0 &&
rxn2m->getNumProductTemplates() == 0) {
if (rxn2m->getNumAgentTemplates() != 0 && !getIgnoreReactionAgents()) {
return (hasReactantTemplateSubstructMatch(*rxnm, *rxn2m) &&
hasAgentTemplateSubstructMatch(*rxnm, *rxn2m));
}
return hasReactantTemplateSubstructMatch(*rxnm, *rxn2m);
}
/* Agent search */
if (rxn2m->getNumReactantTemplates() == 0 &&
rxn2m->getNumProductTemplates() == 0 &&
rxn2m->getNumAgentTemplates() != 0) {
return hasAgentTemplateSubstructMatch(*rxnm, *rxn2m);
}
return false;
}
extern "C" int ReactionSubstructFP(CChemicalReaction rxn,
CChemicalReaction rxnquery) {
ChemicalReaction *rxnm = (ChemicalReaction *)rxn;
ChemicalReaction *rxnqm = (ChemicalReaction *)rxnquery;
RDKit::ReactionFingerprintParams params;
params.fpType = static_cast<FingerprintType>(getReactionSubstructFpType());
params.fpSize = getReactionSubstructFpSize();
params.includeAgents = (!getIgnoreReactionAgents());
params.bitRatioAgents = getReactionStructuralFPAgentBitRatio();
ExplicitBitVect *fp1 = StructuralFingerprintChemReaction(*rxnm, params);
ExplicitBitVect *fp2 = StructuralFingerprintChemReaction(*rxnqm, params);
if (fp1->getNumOnBits() < fp2->getNumOnBits()) {
return false;
}
for (unsigned i = 0; i < fp1->getNumBits(); i++) {
if ((fp1->getBit(i) & fp2->getBit(i)) != fp2->getBit(i)) {
return false;
}
}
return true;
}
// some helper functions in anonymous namespace
namespace {
struct MoleculeDescriptors {
MoleculeDescriptors() : nAtoms(0), nBonds(0), nRings(0), MW(0.0) {}
unsigned nAtoms;
unsigned nBonds;
unsigned nRings;
double MW;
};
MoleculeDescriptors *calcMolecularDescriptorsReaction(
RDKit::ChemicalReaction *rxn, RDKit::ReactionMoleculeType t) {
auto *des = new MoleculeDescriptors();
auto begin = getStartIterator(*rxn, t);
auto end = getEndIterator(*rxn, t);
for (; begin != end; ++begin) {
des->nAtoms += begin->get()->getNumHeavyAtoms();
des->nBonds += begin->get()->getNumBonds(true);
des->MW = RDKit::Descriptors::calcAMW(*begin->get(), true);
if (!begin->get()->getRingInfo()->isInitialized()) {
begin->get()->updatePropertyCache();
RDKit::MolOps::findSSSR(*begin->get());
}
des->nRings += begin->get()->getRingInfo()->numRings();
}
return des;
}
int compareMolDescriptors(const MoleculeDescriptors &md1,
const MoleculeDescriptors &md2) {
int res = md1.nAtoms - md2.nAtoms;
if (res) {
return res;
}
res = md1.nBonds - md2.nBonds;
if (res) {
return res;
}
res = md1.nRings - md2.nRings;
if (res) {
return res;
}
res = int(md1.MW - md2.MW);
if (res) {
return res;
}
return 0;
}
} // namespace
extern "C" int reactioncmp(CChemicalReaction rxn, CChemicalReaction rxn2) {
ChemicalReaction *rxnm = (ChemicalReaction *)rxn;
ChemicalReaction *rxn2m = (ChemicalReaction *)rxn2;
if (!rxnm) {
if (!rxn2m) return 0;
return -1;
}
if (!rxn2m) return 1;
int res = rxnm->getNumReactantTemplates() - rxn2m->getNumReactantTemplates();
if (res) {
return res;
}
res = rxnm->getNumProductTemplates() - rxn2m->getNumProductTemplates();
if (res) {
return res;
}
if (!getIgnoreReactionAgents()) {
res = rxnm->getNumAgentTemplates() - rxn2m->getNumAgentTemplates();
if (res) {
return res;
}
}
MoleculeDescriptors *rxn_react =
calcMolecularDescriptorsReaction(rxnm, Reactant);
MoleculeDescriptors *rxn2_react =
calcMolecularDescriptorsReaction(rxn2m, Reactant);
res = compareMolDescriptors(*rxn_react, *rxn2_react);
delete (rxn_react);
delete (rxn2_react);
if (res) {
return res;
}
MoleculeDescriptors *rxn_product =
calcMolecularDescriptorsReaction(rxnm, Product);
MoleculeDescriptors *rxn2_product =
calcMolecularDescriptorsReaction(rxn2m, Product);
res = compareMolDescriptors(*rxn_product, *rxn2_product);
delete (rxn_product);
delete (rxn2_product);
if (res) {
return res;
}
if (!getIgnoreReactionAgents()) {
MoleculeDescriptors *rxn_agent =
calcMolecularDescriptorsReaction(rxnm, Agent);
MoleculeDescriptors *rxn2_agent =
calcMolecularDescriptorsReaction(rxn2m, Agent);
res = compareMolDescriptors(*rxn_agent, *rxn2_agent);
delete (rxn_agent);
delete (rxn2_agent);
if (res) {
return res;
}
}
RDKit::MatchVectType matchVect;
if (hasReactionSubstructMatch(*rxnm, *rxn2m, (!getIgnoreReactionAgents()))) {
return 0;
}
return -1;
}
extern "C" CSfp makeReactionDifferenceSFP(CChemicalReaction data, int size,
int fpType) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
SparseFP *res = nullptr;
try {
if (fpType > 3 || fpType < 1) {
elog(ERROR, "makeReactionDifferenceSFP: Unknown Fingerprint type");
}
FingerprintType fp = static_cast<RDKit::FingerprintType>(fpType);
RDKit::ReactionFingerprintParams params;
params.fpType = static_cast<FingerprintType>(fpType);
params.fpSize = size;
params.includeAgents = (!getIgnoreReactionAgents());
params.agentWeight = getReactionDifferenceFPWeightAgents();
params.nonAgentWeight = getReactionDifferenceFPWeightNonagents();
res = (SparseFP *)RDKit::DifferenceFingerprintChemReaction(*rxn, params);
} catch (...) {
elog(ERROR, "makeReactionDifferenceSFP: Unknown exception");
}
return (CSfp)res;
}
extern "C" CBfp makeReactionBFP(CChemicalReaction data, int size, int fpType) {
ChemicalReaction *rxn = (ChemicalReaction *)data;
ExplicitBitVect *res = nullptr;
try {
if (fpType > 5 || fpType < 1) {
elog(ERROR, "makeReactionBFP: Unknown Fingerprint type");
}
FingerprintType fp = static_cast<RDKit::FingerprintType>(fpType);
RDKit::ReactionFingerprintParams params;
params.fpType = static_cast<FingerprintType>(fpType);
params.fpSize = size;
params.includeAgents = (!getIgnoreReactionAgents());
params.bitRatioAgents = getReactionStructuralFPAgentBitRatio();
res = (ExplicitBitVect *)RDKit::StructuralFingerprintChemReaction(*rxn,
params);
} catch (...) {
elog(ERROR, "makeReactionBFP: Unknown exception");
}
if (res) {
std::string *sres = new std::string(BitVectToBinaryText(*res));
delete res;
return (CBfp)sres;
} else {
return nullptr;
}
}
extern "C" char *computeMolHash(CROMol data, int *len) {
ROMol &mol = *(ROMol *)data;
static string text;
text.clear();
try {
// FIX: once R/S values are stored on the atoms, this will no longer be
// needed
MolOps::assignStereochemistry(mol);
text = RDKit::MolHash::generateMoleculeHashSet(mol);
} catch (...) {
ereport(WARNING,
(errcode(ERRCODE_WARNING), errmsg("computeMolHash: failed")));
text.clear();
}
*len = text.length();
return strdup(text.c_str());
}
extern "C" char *findMCSsmiles(char *smiles, char *params) {
static string mcs;
mcs.clear();
char *str = smiles;
char *s = str;
int len, nmols = 0;
std::vector<RDKit::ROMOL_SPTR> molecules;
while (*s && *s <= ' ') s++;
while (*s > ' ') {
len = 0;
while (s[len] > ' ') len++;
s[len] = '\0';
if (0 == strlen(s)) continue;
molecules.push_back(RDKit::ROMOL_SPTR(RDKit::SmilesToMol(s)));
// elog(WARNING, s);
s += len;
s++; // do s++; while(*s && *s <= ' ');
}
RDKit::MCSParameters p;
if (params && 0 != strlen(params)) {
try {
RDKit::parseMCSParametersJSON(params, &p);
} catch (...) {
ereport(WARNING, (errcode(ERRCODE_WARNING),
errmsg("findMCS: Invalid argument \'params\'")));
return strdup("");
}
}
try {
MCSResult res = RDKit::findMCS(molecules, &p);
mcs = res.SmartsString;
if (!res.isCompleted())
ereport(WARNING, (errcode(ERRCODE_WARNING),
errmsg("findMCS timed out, result is not maximal")));
} catch (...) {
ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("findMCS: failed")));
mcs.clear();
}
return mcs.empty() ? strdup("") : strdup(mcs.c_str());
}
extern "C" void *addMol2list(void *lst, Mol *mol) {
try {
if (!lst) {
// elog(WARNING, "addMol2list: allocate new list");
lst = new std::vector<RDKit::ROMOL_SPTR>;
}
std::vector<RDKit::ROMOL_SPTR> &mlst =
*(std::vector<RDKit::ROMOL_SPTR> *)lst;
// elog(WARNING, "addMol2list: create a copy of new mol");
ROMol *m = (ROMol *)constructROMol(
mol); // new ROMol(*(const ROMol*)mol, false); // create a copy
// elog(WARNING, "addMol2list: append new mol into list");
mlst.push_back(RDKit::ROMOL_SPTR(m));
// elog(WARNING, "addMol2list: finished");
} catch (...) {
// elog(WARNING, "addMol2list: ERROR");
ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("addMol2list: failed")));
}
return lst;
}
extern "C" char *findMCS(void *vmols, char *params) {
static string mcs;
mcs.clear();
std::vector<RDKit::ROMOL_SPTR> *molecules =
(std::vector<RDKit::ROMOL_SPTR> *)vmols;
// char t[256];
// sprintf(t,"findMCS(): lst=%p, size=%u", molecules, molecules->size());
// elog(WARNING, t);
RDKit::MCSParameters p;
if (params && 0 != strlen(params)) {
try {
RDKit::parseMCSParametersJSON(params, &p);
} catch (...) {
// mcs = params; //DEBUG
ereport(WARNING, (errcode(ERRCODE_WARNING),
errmsg("findMCS: Invalid argument \'params\'")));
return strdup(mcs.c_str());
}
}
try {
MCSResult res = RDKit::findMCS(*molecules, &p);
if (!res.isCompleted())
ereport(WARNING, (errcode(ERRCODE_WARNING),
errmsg("findMCS timed out, result is not maximal")));
mcs = res.SmartsString;
} catch (...) {
ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("findMCS: failed")));
mcs.clear();
}
// sprintf(t,"findMCS(): MCS='%s'", mcs.c_str());
// elog(WARNING, t);
delete molecules;
// elog(WARNING, "findMCS(): molecules deleted. FINISHED.");
return strdup(mcs.c_str());
}
| 1 | 19,864 | I'm assuming that asSmarts & cxSmiles == asSmarts | rdkit-rdkit | cpp |
@@ -780,7 +780,9 @@ class LabelledData(param.Parameterized):
except:
self.warning("Could not unpickle custom style information.")
self.__dict__.update(d)
-
+ # TODO: super's setstate not called?
+ if "param" not in self.__dict__:
+ self.param = type(self.param)(self.__class__, self=self)
class Dimensioned(LabelledData): | 1 | """
Provides Dimension objects for tracking the properties of a value,
axis or map dimension. Also supplies the Dimensioned abstract
base class for classes that accept Dimension values.
"""
from __future__ import unicode_literals
import re
import datetime as dt
from operator import itemgetter
import numpy as np
import param
from ..core.util import (basestring, sanitize_identifier, isfinite,
group_sanitizer, label_sanitizer, max_range,
find_range, dimension_sanitizer, OrderedDict,
bytes_to_unicode, unicode, dt64_to_dt, unique_array,
builtins, config, dimension_range, disable_constant)
from .options import Store, StoreOptions
from .pprint import PrettyPrinter
# Alias parameter support for pickle loading
ALIASES = {'key_dimensions': 'kdims', 'value_dimensions': 'vdims',
'constant_dimensions': 'cdims'}
title_format = "{name}: {val}{unit}"
def param_aliases(d):
"""
Called from __setstate__ in LabelledData in order to load
old pickles with outdated parameter names.
Warning: We want to keep pickle hacking to a minimum!
"""
for old, new in ALIASES.items():
old_param = '_%s_param_value' % old
new_param = '_%s_param_value' % new
if old_param in d:
d[new_param] = d.pop(old_param)
return d
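# A brief illustrative sketch of the renaming performed above; the pickle
# dictionary shown here is a made-up example:
#
#   >>> d = param_aliases({'_key_dimensions_param_value': [], 'id': 1})
#   >>> '_kdims_param_value' in d and '_key_dimensions_param_value' not in d
#   True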
def asdim(dimension):
"""
Converts tuple, dict and basestring types to Dimension and leaves
Dimension types untouched.
"""
if isinstance(dimension, Dimension):
return dimension
elif isinstance(dimension, (tuple, dict, basestring)):
return Dimension(dimension)
else:
        raise ValueError('%s type could not be interpreted as Dimension. '
                         'Dimensions must be declared as a string, tuple, '
                         'dictionary or Dimension type.'
                         % type(dimension).__name__)
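# A minimal illustrative sketch of how asdim normalizes the accepted spec
# formats; the names 'x' and 'X Label' are arbitrary examples:
#
#   >>> asdim('x')
#   Dimension('x')
#   >>> asdim(('x', 'X Label')).label
#   'X Label'
#   >>> asdim({'name': 'x', 'label': 'X Label'}).label
#   'X Label'
#   >>> d = Dimension('x')
#   >>> asdim(d) is d        # Dimension instances pass through untouched
#   True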
def dimension_name(dimension):
"""
Looks up the dimension name on a Dimension or Dimension-like object.
"""
if isinstance(dimension, Dimension):
return dimension.name
elif isinstance(dimension, basestring):
return dimension
elif isinstance(dimension, tuple):
return dimension[0]
elif isinstance(dimension, dict):
return dimension['name']
elif dimension is None:
return None
else:
raise ValueError('%s type could not be interpreted as Dimension. '
'Dimensions must be declared as a string, tuple, '
'dictionary or Dimension type.'
% type(dimension).__name__)
def process_dimensions(kdims, vdims):
"""
Processes kdims and vdims specifications into a dictionary
of dimensions which can be passed to params.
"""
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
raise ValueError("%s argument expects a Dimension or list of dimensions, "
"specified as tuples, strings, dictionaries or Dimension "
"instances, not a %s type. Ensure you passed the data as the "
"first argument." % (group, type(dims).__name__))
for dim in dims:
if not isinstance(dim, (tuple, basestring, Dimension, dict)):
raise ValueError('Dimensions must be defined as a tuple, '
'string, dictionary or Dimension instance, '
'found a %s type.' % type(dim).__name__)
dimensions[group] = [asdim(d) for d in dims]
return dimensions
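# A small illustrative sketch of the spec processing above; the dimension
# names are arbitrary examples:
#
#   >>> dims = process_dimensions(kdims='x', vdims=[('y', 'Y Label'), 'z'])
#   >>> [d.name for d in dims['kdims']]
#   ['x']
#   >>> [(d.name, d.label) for d in dims['vdims']]
#   [('y', 'Y Label'), ('z', 'z')]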
class redim(object):
"""
Utility that supports re-dimensioning any HoloViews object via the
redim method.
"""
def __init__(self, parent, mode=None):
self.parent = parent
# Can be 'dataset', 'dynamic' or None
self.mode = mode
def __str__(self):
return "<holoviews.core.dimension.redim method>"
@classmethod
def replace_dimensions(cls, dimensions, overrides):
"""
Replaces dimensions in a list with a dictionary of overrides.
        Overrides should be indexed by the dimension name, with each value
        being either a Dimension object, a string name or a dictionary
        specifying the dimension parameters to override.
"""
replaced = []
for d in dimensions:
if d.name in overrides:
override = overrides[d.name]
elif d.label in overrides:
override = overrides[d.label]
else:
override = None
if override is None:
replaced.append(d)
elif isinstance(override, (basestring, tuple)):
replaced.append(d(override))
elif isinstance(override, Dimension):
replaced.append(override)
elif isinstance(override, dict):
replaced.append(d.clone(override.get('name',None),
**{k:v for k,v in override.items() if k != 'name'}))
else:
raise ValueError('Dimension can only be overridden '
'with another dimension or a dictionary '
'of attributes')
return replaced
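    # An illustrative sketch of overriding dimensions by name; the overrides
    # shown ('time', unit 's') are arbitrary examples:
    #
    #   >>> dims = [Dimension('x'), Dimension('y')]
    #   >>> new = redim.replace_dimensions(dims, {'x': 'time', 'y': {'unit': 's'}})
    #   >>> new[0].name, new[1].unit
    #   ('time', 's')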
def _filter_cache(self, dmap, kdims):
"""
        Returns a filtered version of the DynamicMap cache, leaving only
        keys consistent with the newly specified values.
"""
filtered = []
for key, value in dmap.data.items():
if not any(kd.values and v not in kd.values for kd, v in zip(kdims, key)):
filtered.append((key, value))
return filtered
def __call__(self, specs=None, **dimensions):
"""
        Replaces dimensions on the dataset, allowing dimensions to be
        renamed. The dimension mapping should map between the old
        dimension name and a dictionary of new attributes, a completely
        new Dimension or a new string name.
"""
parent = self.parent
redimmed = parent
if parent._deep_indexable and self.mode != 'dataset':
deep_mapped = [(k, v.redim(specs, **dimensions))
for k, v in parent.items()]
redimmed = parent.clone(deep_mapped)
if specs is not None:
if not isinstance(specs, list):
specs = [specs]
matches = any(parent.matches(spec) for spec in specs)
if self.mode != 'dynamic' and not matches:
return redimmed
kdims = self.replace_dimensions(parent.kdims, dimensions)
vdims = self.replace_dimensions(parent.vdims, dimensions)
zipped_dims = zip(parent.kdims+parent.vdims, kdims+vdims)
renames = {pk.name: nk for pk, nk in zipped_dims if pk != nk}
if self.mode == 'dataset':
data = parent.data
if renames:
data = parent.interface.redim(parent, renames)
return parent.clone(data, kdims=kdims, vdims=vdims)
if self.mode != 'dynamic':
return redimmed.clone(kdims=kdims, vdims=vdims)
from ..util import Dynamic
def dynamic_redim(obj, **dynkwargs):
return obj.redim(specs, **dimensions)
dmap = Dynamic(parent, streams=parent.streams, operation=dynamic_redim)
dmap.data = OrderedDict(self._filter_cache(redimmed, kdims))
with disable_constant(dmap):
dmap.kdims = kdims
dmap.vdims = vdims
return dmap
def _redim(self, name, specs, **dims):
dimensions = {k:{name:v} for k,v in dims.items()}
return self(specs, **dimensions)
def cyclic(self, specs=None, **values):
return self._redim('cyclic', specs, **values)
def value_format(self, specs=None, **values):
return self._redim('value_format', specs, **values)
def range(self, specs=None, **values):
return self._redim('range', specs, **values)
def label(self, specs=None, **values):
for k, v in values.items():
dim = self.parent.get_dimension(k)
if dim and dim.name != dim.label and dim.label != v:
raise ValueError('Cannot override an existing Dimension label')
return self._redim('label', specs, **values)
def soft_range(self, specs=None, **values):
return self._redim('soft_range', specs, **values)
def type(self, specs=None, **values):
return self._redim('type', specs, **values)
def step(self, specs=None, **values):
return self._redim('step', specs, **values)
def default(self, specs=None, **values):
return self._redim('default', specs, **values)
def unit(self, specs=None, **values):
return self._redim('unit', specs, **values)
def values(self, specs=None, **ranges):
return self._redim('values', specs, **ranges)
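# A minimal usage sketch of the redim utility defined above, as exposed via
# the .redim attribute on Dimensioned objects; `curve` stands for a
# hypothetical element with kdim 'x' and vdim 'y':
#
#   >>> renamed = curve.redim(x='time')        # rename a dimension
#   >>> with_unit = curve.redim.unit(x='s')    # override a single attribute
#   >>> clamped = curve.redim.range(y=(0, 1))  # declare the allowed range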
class Dimension(param.Parameterized):
"""
Dimension objects are used to specify some important general
features that may be associated with a collection of values.
For instance, a Dimension may specify that a set of numeric values
actually correspond to 'Height' (dimension name), in units of
meters, with a descriptive label 'Height of adult males'.
All dimensions object have a name that identifies them and a label
containing a suitable description. If the label is not explicitly
specified it matches the name.
These two parameters define the core identity of the dimension
object and must match if two dimension objects are to be considered
equivalent. All other parameters are considered optional metadata
and are not used when testing for equality.
Unlike all the other parameters, these core parameters can be used
to construct a Dimension object from a tuple. This format is
sufficient to define an identical Dimension:
Dimension('a', label='Dimension A') == Dimension(('a', 'Dimension A'))
Everything else about a dimension is considered to reflect
non-semantic preferences. Examples include the default value (which
may be used in a visualization to set an initial slider position),
    how the value is to be rendered as text (which may be used to specify
the printed floating point precision) or a suitable range of values
to consider for a particular analysis.
Units
-----
Full unit support with automated conversions are on the HoloViews
roadmap. Once rich unit objects are supported, the unit (or more
specifically the type of unit) will be part of the core dimension
specification used to establish equality.
Until this feature is implemented, there are two auxiliary
parameters that hold some partial information about the unit: the
name of the unit and whether or not it is cyclic. The name of the
unit is used as part of the pretty-printed representation and
knowing whether it is cyclic is important for certain operations.
"""
name = param.String(doc="""
Short name associated with the Dimension, such as 'height' or
'weight'. Valid Python identifiers make good names, because they
can be used conveniently as a keyword in many contexts.""")
label = param.String(default=None, doc="""
Unrestricted label used to describe the dimension. A label
should succinctly describe the dimension and may contain any
        characters, including Unicode and LaTeX expressions.""")
cyclic = param.Boolean(default=False, doc="""
Whether the range of this feature is cyclic such that the
maximum allowed value (defined by the range parameter) is
continuous with the minimum allowed value.""")
value_format = param.Callable(default=None, doc="""
Formatting function applied to each value before display.""")
range = param.Tuple(default=(None, None), doc="""
Specifies the minimum and maximum allowed values for a
Dimension. None is used to represent an unlimited bound.""")
soft_range = param.Tuple(default=(None, None), doc="""
Specifies a minimum and maximum reference value, which
may be overridden by the data.""")
type = param.Parameter(default=None, doc="""
Optional type associated with the Dimension values. The type
may be an inbuilt constructor (such as int, str, float) or a
custom class object.""")
default = param.Parameter(default=None, doc="""
Default value of the Dimension which may be useful for widget
or other situations that require an initial or default value.""")
step = param.Number(default=None, doc="""
Optional floating point step specifying how frequently the
underlying space should be sampled. May be used to define a
discrete sampling over the range.""")
unit = param.String(default=None, allow_None=True, doc="""
Optional unit string associated with the Dimension. For
        instance, the string 'm' may be used to represent units of meters
and 's' to represent units of seconds.""")
values = param.List(default=[], doc="""
Optional specification of the allowed value set for the
dimension that may also be used to retain a categorical
ordering.""")
# Defines default formatting by type
type_formatters = {}
unit_format = ' ({unit})'
presets = {} # A dictionary-like mapping name, (name,) or
# (name, unit) to a preset Dimension object
def __init__(self, spec, **params):
"""
Initializes the Dimension object with the given name.
"""
if 'name' in params:
raise KeyError('Dimension name must only be passed as the positional argument')
if isinstance(spec, Dimension):
existing_params = dict(spec.get_param_values())
elif (spec, params.get('unit', None)) in self.presets.keys():
preset = self.presets[(str(spec), str(params['unit']))]
existing_params = dict(preset.get_param_values())
elif isinstance(spec, dict):
existing_params = spec
elif spec in self.presets:
existing_params = dict(self.presets[spec].get_param_values())
elif (spec,) in self.presets:
existing_params = dict(self.presets[(spec,)].get_param_values())
else:
existing_params = {}
all_params = dict(existing_params, **params)
if isinstance(spec, tuple):
if not all(isinstance(s, basestring) for s in spec) or len(spec) != 2:
raise ValueError("Dimensions specified as a tuple must be a tuple "
"consisting of the name and label not: %s" % str(spec))
name, label = spec
all_params['name'] = name
all_params['label'] = label
            if 'label' in params and (label != params['label']):
                self.warning('Using label as supplied by keyword ({!r}), ignoring '
                             'tuple value {!r}'.format(params['label'], label))
                all_params['label'] = params['label']
elif isinstance(spec, basestring):
all_params['name'] = spec
all_params['label'] = params.get('label', spec)
if all_params['name'] == '':
raise ValueError('Dimension name cannot be the empty string')
if all_params['label'] in ['', None]:
raise ValueError('Dimension label cannot be None or the empty string')
values = params.get('values', [])
if isinstance(values, basestring) and values == 'initial':
self.warning("The 'initial' string for dimension values is no longer supported.")
values = []
all_params['values'] = list(unique_array(values))
super(Dimension, self).__init__(**all_params)
if self.default is not None:
if self.values and self.default not in values:
raise ValueError('%r default %s not found in declared values: %s' %
(self, self.default, self.values))
elif (self.range != (None, None) and
                  ((self.range[0] is not None and self.default < self.range[0]) or
                   (self.range[1] is not None and self.default > self.range[1]))):
raise ValueError('%r default %s not in declared range: %s' %
(self, self.default, self.range))
@property
def spec(self):
"Returns the corresponding tuple specification"
return (self.name, self.label)
def __call__(self, spec=None, **overrides):
"Aliased to clone method. To be deprecated in 2.0"
return self.clone(spec=spec, **overrides)
def clone(self, spec=None, **overrides):
"""
Derive a new Dimension that inherits existing parameters
except for the supplied, explicit overrides
"""
settings = dict(self.get_param_values(onlychanged=True), **overrides)
if spec is None:
spec = (self.name, overrides.get('label', self.label))
if 'label' in overrides and isinstance(spec, basestring) :
spec = (spec, overrides['label'])
elif 'label' in overrides and isinstance(spec, tuple) :
if overrides['label'] != spec[1]:
self.warning('Using label as supplied by keyword ({!r}), ignoring '
'tuple value {!r}'.format(overrides['label'], spec[1]))
spec = (spec[0], overrides['label'])
return self.__class__(spec, **{k:v for k,v in settings.items()
if k not in ['name', 'label']})
def __hash__(self):
"""
The hash allows Dimension objects to be used as dictionary keys in Python 3.
"""
return hash(self.spec)
def __setstate__(self, d):
"""
Compatibility for pickles before alias attribute was introduced.
"""
super(Dimension, self).__setstate__(d)
self.label = self.name
def __eq__(self, other):
"Implements equals operator including sanitized comparison."
if isinstance(other, Dimension):
return self.spec == other.spec
# For comparison to strings. Name may be sanitized.
return other in [self.name, self.label, dimension_sanitizer(self.name)]
def __ne__(self, other):
"Implements not equal operator including sanitized comparison."
return not self.__eq__(other)
def __lt__(self, other):
"Dimensions are sorted alphanumerically by name"
return self.name < other.name if isinstance(other, Dimension) else self.name < other
def __str__(self):
return self.name
def __repr__(self):
return self.pprint()
@property
def pprint_label(self):
"The pretty-printed label string for the Dimension"
unit = ('' if self.unit is None
else type(self.unit)(self.unit_format).format(unit=self.unit))
return bytes_to_unicode(self.label) + bytes_to_unicode(unit)
def pprint(self):
changed = dict(self.get_param_values(onlychanged=True))
if len(set([changed.get(k, k) for k in ['name','label']])) == 1:
return 'Dimension({spec})'.format(spec=repr(self.name))
ordering = sorted( sorted(changed.keys()),
key=lambda k: (- float('inf')
if self.params(k).precedence is None
else self.params(k).precedence))
kws = ", ".join('%s=%r' % (k, changed[k]) for k in ordering if k != 'name')
return 'Dimension({spec}, {kws})'.format(spec=repr(self.name), kws=kws)
def pprint_value(self, value):
"""
Applies the defined formatting to the value.
"""
own_type = type(value) if self.type is None else self.type
formatter = (self.value_format if self.value_format
else self.type_formatters.get(own_type))
if formatter:
if callable(formatter):
return formatter(value)
elif isinstance(formatter, basestring):
if isinstance(value, (dt.datetime, dt.date)):
return value.strftime(formatter)
elif isinstance(value, np.datetime64):
return dt64_to_dt(value).strftime(formatter)
elif re.findall(r"\{(\w+)\}", formatter):
return formatter.format(value)
else:
return formatter % value
return unicode(bytes_to_unicode(value))
def pprint_value_string(self, value):
"""
Pretty prints the dimension name and value using the global
title_format variable, including the unit string (if
set). Numeric types are printed to the stated rounding level.
"""
unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)
value = self.pprint_value(value)
return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit)
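# An illustrative sketch of the identity and formatting behaviour described
# in the Dimension docstring; the 'height' dimension is an arbitrary example:
#
#   >>> d = Dimension('height', label='Height of adult males', unit='m')
#   >>> d == Dimension(('height', 'Height of adult males'))   # core identity only
#   True
#   >>> d.pprint_label
#   'Height of adult males (m)'
#   >>> d.pprint_value_string(1.8)
#   'Height of adult males: 1.8 m'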
class LabelledData(param.Parameterized):
"""
LabelledData is a mix-in class designed to introduce the group and
label parameters (and corresponding methods) to any class
containing data. This class assumes that the core data contents
will be held in the attribute called 'data'.
Used together, group and label are designed to allow a simple and
flexible means of addressing data. For instance, if you are
collecting the heights of people in different demographics, you
could specify the values of your objects as 'Height' and then use
the label to specify the (sub)population.
In this scheme, one object may have the parameters set to
[group='Height', label='Children'] and another may use
[group='Height', label='Adults'].
Note: Another level of specification is implicit in the type (i.e
class) of the LabelledData object. A full specification of a
LabelledData object is therefore given by the tuple
    (<type>, <group>, <label>). This additional level of specification is
used in the traverse method.
Any strings can be used for the group and label, but it can be
convenient to use a capitalized string of alphanumeric characters,
in which case the keys used for matching in the matches and
traverse method will correspond exactly to {type}.{group}.{label}.
Otherwise the strings provided will be sanitized to be valid
capitalized Python identifiers, which works fine but can sometimes
be confusing.
"""
group = param.String(default='LabelledData', constant=True, doc="""
A string describing the type of data contained by the object.
By default this will typically mirror the class name.""")
label = param.String(default='', constant=True, doc="""
Optional label describing the data, typically reflecting where
or how it was measured. The label should allow a specific
measurement or dataset to be referenced for a given group.""")
_deep_indexable = False
def __init__(self, data, id=None, plot_id=None, **params):
"""
All LabelledData subclasses must supply data to the
constructor, which will be held on the .data attribute.
This class also has an id instance attribute, which
may be set to associate some custom options with the object.
"""
self.data = data
self.id = id
self._plot_id = plot_id or builtins.id(self)
if isinstance(params.get('label',None), tuple):
(alias, long_name) = params['label']
label_sanitizer.add_aliases(**{alias:long_name})
params['label'] = long_name
if isinstance(params.get('group',None), tuple):
(alias, long_name) = params['group']
group_sanitizer.add_aliases(**{alias:long_name})
params['group'] = long_name
super(LabelledData, self).__init__(**params)
if not group_sanitizer.allowable(self.group):
raise ValueError("Supplied group %r contains invalid characters." %
self.group)
elif not label_sanitizer.allowable(self.label):
raise ValueError("Supplied label %r contains invalid characters." %
self.label)
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
"""
Returns a clone of the object with matching parameter values
containing the specified args and kwargs.
If shared_data is set to True and no data explicitly supplied,
the clone will share data with the original. May also supply
a new_type, which will inherit all shared parameters.
"""
params = dict(self.get_param_values())
if new_type is None:
clone_type = self.__class__
else:
clone_type = new_type
new_params = new_type.params()
params = {k: v for k, v in params.items()
if k in new_params}
if params.get('group') == self.params()['group'].default:
params.pop('group')
settings = dict(params, **overrides)
if 'id' not in settings:
settings['id'] = self.id
if data is None and shared_data:
data = self.data
settings['plot_id'] = self._plot_id
# Apply name mangling for __ attribute
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
return clone_type(data, *args, **{k:v for k,v in settings.items()
if k not in pos_args})
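    # A brief illustrative sketch of clone semantics; `obj` stands for any
    # hypothetical LabelledData instance:
    #
    #   >>> copy = obj.clone()                 # no data supplied, shared_data=True
    #   >>> copy.data is obj.data              # so the clone shares the data
    #   True
    #   >>> relabelled = obj.clone(group='Height', label='Adults')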
def relabel(self, label=None, group=None, depth=0):
"""
Assign a new label and/or group to an existing LabelledData
object, creating a clone of the object with the new settings.
"""
new_data = self.data
if (depth > 0) and getattr(self, '_deep_indexable', False):
new_data = []
for k, v in self.data.items():
relabelled = v.relabel(group=group, label=label, depth=depth-1)
new_data.append((k, relabelled))
keywords = [('label', label), ('group', group)]
kwargs = {k: v for k, v in keywords if v is not None}
return self.clone(new_data, **kwargs)
def matches(self, spec):
"""
A specification may be a class, a tuple or a string.
Equivalent to isinstance if a class is supplied, otherwise
matching occurs on type, group and label. These may be supplied
as a tuple of strings or as a single string of the
form "{type}.{group}.{label}". Matching may be done on {type}
alone, {type}.{group}, or {type}.{group}.{label}. The strings
for the type, group, and label will each be sanitized before
the match, and so the sanitized versions of those values will
need to be provided if the match is to succeed.
"""
if callable(spec) and not isinstance(spec, type): return spec(self)
elif isinstance(spec, type): return isinstance(self, spec)
specification = (self.__class__.__name__, self.group, self.label)
split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec
split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False)
for s in split_spec))
if all(nocompare): return True
match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc))
self_spec = match_fn(split_spec)
unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec
if unescaped_match: return True
sanitizers = [sanitize_identifier, group_sanitizer, label_sanitizer]
identifier_specification = tuple(fn(ident, escape=False)
for ident, fn in zip(specification, sanitizers))
identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec
return identifier_match
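    # An illustrative sketch of spec matching; the group/label values are
    # arbitrary examples:
    #
    #   >>> obj = LabelledData([], group='Height', label='Children')
    #   >>> obj.matches('LabelledData.Height.Children')
    #   True
    #   >>> obj.matches('LabelledData.Height')     # partial specs also match
    #   True
    #   >>> obj.matches('*.Height.Adults')
    #   False
    #   >>> obj.matches(LabelledData)              # class specs use isinstance
    #   True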
def traverse(self, fn, specs=None, full_breadth=True):
"""
        Traverses any nested LabelledData object (i.e. LabelledData
        objects containing LabelledData objects), applying the
        supplied function to each constituent element if it matches
        the supplied specifications. The outputs of these function
        calls are collected and returned in the accumulator list.
If specs is None, all constituent elements are
processed. Otherwise, specs must be a list of
type.group.label specs, types, and functions.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self))
# Assumes composite objects are iterables
if self._deep_indexable:
for el in self:
if el is None:
continue
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator
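    # A small illustrative sketch of traverse; `overlay` stands for any
    # hypothetical deep-indexable object containing a Curve and a Scatter:
    #
    #   >>> overlay.traverse(lambda x: type(x).__name__)
    #   ['Overlay', 'Curve', 'Scatter']
    #   >>> overlay.traverse(lambda x: x.label, specs=['Curve'])
    #   ['My curve']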
def map(self, map_fn, specs=None, clone=True):
"""
Recursively replaces elements using a map function when the
specification applies.
"""
if specs and not isinstance(specs, list): specs = [specs]
applies = specs is None or any(self.matches(spec) for spec in specs)
if self._deep_indexable:
deep_mapped = self.clone(shared_data=False) if clone else self
for k, v in self.items():
new_val = v.map(map_fn, specs, clone)
if new_val is not None:
deep_mapped[k] = new_val
if applies: deep_mapped = map_fn(deep_mapped)
return deep_mapped
else:
return map_fn(self) if applies else self
def __getstate__(self):
"""
When pickling, make sure to save the relevant style and
plotting options as well.
"""
obj_dict = self.__dict__.copy()
try:
if Store.save_option_state and (obj_dict.get('id', None) is not None):
custom_key = '_custom_option_%d' % obj_dict['id']
if custom_key not in obj_dict:
obj_dict[custom_key] = {backend:s[obj_dict['id']]
for backend,s in Store._custom_options.items()
if obj_dict['id'] in s}
else:
obj_dict['id'] = None
except:
self.warning("Could not pickle custom style information.")
return obj_dict
def __setstate__(self, d):
"""
When unpickled, restore the saved style and plotting options
to ViewableElement.options.
"""
d = param_aliases(d)
try:
load_options = Store.load_counter_offset is not None
if load_options:
matches = [k for k in d if k.startswith('_custom_option')]
for match in matches:
custom_id = int(match.split('_')[-1])
if not isinstance(d[match], dict):
# Backward compatibility before multiple backends
backend_info = {'matplotlib':d[match]}
else:
backend_info = d[match]
for backend, info in backend_info.items():
if backend not in Store._custom_options:
Store._custom_options[backend] = {}
Store._custom_options[backend][Store.load_counter_offset + custom_id] = info
d.pop(match)
if d['id'] is not None:
d['id'] += Store.load_counter_offset
else:
d['id'] = None
except:
self.warning("Could not unpickle custom style information.")
self.__dict__.update(d)
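    # An illustrative sketch of the option-preserving pickle round trip
    # implemented by __getstate__/__setstate__ above; assumes an object `obj`
    # with a non-None id and Store.save_option_state enabled:
    #
    #   >>> import pickle
    #   >>> s = pickle.dumps(obj)       # stashes a '_custom_option_<id>' entry
    #   >>> restored = pickle.loads(s)  # the entry is re-registered with Store
    #   ...                             # when Store.load_counter_offset is set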
class Dimensioned(LabelledData):
"""
Dimensioned is a base class that allows the data contents of a
class to be associated with dimensions. The contents associated
with dimensions may be partitioned into one of three types
* key dimensions: These are the dimensions that can be indexed via
the __getitem__ method. Dimension objects
supporting key dimensions must support indexing
over these dimensions and may also support
slicing. This list ordering of dimensions
describes the positional components of each
multi-dimensional indexing operation.
For instance, if the key dimension names are
'weight' followed by 'height' for Dimensioned
object 'obj', then obj[80,175] indexes a weight
of 80 and height of 175.
Accessed using either kdims or key_dimensions.
* value dimensions: These dimensions correspond to any data held
on the Dimensioned object not in the key
dimensions. Indexing by value dimension is
supported by dimension name (when there are
multiple possible value dimensions); no
slicing semantics is supported and all the
data associated with that dimension will be
returned at once. Note that it is not possible
to mix value dimensions and deep dimensions.
Accessed using either vdims or value_dimensions.
* deep dimensions: These are dynamically computed dimensions that
belong to other Dimensioned objects that are
nested in the data. Objects that support this
should enable the _deep_indexable flag. Note
that it is not possible to mix value dimensions
and deep dimensions.
Accessed using either ddims or deep_dimensions.
    The Dimensioned class supports generalized methods for finding the
range and type of values along a particular Dimension. The range
method relies on the appropriate implementation of the
dimension_values methods on subclasses.
The index of an arbitrary dimension is its positional index in the
list of all dimensions, starting with the key dimensions, followed
by the value dimensions and ending with the deep dimensions.
"""
cdims = param.Dict(default=OrderedDict(), doc="""
The constant dimensions defined as a dictionary of Dimension:value
pairs providing additional dimension information about the object.
Aliased with constant_dimensions.""")
kdims = param.List(bounds=(0, None), constant=True, doc="""
The key dimensions defined as list of dimensions that may be
used in indexing (and potential slicing) semantics. The order
of the dimensions listed here determines the semantics of each
component of a multi-dimensional indexing operation.
Aliased with key_dimensions.""")
vdims = param.List(bounds=(0, None), constant=True, doc="""
The value dimensions defined as the list of dimensions used to
describe the components of the data. If multiple value
dimensions are supplied, a particular value dimension may be
indexed by name after the key dimensions.
Aliased with value_dimensions.""")
group = param.String(default='Dimensioned', constant=True, doc="""
A string describing the data wrapped by the object.""")
__abstract = True
_dim_groups = ['kdims', 'vdims', 'cdims', 'ddims']
_dim_aliases = dict(key_dimensions='kdims', value_dimensions='vdims',
constant_dimensions='cdims', deep_dimensions='ddims')
def __init__(self, data, kdims=None, vdims=None, **params):
params.update(process_dimensions(kdims, vdims))
if 'cdims' in params:
params['cdims'] = {d if isinstance(d, Dimension) else Dimension(d): val
for d, val in params['cdims'].items()}
super(Dimensioned, self).__init__(data, **params)
self.ndims = len(self.kdims)
cdims = [(d.name, val) for d, val in self.cdims.items()]
self._cached_constants = OrderedDict(cdims)
self._settings = None
self.redim = redim(self)
def _valid_dimensions(self, dimensions):
"""Validates key dimension input
Returns kdims if no dimensions are specified"""
if dimensions is None:
dimensions = self.kdims
elif not isinstance(dimensions, list):
dimensions = [dimensions]
valid_dimensions = []
for dim in dimensions:
if isinstance(dim, Dimension): dim = dim.name
if dim not in self.kdims:
raise Exception("Supplied dimensions %s not found." % dim)
valid_dimensions.append(dim)
return valid_dimensions
@property
def ddims(self):
"The list of deep dimensions"
if self._deep_indexable and self:
return self.values()[0].dimensions()
else:
return []
def dimensions(self, selection='all', label=False):
"""
Provides convenient access to Dimensions on nested
Dimensioned objects. Dimensions can be selected
by their type, i.e. 'key' or 'value' dimensions.
By default 'all' dimensions are returned.
"""
if label in ['name', True]:
label = 'short'
elif label == 'label':
label = 'long'
elif label:
raise ValueError("label needs to be one of True, False, 'name' or 'label'")
lambdas = {'k': (lambda x: x.kdims, {'full_breadth': False}),
'v': (lambda x: x.vdims, {}),
'c': (lambda x: x.cdims, {})}
aliases = {'key': 'k', 'value': 'v', 'constant': 'c'}
if selection in ['all', 'ranges']:
groups = [d for d in self._dim_groups if d != 'cdims']
dims = [dim for group in groups
for dim in getattr(self, group)]
elif isinstance(selection, list):
dims = [dim for group in selection
for dim in getattr(self, '%sdims' % aliases.get(group))]
elif aliases.get(selection) in lambdas:
selection = aliases.get(selection, selection)
lmbd, kwargs = lambdas[selection]
key_traversal = self.traverse(lmbd, **kwargs)
dims = [dim for keydims in key_traversal for dim in keydims]
else:
raise KeyError("Invalid selection %r, valid selections include"
"'all', 'value' and 'key' dimensions" % repr(selection))
return [(dim.label if label == 'long' else dim.name)
if label else dim for dim in dims]
def get_dimension(self, dimension, default=None, strict=False):
"""
Access a Dimension object by name or index.
Returns the default value if the dimension is not found and
strict is False. If strict is True, a KeyError is raised
instead.
"""
if dimension is not None and not isinstance(dimension, (int, basestring, Dimension)):
raise TypeError('Dimension lookup supports int, string, '
'and Dimension instances, cannot lookup '
'Dimensions using %s type.' % type(dimension).__name__)
all_dims = self.dimensions()
if isinstance(dimension, int):
if 0 <= dimension < len(all_dims):
return all_dims[dimension]
elif strict:
raise KeyError("Dimension %r not found" % dimension)
else:
return default
dimension = dimension_name(dimension)
name_map = {dim.name: dim for dim in all_dims}
name_map.update({dim.label: dim for dim in all_dims})
name_map.update({dimension_sanitizer(dim.name): dim for dim in all_dims})
if strict and dimension not in name_map:
raise KeyError("Dimension %r not found." % dimension)
else:
return name_map.get(dimension, default)
def get_dimension_index(self, dim):
"""
Returns the index of the requested dimension.
"""
if isinstance(dim, int):
if (dim < (self.ndims + len(self.vdims)) or
dim < len(self.dimensions())):
return dim
else:
                raise IndexError('Dimension index out of bounds')
dim = dimension_name(dim)
try:
dimensions = self.kdims+self.vdims
return [i for i, d in enumerate(dimensions) if d == dim][0]
except IndexError:
raise Exception("Dimension %s not found in %s." %
(dim, self.__class__.__name__))
def get_dimension_type(self, dim):
"""
        Returns the declared type of the specified Dimension, if any.
        Otherwise, if the values along the dimension share a single
        type, that type is returned; if not, None is returned.
"""
dim_obj = self.get_dimension(dim)
if dim_obj and dim_obj.type is not None:
return dim_obj.type
dim_vals = [type(v) for v in self.dimension_values(dim)]
if len(set(dim_vals)) == 1:
return dim_vals[0]
else:
return None
def __getitem__(self, key):
"""
Multi-dimensional indexing semantics is determined by the list
of key dimensions. For instance, the first indexing component
will index the first key dimension.
After the key dimensions are given, *either* a value dimension
name may follow (if there are multiple value dimensions) *or*
deep dimensions may then be listed (for applicable deep
dimensions).
"""
return self
def select(self, selection_specs=None, **kwargs):
"""
Allows slicing or indexing into the Dimensioned object
by supplying the dimension and index/slice as key
value pairs. Select descends recursively through the
data structure applying the key dimension selection.
The 'value' keyword allows selecting the
value dimensions on objects which have any declared.
The selection may also be selectively applied to
specific objects by supplying the selection_specs
as an iterable of type.group.label specs, types or
functions.
"""
# Apply all indexes applying on this object
vdims = self.vdims+['value'] if self.vdims else []
kdims = self.kdims
local_kwargs = {k: v for k, v in kwargs.items()
if k in kdims+vdims}
# Check selection_spec applies
if selection_specs is not None:
if not isinstance(selection_specs, (list, tuple)):
selection_specs = [selection_specs]
matches = any(self.matches(spec)
for spec in selection_specs)
else:
matches = True
# Apply selection to self
if local_kwargs and matches:
ndims = self.ndims
if any(d in self.vdims for d in kwargs):
ndims = len(self.kdims+self.vdims)
select = [slice(None) for _ in range(ndims)]
for dim, val in local_kwargs.items():
if dim == 'value':
select += [val]
else:
if isinstance(val, tuple): val = slice(*val)
select[self.get_dimension_index(dim)] = val
if self._deep_indexable:
selection = self.get(tuple(select), None)
if selection is None:
selection = self.clone(shared_data=False)
else:
selection = self[tuple(select)]
else:
selection = self
if not isinstance(selection, Dimensioned):
return selection
elif type(selection) is not type(self) and isinstance(selection, Dimensioned):
# Apply the selection on the selected object of a different type
dimensions = selection.dimensions() + ['value']
if any(kw in dimensions for kw in kwargs):
selection = selection.select(selection_specs, **kwargs)
elif isinstance(selection, Dimensioned) and selection._deep_indexable:
# Apply the deep selection on each item in local selection
items = []
for k, v in selection.items():
dimensions = v.dimensions() + ['value']
if any(kw in dimensions for kw in kwargs):
items.append((k, v.select(selection_specs, **kwargs)))
else:
items.append((k, v))
selection = selection.clone(items)
return selection
def dimension_values(self, dimension, expanded=True, flat=True):
"""
Returns the values along the specified dimension. This method
        must be implemented for all Dimensioned types.
"""
val = self._cached_constants.get(dimension, None)
if val:
return np.array([val])
else:
raise Exception("Dimension %s not found in %s." %
(dimension, self.__class__.__name__))
def range(self, dimension, data_range=True):
"""
Returns the range of values along the specified dimension.
If data_range is True, the data may be used to try and infer
the appropriate range. Otherwise, (None,None) is returned to
indicate that no range is defined.
"""
dimension = self.get_dimension(dimension)
if dimension is None:
return (None, None)
elif all(isfinite(v) for v in dimension.range):
return dimension.range
elif data_range:
if dimension in self.kdims+self.vdims:
dim_vals = self.dimension_values(dimension.name)
lower, upper = find_range(dim_vals)
else:
dname = dimension.name
match_fn = lambda x: dname in x.kdims + x.vdims
range_fn = lambda x: x.range(dname)
ranges = self.traverse(range_fn, [match_fn])
lower, upper = max_range(ranges)
else:
lower, upper = (np.NaN, np.NaN)
return dimension_range(lower, upper, dimension)
def __repr__(self):
return PrettyPrinter.pprint(self)
def __str__(self):
return repr(self)
def __unicode__(self):
return unicode(PrettyPrinter.pprint(self))
def __call__(self, options=None, **kwargs):
if config.warn_options_call:
self.warning('Use of __call__ to set options will be deprecated '
'in future. Use the equivalent opts method instead.')
return self.opts(options, **kwargs)
def opts(self, options=None, backend=None, clone=True, **kwargs):
"""
        Applies options, grouped by options group (e.g. 'plot' and
        'style'), on an object or nested group of objects, returning
        a new object with the options applied. If the options are to
        be set directly on the object a
simple format may be used, e.g.:
obj.opts(style={'cmap': 'viridis'}, plot={'show_title': False})
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
obj.opts({'Image': {'plot': {'show_title': False},
                    'style': {'cmap': 'viridis'}}})
If no opts are supplied all options on the object will be reset.
Disabling clone will modify the object inplace.
"""
backend = backend or Store.current_backend
if isinstance(options, basestring):
from ..util.parser import OptsSpec
try:
options = OptsSpec.parse(options)
except SyntaxError:
options = OptsSpec.parse(
'{clsname} {options}'.format(clsname=self.__class__.__name__,
options=options))
backend_options = Store.options(backend=backend)
groups = set(backend_options.groups.keys())
if kwargs and set(kwargs) <= groups:
if not all(isinstance(v, dict) for v in kwargs.values()):
raise Exception("The %s options must be specified using dictionary groups" %
','.join(repr(k) for k in kwargs.keys()))
# Check whether the user is specifying targets (such as 'Image.Foo')
entries = backend_options.children
targets = [k.split('.')[0] in entries for grp in kwargs.values() for k in grp]
if any(targets) and not all(targets):
raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.")
elif not any(targets):
# Not targets specified - add current object as target
sanitized_group = group_sanitizer(self.group)
if self.label:
identifier = ('%s.%s.%s' % (self.__class__.__name__,
sanitized_group,
label_sanitizer(self.label)))
elif sanitized_group != self.__class__.__name__:
identifier = '%s.%s' % (self.__class__.__name__, sanitized_group)
else:
identifier = self.__class__.__name__
kwargs = {k:{identifier:v} for k,v in kwargs.items()}
obj = self
if options is None and kwargs == {}:
if clone:
obj = self.map(lambda x: x.clone(id=None))
else:
self.map(lambda x: setattr(x, 'id', None))
elif clone:
obj = self.map(lambda x: x.clone(id=x.id))
StoreOptions.set_options(obj, options, backend=backend, **kwargs)
return obj
def options(self, options=None, backend=None, clone=True, **kwargs):
"""
Applies options on an object or nested group of objects in a
flat format returning a new object with the options
applied. If the options are to be set directly on the object a
simple format may be used, e.g.:
obj.options(cmap='viridis', show_title=False)
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
obj.options('Image', cmap='viridis', show_title=False)
or using:
obj.options({'Image': dict(cmap='viridis', show_title=False)})
If no options are supplied all options on the object will be reset.
Disabling clone will modify the object inplace.
"""
if isinstance(options, basestring):
options = {options: kwargs}
elif options and kwargs:
raise ValueError("Options must be defined in one of two formats."
"Either supply keywords defining the options for "
"the current object, e.g. obj.options(cmap='viridis'), "
"or explicitly define the type, e.g."
"obj.options({'Image': {'cmap': 'viridis'}})."
"Supplying both formats is not supported.")
elif kwargs:
options = {type(self).__name__: kwargs}
from ..util import opts
if options is None:
expanded = {}
else:
expanded = opts.expand_options(options, backend)
return self.opts(expanded, backend, clone)
def _repr_mimebundle_(self, include=None, exclude=None):
"""
Resolves the class hierarchy for the class rendering the
object using any display hooks registered on Store.display
hooks. The output of all registered display_hooks is then
combined and returned.
"""
return Store.render(self)
class ViewableElement(Dimensioned):
"""
A ViewableElement is a dimensioned datastructure that may be
associated with a corresponding atomic visualization. An atomic
visualization will display the data on a single set of axes
(i.e. excludes multiple subplots that are displayed at once). The
only new parameter introduced by ViewableElement is the title
associated with the object for display.
"""
__abstract = True
_auxiliary_component = False
group = param.String(default='ViewableElement', constant=True)
| 1 | 21,411 | Seems like an oversight but I don't think the super would do anything else in this case unless I'm mistaken. | holoviz-holoviews | py |
@@ -30,8 +30,8 @@ namespace Examples.Console
internal class InstrumentationWithActivitySource : IDisposable
{
private const string RequestPath = "/api/request";
- private SampleServer server = new SampleServer();
- private SampleClient client = new SampleClient();
+ private readonly SampleServer server = new SampleServer();
+ private readonly SampleClient client = new SampleClient();
public void Start(ushort port = 19999)
{ | 1 | // <copyright file="InstrumentationWithActivitySource.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace Examples.Console
{
internal class InstrumentationWithActivitySource : IDisposable
{
private const string RequestPath = "/api/request";
private SampleServer server = new SampleServer();
private SampleClient client = new SampleClient();
public void Start(ushort port = 19999)
{
var url = $"http://localhost:{port.ToString(CultureInfo.InvariantCulture)}{RequestPath}/";
this.server.Start(url);
this.client.Start(url);
}
public void Dispose()
{
this.client.Dispose();
this.server.Dispose();
}
private class SampleServer : IDisposable
{
private HttpListener listener = new HttpListener();
public void Start(string url)
{
this.listener.Prefixes.Add(url);
this.listener.Start();
Task.Run(() =>
{
using var source = new ActivitySource("Samples.SampleServer");
while (this.listener.IsListening)
{
try
{
var context = this.listener.GetContext();
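                            // Wrap each incoming request in a Server-kind Activity named
                            // "<HTTP method>:<absolute path>" so it shows up as a server span.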
using var activity = source.StartActivity(
$"{context.Request.HttpMethod}:{context.Request.Url.AbsolutePath}",
ActivityKind.Server);
var headerKeys = context.Request.Headers.AllKeys;
foreach (var headerKey in headerKeys)
{
string headerValue = context.Request.Headers[headerKey];
activity?.SetTag($"http.header.{headerKey}", headerValue);
}
string requestContent;
using (var childSpan = source.StartActivity("ReadStream", ActivityKind.Consumer))
using (var reader = new StreamReader(context.Request.InputStream, context.Request.ContentEncoding))
{
requestContent = reader.ReadToEnd();
                                childSpan?.AddEvent(new ActivityEvent("StreamReader.ReadToEnd"));
}
activity?.SetTag("request.content", requestContent);
activity?.SetTag("request.length", requestContent.Length.ToString());
var echo = Encoding.UTF8.GetBytes("echo: " + requestContent);
context.Response.ContentEncoding = Encoding.UTF8;
context.Response.ContentLength64 = echo.Length;
context.Response.OutputStream.Write(echo, 0, echo.Length);
context.Response.Close();
}
catch (Exception)
{
// expected when closing the listener.
}
}
});
}
public void Dispose()
{
((IDisposable)this.listener).Dispose();
}
}
private class SampleClient : IDisposable
{
private CancellationTokenSource cts;
private Task requestTask;
public void Start(string url)
{
this.cts = new CancellationTokenSource();
var cancellationToken = this.cts.Token;
this.requestTask = Task.Run(
async () =>
{
using var source = new ActivitySource("Samples.SampleClient");
using var client = new HttpClient();
var count = 1;
while (!cancellationToken.IsCancellationRequested)
{
var content = new StringContent($"client message: {DateTime.Now}", Encoding.UTF8);
using (var activity = source.StartActivity("POST:" + RequestPath, ActivityKind.Client))
{
count++;
activity?.AddEvent(new ActivityEvent("PostAsync:Started"));
using var response = await client.PostAsync(url, content, cancellationToken).ConfigureAwait(false);
activity?.AddEvent(new ActivityEvent("PostAsync:Ended"));
activity?.SetTag("http.status_code", (int)response.StatusCode);
var responseContent = await response.Content.ReadAsStringAsync();
activity?.SetTag("response.content", responseContent);
activity?.SetTag("response.length", responseContent.Length.ToString(CultureInfo.InvariantCulture));
foreach (var header in response.Headers)
{
if (header.Value is IEnumerable<object> enumerable)
{
activity?.SetTag($"http.header.{header.Key}", string.Join(",", enumerable));
}
else
{
activity?.SetTag($"http.header.{header.Key}", header.Value.ToString());
}
}
}
try
{
await Task.Delay(TimeSpan.FromSeconds(1), cancellationToken).ConfigureAwait(false);
}
catch (TaskCanceledException)
{
return;
}
}
},
cancellationToken);
}
public void Dispose()
{
if (this.cts != null)
{
this.cts.Cancel();
this.requestTask.Wait();
this.requestTask.Dispose();
this.cts.Dispose();
}
}
}
}
}
| 1 | 20,460 | Curious, do we consider this as pedantic for example code? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -609,7 +609,7 @@ class AdminController extends Controller
$formType = $this->useLegacyFormComponent() ? 'easyadmin' : 'JavierEguiluz\\Bundle\\EasyAdminBundle\\Form\\Type\\EasyAdminFormType';
- return $this->get('form.factory')->createNamedBuilder('form', $formType, $entity, $formOptions);
+ return $this->get('form.factory')->createNamedBuilder(strtolower($this->entity['name']), $formType, $entity, $formOptions);
}
/** | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Platforms\PostgreSqlPlatform;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\QueryBuilder;
use JavierEguiluz\Bundle\EasyAdminBundle\Event\EasyAdminEvents;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\UndefinedEntityException;
use Pagerfanta\Adapter\DoctrineORMAdapter;
use Pagerfanta\Pagerfanta;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
/**
* The controller used to render all the default EasyAdmin actions.
*
* @author Javier Eguiluz <[email protected]>
*/
class AdminController extends Controller
{
protected $config;
protected $entity = array();
/** @var Request */
protected $request;
/** @var EntityManager */
protected $em;
/**
* @Route("/", name="easyadmin")
* @Route("/", name="admin")
*
* The 'admin' route is deprecated since version 1.8.0 and it will be removed in 2.0.
*
* @param Request $request
*
* @return RedirectResponse|Response
*/
public function indexAction(Request $request)
{
$this->initialize($request);
if (null === $request->query->get('entity')) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->config['default_entity_name'])));
}
$action = $request->query->get('action', 'list');
if (!$this->isActionAllowed($action)) {
throw new ForbiddenActionException(array('action' => $action, 'entity' => $this->entity['name']));
}
return $this->executeDynamicMethod($action.'<EntityName>Action');
}
/**
* It renders the main CSS applied to the backend design. This controller
* allows to generate dynamic CSS files that use variables without the need
* to set up a CSS preprocessing toolchain.
*
* @Route("/_css/admin.css", name="_easyadmin_render_css")
*
* @return Response
*/
public function renderCssAction()
{
$config = $this->container->getParameter('easyadmin.config');
$cssContent = $this->renderView('@EasyAdmin/css/admin.css.twig', array(
'brand_color' => $config['design']['brand_color'],
'color_scheme' => $config['design']['color_scheme'],
));
return Response::create($cssContent, 200, array('Content-Type' => 'text/css'))
->setPublic()
->setSharedMaxAge(600)
;
}
/**
* Utility method which initializes the configuration of the entity on which
* the user is performing the action.
*
* @param Request $request
*/
protected function initialize(Request $request)
{
$this->dispatch(EasyAdminEvents::PRE_INITIALIZE);
$this->config = $this->container->getParameter('easyadmin.config');
if (0 === count($this->config['entities'])) {
throw new NoEntitiesConfiguredException();
}
// this condition happens when accessing the backend homepage, which
// then redirects to the 'list' action of the first configured entity
if (null === $entityName = $request->query->get('entity')) {
return;
}
if (!array_key_exists($entityName, $this->config['entities'])) {
throw new UndefinedEntityException(array('entity_name' => $entityName));
}
$this->entity = $this->get('easyadmin.configurator')->getEntityConfiguration($entityName);
if (!$request->query->has('sortField')) {
$request->query->set('sortField', $this->entity['primary_key_field_name']);
}
if (!$request->query->has('sortDirection') || !in_array(strtoupper($request->query->get('sortDirection')), array('ASC', 'DESC'))) {
$request->query->set('sortDirection', 'DESC');
}
$this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']);
$this->request = $request;
$this->dispatch(EasyAdminEvents::POST_INITIALIZE);
}
protected function dispatch($eventName, array $arguments = array())
{
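        // Merge the shared context (config, entity manager, entity config and
        // request) into the event arguments and dispatch a GenericEvent whose
        // subject is the paginator when one is supplied, or the entity otherwise.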
$arguments = array_replace(array(
'config' => $this->config,
'em' => $this->em,
'entity' => $this->entity,
'request' => $this->request,
), $arguments);
$subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity'];
$event = new GenericEvent($subject, $arguments);
$this->get('event_dispatcher')->dispatch($eventName, $event);
}
/**
* The method that is executed when the user performs a 'list' action on an entity.
*
* @return Response
*/
protected function listAction()
{
$this->dispatch(EasyAdminEvents::PRE_LIST);
$fields = $this->entity['list']['fields'];
$paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'));
$this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(),
));
}
/**
* The method that is executed when the user performs a 'edit' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function editAction()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
if ($this->request->isXmlHttpRequest() && $property = $this->request->query->get('property')) {
$newValue = 'true' === strtolower($this->request->query->get('newValue'));
$fieldsMetadata = $this->entity['list']['fields'];
if (!isset($fieldsMetadata[$property]) || 'toggle' != $fieldsMetadata[$property]['dataType']) {
throw new \Exception(sprintf('The type of the "%s" property is not "toggle".', $property));
}
$this->updateEntityProperty($entity, $property, $newValue);
return new Response((string) $newValue);
}
$fields = $this->entity['edit']['fields'];
$editForm = $this->executeDynamicMethod('create<EntityName>EditForm', array($entity, $fields));
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$editForm->handleRequest($this->request);
if ($editForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity));
$this->executeDynamicMethod('preUpdate<EntityName>Entity', array($entity));
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity));
$refererUrl = $this->request->query->get('referer', '');
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_EDIT);
return $this->render($this->entity['templates']['edit'], array(
'form' => $editForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'show' action on an entity.
*
* @return Response
*/
protected function showAction()
{
$this->dispatch(EasyAdminEvents::PRE_SHOW);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['show']['fields'];
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$this->dispatch(EasyAdminEvents::POST_SHOW, array(
'deleteForm' => $deleteForm,
'fields' => $fields,
'entity' => $entity,
));
return $this->render($this->entity['templates']['show'], array(
'entity' => $entity,
'fields' => $fields,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'new' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function newAction()
{
$this->dispatch(EasyAdminEvents::PRE_NEW);
$entity = $this->executeDynamicMethod('createNew<EntityName>Entity');
$easyadmin = $this->request->attributes->get('easyadmin');
$easyadmin['item'] = $entity;
$this->request->attributes->set('easyadmin', $easyadmin);
$fields = $this->entity['new']['fields'];
$newForm = $this->executeDynamicMethod('create<EntityName>NewForm', array($entity, $fields));
$newForm->handleRequest($this->request);
if ($newForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity));
$this->executeDynamicMethod('prePersist<EntityName>Entity', array($entity));
$this->em->persist($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity));
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_NEW, array(
'entity_fields' => $fields,
'form' => $newForm,
'entity' => $entity,
));
return $this->render($this->entity['templates']['new'], array(
'form' => $newForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
));
}
/**
* The method that is executed when the user performs a 'delete' action to
* remove any entity.
*
* @return RedirectResponse
*/
protected function deleteAction()
{
$this->dispatch(EasyAdminEvents::PRE_DELETE);
if ('DELETE' !== $this->request->getMethod()) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$id = $this->request->query->get('id');
$form = $this->createDeleteForm($this->entity['name'], $id);
$form->handleRequest($this->request);
if ($form->isValid()) {
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity));
$this->executeDynamicMethod('preRemove<EntityName>Entity', array($entity));
$this->em->remove($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity));
}
$refererUrl = $this->request->query->get('referer', '');
$this->dispatch(EasyAdminEvents::POST_DELETE);
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
/**
* The method that is executed when the user performs a query on an entity.
*
* @return Response
*/
protected function searchAction()
{
$this->dispatch(EasyAdminEvents::PRE_SEARCH);
$searchableFields = $this->entity['search']['fields'];
$paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results']);
$fields = $this->entity['list']['fields'];
$this->dispatch(EasyAdminEvents::POST_SEARCH, array(
'fields' => $fields,
'paginator' => $paginator,
));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(),
));
}
/**
     * It updates the value of the given property of the given entity to the given new value.
*
* @param mixed $entity The instance of the entity to modify
* @param string $property The name of the property to change
* @param bool $value The new value of the property
*/
private function updateEntityProperty($entity, $property, $value)
{
$entityConfig = $this->entity;
// the method_exists() check is needed because Symfony 2.3 doesn't have isWritable() method
if (method_exists($this->get('property_accessor'), 'isWritable') && !$this->get('property_accessor')->isWritable($entity, $property)) {
throw new \Exception(sprintf('The "%s" property of the "%s" entity is not writable.', $property, $entityConfig['name']));
}
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $value));
$this->get('property_accessor')->setValue($entity, $property, $value);
$this->em->persist($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $value));
$this->dispatch(EasyAdminEvents::POST_EDIT);
}
/**
* Creates a new object of the current managed entity.
* This method is mostly here for override convenience, because it allows
* the user to use his own method to customize the entity instantiation.
*
* @return object
*/
protected function createNewEntity()
{
$entityFullyQualifiedClassName = $this->entity['class'];
return new $entityFullyQualifiedClassName();
}
/**
* Allows applications to modify the entity associated with the item being
* created before persisting it.
*
* @param object $entity
*/
protected function prePersistEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* edited before persisting it.
*
* @param object $entity
*/
protected function preUpdateEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* deleted before removing it.
*
* @param object $entity
*/
protected function preRemoveEntity($entity)
{
}
/**
* Performs a database query to get all the records related to the given
* entity. It supports pagination and field sorting.
*
* @param string $entityClass
* @param int $page
* @param int $maxPerPage
* @param string|null $sortField
* @param string|null $sortDirection
*
* @return Pagerfanta The paginated query results
*/
protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null)
{
if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) {
$sortDirection = 'DESC';
}
$queryBuilder = $this->executeDynamicMethod('create<EntityName>ListQueryBuilder', array($entityClass, $sortDirection, $sortField));
$this->dispatch(EasyAdminEvents::POST_LIST_QUERY_BUILDER, array(
'query_builder' => $queryBuilder,
'sort_field' => $sortField,
'sort_direction' => $sortDirection,
));
$paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
* Creates Query Builder instance for all the records.
*
* @param string $entityClass
* @param string $sortDirection
* @param string|null $sortField
*
* @return QueryBuilder The Query Builder instance
*/
protected function createListQueryBuilder($entityClass, $sortDirection, $sortField = null)
{
$queryBuilder = $this->em->createQueryBuilder()->select('entity')->from($entityClass, 'entity');
if (null !== $sortField) {
$queryBuilder->orderBy('entity.'.$sortField, $sortDirection);
}
return $queryBuilder;
}
/**
* Performs a database query based on the search query provided by the user.
* It supports pagination and field sorting.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
* @param int $page
* @param int $maxPerPage
*
* @return Pagerfanta The paginated query results
*/
protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15)
{
$queryBuilder = $this->executeDynamicMethod('create<EntityName>SearchQueryBuilder', array($entityClass, $searchQuery, $searchableFields));
$this->dispatch(EasyAdminEvents::POST_SEARCH_QUERY_BUILDER, array(
'query_builder' => $queryBuilder,
'search_query' => $searchQuery,
'searchable_fields' => $searchableFields,
));
$paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
* Creates Query Builder instance for search query.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
*
* @return QueryBuilder The Query Builder instance
*/
protected function createSearchQueryBuilder($entityClass, $searchQuery, array $searchableFields)
{
$databaseIsPostgreSql = $this->isPostgreSqlUsedByEntity($entityClass);
$queryBuilder = $this->em->createQueryBuilder()->select('entity')->from($entityClass, 'entity');
$queryConditions = $queryBuilder->expr()->orX();
$queryParameters = array();
foreach ($searchableFields as $name => $metadata) {
$isNumericField = in_array($metadata['dataType'], array('integer', 'number', 'smallint', 'bigint', 'decimal', 'float'));
$isTextField = in_array($metadata['dataType'], array('string', 'text', 'guid'));
if (is_numeric($searchQuery) && $isNumericField) {
$queryConditions->add(sprintf('entity.%s = :exact_query', $name));
$queryParameters['exact_query'] = 0 + $searchQuery; // adding '0' turns the string into a numeric value
} elseif ($isTextField) {
$queryConditions->add(sprintf('entity.%s LIKE :fuzzy_query', $name));
$queryParameters['fuzzy_query'] = '%'.$searchQuery.'%';
} else {
// PostgreSQL doesn't allow to compare string values with non-string columns (e.g. 'id')
if ($databaseIsPostgreSql) {
continue;
}
$queryConditions->add(sprintf('entity.%s IN (:words)', $name));
$queryParameters['words'] = explode(' ', $searchQuery);
}
}
$queryBuilder->add('where', $queryConditions)->setParameters($queryParameters);
return $queryBuilder;
}
/**
* Creates the form used to edit an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createEditForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'edit');
}
/**
* Creates the form used to create an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createNewForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'new');
}
/**
* Creates the form builder of the form used to create or edit the given entity.
*
* @param object $entity
* @param string $view The name of the view where this form is used ('new' or 'edit')
*
* @return FormBuilder
*/
protected function createEntityFormBuilder($entity, $view)
{
$formOptions = $this->executeDynamicMethod('get<EntityName>EntityFormOptions', array($entity, $view));
$formType = $this->useLegacyFormComponent() ? 'easyadmin' : 'JavierEguiluz\\Bundle\\EasyAdminBundle\\Form\\Type\\EasyAdminFormType';
return $this->get('form.factory')->createNamedBuilder('form', $formType, $entity, $formOptions);
}
/**
* Retrieves the list of form options before sending them to the form builder.
* This allows adding dynamic logic to the default form options.
*
* @param object $entity
* @param string $view
*
* @return array
*/
protected function getEntityFormOptions($entity, $view)
{
$formOptions = $this->entity[$view]['form_options'];
$formOptions['entity'] = $this->entity['name'];
$formOptions['view'] = $view;
return $formOptions;
}
/**
* Creates the form object used to create or edit the given entity.
*
* @param object $entity
* @param array $entityProperties
* @param string $view
*
* @return Form
*
* @throws \Exception
*/
protected function createEntityForm($entity, array $entityProperties, $view)
{
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) {
$form = $this->{$customMethodName}($entity, $entityProperties, $view);
if (!$form instanceof FormInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormInterface, "%s" given.',
$customMethodName, is_object($form) ? get_class($form) : gettype($form)
));
}
return $form;
}
$formBuilder = $this->executeDynamicMethod('create<EntityName>EntityFormBuilder', array($entity, $view));
if (!$formBuilder instanceof FormBuilderInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormBuilderInterface, "%s" given.',
'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder)
));
}
return $formBuilder->getForm();
}
/**
* Creates the form used to delete an entity. It must be a form because
     * the deletion of the entity is always performed with the 'DELETE' HTTP method,
* which requires a form to work in the current browsers.
*
* @param string $entityName
* @param int $entityId
*
* @return Form
*/
protected function createDeleteForm($entityName, $entityId)
{
/** @var FormBuilder $formBuilder */
$formBuilder = $this->get('form.factory')->createNamedBuilder('delete_form')
->setAction($this->generateUrl('easyadmin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId)))
->setMethod('DELETE')
;
$submitButtonType = $this->useLegacyFormComponent() ? 'submit' : 'Symfony\\Component\\Form\\Extension\\Core\\Type\\SubmitType';
$formBuilder->add('submit', $submitButtonType, array('label' => 'Delete'));
return $formBuilder->getForm();
}
/**
* Utility method that checks if the given action is allowed for
* the current entity.
*
* @param string $actionName
*
* @return bool
*/
protected function isActionAllowed($actionName)
{
return false === in_array($actionName, $this->entity['disabled_actions'], true);
}
/**
* Utility shortcut to render an error when the requested action is not allowed
* for the given entity.
*
* @param string $action
*
* @deprecated Use the ForbiddenException instead of this method.
*
* @return Response
*/
protected function renderForbiddenActionError($action)
{
return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), new Response('', 403));
}
/**
* Returns true if the data of the given entity are stored in a database
     * of type PostgreSQL.
*
* @param string $entityClass
*
* @return bool
*/
private function isPostgreSqlUsedByEntity($entityClass)
{
$em = $this->get('doctrine')->getManagerForClass($entityClass);
return $em->getConnection()->getDatabasePlatform() instanceof PostgreSqlPlatform;
}
/**
* Given a method name pattern, it looks for the customized version of that
* method (based on the entity name) and executes it. If the custom method
* does not exist, it executes the regular method.
*
* For example:
* executeDynamicMethod('create<EntityName>Entity') and the entity name is 'User'
* if 'createUserEntity()' exists, execute it; otherwise execute 'createEntity()'
*
* @param string $methodNamePattern The pattern of the method name (dynamic parts are enclosed with <> angle brackets)
* @param array $arguments The arguments passed to the executed method
*
* @return mixed
*/
private function executeDynamicMethod($methodNamePattern, array $arguments = array())
{
$methodName = str_replace('<EntityName>', $this->entity['name'], $methodNamePattern);
if (!is_callable(array($this, $methodName))) {
$methodName = str_replace('<EntityName>', '', $methodNamePattern);
}
return call_user_func_array(array($this, $methodName), $arguments);
}
/**
* Returns true if the legacy Form component is being used by the application.
*
* @return bool
*/
private function useLegacyFormComponent()
{
return false === class_exists('Symfony\\Component\\Form\\Util\\StringUtil');
}
}
| 1 | 9,846 | Should we convert the entity name to snake case using `Doctrine\Common\Inflector\Inflector::tableize` or similar ? Should a `snaked_name` or whatever be part of the entity metadata in order to use it for form names, ids, & other html attributes ? | EasyCorp-EasyAdminBundle | php |
@@ -12,9 +12,15 @@ func NewClientFake(IP string) Client {
type clientFake struct {
ip string
+ outboundIp string
}
-func (client *clientFake) GetIp() (string, error) {
+func (client *clientFake) GetPublicIP() (string, error) {
+ log.Info(IPIFY_API_LOG_PREFIX, "IP faked: ", client.ip)
+ return client.ip, nil
+}
+
+func (client *clientFake) GetOutboundIP() (string, error) {
log.Info(IPIFY_API_LOG_PREFIX, "IP faked: ", client.ip)
return client.ip, nil
} | 1 | package ipify
import (
log "github.com/cihub/seelog"
)
func NewClientFake(IP string) Client {
return &clientFake{
ip: IP,
}
}
type clientFake struct {
ip string
}
func (client *clientFake) GetIp() (string, error) {
log.Info(IPIFY_API_LOG_PREFIX, "IP faked: ", client.ip)
return client.ip, nil
}
| 1 | 9,838 | Should be `client.outboundIp` | mysteriumnetwork-node | go |
@@ -31,6 +31,9 @@
// THE POSSIBILITY OF SUCH DAMAGE.
//
+using System.IO;
+using System.Threading.Tasks;
+
namespace NLog.UnitTests.LayoutRenderers
{
using System; | 1 | //
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.UnitTests.LayoutRenderers
{
using System;
using System.Reflection;
using System.Threading;
using Xunit;
public class CallSiteTests : NLogTestBase
{
#if !SILVERLIGHT
[Fact]
public void HiddenAssemblyTest()
{
const string code = @"
namespace Foo
{
public class HiddenAssemblyLogger
{
public void LogDebug(NLog.Logger logger)
{
logger.Debug(""msg"");
}
}
}
";
var provider = new Microsoft.CSharp.CSharpCodeProvider();
var parameters = new System.CodeDom.Compiler.CompilerParameters();
// reference the NLog dll
parameters.ReferencedAssemblies.Add("NLog.dll");
// the assembly should be generated in memory
parameters.GenerateInMemory = true;
// generate a dll instead of an executable
parameters.GenerateExecutable = false;
// compile code and generate assembly
System.CodeDom.Compiler.CompilerResults results = provider.CompileAssemblyFromSource(parameters, code);
Assert.False(results.Errors.HasErrors);
// create nlog configuration
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
// create logger
Logger logger = LogManager.GetLogger("A");
// load HiddenAssemblyLogger type
Assembly compiledAssembly = results.CompiledAssembly;
Type hiddenAssemblyLoggerType = compiledAssembly.GetType("Foo.HiddenAssemblyLogger");
Assert.NotNull(hiddenAssemblyLoggerType);
// load methodinfo
MethodInfo logDebugMethod = hiddenAssemblyLoggerType.GetMethod("LogDebug");
Assert.NotNull(logDebugMethod);
// instantiate the HiddenAssemblyLogger from previously generated assembly
object instance = Activator.CreateInstance(hiddenAssemblyLoggerType);
// Add the previously generated assembly to the "blacklist"
LogManager.AddHiddenAssembly(compiledAssembly);
// call the log method
logDebugMethod.Invoke(instance, new object[] { logger });
MethodBase currentMethod = MethodBase.GetCurrentMethod();
AssertDebugLastMessage("debug", currentMethod.DeclaringType.FullName + "." + currentMethod.Name + " msg");
}
#endif
#if !SILVERLIGHT
#if MONO
[Fact(Skip="Not working under MONO - not sure if unit test is wrong, or the code")]
#else
[Fact]
#endif
public void LineNumberTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:filename=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
#if !NET4_5 && !MONO
#line 100000
#endif
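            // Where the #line pragma above is active (builds that are neither NET4_5 nor MONO),
            // it shifts the compiler-reported line number of the call below into the 100000 range.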
logger.Debug("msg");
var linenumber = GetPrevLineNumber();
string lastMessage = GetDebugLastMessage("debug");
// There's a difference in handling line numbers between .NET and Mono
// We're just interested in checking if it's above 100000
            Assert.True(lastMessage.IndexOf("callsitetests.cs:" + linenumber, StringComparison.OrdinalIgnoreCase) >= 0, "Invalid line number. Expected line number " + linenumber + ", got: " + lastMessage);
#if !NET4_5 && !MONO
#line default
#endif
}
#endif
[Fact]
public void MethodNameTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
AssertDebugLastMessage("debug", currentMethod.DeclaringType.FullName + "." + currentMethod.Name + " msg");
}
[Fact]
public void ClassNameTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=false} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
AssertDebugLastMessage("debug", currentMethod.DeclaringType.FullName + " msg");
}
[Fact]
public void ClassNameWithPaddingTestPadLeftAlignLeftTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=false:padding=3:fixedlength=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
AssertDebugLastMessage("debug", currentMethod.DeclaringType.FullName.Substring(0, 3) + " msg");
}
[Fact]
public void ClassNameWithPaddingTestPadLeftAlignRightTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=false:padding=3:fixedlength=true:alignmentOnTruncation=right} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
var typeName = currentMethod.DeclaringType.FullName;
AssertDebugLastMessage("debug", typeName.Substring(typeName.Length - 3) + " msg");
}
[Fact]
public void ClassNameWithPaddingTestPadRightAlignLeftTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=false:padding=-3:fixedlength=true:alignmentOnTruncation=left} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
AssertDebugLastMessage("debug", currentMethod.DeclaringType.FullName.Substring(0, 3) + " msg");
}
[Fact]
public void ClassNameWithPaddingTestPadRightAlignRightTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=false:padding=-3:fixedlength=true:alignmentOnTruncation=right} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
MethodBase currentMethod = MethodBase.GetCurrentMethod();
var typeName = currentMethod.DeclaringType.FullName;
AssertDebugLastMessage("debug", typeName.Substring(typeName.Length - 3) + " msg");
}
[Fact]
public void MethodNameWithPaddingTestPadLeftAlignLeftTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=false:methodname=true:padding=16:fixedlength=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
AssertDebugLastMessage("debug", "MethodNameWithPa msg");
}
[Fact]
public void MethodNameWithPaddingTestPadLeftAlignRightTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=false:methodname=true:padding=16:fixedlength=true:alignmentOnTruncation=right} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
AssertDebugLastMessage("debug", "ftAlignRightTest msg");
}
[Fact]
public void MethodNameWithPaddingTestPadRightAlignLeftTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=false:methodname=true:padding=-16:fixedlength=true:alignmentOnTruncation=left} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
AssertDebugLastMessage("debug", "MethodNameWithPa msg");
}
[Fact]
public void MethodNameWithPaddingTestPadRightAlignRightTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=false:methodname=true:padding=-16:fixedlength=true:alignmentOnTruncation=right} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
AssertDebugLastMessage("debug", "htAlignRightTest msg");
}
[Fact]
public void GivenSkipFrameNotDefined_WhenLogging_ThenLogFirstUserStackFrame()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
logger.Debug("msg");
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests.GivenSkipFrameNotDefined_WhenLogging_ThenLogFirstUserStackFrame msg");
}
[Fact]
public void GivenOneSkipFrameDefined_WhenLogging_ShouldSkipOneUserStackFrame()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:skipframes=1} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
Action action = () => logger.Debug("msg");
action.Invoke();
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests.GivenOneSkipFrameDefined_WhenLogging_ShouldSkipOneUserStackFrame msg");
}
#if MONO
[Fact(Skip="Not working under MONO - not sure if unit test is wrong, or the code")]
#else
[Fact]
#endif
public void CleanMethodNamesOfAnonymousDelegatesTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:ClassName=false:CleanNamesOfAnonymousDelegates=true}' /></targets>
<rules>
<logger name='*' levels='Fatal' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
bool done = false;
ThreadPool.QueueUserWorkItem(
state =>
{
logger.Fatal("message");
done = true;
},
null);
while (done == false)
{
Thread.Sleep(10);
}
if (done == true)
{
AssertDebugLastMessage("debug", "CleanMethodNamesOfAnonymousDelegatesTest");
}
}
#if MONO
[Fact(Skip="Not working under MONO - not sure if unit test is wrong, or the code")]
#else
[Fact]
#endif
public void DontCleanMethodNamesOfAnonymousDelegatesTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:ClassName=false:CleanNamesOfAnonymousDelegates=false}' /></targets>
<rules>
<logger name='*' levels='Fatal' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
bool done = false;
ThreadPool.QueueUserWorkItem(
state =>
{
logger.Fatal("message");
done = true;
},
null);
while (done == false)
{
Thread.Sleep(10);
}
if (done == true)
{
string lastMessage = GetDebugLastMessage("debug");
Assert.True(lastMessage.StartsWith("<DontCleanMethodNamesOfAnonymousDelegatesTest>"));
}
}
#if MONO
[Fact(Skip="Not working under MONO - not sure if unit test is wrong, or the code")]
#else
[Fact]
#endif
public void CleanClassNamesOfAnonymousDelegatesTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:ClassName=true:MethodName=false:CleanNamesOfAnonymousDelegates=true}' /></targets>
<rules>
<logger name='*' levels='Fatal' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
bool done = false;
ThreadPool.QueueUserWorkItem(
state =>
{
logger.Fatal("message");
done = true;
},
null);
while (done == false)
{
Thread.Sleep(10);
}
if (done == true)
{
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests");
}
}
#if MONO
[Fact(Skip="Not working under MONO - not sure if unit test is wrong, or the code")]
#else
[Fact]
#endif
public void DontCleanClassNamesOfAnonymousDelegatesTest()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:ClassName=true:MethodName=false:CleanNamesOfAnonymousDelegates=false}' /></targets>
<rules>
<logger name='*' levels='Fatal' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("A");
bool done = false;
ThreadPool.QueueUserWorkItem(
state =>
{
logger.Fatal("message");
done = true;
},
null);
while (done == false)
{
Thread.Sleep(10);
}
if (done == true)
{
string lastMessage = GetDebugLastMessage("debug");
Assert.True(lastMessage.Contains("+<>"));
}
}
[Fact]
public void When_Wrapped_Ignore_Wrapper_Methods_In_Callstack()
{
            //namespace and name of current method
const string currentMethodFullName = "NLog.UnitTests.LayoutRenderers.CallSiteTests.When_Wrapped_Ignore_Wrapper_Methods_In_Callstack";
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite}|${message}' /></targets>
<rules>
<logger name='*' levels='Warn' writeTo='debug' />
</rules>
</nlog>");
var logger = LogManager.GetLogger("A");
logger.Warn("direct");
AssertDebugLastMessage("debug", string.Format("{0}|direct", currentMethodFullName));
LoggerTests.BaseWrapper wrappedLogger = new LoggerTests.MyWrapper();
wrappedLogger.Log("wrapped");
AssertDebugLastMessage("debug", string.Format("{0}|wrapped", currentMethodFullName));
}
        #region Composition unit test
[Fact]
public void When_WrappedInCompsition_Ignore_Wrapper_Methods_In_Callstack()
{
            //namespace and name of current method
const string currentMethodFullName = "NLog.UnitTests.LayoutRenderers.CallSiteTests.When_WrappedInCompsition_Ignore_Wrapper_Methods_In_Callstack";
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite}|${message}' /></targets>
<rules>
<logger name='*' levels='Warn' writeTo='debug' />
</rules>
</nlog>");
var logger = LogManager.GetLogger("A");
logger.Warn("direct");
AssertDebugLastMessage("debug", string.Format("{0}|direct", currentMethodFullName));
CompositeWrapper wrappedLogger = new CompositeWrapper();
wrappedLogger.Log("wrapped");
AssertDebugLastMessage("debug", string.Format("{0}|wrapped", currentMethodFullName));
}
public class CompositeWrapper
{
private readonly MyWrapper wrappedLogger;
public CompositeWrapper()
{
wrappedLogger = new MyWrapper();
}
public void Log(string what)
{
wrappedLogger.Log(typeof(CompositeWrapper), what);
}
}
public abstract class BaseWrapper
{
public void Log(string what)
{
InternalLog(typeof(BaseWrapper), what);
}
public void Log(Type type, string what) //overloaded with type for composition
{
InternalLog(type, what);
}
protected abstract void InternalLog(Type type, string what);
}
public class MyWrapper : BaseWrapper
{
private readonly ILogger wrapperLogger;
public MyWrapper()
{
wrapperLogger = LogManager.GetLogger("WrappedLogger");
}
protected override void InternalLog(Type type, string what) //added type for composition
{
LogEventInfo info = new LogEventInfo(LogLevel.Warn, wrapperLogger.Name, what);
// Provide BaseWrapper as wrapper type.
// Expected: UserStackFrame should point to the method that calls a
// method of BaseWrapper.
wrapperLogger.Log(type, info);
}
}
#endregion
private class MyLogger : Logger
{
}
[Fact]
public void CallsiteBySubclass_interface()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
ILogger logger = LogManager.GetLogger("mylogger", typeof(MyLogger));
Assert.True(logger is MyLogger, "logger isn't MyLogger");
logger.Debug("msg");
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests.CallsiteBySubclass_interface msg");
}
[Fact]
public void CallsiteBySubclass_mylogger()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
MyLogger logger = LogManager.GetLogger("mylogger", typeof(MyLogger)) as MyLogger;
Assert.NotNull(logger);
logger.Debug("msg");
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests.CallsiteBySubclass_mylogger msg");
}
[Fact]
public void CallsiteBySubclass_logger()
{
LogManager.Configuration = CreateConfigurationFromString(@"
<nlog>
<targets><target name='debug' type='Debug' layout='${callsite:classname=true:methodname=true} ${message}' /></targets>
<rules>
<logger name='*' minlevel='Debug' writeTo='debug' />
</rules>
</nlog>");
Logger logger = LogManager.GetLogger("mylogger", typeof(MyLogger)) as Logger;
Assert.NotNull(logger);
logger.Debug("msg");
AssertDebugLastMessage("debug", "NLog.UnitTests.LayoutRenderers.CallSiteTests.CallsiteBySubclass_logger msg");
}
}
}
| 1 | 12,441 | Must these `using` statements not go inside the `namespace` block? | NLog-NLog | .cs |
@@ -92,8 +92,8 @@ type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
- fboHead = 2
- fboBlock = 3
+ fboHead fboMutexLevel = 2
+ fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfssync"
"golang.org/x/net/context"
)
// mdReqType indicates whether an operation makes MD modifications or not
type mdReqType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReqType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
// A write request.
mdWrite
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Time between checks for dirty files to flush, in case Sync is
// never called.
secondsBetweenBackgroundFlushes = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead = 2
fboBlock = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
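// exampleLockStateFlow is an illustrative sketch (not part of the
// original file): a "public" FBO method makes its own lockState near
// the top and plumbs it into every leveled lock it takes, so that
// violations of the lock hierarchy are caught at runtime. The method
// name is hypothetical.
func (fbo *folderBranchOps) exampleLockStateFlow() BranchID {
	lState := makeFBOLockState()
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	return fbo.bid
}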
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
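// exampleBlockFetchUnlocked is an illustrative sketch (not part of the
// original file) of the intended use of DoRUnlockedIfPossible: a caller
// holding blockLock only for reading drops it around a slow operation,
// such as fetching a block over the network, and re-acquires it
// afterwards. The fetch argument is a hypothetical stand-in for that
// slow work.
func exampleBlockFetchUnlocked(bl *blockLock, lState *lockState,
	fetch func(*lockState)) {
	bl.DoRUnlockedIfPossible(lState, func(lState *lockState) {
		// Runs r-unlocked when the caller was only r-locked.
		fetch(lState)
	})
}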
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
// protects access to head and latestMergedRevision.
headLock leveledRWMutex
head ImmutableRootMetadata
// latestMergedRevision tracks the latest heard merged revision on server
latestMergedRevision MetadataRevision
blocks folderBlockOps
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
// - Operations that takes mdWriterLock always needs the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log logger.Logger
deferLog logger.Logger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
// rekeyWithPromptTimer tracks a timed function that will try to
// rekey with a paper key prompt, if enough time has passed.
// Protected by mdWriterLock
rekeyWithPromptTimer *time.Timer
editHistory *TlfEditHistory
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
}
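// exampleRemoteSyncLockOrder is an illustrative sketch (not part of the
// original file) of the locking order described in the comment above
// folderBranchOps for a remote-sync operation: mdWriterLock is held for
// the entire operation, and headLock is taken for writing only after
// all network access has completed. doNetworkWork is a hypothetical
// stand-in for the block and MD puts.
func (fbo *folderBranchOps) exampleRemoteSyncLockOrder(
	doNetworkWork func() error) error {
	lState := makeFBOLockState()
	fbo.mdWriterLock.Lock(lState)
	defer fbo.mdWriterLock.Unlock(lState)
	if err := doNetworkWork(); err != nil {
		return err
	}
	fbo.headLock.Lock(lState)
	defer fbo.headLock.Unlock(lState)
	// The new head would be set and observers notified here.
	return nil
}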
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
nodeCache := newNodeCacheStandard(fb)
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(nil, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]DirEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: log,
deferLog: log.CloneWithAddedDepth(1),
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.editHistory = NewTlfEditHistory(config, fbo, log)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher(secondsBetweenBackgroundFlushes * time.Second)
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this tlf is identified and marks
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown() error {
if fbo.config.CheckStateOnShutdown() {
ctx := context.TODO()
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServerForTesting(ctx, fbo.folderBranch); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.editHistory.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() TlfID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, _, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
func (fbo *folderBranchOps) getHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.head
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == NullBranchID
}
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
fbo.bid = bid
if bid == NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling is enabled and there are
// unflushed MD updates; otherwise it returns
// MetadataRevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
MetadataRevision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return MetadataRevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return MetadataRevisionUninitialized, nil
}
if jStatus.BranchID != NullBranchID.String() {
return MetadataRevisionUninitialized,
errors.New("Cannot find most recent merged revision while staged")
}
if jStatus.RevisionStart == MetadataRevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return MetadataRevisionUninitialized, nil
} else if jStatus.RevisionStart == MetadataRevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return MetadataRevisionUninitialized, errNoFlushedRevisions
}
return jStatus.RevisionStart - 1, nil
}
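// exampleLatestFlushedRevision is an illustrative sketch (not part of
// the original file) showing how a caller distinguishes the three
// outcomes documented above; the returned bool (hypothetical) means
// "use the local head".
func (fbo *folderBranchOps) exampleLatestFlushedRevision(
	ctx context.Context) (MetadataRevision, bool, error) {
	rev, err := fbo.getJournalPredecessorRevision(ctx)
	switch {
	case err == errNoFlushedRevisions:
		// Nothing has been flushed to the server yet.
		return MetadataRevisionUninitialized, false, nil
	case err != nil:
		return MetadataRevisionUninitialized, false, err
	case rev == MetadataRevisionUninitialized:
		// Journal empty or journaling disabled: the local head is current.
		return rev, true, nil
	default:
		return rev, false, nil
	}
}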
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
wasReadable = fbo.head.IsReadable()
if fbo.head.mdID == md.mdID {
panic(fmt.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
err := fbo.config.MDCache().Put(md)
if err != nil {
return err
}
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
} else if md.MergedStatus() == Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
var key kbfscrypto.VerifyingKey
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// MetadataRevisionUninitialized when the journal
// is empty.
if journalPred >= MetadataRevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
				// the latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates written by this device.
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
if key != md.LastModifyingWriterVerifyingKey() {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().IsPublic()))
}
return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != MetadataRevisionInitial {
return fmt.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServerForTesting().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(),
*newHandle)
if err != nil {
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= MetadataRevisionInitial {
return fmt.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return fmt.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
err := fbo.setHeadLocked(ctx, lState, md)
if err != nil {
return err
}
// Since the CR head goes directly to the server, we can safely
// set the latest merged revision here. (Normally self-commits
// don't update the latest merged revision since all non-CR
// updates go through the journal.)
if TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
return nil
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone {
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
		// For now, if the identify fails, let the
		// next call that hits this code path retry.
return err
}
ei := getExtendedIdentify(ctx)
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakOrBust().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// if rtype == mdWrite || rtype == mdRekey, then mdWriterLock must be taken
func (fbo *folderBranchOps) getMDLocked(
ctx context.Context, lState *lockState, rtype mdReqType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || rtype == mdReadNoIdentify || rtype == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
// Unless we're in mdWrite or mdRekey mode, we can't safely fetch
// the new MD without causing races, so bail.
if rtype != mdWrite && rtype != mdRekey {
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// We go down this code path either due to a rekey
// notification for an unseen TLF, or in some tests.
//
// TODO: Make tests not take this code path, and keep track of
// the fact that MDs coming from rekey notifications are
// untrusted.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{}, fmt.Errorf("Got nil RMD for %s", fbo.id())
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
			// We don't need to do this for the merged head
			// because setHeadLocked() already does
			// that anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, fmt.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setInitialHeadUntrustedLocked(ctx, lState, md)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReqType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDLocked(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.TlfID().IsPublic() {
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !md.GetTlfHandle().IsReader(uid) {
return ImmutableRootMetadata{}, NewReadAccessError(md.GetTlfHandle(), username)
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == MetadataRevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
mergedRev, Merged)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForWriteLocked returns a new RootMetadata object with an
// incremented version number for modification. If the returned object
// is put to the MDServer (via MDOps), mdWriterLock must be held until
// then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdWrite)
if err != nil {
return nil, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
if !md.GetTlfHandle().IsWriter(uid) {
return nil, NewWriteAccessError(md.GetTlfHandle(), username, filename)
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into
// syncBlockAndCheckEmbedLocked or the changes will be lost.
newMd, err := md.MakeSuccessor(fbo.config.Codec(), md.mdID, true)
if err != nil {
return nil, err
}
return newMd, nil
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
// must be a reader or writer (it checks both.)
if !handle.IsReader(uid) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), username)
}
newMd, err := md.MakeSuccessor(fbo.config.Codec(), md.mdID, handle.IsWriter(uid))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(uid) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, username)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes, uid)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, fmt.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// create a dblock since one doesn't exist yet
username, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
if !handle.IsWriter(uid) {
return NewWriteAccessError(handle, username, handle.GetCanonicalPath())
}
newDblock := &DirBlock{
Children: make(map[string]DirEntry),
}
var expectedKeyGen KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
if md.TlfID().IsPublic() {
expectedKeyGen = PublicKeyGen
} else {
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return fmt.Errorf("Initial rekey unexpectedly not done for private TLF %v", md.TlfID())
}
expectedKeyGen = FirstValidKeyGen
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return InvalidKeyGenerationError{md.TlfID(), keyGen}
}
info, plainSize, readyBlockData, err :=
fbo.blocks.ReadyBlock(ctx, md.ReadOnly(), newDblock, uid)
if err != nil {
return err
}
now := fbo.nowUnixNano()
md.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
co := newCreateOpForRootDir()
md.AddOp(co)
md.AddRefBlock(md.data.Dir.BlockInfo)
md.SetUnrefBytes(0)
if err = putBlockCheckQuota(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
if err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry); err != nil {
return err
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
return err
}
md.loadCachedBlockChanges(bps)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fmt.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
	err = fbo.setNewInitialHeadLocked(ctx, lState, MakeImmutableRootMetadata(
		md, key, mdID, fbo.config.Clock().Now()))
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id TlfID, err error) {
return nil, TlfID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (TlfID, error) {
return TlfID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
if md.data.Dir.Type != Dir {
// Not initialized.
return fmt.Errorf("MD with revision=%d not initialized", md.Revision())
}
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id())
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id TlfID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew")
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
bh, err := handle.ToBareHandle()
if err != nil {
return err
}
rmd := NewRootMetadata()
rmd.Update(id, bh)
if err != nil {
return err
}
// Need to keep the TLF handle around long enough to
// rekey the metadata for the first time.
rmd.tlfHandle = handle
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
// execMDReadNoIdentifyThenMDWrite first tries to execute the
// passed-in method in mdReadNoIdentify mode. If it fails with an
// MDWriteNeededInRequest error, it re-executes the method as in
// mdWrite mode. The passed-in method must note whether or not this
// is an mdWrite call.
//
// This must only be used by getRootNode().
func (fbo *folderBranchOps) execMDReadNoIdentifyThenMDWrite(
lState *lockState, f func(*lockState, mdReqType) error) error {
err := f(lState, mdReadNoIdentify)
// Redo as an MD write request if needed
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
err = f(lState, mdWrite)
}
return err
}
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
// node may still be nil if we're unwinding
// from a panic.
fbo.deferLog.CDebugf(ctx, "Done: %v", node)
}
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
err = fbo.execMDReadNoIdentifyThenMDWrite(lState,
func(lState *lockState, rtype mdReqType) error {
md, err = fbo.getMDLocked(ctx, lState, rtype)
return err
})
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config, md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %p", dir.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done GetDirChildren: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
err = runUnlessCanceled(ctx, func() error {
var err error
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node
// has been unlinked. Probably we have fast-forwarded, and
// missed all the updates deleting the children in this
// directory. In that case, just return an empty set of
// children so we don't return an incorrect set from the
// cache.
if md.data.Dir.BlockPointer != dirPath.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil
}
children, err = fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
return children, nil
}
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return err
}
childPath := dirPath.ChildPathNoPtr(name)
de, err = fbo.blocks.GetDirtyEntry(
ctx, lState, md.ReadOnly(), childPath)
if err != nil {
return err
}
if de.Type == Sym {
node = nil
} else {
err = fbo.blocks.checkDataVersion(childPath, de.BlockPointer)
if err != nil {
return err
}
node, err = fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return nil, EntryInfo{}, err
}
return node, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntry(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
}
return de, nil
}
var zeroPtr BlockPointer
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
}
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %p", node.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
ei NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %p", node.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
var res NodeMetadata
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
uid := de.Writer
if uid == keybase1.UID("") {
uid = de.Creator
}
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, uid)
if err != nil {
return res, err
}
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb})
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
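// Illustrative sketch (not part of the original code): the typical
// blockPutState lifecycle as used by the sync paths below, with error
// handling elided. Variable names are hypothetical.
//
//   bps := newBlockPutState(len(dir.path))
//   // Encrypt/encode a block and record it in bps:
//   info, _, _ := fbo.readyBlockMultiple(ctx, kmd, block, uid, bps)
//   // Once the MD is prepared, push all recorded blocks to the server:
//   _, _ = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(),
//       fbo.config.Reporter(), fbo.log, md.TlfID(),
//       md.GetTlfHandle().GetCanonicalName(), *bps)
//   // Finally, cache the brand-new blocks locally:
//   _ = fbo.finalizeBlocks(bps)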
func (fbo *folderBranchOps) readyBlockMultiple(ctx context.Context,
kmd KeyMetadata, currBlock Block, uid keybase1.UID,
bps *blockPutState) (info BlockInfo, plainSize int, err error) {
info, plainSize, readyBlockData, err :=
fbo.blocks.ReadyBlock(ctx, kmd, currBlock, uid)
if err != nil {
return
}
bps.addNewBlock(info.BlockPointer, currBlock, readyBlockData, nil)
return
}
func (fbo *folderBranchOps) unembedBlockChanges(
ctx context.Context, bps *blockPutState, md *RootMetadata,
changes *BlockChanges, uid keybase1.UID) error {
buf, err := fbo.config.Codec().Encode(changes)
if err != nil {
return err
}
block := NewFileBlock().(*FileBlock)
copied := fbo.config.BlockSplitter().CopyUntilSplit(block, false, buf, 0)
info, _, err := fbo.readyBlockMultiple(ctx, md.ReadOnly(), block, uid, bps)
if err != nil {
return err
}
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
// Everything fits in one block.
toCopy := int64(len(buf))
if copied >= toCopy {
changes.Info = info
md.data.cachedChanges = *changes
changes.Ops = nil
return nil
}
// Otherwise make a top block and split up the remaining buffer.
topBlock := NewFileBlock().(*FileBlock)
topBlock.IsInd = true
topBlock.IPtrs = append(topBlock.IPtrs, IndirectFilePtr{
BlockInfo: info,
Off: 0,
})
copiedSize := copied
for copiedSize < toCopy {
block := NewFileBlock().(*FileBlock)
currOff := copiedSize
copied := fbo.config.BlockSplitter().CopyUntilSplit(block, false,
buf[currOff:], 0)
copiedSize += copied
info, _, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), block, uid, bps)
if err != nil {
return err
}
topBlock.IPtrs = append(topBlock.IPtrs, IndirectFilePtr{
BlockInfo: info,
Off: currOff,
})
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
}
info, _, err = fbo.readyBlockMultiple(
ctx, md.ReadOnly(), topBlock, uid, bps)
if err != nil {
return err
}
changes.Info = info
md.AddRefBytes(uint64(info.EncodedSize))
md.AddDiskUsage(uint64(info.EncodedSize))
md.data.cachedChanges = *changes
changes.Ops = nil
return nil
}
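// Illustrative note (not part of the original code): when the encoded block
// changes don't fit in a single block, unembedBlockChanges above produces an
// indirect top block whose IPtrs record where each chunk starts in the
// encoded buffer, roughly:
//
//   topBlock.IsInd == true
//   topBlock.IPtrs == []IndirectFilePtr{
//       {BlockInfo: firstChunkInfo, Off: 0},
//       {BlockInfo: secondChunkInfo, Off: sizeOfFirstChunk},
//       // ...until the whole encoded buffer is covered
//   }
//
// changes.Info then points at the readied top block, and changes.Ops is
// cleared, since the ops now live in the unembedded blocks. The chunk names
// above are hypothetical.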
type localBcache map[BlockPointer]*DirBlock
// syncBlock updates, and readies, the blocks along the path for the
// given write, up to the root of the tree or stopAt (if specified).
// When it updates the root of the tree, it also modifies the given
// head object with a new revision number and root block ID. It first
// checks the provided lbc for blocks that may have been modified by
// previous syncBlock calls or the FS calls themselves. It returns
// the updated path to the changed directory, the new or updated
// directory entry created as part of the call, and a summary of all
// the blocks that now must be put to the block server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. All functions in this file
// must call syncBlockLocked instead, which holds mdWriterLock and
// thus serializes the revision numbers. Conflict resolution may call
// syncBlockForConflictResolution, which doesn't hold the lock, since
// it already handles conflicts correctly.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fbo *folderBranchOps) syncBlock(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
// now ready each dblock and write the DirEntry for the next one
// in the path
currBlock := newBlock
currName := name
newPath := path{
FolderBranch: dir.FolderBranch,
path: make([]pathNode, 0, len(dir.path)),
}
bps := newBlockPutState(len(dir.path))
refPath := dir.ChildPathNoPtr(name)
var newDe DirEntry
doSetTime := true
now := fbo.nowUnixNano()
for len(newPath.path) < len(dir.path)+1 {
info, plainSize, err := fbo.readyBlockMultiple(
ctx, md.ReadOnly(), currBlock, uid, bps)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// prepend to path and setup next one
newPath.path = append([]pathNode{{info.BlockPointer, currName}},
newPath.path...)
// get the parent block
prevIdx := len(dir.path) - len(newPath.path)
var prevDblock *DirBlock
var de DirEntry
var nextName string
nextDoSetTime := false
if prevIdx < 0 {
// root dir, update the MD instead
de = md.data.Dir
} else {
prevDir := path{
FolderBranch: dir.FolderBranch,
path: dir.path[:prevIdx+1],
}
// First, check the localBcache, which could contain
// blocks that were modified across multiple calls to
// syncBlock.
var ok bool
prevDblock, ok = lbc[prevDir.tailPointer()]
if !ok {
// If the block isn't in the local bcache, we
// have to fetch it, possibly from the
// network. Directory blocks are only ever
// modified while holding mdWriterLock, so it's
// safe to fetch them one at a time.
prevDblock, err = fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(),
prevDir, blockWrite)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
// modify the direntry for currName; make one
// if it doesn't exist (which should only
// happen the first time around).
//
// TODO: Pull the creation out of here and
// into createEntryLocked().
if de, ok = prevDblock.Children[currName]; !ok {
// If this isn't the first time
// around, we have an error.
if len(newPath.path) > 1 {
return path{}, DirEntry{}, nil, NoSuchNameError{currName}
}
// If this is a file, the size should be 0. (TODO:
// Ensure this.) If this is a directory, the size will
// be filled in below. The times will be filled in
// below as well, since we should only be creating a
// new directory entry when doSetTime is true.
de = DirEntry{
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
},
}
// If we're creating a new directory entry, the
// parent's times must be set as well.
nextDoSetTime = true
}
currBlock = prevDblock
nextName = prevDir.tailName()
}
if de.Type == Dir {
// TODO: When we use indirect dir blocks,
// we'll have to calculate the size some other
// way.
de.Size = uint64(plainSize)
}
if prevIdx < 0 {
md.AddUpdate(md.data.Dir.BlockInfo, info)
} else if prevDe, ok := prevDblock.Children[currName]; ok {
md.AddUpdate(prevDe.BlockInfo, info)
} else {
// this is a new block
md.AddRefBlock(info)
}
if len(refPath.path) > 1 {
refPath = *refPath.parentPath()
}
de.BlockInfo = info
if doSetTime {
if mtime {
de.Mtime = now
}
if ctime {
de.Ctime = now
}
}
if !newDe.IsInitialized() {
newDe = de
}
if prevIdx < 0 {
md.data.Dir = de
} else {
prevDblock.Children[currName] = de
}
currName = nextName
// Stop before we get to the common ancestor; it will be taken care of
// on the next sync call
if prevIdx >= 0 && dir.path[prevIdx].BlockPointer == stopAt {
// Put this back into the cache as dirty -- the next
// syncBlock call will ready it.
dblock, ok := currBlock.(*DirBlock)
if !ok {
return path{}, DirEntry{}, nil, BadDataError{stopAt.ID}
}
lbc[stopAt] = dblock
break
}
doSetTime = nextDoSetTime
}
return newPath, newDe, bps, nil
}
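// Illustrative sketch (not part of the original code): how stopAt and lbc
// cooperate across two calls, e.g. when two directory chains share a common
// ancestor during a rename. Variable names are hypothetical.
//
//   lbc := make(localBcache)
//   // First call readies the old parent's chain up to, but not including,
//   // the common ancestor; the ancestor's dirty DirBlock is parked in lbc
//   // instead of being readied.
//   _, _, oldBps, _ := fbo.syncBlockLocked(ctx, lState, uid, md, oldPBlock,
//       *oldParent.parentPath(), oldParent.tailName(), Dir, true, true,
//       commonAncestor, lbc)
//   // Second call goes all the way to the root (stopAt == zeroPtr) and
//   // picks the parked ancestor block up out of lbc along the way.
//   _, _, newBps, _ := fbo.syncBlockLocked(ctx, lState, uid, md, newPBlock,
//       *newParent.parentPath(), newParent.tailName(), Dir, true, true,
//       zeroPtr, lbc)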
// syncBlockLocked calls syncBlock under mdWriterLock.
func (fbo *folderBranchOps) syncBlockLocked(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.syncBlock(ctx, lState, uid, md, newBlock, dir, name,
entryType, mtime, ctime, stopAt, lbc)
}
// syncBlockForConflictResolution calls syncBlock unlocked, since
// conflict resolution can handle MD revision number conflicts
// correctly.
func (fbo *folderBranchOps) syncBlockForConflictResolution(
ctx context.Context, lState *lockState, uid keybase1.UID,
md *RootMetadata, newBlock Block, dir path, name string,
entryType EntryType, mtime bool, ctime bool, stopAt BlockPointer,
lbc localBcache) (path, DirEntry, *blockPutState, error) {
return fbo.syncBlock(
ctx, lState, uid, md, newBlock, dir,
name, entryType, mtime, ctime, stopAt, lbc)
}
// entryType must not be Sym.
func (fbo *folderBranchOps) syncBlockAndCheckEmbedLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, lbc localBcache) (
path, DirEntry, *blockPutState, error) {
fbo.mdWriterLock.AssertLocked(lState)
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return path{}, DirEntry{}, nil, err
}
newPath, newDe, bps, err := fbo.syncBlockLocked(
ctx, lState, uid, md, newBlock, dir, name, entryType, mtime,
ctime, stopAt, lbc)
if err != nil {
return path{}, DirEntry{}, nil, err
}
// Do the block changes need their own blocks? Unembed only if
// this is the final call to this function with this MD.
if stopAt == zeroPtr {
bsplit := fbo.config.BlockSplitter()
if !bsplit.ShouldEmbedBlockChanges(&md.data.Changes) {
err = fbo.unembedBlockChanges(ctx, bps, md, &md.data.Changes,
uid)
if err != nil {
return path{}, DirEntry{}, nil, err
}
}
}
return newPath, newDe, bps, nil
}
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
func (fbo *folderBranchOps) finalizeBlocks(bps *blockPutState) error {
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
return err
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(MDServerErrorConflictRevision)
_, isConflictPrevRoot := err.(MDServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(MDServerErrorConflictDiskUsage)
_, isConditionFailed := err.(MDServerErrorConditionFailed)
_, isConflictFolderMapping := err.(MDServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := MetadataRevisionUninitialized
oldPrevRoot := md.PrevRoot()
var mdID MdID
// This puts a delay on any cancellations arriving on ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
// The purpose of trying to avoid cancellation once we start the MD write is
// to avoid having an unpredictable perceived MD state. That is, when
// runUnlessCanceled returns Canceled on cancellation, the application
// receives an EINTR and assumes the operation didn't succeed. But the MD
// write continues, and there's a chance it will succeed, meaning the
// operation actually succeeds. This contradicts the application's perception
// through the error code and can lead to horrible situations. An easily
// triggered example: an application calls Create with O_EXCL set, gets an
// EINTR while the MD write succeeds, retries, and then gets an EEXIST error.
// If users hit Ctrl-C, this might not be a big deal. However, it also
// happens for other interrupts. For applications that use signals to
// communicate, e.g. SIGALRM and SIGUSR1, this can happen pretty often, which
// effectively renders them broken.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
// We don't explicitly clean up the CancellationDelayer here (with a defer)
// because sometimes fuse makes another call using the same ctx. For example,
// in fuse's Create call handler, a dir.Create is followed by an Attr call.
// If we did a deferred cleanup here and an interrupt had been received, ctx
// could be canceled before the Attr call finishes, which would cause FUSE to
// return EINTR for the Create request. But at this point, the request may
// have already succeeded. Returning EINTR makes the application think the
// file was not created successfully.
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
mdID, err = mdops.Put(ctx, md)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
// If this was caused by an exclusive create, we shouldn't do an
// UnmergedPut, but rather try to get newest update from server, and
// retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
mdID, err = mdops.PutUnmerged(ctx, md)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
err = UnmergedSelfConflictError{err}
}
if err != nil {
return err
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), mergedRev)
} else {
fbo.setBranchIDLocked(lState, NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(bps)
err = fbo.finalizeBlocks(bps)
if err != nil {
return err
}
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Archive the old, unref'd blocks if journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
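// Illustrative summary (not part of the original code) of the put decision
// made in finalizeMDWriteLocked above, with locking and most error handling
// elided:
//
//   if onMasterBranch {
//       mdID, err = mdops.Put(ctx, md)
//       if isRevisionConflict(err) {
//           if excl == WithExcl {
//               // Refresh from the server and let the caller retry.
//               return ExclOnUnmergedError{}
//           }
//           doUnmergedPut = true // fall back to staging on a branch
//       }
//   }
//   if doUnmergedPut {
//       mdID, err = mdops.PutUnmerged(ctx, md)
//       fbo.setBranchIDLocked(lState, md.BID())
//       fbo.cr.Resolve(md.Revision(), mergedRev) // kick off conflict resolution
//   }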
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
// Make sure everything flushed successfully. Since we're holding
// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != MetadataRevisionUninitialized {
return fmt.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return fmt.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
mdID, err := mdOps.Put(ctx, md)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// drop this block. we've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. we'll queue another rekey just in case. it should
// be safe as it's idempotent. we don't want any rekeys present
// in unmerged history or that will just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
md.loadCachedBlockChanges(nil)
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
key, err = fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.setHeadSuccessorLocked(ctx, lState,
MakeImmutableRootMetadata(md, key, mdID, fbo.config.Clock().Now()),
rebased)
}
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
// finally, write out the new metadata
mdID, err := fbo.config.MDOps().Put(ctx, md)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
md.loadCachedBlockChanges(bps)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, irmd)
return nil
}
func (fbo *folderBranchOps) syncBlockAndFinalizeLocked(ctx context.Context,
lState *lockState, md *RootMetadata, newBlock Block, dir path,
name string, entryType EntryType, mtime bool, ctime bool,
stopAt BlockPointer, excl Excl) (de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
_, de, bps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newBlock, dir, name, entryType, mtime,
ctime, zeroPtr, nil)
if err != nil {
return DirEntry{}, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return DirEntry{}, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl)
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func checkDisallowedPrefixes(name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// PathType returns path type
func (fbo *folderBranchOps) PathType() PathType {
if fbo.folderBranch.Tlf.IsPublic() {
return PublicPathType
}
return PrivatePathType
}
// canonicalPath returns full canonical path for dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (Node, DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// verify we have permission to write
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
co, err := newCreateOp(name, dirPath.tailPointer(), entryType)
if err != nil {
return nil, DirEntry{}, err
}
md.AddOp(co)
// create new data block
var newBlock Block
// XXX: for now, put a unique ID in every new block, to make sure it
// has a unique block ID. This may not be needed once we have encryption.
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
de, err := fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, newBlock, dirPath, name, entryType,
true, true, zeroPtr, excl)
if err != nil {
return nil, DirEntry{}, err
}
node, err := fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
return node, de, nil
}
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
fbo.mdWriterLock.Unlock(lState)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %p %s", dir.GetID(), path)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %p %s isExec=%v Excl=%s",
dir.GetID(), path, isExec, excl)
defer func() {
if err != nil {
fbo.deferLog.CDebugf(ctx, "Error: %v", err)
} else {
fbo.deferLog.CDebugf(ctx, "Done: %p", n.GetID())
}
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
dblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockWrite)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
co, err := newCreateOp(fromName, dirPath.tailPointer(), Sym)
if err != nil {
return DirEntry{}, err
}
md.AddOp(co)
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
dblock.Children[fromName] = DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *dirPath.parentPath(),
dirPath.tailName(), Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return DirEntry{}, err
}
return dblock.Children[fromName], nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %p %s -> %s",
dir.GetID(), fromName, toPath)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntry modifies md to unreference all relevant blocks for the
// given entry.
func (fbo *folderBranchOps) unrefEntry(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, de DirEntry,
name string) error {
md.AddUnrefBlock(de.BlockInfo)
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, md.ReadOnly(), childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
for _, blockInfo := range blockInfos {
md.AddUnrefBlock(blockInfo)
}
}
return nil
}
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md *RootMetadata, dir path, name string) error {
fbo.mdWriterLock.AssertLocked(lState)
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dir, blockWrite)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
ro, err := newRmOp(name, dir.tailPointer())
if err != nil {
return err
}
md.AddOp(ro)
err = fbo.unrefEntry(ctx, lState, md, dir, de, name)
if err != nil {
return err
}
// the actual unlink
delete(pblock.Children, name)
// sync the parent directory
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, pblock, *dir.parentPath(), dir.tailName(),
Dir, true, true, zeroPtr, NoExcl)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
pblock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return err
}
de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %p %s", dir.GetID(), dirName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %p %s", dir.GetID(), name)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(ctx, lState, md, dirPath, name)
})
}
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent path,
oldName string, newParent path, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
oldPBlock, newPBlock, newDe, lbc, err := fbo.blocks.PrepRename(
ctx, lState, md, oldParent, oldName, newParent, newName)
if err != nil {
return err
}
// does name exist?
if de, ok := newPBlock.Children[newName]; ok {
// Usually higher-level programs check these, but just in case.
if de.Type == Dir && newDe.Type != Dir {
return NotDirError{newParent.ChildPathNoPtr(newName)}
} else if de.Type != Dir && newDe.Type == Dir {
return NotFileError{newParent.ChildPathNoPtr(newName)}
}
if de.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), de.BlockPointer, newParent.Branch,
newParent.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
" (%s/%s) not allowed.", newParent, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntry(ctx, lState, md, newParent, de, newName)
if err != nil {
return err
}
}
// only the ctime changes
newDe.Ctime = fbo.nowUnixNano()
newPBlock.Children[newName] = newDe
delete(oldPBlock.Children, oldName)
// find the common ancestor
var i int
found := false
// the root block will always be the same, so start at number 1
for i = 1; i < len(oldParent.path) && i < len(newParent.path); i++ {
if oldParent.path[i].ID != newParent.path[i].ID {
found = true
i--
break
}
}
if !found {
// if we couldn't find one, then the common ancestor is the
// last node in the shorter path
if len(oldParent.path) < len(newParent.path) {
i = len(oldParent.path) - 1
} else {
i = len(newParent.path) - 1
}
}
commonAncestor := oldParent.path[i].BlockPointer
oldIsCommon := oldParent.tailPointer() == commonAncestor
newIsCommon := newParent.tailPointer() == commonAncestor
newOldPath := path{FolderBranch: oldParent.FolderBranch}
var oldBps *blockPutState
if oldIsCommon {
if newIsCommon {
// if old and new are both the common ancestor, there is
// nothing to do (syncBlock will take care of everything)
} else {
// If the old one is common and the new one is
// not, then the last
// syncBlockAndCheckEmbedLocked call will need
// to access the old one.
lbc[oldParent.tailPointer()] = oldPBlock
}
} else {
if newIsCommon {
// If the new one is common, then the first
// syncBlockAndCheckEmbedLocked call will need to access
// it.
lbc[newParent.tailPointer()] = newPBlock
}
// The old one is not the common ancestor, so we need to sync it.
// TODO: optimize by pushing blocks from both paths in parallel
newOldPath, _, oldBps, err = fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, oldPBlock, *oldParent.parentPath(), oldParent.tailName(),
Dir, true, true, commonAncestor, lbc)
if err != nil {
return err
}
}
newNewPath, _, newBps, err := fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, newPBlock, *newParent.parentPath(), newParent.tailName(),
Dir, true, true, zeroPtr, lbc)
if err != nil {
return err
}
// newOldPath is really just a prefix now. A copy is necessary as an
// append could cause the new path to contain nodes from the old path.
newOldPath.path = append(make([]pathNode, i+1, i+1), newOldPath.path...)
copy(newOldPath.path[:i+1], newNewPath.path[:i+1])
// merge and finalize the blockPutStates
if oldBps != nil {
newBps.mergeOtherBps(oldBps)
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(
md.ReadOnly(), newBps, blockDeleteOnMDFail)
}
}()
_, err = doBlockPuts(ctx, fbo.config.BlockServer(), fbo.config.BlockCache(),
fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *newBps)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, newBps, NoExcl)
}
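// Worked example (illustrative, not from the original code) for the
// common-ancestor search in renameLocked above: renaming /tlf/a/b/x to
// /tlf/a/c/x, the parent paths are
//
//   oldParent.path = [tlf, a, b]
//   newParent.path = [tlf, a, c]
//
// The loop starts at index 1 (the roots always match), finds the first
// mismatch at index 2 (b vs. c), and steps back to i = 1, so the common
// ancestor is "a". The old parent's chain is then synced with stopAt set to
// a's BlockPointer (parking a's dirty block in lbc), and the new parent's
// chain is synced all the way to the root, so "a" is readied and updated
// exactly once.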
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %p/%s -> %p/%s", oldParent.GetID(),
oldName, newParent.GetID(), newName)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// only works for paths within the same topdir
if oldParentPath.FolderBranch != newParentPath.FolderBranch {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParentPath, oldName,
newParentPath, newName)
})
}
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %p %d %d", file.GetID(), len(dest), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
{
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
// TODO: get a patch in git, wait for sufficiently long time for people to
// upgrade, and remove this.
// allow turning this feature off by env var to make life easier when we
// try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
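// Note (not part of the original code): os.LookupEnv only checks for the
// variable's presence, so exporting KBFS_DISABLE_GIT_SPECIAL_CASE with any
// value, even an empty one, disables the special case above.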
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), filePath, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %p %d %d", file.GetID(), len(data), off)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %p %d", file.GetID(), size)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDLocked(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
return nil
})
}
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file path,
ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
// If the file is a symlink, do nothing (to match ext4 behavior).
// Directories are ignored here as well.
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
exAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this setex.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %p %t", file.GetID(), ex)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setExLocked(ctx, lState, filePath, ex)
})
}
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file path,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
// verify we have permission to write
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
dblock, de, err := fbo.blocks.GetDirtyParentAndEntry(
ctx, lState, md.ReadOnly(), file)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPath := file.parentPath()
sao, err := newSetAttrOp(file.tailName(), parentPath.tailPointer(),
mtimeAttr, file.tailPointer())
if err != nil {
return err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this
// setmtime.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
file.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
md.AddOp(sao)
dblock.Children[file.tailName()] = de
_, err = fbo.syncBlockAndFinalizeLocked(
ctx, lState, md, dblock, *parentPath.parentPath(), parentPath.tailName(),
Dir, false, false, zeroPtr, NoExcl)
return err
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %p %v", file.GetID(), mtime)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNode(file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
return fbo.setMtimeLocked(ctx, lState, filePath, mtime)
})
}
func (fbo *folderBranchOps) syncLocked(ctx context.Context,
lState *lockState, file path) (stillDirty bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, nil
}
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who sync clean files on close
// would get an error.
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return true, err
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if md.data.Dir.BlockPointer != file.path[0].BlockPointer {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file. TODO: Hook this in with the
// node cache GC logic to be perfectly accurate.
return true, fbo.blocks.ClearCacheInfo(lState, file)
}
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return true, err
}
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
// Filled in by doBlockPuts below.
var blocksToRemove []BlockPointer
fblock, bps, lbc, syncState, err :=
fbo.blocks.StartSync(ctx, lState, md, uid, file)
defer func() {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}()
if err != nil {
return true, err
}
newPath, _, newBps, err :=
fbo.syncBlockAndCheckEmbedLocked(
ctx, lState, md, fblock, *file.parentPath(),
file.tailName(), File, true, true, zeroPtr, lbc)
if err != nil {
return true, err
}
bps.mergeOtherBps(newBps)
// Note: We explicitly don't call fbo.fbm.cleanUpBlockState here
// when there's an error, because it's possible some of the blocks
// will be reused in a future attempt at this same sync, and we
// don't want them cleaned up in that case. Instead, the
// FinishSync call below will take care of that.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return true, err
}
err = fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl)
if err != nil {
return true, err
}
// At this point, all reads through the old path (i.e., file)
// see writes that happened since StartSync, whereas all reads
// through the new path (newPath) don't.
//
// TODO: This isn't completely correct, since reads that
// happen after a write should always see the new data.
//
// After FinishSync succeeds, then reads through both the old
// and the new paths will see the writes that happened during
// the sync.
return fbo.blocks.FinishSync(ctx, lState, file, newPath,
md.ReadOnly(), syncState, fbo.fbm)
}
func (fbo *folderBranchOps) Sync(ctx context.Context, file Node) (err error) {
fbo.log.CDebugf(ctx, "Sync %p", file.GetID())
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.checkNode(file)
if err != nil {
return
}
var stillDirty bool
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
stillDirty, err = fbo.syncLocked(ctx, lState, filePath)
return err
})
if err != nil {
return err
}
if !stillDirty {
fbo.status.rmDirtyNode(file)
}
return nil
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for the most recent op
// in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) {
fbo.headLock.AssertLocked(lState)
lastOp := md.data.Changes.Ops[len(md.data.Changes.Ops)-1]
fbo.notifyOneOpLocked(ctx, lState, lastOp, md)
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{md})
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
func (fbo *folderBranchOps) unlinkFromCache(op op, oldDir BlockPointer,
node Node, name string) error {
// The entry could be under any one of the unref'd blocks, and
// it's safe to perform this when the pointer isn't real, so just
// try them all to avoid the overhead of looking up the right
// pointer in the old version of the block.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childPath := p.ChildPathNoPtr(name)
// revert the parent pointer
childPath.path[len(childPath.path)-2].BlockPointer = oldDir
for _, ptr := range op.Unrefs() {
childPath.path[len(childPath.path)-1].BlockPointer = ptr
fbo.nodeCache.Unlink(ptr.Ref(), childPath)
}
return nil
}
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ImmutableRootMetadata) {
fbo.headLock.AssertLocked(lState)
fbo.blocks.UpdatePointers(lState, op)
var changes []NodeChange
switch realOp := op.(type) {
default:
return
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %p",
realOp.NewName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %p",
realOp.OldName, node.GetID())
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
err := fbo.unlinkFromCache(op, realOp.Dir.Unref, node, realOp.OldName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
var newNodeID NodeID
if newNode != nil {
newNodeID = newNode.GetID()
}
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%p to %s/%p",
realOp.Renamed, realOp.OldName, oldNode.GetID(), realOp.NewName,
newNodeID)
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md.ReadOnly())
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
// If new node exists as well, unlink any previously
// existing entry and move the node.
var unrefPtr BlockPointer
if oldNode != newNode {
unrefPtr = realOp.NewDir.Unref
} else {
unrefPtr = realOp.OldDir.Unref
}
err := fbo.unlinkFromCache(op, unrefPtr, newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't unlink from cache: %v", err)
return
}
err = fbo.nodeCache.Move(realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't move node in cache: %v", err)
return
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %p",
len(realOp.Writes), node.GetID())
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
return
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %p",
realOp.Attr, realOp.Name, node.GetID())
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md.ReadOnly(), p, realOp)
if err != nil {
// TODO: Log error?
return
}
if childNode == nil {
return
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore
bcache := fbo.config.BlockCache()
for _, ptr := range realOp.Unrefs() {
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
// TODO: I will add logic here to unlink and invalidate any
// corresponding unref'd nodes.
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailPointer().Ref())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %p",
p.tailPointer(), node.GetID())
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
fbo.nodeCache.Unlink(p.tailPointer().Ref(), p)
}
if len(changes) == 0 {
return
}
}
fbo.observers.batchChanges(ctx, changes)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) MetadataRevision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return MetadataRevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return err
}
if mergedRev != MetadataRevisionUninitialized {
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(ctx, lState, rmds[len(rmds)-1].Revision(), false)
unmergedRev := MetadataRevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(unmergedRev, rmds[len(rmds)-1].Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.New("Ignoring MD updates while writes are dirty")
}
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config, rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
fbo.notifyOneOpLocked(ctx, lState, op, rmd)
}
appliedRevs = append(appliedRevs, rmd)
}
if len(appliedRevs) > 0 {
fbo.editHistory.UpdateHistory(ctx, appliedRevs)
}
return nil
}
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
fbo.notifyOneOpLocked(ctx, lState, io, rmd)
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) MetadataRevision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// caller should have held fbo.headLock
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev MetadataRevision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == MetadataRevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(), start)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
fbo.notifyBatchLocked(ctx, lState, md)
if err := fbo.config.MDCache().Put(md); err != nil {
return err
}
return nil
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the
// merge point for the branch and the current head. The returned MDs
// are the same instances that are stored in the MD cache, so they
// should be modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
MetadataRevision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
currHead, Merged)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
// let the server know we no longer have need
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
return fbo.finalizeMDWriteLocked(ctx, lState, md, &blockPutState{}, NoExcl)
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// launch unstaging in a new goroutine, because we don't want to
		// use the provided context, since upper layers might ignore our
// notifications if we do. But we still want to wait for the
// context to cancel.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return errors.New("Can't rekey while staged.")
}
head := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); !ok ||
applyErr.rev != applyErr.curr {
return err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return err
}
if fbo.rekeyWithPromptTimer != nil {
if !promptPaper {
fbo.log.CDebugf(ctx, "rekeyWithPrompt superseded before it fires.")
} else if !md.IsRekeySet() {
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
// If the rekey bit isn't set, then some other device
// already took care of our request, and we can stop
// early. Note that if this FBO never registered for
// updates, then we might not yet have seen the update, in
// which case we'll still try to rekey but it will fail as
// a conflict.
fbo.log.CDebugf(ctx, "rekeyWithPrompt not needed because the "+
"rekey bit was already unset.")
return nil
}
}
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
_, uid, err := fbo.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(uid) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
			// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return nil
}
// Rekey incomplete, fallthrough without early exit, to ensure
// we write the metadata with any potential changes
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError:
stillNeedsRekey = true
case NeedSelfRekeyError:
stillNeedsRekey = true
default:
if err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
//
// Only ever set the timer once.
if fbo.rekeyWithPromptTimer == nil {
d := fbo.config.RekeyWithPromptWaitTime()
fbo.log.CDebugf(ctx, "Scheduling a rekeyWithPrompt in %s", d)
fbo.rekeyWithPromptTimer = time.AfterFunc(d, fbo.rekeyWithPrompt)
}
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
if !stillNeedsRekey && fbo.rekeyWithPromptTimer != nil {
fbo.log.CDebugf(ctx, "Scheduled rekey timer no longer needed")
fbo.rekeyWithPromptTimer.Stop()
fbo.rekeyWithPromptTimer = nil
}
return nil
}
func (fbo *folderBranchOps) rekeyWithPrompt() {
var err error
ctx := ctxWithRandomIDReplayable(
context.Background(), CtxRekeyIDKey, CtxRekeyOpID, fbo.log)
// Only give the user limited time to enter their paper key, so we
// don't wait around forever.
d := fbo.config.RekeyWithPromptWaitTime()
ctx, cancel := context.WithTimeout(ctx, d)
defer cancel()
if ctx, err = NewContextWithCancellationDelayer(ctx); err != nil {
panic(err)
}
fbo.log.CDebugf(ctx, "rekeyWithPrompt")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, true)
})
}
// Rekey rekeys the given folder.
func (fbo *folderBranchOps) Rekey(ctx context.Context, tlf TlfID) (err error) {
fbo.log.CDebugf(ctx, "Rekey")
defer func() {
fbo.deferLog.CDebugf(ctx, "Done: %v", err)
}()
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.rekeyLocked(ctx, lState, false)
})
}
func (fbo *folderBranchOps) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServerForTesting")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return fmt.Errorf("Conflict resolution didn't take us out of " +
"staging.")
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) > 0 {
for _, ref := range dirtyRefs {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("Can't sync from server while dirty.")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(MDRevisionMismatch); ok {
if applyErr.rev == applyErr.curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
return err
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editHistory.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return ctxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id())
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != MetadataRevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return false, err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return false, err
}
// Invalidate all the affected nodes.
fbo.observers.batchChanges(ctx, changes)
// Reset the edit history. TODO: notify any listeners that we've
// done this.
fbo.editHistory.Shutdown()
fbo.editHistory = NewTlfEditHistory(fbo.config, fbo, fbo.log)
return true, nil
}
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
if _, ok := err.(UnmergedError); ok {
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fbo.log.CDebugf(ctx, "Registering for updates (curr rev = %d)", currRev)
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
func (fbo *folderBranchOps) backgroundFlusher(betweenFlushes time.Duration) {
ticker := time.NewTicker(betweenFlushes)
defer ticker.Stop()
lState := makeFBOLockState()
var prevDirtyRefMap map[BlockRef]bool
sameDirtyRefCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
}
if doSelect {
select {
case <-ticker.C:
case <-fbo.forceSyncChan:
case <-fbo.shutdownChan:
return
}
}
dirtyRefs := fbo.blocks.GetDirtyRefs(lState)
if len(dirtyRefs) == 0 {
sameDirtyRefCount = 0
continue
}
// Make sure we are making some progress
currDirtyRefMap := make(map[BlockRef]bool)
for _, ref := range dirtyRefs {
currDirtyRefMap[ref] = true
}
if reflect.DeepEqual(currDirtyRefMap, prevDirtyRefMap) {
sameDirtyRefCount++
} else {
sameDirtyRefCount = 0
}
if sameDirtyRefCount >= 10 {
panic(fmt.Sprintf("Making no Sync progress on dirty refs: %v",
dirtyRefs))
}
prevDirtyRefMap = currDirtyRefMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
// Make sure this loop doesn't starve user requests for
// too long. But use the longer-timeout version in the
// actual Sync command, to avoid unnecessary errors.
shortCtx, shortCancel := context.WithTimeout(ctx, 1*time.Second)
defer shortCancel()
for _, ref := range dirtyRefs {
select {
case <-shortCtx.Done():
fbo.log.CDebugf(ctx,
"Stopping background sync early due to timeout")
return nil
default:
}
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
err := fbo.Sync(longCtx, node)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
p := fbo.nodeCache.PathFromNode(node)
fbo.log.CWarningf(ctx, "Couldn't sync dirty file with "+
"ref=%v, nodeID=%p, and path=%v: %v",
ref, node.GetID(), p, err)
}
}
return nil
})
}
}
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
// Switch to the non-journaled MDOps after flushing all the
// resolution block writes -- resolutions must go straight
// through to the server or else the journal will get
// confused.
if err := fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
// Put the MD. If there's a conflict, abort the whole process and
// let CR restart itself.
mdID, err := mdOps.Put(ctx, md)
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Prune the branch via the journal, if there is one.
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), fbo.bid)
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(bps)
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
key, err := fbo.config.KBPKI().GetCurrentVerifyingKey(ctx)
if err != nil {
return err
}
irmd := MakeImmutableRootMetadata(
md, key, mdID, fbo.config.Clock().Now())
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setBranchIDLocked(lState, NullBranchID)
// Archive the old, unref'd blocks (the revision went straight to
// the server, so we know it is merged).
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
fbo.notifyOneOpLocked(ctx, lState, op, irmd)
}
fbo.editHistory.UpdateHistory(ctx, []ImmutableRootMetadata{irmd})
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(ctx, lState, md, bps, newOps)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.Resolve(md.Revision(), MetadataRevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid BranchID,
rev MetadataRevision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), NullBranchID,
rev, Merged)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid BranchID, rev MetadataRevision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
MetadataRevisionInitial)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().
GetNormalizedUsername(ctx, rmd.LastModifyingWriter())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: time.Unix(0, rmd.data.Dir.Mtime),
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(ctx context.Context,
folderBranch FolderBranch) (edits TlfWriterEdits, err error) {
fbo.log.CDebugf(ctx, "GetEditHistory")
defer func() { fbo.deferLog.CDebugf(ctx, "Done: %v", err) }()
if folderBranch != fbo.folderBranch {
return nil, WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
head, err := fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
if err != nil {
return nil, err
}
return fbo.editHistory.GetComplete(ctx, head)
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
fbo.config.KBFSOps().PushConnectionStatusChange(service, newStatus)
}
| 1 | 14,126 | Why? I thought the previous way was idiomatic. (Same with the `MetadataVer` stuff above.) | keybase-kbfs | go |
@@ -12,11 +12,7 @@ define([], function() {
}
function uploadContent(connectionManager, server, options) {
- return new Promise(function(resolve, reject) {
- require(["contentuploader"], function(ContentUploader) {
- (new ContentUploader).uploadImages(connectionManager, server).then(resolve, reject)
- })
- })
+ return new Promise().resolve();
}
function syncMedia(connectionManager, server, options) { | 1 | define([], function() {
"use strict";
function performSync(connectionManager, server, options) {
console.log("ServerSync.performSync to server: " + server.Id), options = options || {};
var cameraUploadServers = options.cameraUploadServers || [];
console.log("ServerSync cameraUploadServers: " + JSON.stringify(cameraUploadServers));
var uploadPhotos = -1 !== cameraUploadServers.indexOf(server.Id);
return console.log("ServerSync uploadPhotos: " + uploadPhotos), (uploadPhotos ? uploadContent(connectionManager, server, options) : Promise.resolve()).then(function() {
return syncMedia(connectionManager, server, options)
})
}
function uploadContent(connectionManager, server, options) {
return new Promise(function(resolve, reject) {
require(["contentuploader"], function(ContentUploader) {
(new ContentUploader).uploadImages(connectionManager, server).then(resolve, reject)
})
})
}
function syncMedia(connectionManager, server, options) {
return new Promise(function(resolve, reject) {
require(["mediasync"], function(MediaSync) {
var apiClient = connectionManager.getApiClient(server.Id);
(new MediaSync).sync(apiClient, server, options).then(resolve, reject)
})
})
}
function ServerSync() {}
return ServerSync.prototype.sync = function(connectionManager, server, options) {
if (!server.AccessToken && !server.ExchangeToken) return console.log("Skipping sync to server " + server.Id + " because there is no saved authentication information."), Promise.resolve();
var connectionOptions = {
updateDateLastAccessed: !1,
enableWebSocket: !1,
reportCapabilities: !1,
enableAutomaticBitrateDetection: !1
};
return connectionManager.connectToServer(server, connectionOptions).then(function(result) {
return "SignedIn" === result.State ? performSync(connectionManager, server, options) : (console.log("Unable to connect to server id: " + server.Id), Promise.reject())
}, function(err) {
throw console.log("Unable to connect to server id: " + server.Id), err
})
}, ServerSync
}); | 1 | 10,535 | shouldn't we `reject` here instead? | jellyfin-jellyfin-web | js |
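As written, the patched `uploadContent` above would fail at runtime: `new Promise()` with no executor function throws a TypeError, and promise instances have no `.resolve()` method. Below is a minimal sketch of the two plausible intents — resolving as a no-op so the `.then()` chain in `performSync` keeps running, or rejecting to surface the removal to the caller; the error message is illustrative and not from the project:

```js
function uploadContent(connectionManager, server, options) {
    // No-op: keeps performSync's .then() chain working when photo upload is skipped.
    return Promise.resolve();

    // Alternative raised in review: signal failure instead (message is illustrative).
    // return Promise.reject(new Error("camera upload is no longer supported"));
}
```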
@@ -167,10 +167,12 @@ func printActionProto(action *iotextypes.Action) (string, error) {
if err != nil {
return "", output.NewError(output.ConvertError, "failed to convert bytes into address", err)
}
+	//ioctl action should display IOTX unit instead of Rau
+ gasPriceUnitIOTX, err := util.StringToIOTX(action.Core.GasPrice)
result := fmt.Sprintf("\nversion: %d ", action.Core.GetVersion()) +
fmt.Sprintf("nonce: %d ", action.Core.GetNonce()) +
fmt.Sprintf("gasLimit: %d ", action.Core.GasLimit) +
- fmt.Sprintf("gasPrice: %s Rau\n", action.Core.GasPrice) +
+ fmt.Sprintf("gasPrice: %s IOTX\n", gasPriceUnitIOTX) +
fmt.Sprintf("senderAddress: %s %s\n", senderAddress.String(),
Match(senderAddress.String(), "address"))
switch { | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"fmt"
"log"
"math/big"
"strconv"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/ioctl/cmd/alias"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/util"
)
// Multi-language support
var (
hashCmdShorts = map[config.Language]string{
config.English: "Get action by hash",
config.Chinese: "依据哈希值,获取行动",
}
hashCmdUses = map[config.Language]string{
config.English: "hash ACTION_HASH",
config.Chinese: "hash 行动_哈希", // this translation
}
)
// actionHashCmd represents the action hash command
var actionHashCmd = &cobra.Command{
Use: config.TranslateInLang(hashCmdUses, config.UILanguage),
Short: config.TranslateInLang(hashCmdShorts, config.UILanguage),
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
cmd.SilenceUsage = true
err := getActionByHash(args)
return output.PrintError(err)
},
}
type actionState int
const (
// Pending action is in the action pool but not executed by blockchain
Pending actionState = iota
// Executed action has been run and recorded on blockchain
Executed
)
type actionMessage struct {
State actionState `json:"state"`
Proto *iotexapi.ActionInfo `json:"proto"`
Receipt *iotextypes.Receipt `json:"receipt"`
}
func (m *actionMessage) String() string {
if output.Format == "" {
message, err := printAction(m.Proto)
if err != nil {
log.Panic(err.Error())
}
if m.State == Pending {
message += "\n#This action is pending"
} else {
message += "\n#This action has been written on blockchain\n\n" + printReceiptProto(m.Receipt)
}
return message
}
return output.FormatString(output.Result, m)
}
// getActionByHash gets action of IoTeX Blockchain by hash
func getActionByHash(args []string) error {
hash := args[0]
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return output.NewError(output.NetworkError, "failed to connect to endpoint", err)
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
ctx := context.Background()
jwtMD, err := util.JwtAuth()
if err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
// search action on blockchain
requestGetAction := iotexapi.GetActionsRequest{
Lookup: &iotexapi.GetActionsRequest_ByHash{
ByHash: &iotexapi.GetActionByHashRequest{
ActionHash: hash,
CheckPending: false,
},
},
}
response, err := cli.GetActions(ctx, &requestGetAction)
if err != nil {
sta, ok := status.FromError(err)
if ok {
return output.NewError(output.APIError, sta.Message(), nil)
}
return output.NewError(output.NetworkError, "failed to invoke GetActions api", err)
}
if len(response.ActionInfo) == 0 {
return output.NewError(output.APIError, "no action info returned", nil)
}
message := actionMessage{Proto: response.ActionInfo[0]}
requestGetReceipt := &iotexapi.GetReceiptByActionRequest{ActionHash: hash}
responseReceipt, err := cli.GetReceiptByAction(ctx, requestGetReceipt)
if err != nil {
sta, ok := status.FromError(err)
if ok && sta.Code() == codes.NotFound {
message.State = Pending
} else if ok {
return output.NewError(output.APIError, sta.Message(), nil)
}
return output.NewError(output.NetworkError, "failed to invoke GetReceiptByAction api", err)
}
message.State = Executed
message.Receipt = responseReceipt.ReceiptInfo.Receipt
fmt.Println(message.String())
return nil
}
func printAction(actionInfo *iotexapi.ActionInfo) (string, error) {
result, err := printActionProto(actionInfo.Action)
if err != nil {
return "", err
}
if actionInfo.Timestamp != nil {
ts, err := ptypes.Timestamp(actionInfo.Timestamp)
if err != nil {
return "", err
}
result += fmt.Sprintf("timeStamp: %d\n", ts.Unix())
result += fmt.Sprintf("blkHash: %s\n", actionInfo.BlkHash)
}
result += fmt.Sprintf("actHash: %s\n", actionInfo.ActHash)
return result, nil
}
func printActionProto(action *iotextypes.Action) (string, error) {
pubKey, err := crypto.BytesToPublicKey(action.SenderPubKey)
if err != nil {
return "", output.NewError(output.ConvertError, "failed to convert public key from bytes", err)
}
senderAddress, err := address.FromBytes(pubKey.Hash())
if err != nil {
return "", output.NewError(output.ConvertError, "failed to convert bytes into address", err)
}
result := fmt.Sprintf("\nversion: %d ", action.Core.GetVersion()) +
fmt.Sprintf("nonce: %d ", action.Core.GetNonce()) +
fmt.Sprintf("gasLimit: %d ", action.Core.GasLimit) +
fmt.Sprintf("gasPrice: %s Rau\n", action.Core.GasPrice) +
fmt.Sprintf("senderAddress: %s %s\n", senderAddress.String(),
Match(senderAddress.String(), "address"))
switch {
default:
result += proto.MarshalTextString(action.Core)
case action.Core.GetTransfer() != nil:
transfer := action.Core.GetTransfer()
amount, err := util.StringToIOTX(transfer.Amount)
if err != nil {
return "", output.NewError(output.ConvertError, "failed to convert string into IOTX amount", err)
}
result += "transfer: <\n" +
fmt.Sprintf(" recipient: %s %s\n", transfer.Recipient,
Match(transfer.Recipient, "address")) +
fmt.Sprintf(" amount: %s IOTX\n", amount)
if len(transfer.Payload) != 0 {
result += fmt.Sprintf(" payload: %s\n", transfer.Payload)
}
result += ">\n"
case action.Core.GetExecution() != nil:
execution := action.Core.GetExecution()
result += "execution: <\n" +
fmt.Sprintf(" contract: %s %s\n", execution.Contract,
Match(execution.Contract, "address"))
if execution.Amount != "0" {
amount, err := util.StringToIOTX(execution.Amount)
if err != nil {
return "", output.NewError(output.ConvertError, "failed to convert string into IOTX amount", err)
}
result += fmt.Sprintf(" amount: %s IOTX\n", amount)
}
result += fmt.Sprintf(" data: %x\n", execution.Data) + ">\n"
case action.Core.GetPutPollResult() != nil:
putPollResult := action.Core.GetPutPollResult()
result += "putPollResult: <\n" +
fmt.Sprintf(" height: %d\n", putPollResult.Height) +
" candidates: <\n"
for _, candidate := range putPollResult.Candidates.Candidates {
result += " candidate: <\n" +
fmt.Sprintf(" address: %s\n", candidate.Address)
votes := big.NewInt(0).SetBytes(candidate.Votes)
result += fmt.Sprintf(" votes: %s\n", votes.String()) +
fmt.Sprintf(" rewardAdress: %s\n", candidate.RewardAddress) +
" >\n"
}
result += " >\n" +
">\n"
}
result += fmt.Sprintf("senderPubKey: %x\n", action.SenderPubKey) +
fmt.Sprintf("signature: %x\n", action.Signature)
return result, nil
}
func printReceiptProto(receipt *iotextypes.Receipt) string {
result := fmt.Sprintf("status: %d %s\n", receipt.Status,
Match(strconv.Itoa(int(receipt.Status)), "status")) +
fmt.Sprintf("actHash: %x\n", receipt.ActHash) +
fmt.Sprintf("blkHeight: %d\n", receipt.BlkHeight) +
fmt.Sprintf("gasConsumed: %d\n", receipt.GasConsumed) +
fmt.Sprintf("logs: %d", len(receipt.Logs))
if len(receipt.ContractAddress) != 0 {
result += fmt.Sprintf("\ncontractAddress: %s %s", receipt.ContractAddress,
Match(receipt.ContractAddress, "address"))
}
return result
}
// Match returns human readable expression
func Match(in string, matchType string) string {
switch matchType {
case "address":
alias, err := alias.Alias(in)
if err != nil {
return ""
}
return "(" + alias + ")"
case "status":
switch in {
case "0":
return "(Failure)"
case "1":
return "(Success)"
case "100":
return "(Failure : Unknown)"
case "101":
return "(Failure : Execution out of gas)"
case "102":
return "(Failure : Deployment out of gas - not enough gas to store code)"
case "103":
return "(Failure : Max call depth exceeded)"
case "104":
return "(Failure : Contract address collision)"
case "105":
return "(Failure : No compatible interpreter)"
case "106":
return "(Failure : Execution reverted)"
case "107":
return "(Failure : Max code size exceeded)"
case "108":
return "(Failure : Write protection)"
}
}
return ""
}
| 1 | 21,365 | check err or use gasPriceUnitIOTX, _ := if we are sure action.Core.GasPrice is correct | iotexproject-iotex-core | go |
@@ -34,7 +34,7 @@ func newHarness(t *testing.T) (drivertest.Harness, error) {
return &harness{vars: map[string][]byte{}}, nil
}
-func (h *harness) MakeWatcher(ctx context.Context, name string, decoder *runtimevar.Decoder, wait time.Duration) (driver.Watcher, error) {
+func (h *harness) MakeWatcher(ctx context.Context, name string, decoder *runtimevar.Decoder) (driver.Watcher, error) {
rawVal, found := h.vars[name]
if !found {
// The variable isn't set. Create a Variable that always returns an error. | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package constantvar
import (
"context"
"errors"
"testing"
"time"
"github.com/google/go-cloud/runtimevar"
"github.com/google/go-cloud/runtimevar/driver"
"github.com/google/go-cloud/runtimevar/drivertest"
)
type harness struct {
// vars stores the variable value(s) that have been set using CreateVariable.
vars map[string][]byte
}
func newHarness(t *testing.T) (drivertest.Harness, error) {
return &harness{vars: map[string][]byte{}}, nil
}
func (h *harness) MakeWatcher(ctx context.Context, name string, decoder *runtimevar.Decoder, wait time.Duration) (driver.Watcher, error) {
rawVal, found := h.vars[name]
if !found {
// The variable isn't set. Create a Variable that always returns an error.
return &watcher{err: errors.New("not found")}, nil
}
val, err := decoder.Decode(rawVal)
if err != nil {
// The variable didn't decode.
return &watcher{err: errors.New("not found")}, nil
}
return &watcher{value: val, t: time.Now()}, nil
}
func (h *harness) CreateVariable(ctx context.Context, name string, val []byte) error {
h.vars[name] = val
return nil
}
func (h *harness) UpdateVariable(ctx context.Context, name string, val []byte) error {
return errors.New("not supported")
}
func (h *harness) DeleteVariable(ctx context.Context, name string) error {
return errors.New("not supported")
}
func (h *harness) Close() {}
func (h *harness) Mutable() bool { return false }
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, newHarness)
}
| 1 | 12,438 | Just curious, was wait not used at all before? | google-go-cloud | go |
@@ -7,9 +7,10 @@ package manifest
import (
"errors"
"fmt"
+ "github.com/google/shlex"
"path/filepath"
"strconv"
- "strings"
+ //"strings"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3" | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package manifest provides functionality to create Manifest files.
package manifest
import (
"errors"
"fmt"
"path/filepath"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"gopkg.in/yaml.v3"
)
const (
defaultFluentbitImage = "amazon/aws-for-fluent-bit:latest"
defaultDockerfileName = "Dockerfile"
// AWS VPC subnet placement options.
PublicSubnetPlacement = "public"
PrivateSubnetPlacement = "private"
)
var (
// WorkloadTypes holds all workload manifest types.
WorkloadTypes = append(ServiceTypes, JobTypes...)
// All placement options.
subnetPlacements = []string{PublicSubnetPlacement, PrivateSubnetPlacement}
// Error definitions.
errUnmarshalBuildOpts = errors.New("cannot unmarshal build field into string or compose-style map")
errUnmarshalCountOpts = errors.New(`cannot unmarshal "count" field to an integer or autoscaling configuration`)
errUnmarshalEntryPoint = errors.New("cannot unmarshal entrypoint into string or slice of strings")
errUnmarshalCommand = errors.New("cannot unmarshal command into string or slice of strings")
)
// WorkloadProps contains properties for creating a new workload manifest.
type WorkloadProps struct {
Name string
Dockerfile string
Image string
}
// Workload holds the basic data that every workload manifest file needs to have.
type Workload struct {
Name *string `yaml:"name"`
Type *string `yaml:"type"` // must be one of the supported manifest types.
}
// Image represents the workload's container image.
type Image struct {
Build BuildArgsOrString `yaml:"build"` // Build an image from a Dockerfile.
Location *string `yaml:"location"` // Use an existing image instead.
}
// GetLocation returns the location of the image.
func (i Image) GetLocation() string {
return aws.StringValue(i.Location)
}
// BuildConfig populates a docker.BuildArguments struct from the fields available in the manifest.
// Prefer the following hierarchy:
// 1. Specific dockerfile, specific context
// 2. Specific dockerfile, context = dockerfile dir
// 3. "Dockerfile" located in context dir
// 4. "Dockerfile" located in ws root.
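//
// Illustrative example: with rootDirectory "/ws" and only a build string of
// "app/Dockerfile" set, the Dockerfile resolves to "/ws/app/Dockerfile" and
// the context to "/ws/app".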
func (i *Image) BuildConfig(rootDirectory string) *DockerBuildArgs {
df := i.dockerfile()
ctx := i.context()
dockerfile := aws.String(filepath.Join(rootDirectory, defaultDockerfileName))
context := aws.String(rootDirectory)
if df != "" && ctx != "" {
dockerfile = aws.String(filepath.Join(rootDirectory, df))
context = aws.String(filepath.Join(rootDirectory, ctx))
}
if df != "" && ctx == "" {
dockerfile = aws.String(filepath.Join(rootDirectory, df))
context = aws.String(filepath.Join(rootDirectory, filepath.Dir(df)))
}
if df == "" && ctx != "" {
dockerfile = aws.String(filepath.Join(rootDirectory, ctx, defaultDockerfileName))
context = aws.String(filepath.Join(rootDirectory, ctx))
}
return &DockerBuildArgs{
Dockerfile: dockerfile,
Context: context,
Args: i.args(),
Target: i.target(),
CacheFrom: i.cacheFrom(),
}
}
// dockerfile returns the path to the workload's Dockerfile. If no dockerfile is specified,
// returns "".
func (i *Image) dockerfile() string {
// Prefer to use the "Dockerfile" string in BuildArgs. Otherwise,
// "BuildString". If no dockerfile specified, return "".
if i.Build.BuildArgs.Dockerfile != nil {
return aws.StringValue(i.Build.BuildArgs.Dockerfile)
}
var dfPath string
if i.Build.BuildString != nil {
dfPath = aws.StringValue(i.Build.BuildString)
}
return dfPath
}
// context returns the build context directory if it exists, otherwise an empty string.
func (i *Image) context() string {
return aws.StringValue(i.Build.BuildArgs.Context)
}
// args returns the args section, if it exists, to override args in the dockerfile.
// Otherwise it returns an empty map.
func (i *Image) args() map[string]string {
return i.Build.BuildArgs.Args
}
// target returns the build target stage if it exists, otherwise nil.
func (i *Image) target() *string {
return i.Build.BuildArgs.Target
}
// cacheFrom returns the cache from build section, if it exists.
// Otherwise it returns nil.
func (i *Image) cacheFrom() []string {
return i.Build.BuildArgs.CacheFrom
}
// ImageOverride holds fields that override Dockerfile image defaults.
type ImageOverride struct {
EntryPoint EntryPointOverride `yaml:"entrypoint"`
Command CommandOverride `yaml:"command"`
}
// EntryPointOverride is a custom type which supports unmarshaling "entrypoint" yaml which
// can either be of type string or type slice of string.
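//
// Both of the following manifest forms are accepted (illustrative):
//   entrypoint: "/bin/sh -c"
//   entrypoint: ["/bin/sh", "-c"]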
type EntryPointOverride stringSliceOrString
// CommandOverride is a custom type which supports unmarshaling "command" yaml which
// can either be of type string or type slice of string.
type CommandOverride stringSliceOrString
// UnmarshalYAML overrides the default YAML unmarshaling logic for the EntryPointOverride
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v2) interface.
func (e *EntryPointOverride) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshalYAMLToStringSliceOrString((*stringSliceOrString)(e), unmarshal); err != nil {
return errUnmarshalEntryPoint
}
return nil
}
// ToStringSlice converts an EntryPointOverride to a slice of string.
func (e *EntryPointOverride) ToStringSlice() []string {
return toStringSlice((*stringSliceOrString)(e))
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the CommandOverride
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v2) interface.
func (c *CommandOverride) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshalYAMLToStringSliceOrString((*stringSliceOrString)(c), unmarshal); err != nil {
return errUnmarshalCommand
}
return nil
}
// ToStringSlice converts an CommandOverride to a slice of string.
func (c *CommandOverride) ToStringSlice() []string {
return toStringSlice((*stringSliceOrString)(c))
}
type stringSliceOrString struct {
String *string
StringSlice []string
}
func unmarshalYAMLToStringSliceOrString(s *stringSliceOrString, unmarshal func(interface{}) error) error {
if err := unmarshal(&s.StringSlice); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if s.StringSlice != nil {
// Unmarshaled successfully to s.StringSlice, unset s.String, and return.
s.String = nil
return nil
}
return unmarshal(&s.String)
}
func toStringSlice(s *stringSliceOrString) []string {
if s.String != nil {
return strings.Split(*s.String, " ")
}
return s.StringSlice
}
// BuildArgsOrString is a custom type which supports unmarshaling yaml which
// can either be of type string or type DockerBuildArgs.
type BuildArgsOrString struct {
BuildString *string
BuildArgs DockerBuildArgs
}
func (b *BuildArgsOrString) isEmpty() bool {
if aws.StringValue(b.BuildString) == "" && b.BuildArgs.isEmpty() {
return true
}
return false
}
// UnmarshalYAML overrides the default YAML unmarshaling logic for the BuildArgsOrString
// struct, allowing it to perform more complex unmarshaling behavior.
// This method implements the yaml.Unmarshaler (v2) interface.
func (b *BuildArgsOrString) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal(&b.BuildArgs); err != nil {
switch err.(type) {
case *yaml.TypeError:
break
default:
return err
}
}
if !b.BuildArgs.isEmpty() {
// Unmarshaled successfully to b.BuildArgs, unset b.BuildString, and return.
b.BuildString = nil
return nil
}
if err := unmarshal(&b.BuildString); err != nil {
return errUnmarshalBuildOpts
}
return nil
}
// DockerBuildArgs represents the options specifiable under the "build" field
// of Docker Compose services. For more information, see:
// https://docs.docker.com/compose/compose-file/#build
type DockerBuildArgs struct {
Context *string `yaml:"context,omitempty"`
Dockerfile *string `yaml:"dockerfile,omitempty"`
Args map[string]string `yaml:"args,omitempty"`
Target *string `yaml:"target,omitempty"`
CacheFrom []string `yaml:"cache_from,omitempty"`
}
func (b *DockerBuildArgs) isEmpty() bool {
if b.Context == nil && b.Dockerfile == nil && b.Args == nil && b.Target == nil && b.CacheFrom == nil {
return true
}
return false
}
// Logging holds configuration for Firelens to route your logs.
type Logging struct {
Image *string `yaml:"image"`
Destination map[string]string `yaml:"destination,flow"`
EnableMetadata *bool `yaml:"enableMetadata"`
SecretOptions map[string]string `yaml:"secretOptions"`
ConfigFile *string `yaml:"configFilePath"`
}
// LogImage returns the default Fluent Bit image if not otherwise configured.
func (lc *Logging) LogImage() *string {
if lc.Image == nil {
return aws.String(defaultFluentbitImage)
}
return lc.Image
}
// GetEnableMetadata returns the configuration value and a sane default for the EnableMetadata field.
func (lc *Logging) GetEnableMetadata() *string {
if lc.EnableMetadata == nil {
// Enable ecs log metadata by default.
return aws.String("true")
}
return aws.String(strconv.FormatBool(*lc.EnableMetadata))
}
// SidecarConfig represents the configurable options for setting up a sidecar container.
type SidecarConfig struct {
Port *string `yaml:"port"`
Image *string `yaml:"image"`
CredsParam *string `yaml:"credentialsParameter"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
MountPoints []SidecarMountPoint `yaml:"mount_points"`
}
// TaskConfig represents the resource boundaries and environment variables for the containers in the task.
type TaskConfig struct {
CPU *int `yaml:"cpu"`
Memory *int `yaml:"memory"`
Count Count `yaml:"count"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
Storage *Storage `yaml:"storage"`
}
// NetworkConfig represents options for network connection to AWS resources within a VPC.
type NetworkConfig struct {
VPC vpcConfig `yaml:"vpc"`
}
// UnmarshalYAML ensures that a NetworkConfig always defaults to public subnets.
// If the user specified a placement that's not valid, an error is returned.
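//
// Illustrative example: omitting "placement" under "network.vpc" defaults to
// "public", while an unrecognized value such as "isolated" fails validation.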
func (c *NetworkConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type networkWithDefaults NetworkConfig
conf := networkWithDefaults{
VPC: vpcConfig{
Placement: stringP(PublicSubnetPlacement),
},
}
if err := unmarshal(&conf); err != nil {
return err
}
if !conf.VPC.isValidPlacement() {
return fmt.Errorf("field '%s' is '%v' must be one of %#v", "network.vpc.placement", aws.StringValue(conf.VPC.Placement), subnetPlacements)
}
*c = NetworkConfig(conf)
return nil
}
// vpcConfig represents the security groups and subnets attached to a task.
type vpcConfig struct {
Placement *string `yaml:"placement"`
SecurityGroups []string `yaml:"security_groups"`
}
func (c vpcConfig) isValidPlacement() bool {
if c.Placement == nil {
return false
}
for _, allowed := range subnetPlacements {
if *c.Placement == allowed {
return true
}
}
return false
}
// UnmarshalWorkload deserializes the YAML input stream into a workload manifest object.
// If an error occurs during deserialization, then returns the error.
// If the workload type in the manifest is invalid, then returns an ErrInvalidManifestType.
func UnmarshalWorkload(in []byte) (interface{}, error) {
am := Workload{}
if err := yaml.Unmarshal(in, &am); err != nil {
return nil, fmt.Errorf("unmarshal to workload manifest: %w", err)
}
typeVal := aws.StringValue(am.Type)
switch typeVal {
case LoadBalancedWebServiceType:
m := newDefaultLoadBalancedWebService()
if err := yaml.Unmarshal(in, m); err != nil {
return nil, fmt.Errorf("unmarshal to load balanced web service: %w", err)
}
return m, nil
case BackendServiceType:
m := newDefaultBackendService()
if err := yaml.Unmarshal(in, m); err != nil {
return nil, fmt.Errorf("unmarshal to backend service: %w", err)
}
if m.BackendServiceConfig.ImageConfig.HealthCheck != nil {
			// Make sure that unset fields in the healthcheck get a default value.
m.BackendServiceConfig.ImageConfig.HealthCheck.applyIfNotSet(newDefaultContainerHealthCheck())
}
return m, nil
case ScheduledJobType:
m := newDefaultScheduledJob()
if err := yaml.Unmarshal(in, m); err != nil {
return nil, fmt.Errorf("unmarshal to scheduled job: %w", err)
}
return m, nil
default:
return nil, &ErrInvalidWorkloadType{Type: typeVal}
}
}
func requiresBuild(image Image) (bool, error) {
noBuild, noURL := image.Build.isEmpty(), image.Location == nil
// Error if both of them are specified or neither is specified.
if noBuild == noURL {
return false, fmt.Errorf(`either "image.build" or "image.location" needs to be specified in the manifest`)
}
if image.Location == nil {
return true, nil
}
return false, nil
}
func dockerfileBuildRequired(workloadType string, svc interface{}) (bool, error) {
type manifest interface {
BuildRequired() (bool, error)
}
mf, ok := svc.(manifest)
if !ok {
return false, fmt.Errorf("%s does not have required methods BuildRequired()", workloadType)
}
required, err := mf.BuildRequired()
if err != nil {
return false, fmt.Errorf("check if %s requires building from local Dockerfile: %w", workloadType, err)
}
return required, nil
}
func stringP(s string) *string {
if s == "" {
return nil
}
return &s
}
func uint16P(n uint16) *uint16 {
if n == 0 {
return nil
}
return &n
}
| 1 | 16,546 | nit: can we remove this? | aws-copilot-cli | go |
@@ -44,11 +44,11 @@ public class NotificationStore {
}
}
- public Notification get(int index) {
+ public synchronized Notification get(int index) {
return store.get(index);
}
- public void add(Notification n) {
+ public synchronized void add(Notification n) {
log.info("Notification received: " + n.text);
for (int i = 0; i < store.size(); i++) {
if (get(i).id == n.id) { | 1 | package info.nightscout.androidaps.plugins.Overview.notifications;
import android.app.NotificationManager;
import android.content.Context;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.AudioAttributes;
import android.media.RingtoneManager;
import android.net.Uri;
import android.support.v4.app.NotificationCompat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import info.nightscout.androidaps.MainApp;
import info.nightscout.androidaps.R;
import info.nightscout.androidaps.Services.AlarmSoundService;
import info.nightscout.androidaps.plugins.Wear.WearPlugin;
// Added by Rumen for snooze time
import info.nightscout.utils.SP;
/**
* Created by mike on 03.12.2016.
*/
public class NotificationStore {
private static Logger log = LoggerFactory.getLogger(NotificationStore.class);
public List<Notification> store = new ArrayList<Notification>();
public long snoozedUntil = 0L;
public NotificationStore() {
}
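    // Orders notifications by level (ascending) so that add() keeps the store
    // sorted after every insertion.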
public class NotificationComparator implements Comparator<Notification> {
@Override
public int compare(Notification o1, Notification o2) {
return o1.level - o2.level;
}
}
public Notification get(int index) {
return store.get(index);
}
public void add(Notification n) {
log.info("Notification received: " + n.text);
for (int i = 0; i < store.size(); i++) {
if (get(i).id == n.id) {
get(i).date = n.date;
get(i).validTo = n.validTo;
return;
}
}
store.add(n);
if (SP.getBoolean(MainApp.sResources.getString(R.string.key_raise_notifications_as_android_notifications), false)) {
raiseSystemNotification(n);
} else {
if (n.soundId != null) {
Intent alarm = new Intent(MainApp.instance().getApplicationContext(), AlarmSoundService.class);
alarm.putExtra("soundid", n.soundId);
MainApp.instance().startService(alarm);
}
}
Collections.sort(store, new NotificationComparator());
}
private void raiseSystemNotification(Notification n) {
Context context = MainApp.instance().getApplicationContext();
NotificationManager mgr = (NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE);
Bitmap largeIcon = BitmapFactory.decodeResource(context.getResources(), R.mipmap.blueowl);
Uri sound = RingtoneManager.getDefaultUri(RingtoneManager.TYPE_ALARM);
NotificationCompat.Builder notificationBuilder =
new NotificationCompat.Builder(context)
.setSmallIcon(R.drawable.ic_notification)
.setLargeIcon(largeIcon)
.setContentText(n.text)
.setPriority(NotificationCompat.PRIORITY_MAX)
.setDeleteIntent(DismissNotificationService.deleteIntent(n.id));
if (n.level == Notification.URGENT) {
notificationBuilder.setVibrate(new long[]{1000, 1000, 1000, 1000})
.setContentTitle(MainApp.sResources.getString(R.string.urgent_alarm))
.setSound(sound, AudioAttributes.USAGE_ALARM);
} else {
notificationBuilder.setVibrate(new long[]{0, 100, 50, 100, 50})
.setContentTitle(MainApp.sResources.getString(R.string.info))
;
}
mgr.notify(n.id, notificationBuilder.build());
}
public boolean remove(int id) {
for (int i = 0; i < store.size(); i++) {
if (get(i).id == id) {
if (get(i).soundId != null) {
Intent alarm = new Intent(MainApp.instance().getApplicationContext(), AlarmSoundService.class);
MainApp.instance().stopService(alarm);
}
store.remove(i);
return true;
}
}
return false;
}
public void removeExpired() {
for (int i = 0; i < store.size(); i++) {
Notification n = get(i);
if (n.validTo.getTime() != 0 && n.validTo.getTime() < System.currentTimeMillis()) {
store.remove(i);
i--;
}
}
}
public void snoozeTo(long timeToSnooze) {
log.debug("Snoozing alarm until: " + timeToSnooze);
SP.putLong("snoozedTo", timeToSnooze);
}
public void unSnooze() {
if (Notification.isAlarmForStaleData()) {
Notification notification = new Notification(Notification.NSALARM, MainApp.sResources.getString(R.string.nsalarm_staledata), Notification.URGENT);
SP.putLong("snoozedTo", System.currentTimeMillis());
add(notification);
log.debug("Snoozed to current time and added back notification!");
}
}
}
| 1 | 29,907 | is it not a problem when one synchronized function is called by other? | MilosKozak-AndroidAPS | java |
@@ -1,6 +1,7 @@
ActiveAdmin.register Account do
- permit_params :level
- actions :index, :show, :edit
+ permit_params :login, :email, :level, :country_code, :location, :url, :hide_experience, :email_master, :email_posts,
+ :email_kudos, :email_new_followers, :twitter_account, :affiliation_type, :organization_name
+ actions :index, :show, :edit, :update
controller do
defaults finder: :fetch_by_login_or_email | 1 | ActiveAdmin.register Account do
permit_params :level
actions :index, :show, :edit
controller do
defaults finder: :fetch_by_login_or_email
end
filter :login
filter :email
filter :name
filter :level, as: :select, collection: [['Default', Account::Access::DEFAULT],
['Admin', Account::Access::ADMIN],
['Disabled', Account::Access::DISABLED],
['Spammer', Account::Access::SPAM]]
filter :last_seen_at
filter :last_seen_ip
index do
column :id
column :name do |account|
link_to account.name, account_path(account)
end
column :login
column :email
column :level do |account|
case account.level
when Account::Access::DEFAULT
status_tag('default', :ok)
when Account::Access::ADMIN
status_tag('admin', :warning)
when Account::Access::DISABLED
status_tag('disabled', :error)
else
status_tag('spammer', :error)
end
end
column :url
column :last_seen_at
column :last_seen_ip do |account|
ip = account.last_seen_ip
ip.blank? ? '' : link_to(ip, admin_accounts_path('q[last_seen_ip_contains]' => ip, 'commit' => 'Filter'))
end
actions
end
form do |f|
f.semantic_errors(*f.object.errors.keys)
f.inputs 'Details' do
f.input :login, as: :string
f.input :email, as: :string
f.input :name, as: :string
f.input :level, as: :select, include_blank: false,
collection: { 'Default' => Account::Access::DEFAULT,
'Admin' => Account::Access::ADMIN,
'Disabled' => Account::Access::DISABLED,
'Spammer' => Account::Access::SPAM }
f.input :country_code, as: :string
f.input :location, as: :string
f.input :url, as: :url
f.input :hide_experience
f.input :email_master
f.input :email_posts
f.input :email_kudos
f.input :email_new_followers
f.input :twitter_account, as: :string
f.input :affiliation_type, as: :string
f.input :organization_name, as: :string
end
f.actions
end
end
| 1 | 8,480 | We (even as admins) shouldn't override the User Preference settings like `email_master`, `email_posts`, `email_kudos`, `email_new_followers`. These all would be set by the user of their choice. Please do remove these attributes from editing//updating. Thanks! | blackducksoftware-ohloh-ui | rb |
@@ -20,7 +20,7 @@ return [
'alpha_dash' => 'O campo :attribute deve conter apenas letras, números e traços.',
'alpha_num' => 'O campo :attribute deve conter apenas letras e números .',
'array' => 'O campo :attribute deve conter um array.',
- 'attached' => 'This :attribute is already attached.',
+ 'attached' => 'Este :attribute já está ligado.',
'before' => 'O campo :attribute deve conter uma data anterior a :date.',
'before_or_equal' => 'O campo :attribute deve conter uma data inferior ou igual a :date.',
'between' => [ | 1 | <?php
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
| the validator class. Some of these rules have multiple versions such
| as the size rules. Feel free to tweak each of these messages here.
|
*/
return [
'accepted' => 'O campo :attribute deve ser aceito.',
'active_url' => 'O campo :attribute deve conter uma URL válida.',
'after' => 'O campo :attribute deve conter uma data posterior a :date.',
'after_or_equal' => 'O campo :attribute deve conter uma data superior ou igual a :date.',
'alpha' => 'O campo :attribute deve conter apenas letras.',
'alpha_dash' => 'O campo :attribute deve conter apenas letras, números e traços.',
    'alpha_num'            => 'O campo :attribute deve conter apenas letras e números.',
'array' => 'O campo :attribute deve conter um array.',
'attached' => 'This :attribute is already attached.',
'before' => 'O campo :attribute deve conter uma data anterior a :date.',
'before_or_equal' => 'O campo :attribute deve conter uma data inferior ou igual a :date.',
'between' => [
'array' => 'O campo :attribute deve conter de :min a :max itens.',
'file' => 'O campo :attribute deve conter um arquivo de :min a :max kilobytes.',
'numeric' => 'O campo :attribute deve conter um número entre :min e :max.',
'string' => 'O campo :attribute deve conter entre :min a :max caracteres.',
],
'boolean' => 'O campo :attribute deve conter o valor verdadeiro ou falso.',
'confirmed' => 'A confirmação para o campo :attribute não coincide.',
'date' => 'O campo :attribute não contém uma data válida.',
'date_equals' => 'O campo :attribute deve ser uma data igual a :date.',
'date_format' => 'A data informada para o campo :attribute não respeita o formato :format.',
'different' => 'Os campos :attribute e :other devem conter valores diferentes.',
'digits' => 'O campo :attribute deve conter :digits dígitos.',
'digits_between' => 'O campo :attribute deve conter entre :min a :max dígitos.',
'dimensions' => 'O valor informado para o campo :attribute não é uma dimensão de imagem válida.',
'distinct' => 'O campo :attribute contém um valor duplicado.',
'email' => 'O campo :attribute não contém um endereço de email válido.',
'ends_with' => 'O campo :attribute deve terminar com um dos seguintes valores: :values',
'exists' => 'O valor selecionado para o campo :attribute é inválido.',
'file' => 'O campo :attribute deve conter um arquivo.',
'filled' => 'O campo :attribute é obrigatório.',
'gt' => [
'array' => 'O campo :attribute deve ter mais que :value itens.',
'file' => 'O arquivo :attribute deve ser maior que :value kilobytes.',
'numeric' => 'O campo :attribute deve ser maior que :value.',
'string' => 'O campo :attribute deve ser maior que :value caracteres.',
],
'gte' => [
'array' => 'O campo :attribute deve ter :value itens ou mais.',
'file' => 'O arquivo :attribute deve ser maior ou igual a :value kilobytes.',
'numeric' => 'O campo :attribute deve ser maior ou igual a :value.',
'string' => 'O campo :attribute deve ser maior ou igual a :value caracteres.',
],
'image' => 'O campo :attribute deve conter uma imagem.',
'in' => 'O campo :attribute não contém um valor válido.',
'in_array' => 'O campo :attribute não existe em :other.',
'integer' => 'O campo :attribute deve conter um número inteiro.',
'ip' => 'O campo :attribute deve conter um IP válido.',
'ipv4' => 'O campo :attribute deve conter um IPv4 válido.',
'ipv6' => 'O campo :attribute deve conter um IPv6 válido.',
'json' => 'O campo :attribute deve conter uma string JSON válida.',
'lt' => [
'array' => 'O campo :attribute deve ter menos que :value itens.',
        'file'    => 'O arquivo :attribute deve ser menor que :value kilobytes.',
'numeric' => 'O campo :attribute deve ser menor que :value.',
'string' => 'O campo :attribute deve ser menor que :value caracteres.',
],
'lte' => [
'array' => 'O campo :attribute não deve ter mais que :value itens.',
        'file'    => 'O arquivo :attribute deve ser menor ou igual a :value kilobytes.',
'numeric' => 'O campo :attribute deve ser menor ou igual a :value.',
'string' => 'O campo :attribute deve ser menor ou igual a :value caracteres.',
],
'max' => [
'array' => 'O campo :attribute deve conter no máximo :max itens.',
'file' => 'O campo :attribute não pode conter um arquivo com mais de :max kilobytes.',
'numeric' => 'O campo :attribute não pode conter um valor superior a :max.',
'string' => 'O campo :attribute não pode conter mais de :max caracteres.',
],
'mimes' => 'O campo :attribute deve conter um arquivo do tipo: :values.',
'mimetypes' => 'O campo :attribute deve conter um arquivo do tipo: :values.',
'min' => [
'array' => 'O campo :attribute deve conter no mínimo :min itens.',
'file' => 'O campo :attribute deve conter um arquivo com no mínimo :min kilobytes.',
'numeric' => 'O campo :attribute deve conter um número superior ou igual a :min.',
'string' => 'O campo :attribute deve conter no mínimo :min caracteres.',
],
'multiple_of' => 'The :attribute must be a multiple of :value',
'not_in' => 'O campo :attribute contém um valor inválido.',
'not_regex' => 'O formato do valor :attribute é inválido.',
'numeric' => 'O campo :attribute deve conter um valor numérico.',
'password' => 'A senha está incorreta.',
'present' => 'O campo :attribute deve estar presente.',
'prohibited' => 'The :attribute field is prohibited.',
'prohibited_if' => 'The :attribute field is prohibited when :other is :value.',
'prohibited_unless' => 'The :attribute field is prohibited unless :other is in :values.',
'regex' => 'O formato do valor informado no campo :attribute é inválido.',
'relatable' => 'This :attribute may not be associated with this resource.',
'required' => 'O campo :attribute é obrigatório.',
'required_if' => 'O campo :attribute é obrigatório quando o valor do campo :other é igual a :value.',
'required_unless' => 'O campo :attribute é obrigatório a menos que :other esteja presente em :values.',
'required_with' => 'O campo :attribute é obrigatório quando :values está presente.',
'required_with_all' => 'O campo :attribute é obrigatório quando um dos :values está presente.',
'required_without' => 'O campo :attribute é obrigatório quando :values não está presente.',
'required_without_all' => 'O campo :attribute é obrigatório quando nenhum dos :values está presente.',
'same' => 'Os campos :attribute e :other devem conter valores iguais.',
'size' => [
'array' => 'O campo :attribute deve conter :size itens.',
'file' => 'O campo :attribute deve conter um arquivo com o tamanho de :size kilobytes.',
'numeric' => 'O campo :attribute deve conter o número :size.',
'string' => 'O campo :attribute deve conter :size caracteres.',
],
'starts_with' => 'O campo :attribute deve começar com um dos seguintes valores: :values',
'string' => 'O campo :attribute deve ser uma string.',
'timezone' => 'O campo :attribute deve conter um fuso horário válido.',
'unique' => 'O valor informado para o campo :attribute já está em uso.',
    'uploaded'             => 'Falha no upload do arquivo :attribute.',
'url' => 'O formato da URL informada para o campo :attribute é inválido.',
'uuid' => 'O campo :attribute deve ser um UUID válido.',
'custom' => [
'attribute-name' => [
'rule-name' => 'custom-message',
],
],
'attributes' => [
'address' => 'endereço',
'age' => 'idade',
'body' => 'conteúdo',
'city' => 'cidade',
'country' => 'país',
'date' => 'data',
'day' => 'dia',
'description' => 'descrição',
'email' => 'e-mail',
'excerpt' => 'resumo',
'first_name' => 'primeiro nome',
'gender' => 'gênero',
'hour' => 'hora',
'last_name' => 'sobrenome',
'message' => 'mensagem',
'minute' => 'minuto',
'mobile' => 'celular',
'month' => 'mês',
'name' => 'nome',
'password' => 'senha',
'password_confirmation' => 'confirmação da senha',
'phone' => 'telefone',
'remember' => 'lembrar-me',
'second' => 'segundo',
'sex' => 'sexo',
'state' => 'estado',
'subject' => 'assunto',
'text' => 'texto',
'time' => 'hora',
'title' => 'título',
'username' => 'usuário',
'year' => 'ano',
],
];
| 1 | 8,637 | "Este :attribute j est anexado." sounds better. | Laravel-Lang-lang | php |
@@ -0,0 +1,6 @@
+
+if __name__ == "__main__":
+ import doctest
+ import databricks.koalas as ks
+ from databricks.koalas import frame, series
+ doctest.testmod(frame, extraglobs={"ks": ks}) | 1 | 1 | 8,532 | This line should be repeated for every module that needs testing. One cannot rely on nosetest to automatically discover all the modules. On the bright side, there is no need to change any of the other files. | databricks-koalas | py |
|
@@ -301,5 +301,3 @@ var failedIssuanceMu sync.RWMutex
// If this value is recent, do not make any on-demand certificate requests.
var lastIssueTime time.Time
var lastIssueTimeMu sync.Mutex
-
-var errNoCert = errors.New("no certificate available") | 1 | package caddytls
import (
"crypto/tls"
"errors"
"fmt"
"log"
"strings"
"sync"
"sync/atomic"
"time"
)
// configGroup is a type that keys configs by their hostname
// (hostnames can have wildcard characters; use the getConfig
// method to get a config by matching its hostname). Its
// GetCertificate function can be used with tls.Config.
type configGroup map[string]*Config
// getConfig gets the config by the first key match for name.
// In other words, "sub.foo.bar" will get the config for "*.foo.bar"
// if that is the closest match. This function MAY return nil
// if no match is found.
//
// This function follows nearly the same logic to lookup
// a hostname as the getCertificate function uses.
func (cg configGroup) getConfig(name string) *Config {
name = strings.ToLower(name)
// exact match? great, let's use it
if config, ok := cg[name]; ok {
return config
}
// try replacing labels in the name with wildcards until we get a match
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
if config, ok := cg[candidate]; ok {
return config
}
}
// as last resort, try a config that serves all names
if config, ok := cg[""]; ok {
return config
}
return nil
}
// GetCertificate gets a certificate to satisfy clientHello. In getting
// the certificate, it abides the rules and settings defined in the
// Config that matches clientHello.ServerName. It first checks the in-
// memory cache, then, if the config enables "OnDemand", it accesses
// disk, then accesses the network if it must obtain a new certificate
// via ACME.
//
// This method is safe for use as a tls.Config.GetCertificate callback.
func (cg configGroup) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
cert, err := cg.getCertDuringHandshake(strings.ToLower(clientHello.ServerName), true, true)
return &cert.Certificate, err
}
// getCertDuringHandshake will get a certificate for name. It first tries
// the in-memory cache. If no certificate for name is in the cache, the
// config most closely corresponding to name will be loaded. If that config
// allows it (OnDemand==true) and if loadIfNecessary == true, it goes to disk
// to load it into the cache and serve it. If it's not on disk and if
// obtainIfNecessary == true, the certificate will be obtained from the CA,
// cached, and served. If obtainIfNecessary is true, then loadIfNecessary
// must also be set to true. An error will be returned if and only if no
// certificate is available.
//
// This function is safe for concurrent use.
func (cg configGroup) getCertDuringHandshake(name string, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
// First check our in-memory cache to see if we've already loaded it
cert, matched, defaulted := getCertificate(name)
if matched {
return cert, nil
}
// Get the relevant TLS config for this name. If OnDemand is enabled,
// then we might be able to load or obtain a needed certificate.
cfg := cg.getConfig(name)
if cfg != nil && cfg.OnDemand && loadIfNecessary {
// Then check to see if we have one on disk
loadedCert, err := CacheManagedCertificate(name, cfg)
if err == nil {
loadedCert, err = cg.handshakeMaintenance(name, loadedCert)
if err != nil {
log.Printf("[ERROR] Maintaining newly-loaded certificate for %s: %v", name, err)
}
return loadedCert, nil
}
if obtainIfNecessary {
// By this point, we need to ask the CA for a certificate
name = strings.ToLower(name)
// Make sure aren't over any applicable limits
err := cg.checkLimitsForObtainingNewCerts(name, cfg)
if err != nil {
return Certificate{}, err
}
// Name has to qualify for a certificate
if !HostQualifies(name) {
return cert, errors.New("hostname '" + name + "' does not qualify for certificate")
}
// Obtain certificate from the CA
return cg.obtainOnDemandCertificate(name, cfg)
}
}
// Fall back to the default certificate if there is one
if defaulted {
return cert, nil
}
return Certificate{}, fmt.Errorf("no certificate available for %s", name)
}
// checkLimitsForObtainingNewCerts checks to see if name can be issued right
// now according to mitigating factors we keep track of and preferences the
// user has set. If a non-nil error is returned, do not issue a new certificate
// for name.
func (cg configGroup) checkLimitsForObtainingNewCerts(name string, cfg *Config) error {
// User can set hard limit for number of certs for the process to issue
if cfg.OnDemandState.MaxObtain > 0 &&
atomic.LoadInt32(&cfg.OnDemandState.ObtainedCount) >= cfg.OnDemandState.MaxObtain {
return fmt.Errorf("%s: maximum certificates issued (%d)", name, cfg.OnDemandState.MaxObtain)
}
// Make sure name hasn't failed a challenge recently
failedIssuanceMu.RLock()
when, ok := failedIssuance[name]
failedIssuanceMu.RUnlock()
if ok {
return fmt.Errorf("%s: throttled; refusing to issue cert since last attempt on %s failed", name, when.String())
}
// Make sure, if we've issued a few certificates already, that we haven't
// issued any recently
lastIssueTimeMu.Lock()
since := time.Since(lastIssueTime)
lastIssueTimeMu.Unlock()
if atomic.LoadInt32(&cfg.OnDemandState.ObtainedCount) >= 10 && since < 10*time.Minute {
return fmt.Errorf("%s: throttled; last certificate was obtained %v ago", name, since)
}
// 👍Good to go
return nil
}
// obtainOnDemandCertificate obtains a certificate for the given name. If
// another goroutine has already started obtaining a cert for
// name, it will wait and use what the other goroutine obtained.
//
// This function is safe for use by multiple concurrent goroutines.
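//
// For example, if two handshakes for the same new hostname arrive at once,
// only one goroutine contacts the CA; the other blocks on the wait channel
// and then re-reads the freshly cached certificate.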
func (cg configGroup) obtainOnDemandCertificate(name string, cfg *Config) (Certificate, error) {
// We must protect this process from happening concurrently, so synchronize.
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already obtaining the certificate.
// wait for it to finish obtaining the cert and then we'll use it.
obtainCertWaitChansMu.Unlock()
<-wait
return cg.getCertDuringHandshake(name, true, false)
}
// looks like it's up to us to do all the work and obtain the cert.
// make a chan others can wait on if needed
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
// do the obtain
log.Printf("[INFO] Obtaining new certificate for %s", name)
err := cfg.ObtainCert(name, false)
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
if err != nil {
// Failed to solve challenge, so don't allow another on-demand
// issue for this name to be attempted for a little while.
failedIssuanceMu.Lock()
failedIssuance[name] = time.Now()
go func(name string) {
time.Sleep(5 * time.Minute)
failedIssuanceMu.Lock()
delete(failedIssuance, name)
failedIssuanceMu.Unlock()
}(name)
failedIssuanceMu.Unlock()
return Certificate{}, err
}
// Success - update counters and stuff
atomic.AddInt32(&cfg.OnDemandState.ObtainedCount, 1)
lastIssueTimeMu.Lock()
lastIssueTime = time.Now()
lastIssueTimeMu.Unlock()
// certificate is already on disk; now just start over to load it and serve it
return cg.getCertDuringHandshake(name, true, false)
}
// handshakeMaintenance performs a check on cert for expiration and OCSP
// validity.
//
// This function is safe for use by multiple concurrent goroutines.
func (cg configGroup) handshakeMaintenance(name string, cert Certificate) (Certificate, error) {
// Check cert expiration
timeLeft := cert.NotAfter.Sub(time.Now().UTC())
if timeLeft < RenewDurationBefore {
log.Printf("[INFO] Certificate for %v expires in %v; attempting renewal", cert.Names, timeLeft)
return cg.renewDynamicCertificate(name, cert.Config)
}
// Check OCSP staple validity
if cert.OCSP != nil {
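		// Refresh the staple once the midpoint of its validity window has passed.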
refreshTime := cert.OCSP.ThisUpdate.Add(cert.OCSP.NextUpdate.Sub(cert.OCSP.ThisUpdate) / 2)
if time.Now().After(refreshTime) {
err := stapleOCSP(&cert, nil)
if err != nil {
// An error with OCSP stapling is not the end of the world, and in fact, is
// quite common considering not all certs have issuer URLs that support it.
log.Printf("[ERROR] Getting OCSP for %s: %v", name, err)
}
certCacheMu.Lock()
certCache[name] = cert
certCacheMu.Unlock()
}
}
return cert, nil
}
// renewDynamicCertificate renews the certificate for name using cfg. It returns the
// certificate to use and an error, if any. currentCert may be returned even if an
// error occurs, since we perform renewals before they expire and it may still be
// usable. name should already be lower-cased before calling this function.
//
// This function is safe for use by multiple concurrent goroutines.
func (cg configGroup) renewDynamicCertificate(name string, cfg *Config) (Certificate, error) {
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already renewing the certificate.
// wait for it to finish, then we'll use the new one.
obtainCertWaitChansMu.Unlock()
<-wait
return cg.getCertDuringHandshake(name, true, false)
}
// looks like it's up to us to do all the work and renew the cert
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
// do the renew
log.Printf("[INFO] Renewing certificate for %s", name)
err := cfg.RenewCert(name, false)
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
if err != nil {
return Certificate{}, err
}
return cg.getCertDuringHandshake(name, true, false)
}
// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
var obtainCertWaitChans = make(map[string]chan struct{})
var obtainCertWaitChansMu sync.Mutex
// failedIssuance is a set of names that we recently failed to get a
// certificate for from the ACME CA. They are removed after some time.
// When a name is in this map, do not issue a certificate for it on-demand.
var failedIssuance = make(map[string]time.Time)
var failedIssuanceMu sync.RWMutex
// lastIssueTime records when we last obtained a certificate successfully.
// If this value is recent, do not make any on-demand certificate requests.
var lastIssueTime time.Time
var lastIssueTimeMu sync.Mutex
var errNoCert = errors.New("no certificate available")
| 1 | 9,350 | This error was unused | caddyserver-caddy | go |
@@ -212,6 +212,11 @@ type (
PreviousRangeID int64
}
+ // CloseShardRequest is used to notify persistence that we're unloading a shard
+ CloseShardRequest struct {
+ ShardID int32
+ }
+
// AddTasksRequest is used to write new tasks
AddTasksRequest struct {
ShardID int32 | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dataInterfaces_mock.go
package persistence
import (
"fmt"
"net"
"strings"
"time"
"github.com/pborman/uuid"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/primitives"
"go.temporal.io/server/service/history/tasks"
)
// CreateWorkflowMode workflow creation mode
type CreateWorkflowMode int
// QueueType is an enum that represents various queue types in persistence
type QueueType int32
// Queue types used in queue table
// Use positive numbers for queue type
// Negative numbers are reserved for DLQ
const (
NamespaceReplicationQueueType QueueType = iota + 1
)
// Create Workflow Execution Mode
const (
// CreateWorkflowModeBrandNew fail if current record exists
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeBrandNew CreateWorkflowMode = iota
// CreateWorkflowModeWorkflowIDReuse update current record only if workflow is closed
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeWorkflowIDReuse
// CreateWorkflowModeContinueAsNew update current record only if workflow is open
// Only applicable for UpdateWorkflowExecution
CreateWorkflowModeContinueAsNew
// CreateWorkflowModeZombie do not update current record since workflow is in zombie state
// applicable for CreateWorkflowExecution, UpdateWorkflowExecution
CreateWorkflowModeZombie
)
// UpdateWorkflowMode update mode
type UpdateWorkflowMode int
// Update Workflow Execution Mode
const (
// UpdateWorkflowModeUpdateCurrent update workflow, including current record
// NOTE: update on current record is a condition update
UpdateWorkflowModeUpdateCurrent UpdateWorkflowMode = iota
// UpdateWorkflowModeBypassCurrent update workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
UpdateWorkflowModeBypassCurrent
)
// ConflictResolveWorkflowMode conflict resolve mode
type ConflictResolveWorkflowMode int
// Conflict Resolve Workflow Mode
const (
// ConflictResolveWorkflowModeUpdateCurrent conflict resolve workflow, including current record
// NOTE: update on current record is a condition update
ConflictResolveWorkflowModeUpdateCurrent ConflictResolveWorkflowMode = iota
// ConflictResolveWorkflowModeBypassCurrent conflict resolve workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
ConflictResolveWorkflowModeBypassCurrent
)
// UnknownNumRowsAffected is returned when the number of rows that an API affected cannot be determined
const UnknownNumRowsAffected = -1
const (
// InitialFailoverNotificationVersion is the initial failover version for a namespace
InitialFailoverNotificationVersion int64 = 0
)
const numItemsInGarbageInfo = 3
type (
// InvalidPersistenceRequestError represents invalid request to persistence
InvalidPersistenceRequestError struct {
Msg string
}
// CurrentWorkflowConditionFailedError represents a failed conditional update for current workflow record
CurrentWorkflowConditionFailedError struct {
Msg string
RequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
}
// WorkflowConditionFailedError represents a failed conditional update for workflow record
WorkflowConditionFailedError struct {
Msg string
NextEventID int64
DBRecordVersion int64
}
// ConditionFailedError represents a failed conditional update for execution record
ConditionFailedError struct {
Msg string
}
// ShardAlreadyExistError is returned when conditionally creating a shard fails
ShardAlreadyExistError struct {
Msg string
}
// ShardOwnershipLostError is returned when conditional update fails due to RangeID for the shard
ShardOwnershipLostError struct {
ShardID int32
Msg string
}
// TimeoutError is returned when a write operation fails due to a timeout
TimeoutError struct {
Msg string
}
// TransactionSizeLimitError is returned when the transaction size is too large
TransactionSizeLimitError struct {
Msg string
}
// ShardInfoWithFailover describes a shard
ShardInfoWithFailover struct {
*persistencespb.ShardInfo
TransferFailoverLevels map[string]TransferFailoverLevel // uuid -> TransferFailoverLevel
TimerFailoverLevels map[string]TimerFailoverLevel // uuid -> TimerFailoverLevel
}
// TransferFailoverLevel contains corresponding start / end level
TransferFailoverLevel struct {
StartTime time.Time
MinLevel int64
CurrentLevel int64
MaxLevel int64
NamespaceIDs map[string]struct{}
}
// TimerFailoverLevel contains namespace IDs and corresponding start / end level
TimerFailoverLevel struct {
StartTime time.Time
MinLevel time.Time
CurrentLevel time.Time
MaxLevel time.Time
NamespaceIDs map[string]struct{}
}
// TaskQueueKey is the struct used to identity TaskQueues
TaskQueueKey struct {
NamespaceID string
Name string
TaskType enumspb.TaskQueueType
}
// CreateShardRequest is used to create a shard in executions table
CreateShardRequest struct {
ShardInfo *persistencespb.ShardInfo
}
// GetShardRequest is used to get shard information
GetShardRequest struct {
ShardID int32
}
// GetShardResponse is the response to GetShard
GetShardResponse struct {
ShardInfo *persistencespb.ShardInfo
}
// UpdateShardRequest is used to update shard information
UpdateShardRequest struct {
ShardInfo *persistencespb.ShardInfo
PreviousRangeID int64
}
// AddTasksRequest is used to write new tasks
AddTasksRequest struct {
ShardID int32
RangeID int64
NamespaceID string
WorkflowID string
RunID string
TransferTasks []tasks.Task
TimerTasks []tasks.Task
ReplicationTasks []tasks.Task
VisibilityTasks []tasks.Task
}
// CreateWorkflowExecutionRequest is used to write a new workflow execution
CreateWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode CreateWorkflowMode
PreviousRunID string
PreviousLastWriteVersion int64
NewWorkflowSnapshot WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
}
// CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest
CreateWorkflowExecutionResponse struct {
NewMutableStateStats MutableStateStatistics
}
// GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution
GetWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
Execution commonpb.WorkflowExecution
}
// GetWorkflowExecutionResponse is the response to GetWorkflowExecutionRequest
GetWorkflowExecutionResponse struct {
State *persistencespb.WorkflowMutableState
DBRecordVersion int64
MutableStateStats MutableStateStatistics
}
// GetCurrentExecutionRequest is used to retrieve the current RunId for an execution
GetCurrentExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
}
// ListConcreteExecutionsRequest is request to ListConcreteExecutions
ListConcreteExecutionsRequest struct {
ShardID int32
PageSize int
PageToken []byte
}
// ListConcreteExecutionsResponse is response to ListConcreteExecutions
ListConcreteExecutionsResponse struct {
States []*persistencespb.WorkflowMutableState
PageToken []byte
}
// GetCurrentExecutionResponse is the response to GetCurrentExecution
GetCurrentExecutionResponse struct {
StartRequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
}
// UpdateWorkflowExecutionRequest is used to update a workflow execution
UpdateWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode UpdateWorkflowMode
UpdateWorkflowMutation WorkflowMutation
UpdateWorkflowEvents []*WorkflowEvents
NewWorkflowSnapshot *WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
}
// UpdateWorkflowExecutionResponse is response for UpdateWorkflowExecutionRequest
UpdateWorkflowExecutionResponse struct {
UpdateMutableStateStats MutableStateStatistics
NewMutableStateStats *MutableStateStatistics
}
// ConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for a single run
ConflictResolveWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode ConflictResolveWorkflowMode
		// workflow to be reset
ResetWorkflowSnapshot WorkflowSnapshot
ResetWorkflowEvents []*WorkflowEvents
// maybe new workflow
NewWorkflowSnapshot *WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
// current workflow
CurrentWorkflowMutation *WorkflowMutation
CurrentWorkflowEvents []*WorkflowEvents
}
ConflictResolveWorkflowExecutionResponse struct {
ResetMutableStateStats MutableStateStatistics
NewMutableStateStats *MutableStateStatistics
CurrentMutableStateStats *MutableStateStatistics
}
// WorkflowEvents is used as generic workflow history events transaction container
WorkflowEvents struct {
NamespaceID string
WorkflowID string
RunID string
BranchToken []byte
PrevTxnID int64
TxnID int64
Events []*historypb.HistoryEvent
}
// WorkflowMutation is used as generic workflow execution state mutation
WorkflowMutation struct {
ExecutionInfo *persistencespb.WorkflowExecutionInfo
ExecutionState *persistencespb.WorkflowExecutionState
// TODO deprecate NextEventID in favor of DBRecordVersion
NextEventID int64
UpsertActivityInfos map[int64]*persistencespb.ActivityInfo
DeleteActivityInfos map[int64]struct{}
UpsertTimerInfos map[string]*persistencespb.TimerInfo
DeleteTimerInfos map[string]struct{}
UpsertChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo
DeleteChildExecutionInfos map[int64]struct{}
UpsertRequestCancelInfos map[int64]*persistencespb.RequestCancelInfo
DeleteRequestCancelInfos map[int64]struct{}
UpsertSignalInfos map[int64]*persistencespb.SignalInfo
DeleteSignalInfos map[int64]struct{}
UpsertSignalRequestedIDs map[string]struct{}
DeleteSignalRequestedIDs map[string]struct{}
NewBufferedEvents []*historypb.HistoryEvent
ClearBufferedEvents bool
TransferTasks []tasks.Task
ReplicationTasks []tasks.Task
TimerTasks []tasks.Task
VisibilityTasks []tasks.Task
// TODO deprecate Condition in favor of DBRecordVersion
Condition int64
DBRecordVersion int64
Checksum *persistencespb.Checksum
}
// WorkflowSnapshot is used as generic workflow execution state snapshot
WorkflowSnapshot struct {
ExecutionInfo *persistencespb.WorkflowExecutionInfo
ExecutionState *persistencespb.WorkflowExecutionState
// TODO deprecate NextEventID in favor of DBRecordVersion
NextEventID int64
ActivityInfos map[int64]*persistencespb.ActivityInfo
TimerInfos map[string]*persistencespb.TimerInfo
ChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo
RequestCancelInfos map[int64]*persistencespb.RequestCancelInfo
SignalInfos map[int64]*persistencespb.SignalInfo
SignalRequestedIDs map[string]struct{}
TransferTasks []tasks.Task
ReplicationTasks []tasks.Task
TimerTasks []tasks.Task
VisibilityTasks []tasks.Task
// TODO deprecate Condition in favor of DBRecordVersion
Condition int64
DBRecordVersion int64
Checksum *persistencespb.Checksum
}
// DeleteWorkflowExecutionRequest is used to delete a workflow execution
DeleteWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
RunID string
}
// DeleteCurrentWorkflowExecutionRequest is used to delete the current workflow execution
DeleteCurrentWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
RunID string
}
// GetTransferTaskRequest is the request for GetTransferTask
GetTransferTaskRequest struct {
ShardID int32
TaskID int64
}
// GetTransferTaskResponse is the response to GetTransferTask
GetTransferTaskResponse struct {
Task tasks.Task
}
// GetTransferTasksRequest is used to read tasks from the transfer task queue
GetTransferTasksRequest struct {
ShardID int32
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetTransferTasksResponse is the response to GetTransferTasksRequest
GetTransferTasksResponse struct {
Tasks []tasks.Task
NextPageToken []byte
}
// GetVisibilityTaskRequest is the request for GetVisibilityTask
GetVisibilityTaskRequest struct {
ShardID int32
TaskID int64
}
// GetVisibilityTaskResponse is the response to GetVisibilityTask
GetVisibilityTaskResponse struct {
Task tasks.Task
}
// GetVisibilityTasksRequest is used to read tasks from the visibility task queue
GetVisibilityTasksRequest struct {
ShardID int32
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetVisibilityTasksResponse is the response to GetVisibilityTasksRequest
GetVisibilityTasksResponse struct {
Tasks []tasks.Task
NextPageToken []byte
}
// GetReplicationTaskRequest is the request for GetReplicationTask
GetReplicationTaskRequest struct {
ShardID int32
TaskID int64
}
// GetReplicationTaskResponse is the response to GetReplicationTask
GetReplicationTaskResponse struct {
Task tasks.Task
}
// GetReplicationTasksRequest is used to read tasks from the replication task queue
GetReplicationTasksRequest struct {
ShardID int32
MinTaskID int64
MaxTaskID int64
BatchSize int
NextPageToken []byte
}
// GetReplicationTasksResponse is the response to GetReplicationTask
GetReplicationTasksResponse struct {
Tasks []tasks.Task
NextPageToken []byte
}
// CompleteTransferTaskRequest is used to complete a task in the transfer task queue
CompleteTransferTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteTransferTaskRequest is used to complete a range of tasks in the transfer task queue
RangeCompleteTransferTaskRequest struct {
ShardID int32
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// CompleteVisibilityTaskRequest is used to complete a task in the visibility task queue
CompleteVisibilityTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteVisibilityTaskRequest is used to complete a range of tasks in the visibility task queue
RangeCompleteVisibilityTaskRequest struct {
ShardID int32
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// CompleteReplicationTaskRequest is used to complete a task in the replication task queue
CompleteReplicationTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteReplicationTaskRequest is used to complete a range of task in the replication task queue
RangeCompleteReplicationTaskRequest struct {
ShardID int32
InclusiveEndTaskID int64
}
// PutReplicationTaskToDLQRequest is used to put a replication task to dlq
PutReplicationTaskToDLQRequest struct {
ShardID int32
SourceClusterName string
TaskInfo *persistencespb.ReplicationTaskInfo
}
// GetReplicationTasksFromDLQRequest is used to get replication tasks from dlq
GetReplicationTasksFromDLQRequest struct {
ShardID int32
SourceClusterName string
GetReplicationTasksRequest
}
// DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ
DeleteReplicationTaskFromDLQRequest struct {
ShardID int32
SourceClusterName string
TaskID int64
}
// RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ
RangeDeleteReplicationTaskFromDLQRequest struct {
ShardID int32
SourceClusterName string
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// GetReplicationTasksFromDLQResponse is the response for GetReplicationTasksFromDLQ
GetReplicationTasksFromDLQResponse = GetReplicationTasksResponse
// RangeCompleteTimerTaskRequest is used to complete a range of tasks in the timer task queue
RangeCompleteTimerTaskRequest struct {
ShardID int32
InclusiveBeginTimestamp time.Time
ExclusiveEndTimestamp time.Time
}
// CompleteTimerTaskRequest is used to complete a task in the timer task queue
CompleteTimerTaskRequest struct {
ShardID int32
VisibilityTimestamp time.Time
TaskID int64
}
// LeaseTaskQueueRequest is used to request lease of a task queue
LeaseTaskQueueRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
TaskQueueKind enumspb.TaskQueueKind
RangeID int64
}
// LeaseTaskQueueResponse is response to LeaseTaskQueueRequest
LeaseTaskQueueResponse struct {
TaskQueueInfo *PersistedTaskQueueInfo
}
// UpdateTaskQueueRequest is used to update task queue implementation information
UpdateTaskQueueRequest struct {
RangeID int64
TaskQueueInfo *persistencespb.TaskQueueInfo
}
// UpdateTaskQueueResponse is the response to UpdateTaskQueue
UpdateTaskQueueResponse struct {
}
// ListTaskQueueRequest contains the request params needed to invoke ListTaskQueue API
ListTaskQueueRequest struct {
PageSize int
PageToken []byte
}
// ListTaskQueueResponse is the response from ListTaskQueue API
ListTaskQueueResponse struct {
Items []*PersistedTaskQueueInfo
NextPageToken []byte
}
// DeleteTaskQueueRequest contains the request params needed to invoke DeleteTaskQueue API
DeleteTaskQueueRequest struct {
TaskQueue *TaskQueueKey
RangeID int64
}
// CreateTasksRequest is used to create a new task for a workflow execution
CreateTasksRequest struct {
TaskQueueInfo *PersistedTaskQueueInfo
Tasks []*persistencespb.AllocatedTaskInfo
}
// CreateTasksResponse is the response to CreateTasksRequest
CreateTasksResponse struct {
}
PersistedTaskQueueInfo struct {
Data *persistencespb.TaskQueueInfo
RangeID int64
}
// GetTasksRequest is used to retrieve tasks of a task queue
GetTasksRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
ReadLevel int64 // range exclusive
MaxReadLevel *int64 // optional: range inclusive when specified
BatchSize int
}
// GetTasksResponse is the response to GetTasksRequests
GetTasksResponse struct {
Tasks []*persistencespb.AllocatedTaskInfo
}
// CompleteTaskRequest is used to complete a task
CompleteTaskRequest struct {
TaskQueue *TaskQueueKey
TaskID int64
}
// CompleteTasksLessThanRequest contains the request params needed to invoke CompleteTasksLessThan API
CompleteTasksLessThanRequest struct {
NamespaceID string
TaskQueueName string
TaskType enumspb.TaskQueueType
TaskID int64 // Tasks less than or equal to this ID will be completed
Limit int // Limit on the max number of tasks that can be completed. Required param
}
// GetTimerTaskRequest is the request for GetTimerTask
GetTimerTaskRequest struct {
ShardID int32
TaskID int64
VisibilityTimestamp time.Time
}
// GetTimerTaskResponse is the response to GetTimerTask
GetTimerTaskResponse struct {
Task tasks.Task
}
// GetTimerTasksRequest is the request for GetTimerTasks
// TODO: replace this with an iterator that can configure min and max index.
GetTimerTasksRequest struct {
ShardID int32
MinTimestamp time.Time
MaxTimestamp time.Time
BatchSize int
NextPageToken []byte
}
// GetTimerTasksResponse is the response for GetTimerTasks
GetTimerTasksResponse struct {
Tasks []tasks.Task
NextPageToken []byte
}
// CreateNamespaceRequest is used to create the namespace
CreateNamespaceRequest struct {
Namespace *persistencespb.NamespaceDetail
IsGlobalNamespace bool
}
// CreateNamespaceResponse is the response for CreateNamespace
CreateNamespaceResponse struct {
ID string
}
// GetNamespaceRequest is used to read namespace
GetNamespaceRequest struct {
ID string
Name string
}
// GetNamespaceResponse is the response for GetNamespace
GetNamespaceResponse struct {
Namespace *persistencespb.NamespaceDetail
IsGlobalNamespace bool
NotificationVersion int64
}
// UpdateNamespaceRequest is used to update namespace
UpdateNamespaceRequest struct {
Namespace *persistencespb.NamespaceDetail
IsGlobalNamespace bool
NotificationVersion int64
}
// DeleteNamespaceRequest is used to delete namespace entry from namespaces table
DeleteNamespaceRequest struct {
ID string
}
// DeleteNamespaceByNameRequest is used to delete namespace entry from namespaces_by_name table
DeleteNamespaceByNameRequest struct {
Name string
}
// ListNamespacesRequest is used to list namespaces
ListNamespacesRequest struct {
PageSize int
NextPageToken []byte
}
	// ListNamespacesResponse is the response for ListNamespaces
ListNamespacesResponse struct {
Namespaces []*GetNamespaceResponse
NextPageToken []byte
}
// GetMetadataResponse is the response for GetMetadata
GetMetadataResponse struct {
NotificationVersion int64
}
// MutableStateStatistics is the size stats for MutableState
MutableStateStatistics struct {
TotalSize int
HistoryStatistics *HistoryStatistics
// Breakdown of size into more granular stats
ExecutionInfoSize int
ExecutionStateSize int
ActivityInfoSize int
TimerInfoSize int
ChildInfoSize int
RequestCancelInfoSize int
SignalInfoSize int
SignalRequestIDSize int
BufferedEventsSize int
// Item count for various information captured within mutable state
ActivityInfoCount int
TimerInfoCount int
ChildInfoCount int
RequestCancelInfoCount int
SignalInfoCount int
SignalRequestIDCount int
BufferedEventsCount int
}
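	// HistoryStatistics captures the history size and event count deltas recorded alongside MutableStateStatistics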
HistoryStatistics struct {
SizeDiff int
CountDiff int
}
// AppendHistoryNodesRequest is used to append a batch of history nodes
AppendHistoryNodesRequest struct {
// The shard to get history node data
ShardID int32
// true if this is the first append request to the branch
IsNewBranch bool
// the info for clean up data in background
Info string
// The branch to be appended
BranchToken []byte
// The batch of events to be appended. The first eventID will become the nodeID of this batch
Events []*historypb.HistoryEvent
// TransactionID for events before these events. For events chaining
PrevTransactionID int64
// requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins
TransactionID int64
}
// AppendHistoryNodesResponse is a response to AppendHistoryNodesRequest
AppendHistoryNodesResponse struct {
// the size of the event data that has been appended
Size int
}
// ReadHistoryBranchRequest is used to read a history branch
ReadHistoryBranchRequest struct {
// The shard to get history branch data
ShardID int32
// The branch to be read
BranchToken []byte
// Get the history nodes from MinEventID. Inclusive.
MinEventID int64
		// Get the history nodes up to MaxEventID. Exclusive.
MaxEventID int64
		// Maximum number of batches of events per page. Note that the number of events in a batch is >= 1; this is not the number of events per page.
		// However, for a single page it is also possible that the returned events are fewer than PageSize (even zero events) due to stale events.
PageSize int
// Token to continue reading next page of history append transactions. Pass in empty slice for first page
NextPageToken []byte
}
// ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchResponse struct {
// History events
HistoryEvents []*historypb.HistoryEvent
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
		// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ReadHistoryBranchByBatchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchByBatchResponse struct {
// History events by batch
History []*historypb.History
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
		// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ReadRawHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadRawHistoryBranchResponse struct {
// HistoryEventBlobs history event blobs
HistoryEventBlobs []*commonpb.DataBlob
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
		// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ForkHistoryBranchRequest is used to fork a history branch
ForkHistoryBranchRequest struct {
// The shard to get history branch data
ShardID int32
// The base branch to fork from
ForkBranchToken []byte
		// The nodeID to fork from; the new branch will start from it (inclusive), and the base branch will stop at it (exclusive).
		// The application must provide a valid forking nodeID: it must be a valid nodeID in that branch. A valid nodeID is the firstEventID of a valid batch of events.
// And ForkNodeID > 1 because forking from 1 doesn't make any sense.
ForkNodeID int64
// the info for clean up data in background
Info string
}
// ForkHistoryBranchResponse is the response to ForkHistoryBranchRequest
ForkHistoryBranchResponse struct {
// branchToken to represent the new branch
NewBranchToken []byte
}
// CompleteForkBranchRequest is used to complete forking
CompleteForkBranchRequest struct {
// the new branch returned from ForkHistoryBranchRequest
BranchToken []byte
		// true means the fork succeeded and the flag will be updated; otherwise the new branch will be deleted
Success bool
// The shard to update history branch data
ShardID *int
}
// DeleteHistoryBranchRequest is used to remove a history branch
DeleteHistoryBranchRequest struct {
// The shard to delete history branch data
ShardID int32
// branch to be deleted
BranchToken []byte
}
// TrimHistoryBranchRequest is used to validate & trim a history branch
TrimHistoryBranchRequest struct {
// The shard to delete history branch data
ShardID int32
// branch to be validated & trimmed
BranchToken []byte
// known valid node ID
NodeID int64
// known valid transaction ID
TransactionID int64
}
// TrimHistoryBranchResponse is the response to TrimHistoryBranchRequest
TrimHistoryBranchResponse struct {
}
// GetHistoryTreeRequest is used to retrieve branch info of a history tree
GetHistoryTreeRequest struct {
// A UUID of a tree
TreeID string
// Get data from this shard
ShardID *int32
// optional: can provide treeID via branchToken if treeID is empty
BranchToken []byte
}
// HistoryBranchDetail contains detailed information of a branch
HistoryBranchDetail struct {
TreeID string
BranchID string
ForkTime *time.Time
Info string
}
// GetHistoryTreeResponse is a response to GetHistoryTreeRequest
GetHistoryTreeResponse struct {
// all branches of a tree
Branches []*persistencespb.HistoryBranch
}
// GetAllHistoryTreeBranchesRequest is a request of GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesRequest struct {
// pagination token
NextPageToken []byte
// maximum number of branches returned per page
PageSize int
}
// GetAllHistoryTreeBranchesResponse is a response to GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesResponse struct {
// pagination token
NextPageToken []byte
// all branches of all trees
Branches []HistoryBranchDetail
}
// GetClusterMetadataResponse is the response to GetClusterMetadata
GetClusterMetadataResponse struct {
persistencespb.ClusterMetadata
Version int64
}
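	// SaveClusterMetadataRequest is the request to SaveClusterMetadata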
SaveClusterMetadataRequest struct {
persistencespb.ClusterMetadata
Version int64
}
	// GetClusterMembersRequest is the request to GetClusterMembers
GetClusterMembersRequest struct {
LastHeartbeatWithin time.Duration
RPCAddressEquals net.IP
HostIDEquals uuid.UUID
RoleEquals ServiceType
SessionStartedAfter time.Time
NextPageToken []byte
PageSize int
}
// GetClusterMembersResponse is the response to GetClusterMembers
GetClusterMembersResponse struct {
ActiveMembers []*ClusterMember
NextPageToken []byte
}
// ClusterMember is used as a response to GetClusterMembers
ClusterMember struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
LastHeartbeat time.Time
RecordExpiry time.Time
}
// UpsertClusterMembershipRequest is the request to UpsertClusterMembership
UpsertClusterMembershipRequest struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
RecordExpiry time.Duration
}
// PruneClusterMembershipRequest is the request to PruneClusterMembership
PruneClusterMembershipRequest struct {
MaxRecordsPruned int
}
// Closeable is an interface for any entity that supports a close operation to release resources
Closeable interface {
Close()
}
// ShardManager is used to manage all shards
ShardManager interface {
Closeable
GetName() string
CreateShard(request *CreateShardRequest) error
GetShard(request *GetShardRequest) (*GetShardResponse, error)
UpdateShard(request *UpdateShardRequest) error
}
// ExecutionManager is used to manage workflow executions
ExecutionManager interface {
Closeable
GetName() string
CreateWorkflowExecution(request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error)
GetWorkflowExecution(request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error)
UpdateWorkflowExecution(request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error)
ConflictResolveWorkflowExecution(request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error)
DeleteWorkflowExecution(request *DeleteWorkflowExecutionRequest) error
DeleteCurrentWorkflowExecution(request *DeleteCurrentWorkflowExecutionRequest) error
GetCurrentExecution(request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error)
// Scan operations
ListConcreteExecutions(request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error)
// Tasks related APIs
AddTasks(request *AddTasksRequest) error
// transfer tasks
GetTransferTask(request *GetTransferTaskRequest) (*GetTransferTaskResponse, error)
GetTransferTasks(request *GetTransferTasksRequest) (*GetTransferTasksResponse, error)
CompleteTransferTask(request *CompleteTransferTaskRequest) error
RangeCompleteTransferTask(request *RangeCompleteTransferTaskRequest) error
// timer tasks
GetTimerTask(request *GetTimerTaskRequest) (*GetTimerTaskResponse, error)
GetTimerTasks(request *GetTimerTasksRequest) (*GetTimerTasksResponse, error)
CompleteTimerTask(request *CompleteTimerTaskRequest) error
RangeCompleteTimerTask(request *RangeCompleteTimerTaskRequest) error
// replication tasks
GetReplicationTask(request *GetReplicationTaskRequest) (*GetReplicationTaskResponse, error)
GetReplicationTasks(request *GetReplicationTasksRequest) (*GetReplicationTasksResponse, error)
CompleteReplicationTask(request *CompleteReplicationTaskRequest) error
RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error
PutReplicationTaskToDLQ(request *PutReplicationTaskToDLQRequest) error
GetReplicationTasksFromDLQ(request *GetReplicationTasksFromDLQRequest) (*GetReplicationTasksFromDLQResponse, error)
DeleteReplicationTaskFromDLQ(request *DeleteReplicationTaskFromDLQRequest) error
RangeDeleteReplicationTaskFromDLQ(request *RangeDeleteReplicationTaskFromDLQRequest) error
// visibility tasks
GetVisibilityTask(request *GetVisibilityTaskRequest) (*GetVisibilityTaskResponse, error)
GetVisibilityTasks(request *GetVisibilityTasksRequest) (*GetVisibilityTasksResponse, error)
CompleteVisibilityTask(request *CompleteVisibilityTaskRequest) error
RangeCompleteVisibilityTask(request *RangeCompleteVisibilityTaskRequest) error
// The below are history V2 APIs
// V2 regards history events growing as a tree, decoupled from workflow concepts
		// For Temporal, treeID is the new runID, except for fork (reset), where treeID will be the runID that it forks from.
// AppendHistoryNodes add a node to history node table
AppendHistoryNodes(request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error)
// ReadHistoryBranch returns history node data for a branch
ReadHistoryBranch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error)
// ReadHistoryBranchByBatch returns history node data for a branch ByBatch
ReadHistoryBranchByBatch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error)
// ReadRawHistoryBranch returns history node raw data for a branch ByBatch
// NOTE: this API should only be used by 3+DC
ReadRawHistoryBranch(request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error)
		// ForkHistoryBranch forks a new branch from an old branch
ForkHistoryBranch(request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error)
// DeleteHistoryBranch removes a branch
// If this is the last branch to delete, it will also remove the root node
DeleteHistoryBranch(request *DeleteHistoryBranchRequest) error
		// TrimHistoryBranch validates & trims a history branch
TrimHistoryBranch(request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error)
// GetHistoryTree returns all branch information of a tree
GetHistoryTree(request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error)
// GetAllHistoryTreeBranches returns all branches of all trees
GetAllHistoryTreeBranches(request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error)
}
// TaskManager is used to manage tasks
TaskManager interface {
Closeable
GetName() string
LeaseTaskQueue(request *LeaseTaskQueueRequest) (*LeaseTaskQueueResponse, error)
UpdateTaskQueue(request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error)
ListTaskQueue(request *ListTaskQueueRequest) (*ListTaskQueueResponse, error)
DeleteTaskQueue(request *DeleteTaskQueueRequest) error
CreateTasks(request *CreateTasksRequest) (*CreateTasksResponse, error)
GetTasks(request *GetTasksRequest) (*GetTasksResponse, error)
CompleteTask(request *CompleteTaskRequest) error
// CompleteTasksLessThan completes tasks less than or equal to the given task id
// This API takes a limit parameter which specifies the count of maxRows that
// can be deleted. This parameter may be ignored by the underlying storage, but
		// it's mandatory to specify it. On success, this method returns the number of rows
// actually deleted. If the underlying storage doesn't support "limit", all rows
// less than or equal to taskID will be deleted.
// On success, this method returns:
// - number of rows actually deleted, if limit is honored
// - UnknownNumRowsDeleted, when all rows below value are deleted
CompleteTasksLessThan(request *CompleteTasksLessThanRequest) (int, error)
}
// MetadataManager is used to manage metadata CRUD for namespace entities
MetadataManager interface {
Closeable
GetName() string
CreateNamespace(request *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
GetNamespace(request *GetNamespaceRequest) (*GetNamespaceResponse, error)
UpdateNamespace(request *UpdateNamespaceRequest) error
DeleteNamespace(request *DeleteNamespaceRequest) error
DeleteNamespaceByName(request *DeleteNamespaceByNameRequest) error
ListNamespaces(request *ListNamespacesRequest) (*ListNamespacesResponse, error)
GetMetadata() (*GetMetadataResponse, error)
InitializeSystemNamespaces(currentClusterName string) error
}
// ClusterMetadataManager is used to manage cluster-wide metadata and configuration
ClusterMetadataManager interface {
Closeable
GetName() string
GetClusterMembers(request *GetClusterMembersRequest) (*GetClusterMembersResponse, error)
UpsertClusterMembership(request *UpsertClusterMembershipRequest) error
PruneClusterMembership(request *PruneClusterMembershipRequest) error
GetClusterMetadata() (*GetClusterMetadataResponse, error)
SaveClusterMetadata(request *SaveClusterMetadataRequest) (bool, error)
}
)
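
// A minimal usage sketch of the NextPageToken contract documented on
// ReadHistoryBranchRequest/Response: keep passing the returned token back until
// it comes back empty. The shard ID, event ID bounds and page size used here are
// placeholders, not values mandated by the API.
func exampleReadFullHistoryBranch(mgr ExecutionManager, branchToken []byte) ([]*historypb.HistoryEvent, error) {
	var events []*historypb.HistoryEvent
	req := &ReadHistoryBranchRequest{
		ShardID:     1,
		BranchToken: branchToken,
		MinEventID:  1,     // inclusive
		MaxEventID:  10000, // exclusive
		PageSize:    100,   // max number of event batches per page
	}
	for {
		resp, err := mgr.ReadHistoryBranch(req)
		if err != nil {
			return nil, err
		}
		events = append(events, resp.HistoryEvents...)
		if len(resp.NextPageToken) == 0 {
			// An empty token means the last page has been reached.
			return events, nil
		}
		req.NextPageToken = resp.NextPageToken
	}
}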
func (e *InvalidPersistenceRequestError) Error() string {
return e.Msg
}
func (e *CurrentWorkflowConditionFailedError) Error() string {
return e.Msg
}
func (e *WorkflowConditionFailedError) Error() string {
return e.Msg
}
func (e *ConditionFailedError) Error() string {
return e.Msg
}
func (e *ShardAlreadyExistError) Error() string {
return e.Msg
}
func (e *ShardOwnershipLostError) Error() string {
return e.Msg
}
func (e *TimeoutError) Error() string {
return e.Msg
}
func (e *TransactionSizeLimitError) Error() string {
return e.Msg
}
// UnixMilliseconds returns t as a Unix time, the number of milliseconds elapsed since January 1, 1970 UTC.
// It should be used for all CQL timestamps.
func UnixMilliseconds(t time.Time) int64 {
// Handling zero time separately because UnixNano is undefined for zero times.
if t.IsZero() {
return 0
}
unixNano := t.UnixNano()
if unixNano < 0 {
// Time is before January 1, 1970 UTC
return 0
}
return unixNano / int64(time.Millisecond)
}
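
// A minimal sketch of the conversions UnixMilliseconds performs, including the
// zero-time and pre-epoch cases handled above; the sample values are arbitrary.
func exampleUnixMilliseconds() {
	fmt.Println(UnixMilliseconds(time.Time{}))                                 // 0: the zero time maps to 0
	fmt.Println(UnixMilliseconds(time.Date(1960, 1, 1, 0, 0, 0, 0, time.UTC))) // 0: times before the epoch map to 0
	fmt.Println(UnixMilliseconds(time.Unix(1, 500000000)))                     // 1500: 1.5s after the epoch
}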
// NewHistoryBranchToken returns a new branch token
func NewHistoryBranchToken(treeID string) ([]byte, error) {
branchID := primitives.NewUUID().String()
bi := &persistencespb.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistencespb.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
// NewHistoryBranchTokenByBranchID returns a new branch token with treeID/branchID
func NewHistoryBranchTokenByBranchID(treeID, branchID string) ([]byte, error) {
bi := &persistencespb.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistencespb.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
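
// A minimal sketch of building branch tokens with the two constructors above;
// the tree and branch IDs are placeholders. The returned bytes are what callers
// pass around as BranchToken.
func exampleNewHistoryBranchTokens() error {
	token, err := NewHistoryBranchToken("tree-id")
	if err != nil {
		return err
	}
	fmt.Printf("new branch token: %d bytes\n", len(token))

	token, err = NewHistoryBranchTokenByBranchID("tree-id", "branch-id")
	if err != nil {
		return err
	}
	fmt.Printf("branch token for existing branchID: %d bytes\n", len(token))
	return nil
}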
// BuildHistoryGarbageCleanupInfo combines the workflow identity information into a string
func BuildHistoryGarbageCleanupInfo(namespaceID, workflowID, runID string) string {
return fmt.Sprintf("%v:%v:%v", namespaceID, workflowID, runID)
}
// SplitHistoryGarbageCleanupInfo returns workflow identity information
func SplitHistoryGarbageCleanupInfo(info string) (namespaceID, workflowID, runID string, err error) {
ss := strings.Split(info, ":")
// workflowID can contain ":" so len(ss) can be greater than 3
if len(ss) < numItemsInGarbageInfo {
return "", "", "", fmt.Errorf("not able to split info for %s", info)
}
namespaceID = ss[0]
runID = ss[len(ss)-1]
workflowEnd := len(info) - len(runID) - 1
workflowID = info[len(namespaceID)+1 : workflowEnd]
return
}
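
// A minimal round-trip sketch for the two helpers above. Because workflowID may
// itself contain ":", the split takes the first segment as namespaceID and the
// last segment as runID. All IDs here are placeholders.
func exampleHistoryGarbageCleanupInfoRoundTrip() {
	info := BuildHistoryGarbageCleanupInfo("namespace-id", "workflow:id:with:colons", "run-id")
	namespaceID, workflowID, runID, err := SplitHistoryGarbageCleanupInfo(info)
	if err != nil {
		fmt.Printf("unexpected error: %v\n", err)
		return
	}
	fmt.Println(namespaceID, workflowID, runID) // namespace-id workflow:id:with:colons run-id
}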
// NewGetReplicationTasksFromDLQRequest creates a new GetReplicationTasksFromDLQRequest
func NewGetReplicationTasksFromDLQRequest(
shardID int32,
sourceClusterName string,
readLevel int64,
maxReadLevel int64,
batchSize int,
nextPageToken []byte,
) *GetReplicationTasksFromDLQRequest {
return &GetReplicationTasksFromDLQRequest{
ShardID: shardID,
SourceClusterName: sourceClusterName,
GetReplicationTasksRequest: GetReplicationTasksRequest{
MinTaskID: readLevel,
MaxTaskID: maxReadLevel,
BatchSize: batchSize,
NextPageToken: nextPageToken,
},
}
}
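
// A minimal sketch requesting the first page of DLQ replication tasks via the
// constructor above; the shard ID, cluster name, task ID range and batch size
// are placeholders.
func exampleGetReplicationTasksFromDLQ() *GetReplicationTasksFromDLQRequest {
	return NewGetReplicationTasksFromDLQRequest(1, "source-cluster", 0, 1000, 100, nil)
}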
type ServiceType int
const (
All ServiceType = iota
Frontend
History
Matching
Worker
)
| 1 | 13,200 | Personally I wish we didn't have a proto for every single little thing we do. RequestShardAction with an enum for the action type would be a lot cleaner imo, but I'm pretty sure that ship has sailed :) Just kvetching. | temporalio-temporal | go |
@@ -0,0 +1,5 @@
+<% if trail.complete? %>
+ <%= render "completed_trails/trail", trail: trail %>
+<% else %>
+ <%= render "trails/incomplete_trail", trail: trail %>
+<% end %> | 1 | 1 | 14,241 | Does this make more sense as `trails/_trail` now? | thoughtbot-upcase | rb |
|
@@ -435,6 +435,17 @@ func (agent *ecsAgent) getEC2InstanceID() string {
return instanceID
}
+// getoutpostARN gets the Outpost ARN from the metadata service
+func (agent *ecsAgent) getoutpostARN() string {
+ outpostARN, err := agent.ec2MetadataClient.OutpostARN()
+ if err != nil {
+ seelog.Warnf(
+ "Unable to obtain Outpost ARN from EC2 Metadata: %v", err)
+ return ""
+ }
+ return outpostARN
+}
+
// newStateManager creates a new state manager object for the task engine.
// Rest of the parameters are pointers and it's expected that all of these
// will be backfilled when state manager's Load() method is invoked | 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package app
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/aws/amazon-ecs-agent/agent/metrics"
acshandler "github.com/aws/amazon-ecs-agent/agent/acs/handler"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/api/ecsclient"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/app/factory"
"github.com/aws/amazon-ecs-agent/agent/app/oswrapper"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/eni/pause"
"github.com/aws/amazon-ecs-agent/agent/eventhandler"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/handlers"
"github.com/aws/amazon-ecs-agent/agent/sighandlers"
"github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/stats"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
tcshandler "github.com/aws/amazon-ecs-agent/agent/tcs/handler"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper"
"github.com/aws/amazon-ecs-agent/agent/version"
"github.com/aws/aws-sdk-go/aws"
aws_credentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/cihub/seelog"
"github.com/pborman/uuid"
)
const (
containerChangeEventStreamName = "ContainerChange"
deregisterContainerInstanceEventStreamName = "DeregisterContainerInstance"
clusterMismatchErrorFormat = "Data mismatch; saved cluster '%v' does not match configured cluster '%v'. Perhaps you want to delete the configured checkpoint file?"
instanceIDMismatchErrorFormat = "Data mismatch; saved InstanceID '%s' does not match current InstanceID '%s'. Overwriting old datafile"
instanceTypeMismatchErrorFormat = "The current instance type does not match the registered instance type. Please revert the instance type change, or alternatively launch a new instance: %v"
vpcIDAttributeName = "ecs.vpc-id"
subnetIDAttributeName = "ecs.subnet-id"
)
var (
instanceNotLaunchedInVPCError = errors.New("instance not launched in VPC")
)
// agent interface is used by the app runner to interact with the ecsAgent
// object. Its purpose is to mostly demonstrate how to interact with the
// ecsAgent type.
type agent interface {
// printECSAttributes prints the Agent's capabilities based on
// its environment
printECSAttributes() int
// startWindowsService starts the agent as a Windows Service
startWindowsService() int
// start starts the Agent execution
start() int
// setTerminationHandler sets the termination handler
setTerminationHandler(sighandlers.TerminationHandler)
}
// ecsAgent wraps all the entities needed to start the ECS Agent execution.
// after creating it via
// the newAgent() method
type ecsAgent struct {
ctx context.Context
ec2MetadataClient ec2.EC2MetadataClient
ec2Client ec2.Client
cfg *config.Config
dockerClient dockerapi.DockerClient
containerInstanceARN string
credentialProvider *aws_credentials.Credentials
stateManagerFactory factory.StateManager
saveableOptionFactory factory.SaveableOption
pauseLoader pause.Loader
cniClient ecscni.CNIClient
os oswrapper.OS
vpc string
subnet string
mac string
metadataManager containermetadata.Manager
terminationHandler sighandlers.TerminationHandler
mobyPlugins mobypkgwrapper.Plugins
resourceFields *taskresource.ResourceFields
availabilityZone string
}
// newAgent returns a new ecsAgent object, but does not start anything
func newAgent(
ctx context.Context,
blackholeEC2Metadata bool,
acceptInsecureCert *bool) (agent, error) {
ec2MetadataClient := ec2.NewEC2MetadataClient(nil)
if blackholeEC2Metadata {
ec2MetadataClient = ec2.NewBlackholeEC2MetadataClient()
}
seelog.Info("Loading configuration")
cfg, err := config.NewConfig(ec2MetadataClient)
if err != nil {
// All required config values can be inferred from EC2 Metadata,
// so this error could be transient.
seelog.Criticalf("Error loading config: %v", err)
return nil, err
}
cfg.AcceptInsecureCert = aws.BoolValue(acceptInsecureCert)
if cfg.AcceptInsecureCert {
seelog.Warn("SSL certificate verification disabled. This is not recommended.")
}
seelog.Infof("Amazon ECS agent Version: %s, Commit: %s", version.Version, version.GitShortHash)
seelog.Debugf("Loaded config: %s", cfg.String())
ec2Client := ec2.NewClientImpl(cfg.AWSRegion)
dockerClient, err := dockerapi.NewDockerGoClient(sdkclientfactory.NewFactory(ctx, cfg.DockerEndpoint), cfg, ctx)
if err != nil {
// This is also non terminal in the current config
seelog.Criticalf("Error creating Docker client: %v", err)
return nil, err
}
var metadataManager containermetadata.Manager
if cfg.ContainerMetadataEnabled {
// We use the default API client for the metadata inspect call. This version has some information
// missing which means if we need those fields later we will need to change this client to
// the appropriate version
metadataManager = containermetadata.NewManager(dockerClient, cfg)
}
return &ecsAgent{
ctx: ctx,
ec2MetadataClient: ec2MetadataClient,
ec2Client: ec2Client,
cfg: cfg,
dockerClient: dockerClient,
// We instantiate our own credentialProvider for use in acs/tcs. This tries
// to mimic roughly the way it's instantiated by the SDK for a default
// session.
credentialProvider: defaults.CredChain(defaults.Config(), defaults.Handlers()),
stateManagerFactory: factory.NewStateManager(),
saveableOptionFactory: factory.NewSaveableOption(),
pauseLoader: pause.New(),
cniClient: ecscni.NewClient(cfg.CNIPluginsPath),
os: oswrapper.New(),
metadataManager: metadataManager,
terminationHandler: sighandlers.StartDefaultTerminationHandler,
mobyPlugins: mobypkgwrapper.NewPlugins(),
}, nil
}
// printECSAttributes prints the Agent's ECS Attributes based on its
// environment
func (agent *ecsAgent) printECSAttributes() int {
capabilities, err := agent.capabilities()
if err != nil {
seelog.Warnf("Unable to obtain capabilities: %v", err)
return exitcodes.ExitError
}
for _, attr := range capabilities {
fmt.Printf("%s\t%s\n", aws.StringValue(attr.Name), aws.StringValue(attr.Value))
}
return exitcodes.ExitSuccess
}
func (agent *ecsAgent) setTerminationHandler(handler sighandlers.TerminationHandler) {
agent.terminationHandler = handler
}
// start starts the ECS Agent
func (agent *ecsAgent) start() int {
sighandlers.StartDebugHandler()
containerChangeEventStream := eventstream.NewEventStream(containerChangeEventStreamName, agent.ctx)
credentialsManager := credentials.NewManager()
state := dockerstate.NewTaskEngineState()
imageManager := engine.NewImageManager(agent.cfg, agent.dockerClient, state)
client := ecsclient.NewECSClient(agent.credentialProvider, agent.cfg, agent.ec2MetadataClient)
agent.initializeResourceFields(credentialsManager)
return agent.doStart(containerChangeEventStream, credentialsManager, state, imageManager, client)
}
// doStart is the worker invoked by start for starting the ECS Agent. This involves
// initializing the docker task engine, state saver, image manager, credentials
// manager, poll and telemetry sessions, api handler etc
func (agent *ecsAgent) doStart(containerChangeEventStream *eventstream.EventStream,
credentialsManager credentials.Manager,
state dockerstate.TaskEngineState,
imageManager engine.ImageManager,
client api.ECSClient) int {
// check docker version >= 1.9.0, exit agent if older
if exitcode, ok := agent.verifyRequiredDockerVersion(); !ok {
return exitcode
}
// Conditionally create '/ecs' cgroup root
if agent.cfg.TaskCPUMemLimit.Enabled() {
if err := agent.cgroupInit(); err != nil {
seelog.Criticalf("Unable to initialize cgroup root for ECS: %v", err)
return exitcodes.ExitTerminal
}
}
if agent.cfg.GPUSupportEnabled {
err := agent.initializeGPUManager()
if err != nil {
seelog.Criticalf("Could not initialize Nvidia GPU Manager: %v", err)
return exitcodes.ExitError
}
}
// Create the task engine
taskEngine, currentEC2InstanceID, err := agent.newTaskEngine(containerChangeEventStream,
credentialsManager, state, imageManager)
if err != nil {
return exitcodes.ExitTerminal
}
agent.initMetricsEngine()
// Initialize the state manager
stateManager, err := agent.newStateManager(taskEngine,
&agent.cfg.Cluster, &agent.containerInstanceARN, ¤tEC2InstanceID, &agent.availabilityZone)
if err != nil {
seelog.Criticalf("Error creating state manager: %v", err)
return exitcodes.ExitTerminal
}
var vpcSubnetAttributes []*ecs.Attribute
// Check if Task ENI is enabled
if agent.cfg.TaskENIEnabled {
err, terminal := agent.initializeTaskENIDependencies(state, taskEngine)
switch err {
case nil:
// No error, we can proceed with the rest of initialization
// Set vpc and subnet id attributes
vpcSubnetAttributes = agent.constructVPCSubnetAttributes()
case instanceNotLaunchedInVPCError:
// We have ascertained that the EC2 Instance is not running in a VPC
// No need to stop the ECS Agent in this case; all we need to do is
// to not update the config to disable the TaskENIEnabled flag and
// move on
seelog.Warnf("Unable to detect VPC ID for the Instance, disabling Task ENI capability: %v", err)
agent.cfg.TaskENIEnabled = false
default:
// Encountered an error initializing dependencies for dealing with
// ENIs for Tasks. Exit with the appropriate error code
seelog.Criticalf("Unable to initialize Task ENI dependencies: %v", err)
if terminal {
return exitcodes.ExitTerminal
}
return exitcodes.ExitError
}
}
// Register the container instance
err = agent.registerContainerInstance(stateManager, client, vpcSubnetAttributes)
if err != nil {
if isTransient(err) {
return exitcodes.ExitError
}
return exitcodes.ExitTerminal
}
// Add container instance ARN to metadata manager
if agent.cfg.ContainerMetadataEnabled {
agent.metadataManager.SetContainerInstanceARN(agent.containerInstanceARN)
agent.metadataManager.SetAvailabilityZone(agent.availabilityZone)
agent.metadataManager.SetHostPrivateIPv4Address(agent.getHostPrivateIPv4AddressFromEC2Metadata())
agent.metadataManager.SetHostPublicIPv4Address(agent.getHostPublicIPv4AddressFromEC2Metadata())
}
// Begin listening to the docker daemon and saving changes
taskEngine.SetSaver(stateManager)
imageManager.SetSaver(stateManager)
taskEngine.MustInit(agent.ctx)
// Start back ground routines, including the telemetry session
deregisterInstanceEventStream := eventstream.NewEventStream(
deregisterContainerInstanceEventStreamName, agent.ctx)
deregisterInstanceEventStream.StartListening()
taskHandler := eventhandler.NewTaskHandler(agent.ctx, stateManager, state, client)
attachmentEventHandler := eventhandler.NewAttachmentEventHandler(agent.ctx, stateManager, client)
agent.startAsyncRoutines(containerChangeEventStream, credentialsManager, imageManager,
taskEngine, stateManager, deregisterInstanceEventStream, client, taskHandler, attachmentEventHandler, state)
// Start the acs session, which should block doStart
return agent.startACSSession(credentialsManager, taskEngine, stateManager,
deregisterInstanceEventStream, client, state, taskHandler)
}
// newTaskEngine creates a new docker task engine object. It tries to load the
// local state if needed, else initializes a new one
func (agent *ecsAgent) newTaskEngine(containerChangeEventStream *eventstream.EventStream,
credentialsManager credentials.Manager,
state dockerstate.TaskEngineState,
imageManager engine.ImageManager) (engine.TaskEngine, string, error) {
containerChangeEventStream.StartListening()
if !agent.cfg.Checkpoint {
seelog.Info("Checkpointing not enabled; a new container instance will be created each time the agent is run")
return engine.NewTaskEngine(agent.cfg, agent.dockerClient, credentialsManager,
containerChangeEventStream, imageManager, state,
agent.metadataManager, agent.resourceFields), "", nil
}
// We try to set these values by loading the existing state file first
var previousCluster, previousEC2InstanceID, previousContainerInstanceArn, previousAZ string
previousTaskEngine := engine.NewTaskEngine(agent.cfg, agent.dockerClient,
credentialsManager, containerChangeEventStream, imageManager, state,
agent.metadataManager, agent.resourceFields)
// previousStateManager is used to verify that our current runtime configuration is
// compatible with our past configuration as reflected by our state-file
previousStateManager, err := agent.newStateManager(previousTaskEngine, &previousCluster,
&previousContainerInstanceArn, &previousEC2InstanceID, &previousAZ)
if err != nil {
seelog.Criticalf("Error creating state manager: %v", err)
return nil, "", err
}
err = previousStateManager.Load()
if err != nil {
seelog.Criticalf("Error loading previously saved state: %v", err)
return nil, "", err
}
err = agent.checkCompatibility(previousTaskEngine)
if err != nil {
seelog.Criticalf("Error checking compatibility with previously saved state: %v", err)
return nil, "", err
}
currentEC2InstanceID := agent.getEC2InstanceID()
if previousEC2InstanceID != "" && previousEC2InstanceID != currentEC2InstanceID {
seelog.Warnf(instanceIDMismatchErrorFormat,
previousEC2InstanceID, currentEC2InstanceID)
// Reset agent state as a new container instance
state.Reset()
// Reset taskEngine; all the other values are still default
return engine.NewTaskEngine(agent.cfg, agent.dockerClient, credentialsManager,
containerChangeEventStream, imageManager, state, agent.metadataManager,
agent.resourceFields), currentEC2InstanceID, nil
}
if previousCluster != "" {
if err := agent.setClusterInConfig(previousCluster); err != nil {
return nil, "", err
}
}
// Use the values we loaded if there's no issue
agent.containerInstanceARN = previousContainerInstanceArn
return previousTaskEngine, currentEC2InstanceID, nil
}
func (agent *ecsAgent) initMetricsEngine() {
// In case of a panic during set-up, we will recover quietly and resume
// normal Agent execution.
defer func() {
if r := recover(); r != nil {
seelog.Errorf("MetricsEngine Set-up panicked. Recovering quietly: %s", r)
}
}()
// We init the global MetricsEngine before we publish metrics
metrics.MustInit(agent.cfg)
metrics.PublishMetrics()
}
// setClusterInConfig sets the cluster name in the config object based on
// previous state. It returns an error if there's a mismatch between the
// current cluster name and what's restored from the cluster state
func (agent *ecsAgent) setClusterInConfig(previousCluster string) error {
// TODO Handle default cluster in a sane and unified way across the codebase
configuredCluster := agent.cfg.Cluster
if configuredCluster == "" {
seelog.Debug("Setting cluster to default; none configured")
configuredCluster = config.DefaultClusterName
}
if previousCluster != configuredCluster {
err := clusterMismatchError{
fmt.Errorf(clusterMismatchErrorFormat, previousCluster, configuredCluster),
}
seelog.Criticalf("%v", err)
return err
}
agent.cfg.Cluster = previousCluster
seelog.Infof("Restored cluster '%s'", agent.cfg.Cluster)
return nil
}
// getEC2InstanceID gets the EC2 instance ID from the metadata service
func (agent *ecsAgent) getEC2InstanceID() string {
instanceID, err := agent.ec2MetadataClient.InstanceID()
if err != nil {
seelog.Warnf(
"Unable to access EC2 Metadata service to determine EC2 ID: %v", err)
return ""
}
return instanceID
}
// newStateManager creates a new state manager object for the task engine.
// Rest of the parameters are pointers and it's expected that all of these
// will be backfilled when state manager's Load() method is invoked
func (agent *ecsAgent) newStateManager(
taskEngine engine.TaskEngine,
cluster *string,
containerInstanceArn *string,
savedInstanceID *string,
availabilityZone *string) (statemanager.StateManager, error) {
if !agent.cfg.Checkpoint {
return statemanager.NewNoopStateManager(), nil
}
return agent.stateManagerFactory.NewStateManager(agent.cfg,
statemanager.AddSaveable("TaskEngine", taskEngine),
// This is for making testing easier as we can mock this
agent.saveableOptionFactory.AddSaveable("ContainerInstanceArn",
containerInstanceArn),
agent.saveableOptionFactory.AddSaveable("Cluster", cluster),
// This is for making testing easier as we can mock this
agent.saveableOptionFactory.AddSaveable("EC2InstanceID", savedInstanceID),
agent.saveableOptionFactory.AddSaveable("availabilityZone", availabilityZone),
)
}
// constructVPCSubnetAttributes returns vpc and subnet IDs of the instance as
// an attribute list
func (agent *ecsAgent) constructVPCSubnetAttributes() []*ecs.Attribute {
return []*ecs.Attribute{
{
Name: aws.String(vpcIDAttributeName),
Value: aws.String(agent.vpc),
},
{
Name: aws.String(subnetIDAttributeName),
Value: aws.String(agent.subnet),
},
}
}
// registerContainerInstance registers the container instance ID for the ECS Agent
func (agent *ecsAgent) registerContainerInstance(
stateManager statemanager.StateManager,
client api.ECSClient,
additionalAttributes []*ecs.Attribute) error {
// Preflight request to make sure they're good
if preflightCreds, err := agent.credentialProvider.Get(); err != nil || preflightCreds.AccessKeyID == "" {
seelog.Warnf("Error getting valid credentials (AKID %s): %v", preflightCreds.AccessKeyID, err)
}
agentCapabilities, err := agent.capabilities()
if err != nil {
return err
}
capabilities := append(agentCapabilities, additionalAttributes...)
// Get the tags of this container instance defined in config file
tags := utils.MapToTags(agent.cfg.ContainerInstanceTags)
if agent.cfg.ContainerInstancePropagateTagsFrom == config.ContainerInstancePropagateTagsFromEC2InstanceType {
ec2Tags, err := agent.getContainerInstanceTagsFromEC2API()
// If we are unable to call the API, we should not treat it as a transient error,
// because we've already retried several times, we may throttle the API if we
// keep retrying.
if err != nil {
return err
}
seelog.Infof("Retrieved Tags from EC2 DescribeTags API:\n%v", ec2Tags)
tags = mergeTags(tags, ec2Tags)
}
platformDevices := agent.getPlatformDevices()
if agent.containerInstanceARN != "" {
seelog.Infof("Restored from checkpoint file. I am running as '%s' in cluster '%s'", agent.containerInstanceARN, agent.cfg.Cluster)
return agent.reregisterContainerInstance(client, capabilities, tags, uuid.New(), platformDevices)
}
seelog.Info("Registering Instance with ECS")
containerInstanceArn, availabilityZone, err := client.RegisterContainerInstance("", capabilities, tags, uuid.New(), platformDevices)
if err != nil {
seelog.Errorf("Error registering: %v", err)
if retriable, ok := err.(apierrors.Retriable); ok && !retriable.Retry() {
return err
}
if utils.IsAWSErrorCodeEqual(err, ecs.ErrCodeInvalidParameterException) {
seelog.Critical("Instance registration attempt with an invalid parameter")
return err
}
if _, ok := err.(apierrors.AttributeError); ok {
seelog.Critical("Instance registration attempt with an invalid attribute")
return err
}
return transientError{err}
}
seelog.Infof("Registration completed successfully. I am running as '%s' in cluster '%s'", containerInstanceArn, agent.cfg.Cluster)
agent.containerInstanceARN = containerInstanceArn
agent.availabilityZone = availabilityZone
// Save our shiny new containerInstanceArn
stateManager.Save()
return nil
}
// reregisterContainerInstance registers a container instance that has already been
// registered with ECS. This is for cases where the ECS Agent is being restored
// from a check point.
func (agent *ecsAgent) reregisterContainerInstance(client api.ECSClient,
capabilities []*ecs.Attribute, tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice) error {
_, availabilityZone, err := client.RegisterContainerInstance(agent.containerInstanceARN, capabilities, tags, registrationToken, platformDevices)
//set az to agent
agent.availabilityZone = availabilityZone
if err == nil {
return nil
}
seelog.Errorf("Error re-registering: %v", err)
if apierrors.IsInstanceTypeChangedError(err) {
seelog.Criticalf(instanceTypeMismatchErrorFormat, err)
return err
}
if _, ok := err.(apierrors.AttributeError); ok {
seelog.Critical("Instance re-registration attempt with an invalid attribute")
return err
}
return transientError{err}
}
// startAsyncRoutines starts all of the background methods
func (agent *ecsAgent) startAsyncRoutines(
containerChangeEventStream *eventstream.EventStream,
credentialsManager credentials.Manager,
imageManager engine.ImageManager,
taskEngine engine.TaskEngine,
stateManager statemanager.StateManager,
deregisterInstanceEventStream *eventstream.EventStream,
client api.ECSClient,
taskHandler *eventhandler.TaskHandler,
attachmentEventHandler *eventhandler.AttachmentEventHandler,
state dockerstate.TaskEngineState) {
// Start of the periodic image cleanup process
if !agent.cfg.ImageCleanupDisabled {
go imageManager.StartImageCleanupProcess(agent.ctx)
}
// Start automatic spot instance draining poller routine
if agent.cfg.SpotInstanceDrainingEnabled {
go agent.startSpotInstanceDrainingPoller(client)
}
go agent.terminationHandler(stateManager, taskEngine)
// Agent introspection api
go handlers.ServeIntrospectionHTTPEndpoint(&agent.containerInstanceARN, taskEngine, agent.cfg)
statsEngine := stats.NewDockerStatsEngine(agent.cfg, agent.dockerClient, containerChangeEventStream)
// Start serving the endpoint to fetch IAM Role credentials and other task metadata
if agent.cfg.TaskMetadataAZDisabled {
// send empty availability zone
go handlers.ServeTaskHTTPEndpoint(credentialsManager, state, client, agent.containerInstanceARN, agent.cfg, statsEngine, "")
} else {
go handlers.ServeTaskHTTPEndpoint(credentialsManager, state, client, agent.containerInstanceARN, agent.cfg, statsEngine, agent.availabilityZone)
}
// Start sending events to the backend
go eventhandler.HandleEngineEvents(taskEngine, client, taskHandler, attachmentEventHandler)
telemetrySessionParams := tcshandler.TelemetrySessionParams{
Ctx: agent.ctx,
CredentialProvider: agent.credentialProvider,
Cfg: agent.cfg,
ContainerInstanceArn: agent.containerInstanceARN,
DeregisterInstanceEventStream: deregisterInstanceEventStream,
ECSClient: client,
TaskEngine: taskEngine,
StatsEngine: statsEngine,
}
// Start metrics session in a go routine
go tcshandler.StartMetricsSession(&telemetrySessionParams)
}
func (agent *ecsAgent) startSpotInstanceDrainingPoller(client api.ECSClient) {
for !agent.spotInstanceDrainingPoller(client) {
time.Sleep(time.Second)
}
}
// spotInstanceDrainingPoller returns true if spot instance interruption has been
// set AND the container instance state is successfully updated to DRAINING.
func (agent *ecsAgent) spotInstanceDrainingPoller(client api.ECSClient) bool {
	// this endpoint 404s unless an interruption has been set, so expect failure in most cases.
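	// Illustrative payload shape: {"action": "terminate", "time": "2020-01-01T00:00:00Z"}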
resp, err := agent.ec2MetadataClient.SpotInstanceAction()
if err == nil {
type InstanceAction struct {
Time string
Action string
}
ia := InstanceAction{}
err := json.Unmarshal([]byte(resp), &ia)
if err != nil {
seelog.Errorf("Invalid response from /spot/instance-action endpoint: %s Error: %s", resp, err)
return false
}
switch ia.Action {
case "hibernate", "terminate", "stop":
default:
seelog.Errorf("Invalid response from /spot/instance-action endpoint: %s, Error: unrecognized action (%s)", resp, ia.Action)
return false
}
seelog.Infof("Received a spot interruption (%s) scheduled for %s, setting state to DRAINING", ia.Action, ia.Time)
err = client.UpdateContainerInstancesState(agent.containerInstanceARN, "DRAINING")
if err != nil {
seelog.Errorf("Error setting instance [ARN: %s] state to DRAINING: %s", agent.containerInstanceARN, err)
} else {
return true
}
}
return false
}
// startACSSession starts a session with ECS's Agent Communication service. This
// is a blocking call and only returns when the handler returns
func (agent *ecsAgent) startACSSession(
credentialsManager credentials.Manager,
taskEngine engine.TaskEngine,
stateManager statemanager.StateManager,
deregisterInstanceEventStream *eventstream.EventStream,
client api.ECSClient,
state dockerstate.TaskEngineState,
taskHandler *eventhandler.TaskHandler) int {
acsSession := acshandler.NewSession(
agent.ctx,
agent.cfg,
deregisterInstanceEventStream,
agent.containerInstanceARN,
agent.credentialProvider,
client,
state,
stateManager,
taskEngine,
credentialsManager,
taskHandler,
)
seelog.Info("Beginning Polling for updates")
err := acsSession.Start()
if err != nil {
seelog.Criticalf("Unretriable error starting communicating with ACS: %v", err)
return exitcodes.ExitTerminal
}
seelog.Critical("ACS Session handler should never exit")
return exitcodes.ExitError
}
// verifyRequiredDockerVersion validates the docker version.
// Minimum docker version supported is 1.9.0, maps to api version 1.21
// see https://docs.docker.com/develop/sdk/#api-version-matrix
func (agent *ecsAgent) verifyRequiredDockerVersion() (int, bool) {
supportedVersions := agent.dockerClient.SupportedVersions()
if len(supportedVersions) == 0 {
seelog.Critical("Could not get supported docker versions.")
return exitcodes.ExitError, false
}
// if api version 1.21 is supported, it means docker version is at least 1.9.0
for _, version := range supportedVersions {
if version == dockerclient.Version_1_21 {
return -1, true
}
}
// api 1.21 is not supported, docker version is older than 1.9.0
seelog.Criticalf("Required minimum docker API verion %s is not supported",
dockerclient.Version_1_21)
return exitcodes.ExitTerminal, false
}
// getContainerInstanceTagsFromEC2API will retrieve the tags of this instance remotely.
func (agent *ecsAgent) getContainerInstanceTagsFromEC2API() ([]*ecs.Tag, error) {
// Get instance ID from ec2 metadata client.
instanceID, err := agent.ec2MetadataClient.InstanceID()
if err != nil {
return nil, err
}
return agent.ec2Client.DescribeECSTagsForInstance(instanceID)
}
// mergeTags will merge the local tags and ec2 tags, for the overlap part, ec2 tags
// will be overridden by local tags.
func mergeTags(localTags []*ecs.Tag, ec2Tags []*ecs.Tag) []*ecs.Tag {
tagsMap := make(map[string]string)
for _, ec2Tag := range ec2Tags {
tagsMap[aws.StringValue(ec2Tag.Key)] = aws.StringValue(ec2Tag.Value)
}
for _, localTag := range localTags {
tagsMap[aws.StringValue(localTag.Key)] = aws.StringValue(localTag.Value)
}
return utils.MapToTags(tagsMap)
}
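
// A minimal sketch of the precedence implemented by mergeTags: when a key appears
// in both inputs, the locally configured value wins. Keys and values here are
// placeholders.
func exampleMergeTags() []*ecs.Tag {
	localTags := utils.MapToTags(map[string]string{"team": "ecs", "env": "prod"})
	ec2Tags := utils.MapToTags(map[string]string{"env": "staging", "owner": "infra"})
	// Result contains team=ecs, owner=infra and env=prod (the local value for "env" wins).
	return mergeTags(localTags, ec2Tags)
}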
// getHostPrivateIPv4AddressFromEC2Metadata will retrieve the PrivateIPAddress (IPv4) of this
// instance through the EC2 API
func (agent *ecsAgent) getHostPrivateIPv4AddressFromEC2Metadata() string {
// Get instance private IP from ec2 metadata client.
hostPrivateIPv4Address, err := agent.ec2MetadataClient.PrivateIPv4Address()
if err != nil {
seelog.Errorf("Unable to retrieve Host Instance PrivateIPv4 Address: %v", err)
return ""
}
return hostPrivateIPv4Address
}
// getHostPublicIPv4AddressFromEC2Metadata will retrieve the PublicIPAddress (IPv4) of this
// instance through the EC2 API
func (agent *ecsAgent) getHostPublicIPv4AddressFromEC2Metadata() string {
// Get instance public IP from ec2 metadata client.
hostPublicIPv4Address, err := agent.ec2MetadataClient.PublicIPv4Address()
if err != nil {
seelog.Errorf("Unable to retrieve Host Instance PublicIPv4 Address: %v", err)
return ""
}
return hostPublicIPv4Address
}
| 1 | 23,501 | synced offline. let's move this to `agent_unix.go` to make the IMDS call, since this is not supported in Windows. | aws-amazon-ecs-agent | go |
@@ -12,6 +12,10 @@ class PythonMappings
fun.add_mapping("py_env", Python::VirtualEnv.new)
fun.add_mapping("py_docs", Python::GenerateDocs.new)
+
+ fun.add_mapping("py_install", Python::Install.new)
+
+ fun.add_mapping("py_prep", Python::Prep.new)
end
end
| 1 | require 'rake'
require 'rake-tasks/browsers.rb'
require 'rake-tasks/crazy_fun/mappings/common'
class PythonMappings
def add_all(fun)
fun.add_mapping("py_test", Python::CheckPreconditions.new)
fun.add_mapping("py_test", Python::PrepareTests.new)
fun.add_mapping("py_test", Python::AddDependencies.new)
fun.add_mapping("py_test", Python::RunTests.new)
fun.add_mapping("py_env", Python::VirtualEnv.new)
fun.add_mapping("py_docs", Python::GenerateDocs.new)
end
end
module Python
class CheckPreconditions
def handle(fun, dir, args)
raise StandardError, ":name must be set" if args[:name].nil?
end
end
def self.lib_dir
Dir::glob('build/lib*')[0] || 'build/lib'
end
class PyTask < Tasks
def get_resources(browser, args)
resources = []
resources.concat(args[:resources]) if args[:resources]
browser_specific_resources = BROWSERS[browser][:python][:resources]
resources.concat(browser_specific_resources) if browser_specific_resources
return resources
end
end
class AddDependencies < PyTask
def handle(fun, dir, args)
(args[:browsers] || [:ff]).each do |browser|
target = Rake::Task[task_name(dir, "#{args[:name]}_#{browser}")]
add_dependencies(target, dir, args[:deps])
resources = get_resources(browser, args)
add_dependencies(target, dir, resources)
end
end
end
class PrepareTests < PyTask
def copy_common_tests(dir, tests, browser)
general_tests = Dir.glob(tests.map { |test| dir + Platform.dir_separator + test })
general_tests.each do |general_test|
create_test_file_for(general_test, browser)
end
end
def create_test_file_for(general_test, browser)
browser_data = BROWSERS[browser][:python]
browser_class = browser_data[:class]
filename_parts = general_test.split(/[\\\/]/) # Split on / or \
package_name = filename_parts[1..-2].join('.') # Drop py/ prefix, and filename
general_filename = filename_parts.last
file = IO.read(general_test)
general_test_class = file[/class ([A-Za-z]+)/, 1] or raise "could not find class name in #{file.inspect}"
browser_specific_test_class = browser_class + general_test_class
template = IO.read("py/test/selenium/webdriver/browser_specific_template.py")
{
"##BROWSER_CONSTRUCTOR##" => "#{browser_class}(#{browser_data[:constructor_args] || ''})",
"##GENERAL_TEST_CLASS##" => general_test_class,
"##BROWSER_SPECIFIC_TEST_CLASS##" => browser_specific_test_class,
"##PACKAGE_NAME##" => package_name,
"##GENERAL_FILENAME##" => general_filename.split('.').first,
"##CUSTOM_TEST_SETUP##" => browser_data[:custom_test_setup] || "",
"##CUSTOM_TEST_TEARDOWN##" => browser_data[:custom_test_teardown] || "",
"##CUSTOM_TEST_IMPORT##" => browser_data[:custom_test_import] || "",
}.each do |old,new|
template = template.gsub(old, new)
end
#This path should be somehow passed through the py_env dep, rather than hard-coded
path = "#{Python::lib_dir}/selenium/test/selenium/webdriver/#{browser_data[:dir]}"
unless File.exists?(path)
mkdir_p path
touch "#{path}/__init__.py"
end
File.open("#{path}/#{browser_data[:file_string]}_#{general_filename}", "w") { |f| f.write(template) }
end
def copy_browser_specific_tests(dir, test_files, browser)
tests = Dir.glob(test_files.map { |test| dir + Platform.dir_separator + test })
tests.each do |test_file|
#This path should be somehow passed through the py_env dep, rather than hard-coded
cp test_file, "#{Python::lib_dir}/selenium/test/selenium/webdriver/#{BROWSERS[browser][:python][:dir]}"
end
end
def handle(fun, dir, args)
base_task_name = task_name(dir, args[:name])
browsers = args[:browsers] || ['ff']
browsers.each do |browser|
browser_data = BROWSERS[browser][:python]
task_name = "#{base_task_name}_#{browser}"
task task_name do
resources = get_resources(browser, args)
copy_resources dir, resources, Python::lib_dir
copy_common_tests(dir, args[:common_tests], browser) if args[:common_tests]
browser_specific_tests = args[:"#{browser}_specific_tests"]
copy_browser_specific_tests(dir, browser_specific_tests, browser) if browser_specific_tests
end
end
end
end
class RunTests
def python_path
#This path should be passed through the py_env dep, rather than hard-coded
windows? ? "build\\python\\Scripts\\" : "build/python/bin/"
end
def pytest_path
py_test_path = python_path + 'py.test'
py_test_path = py_test_path + ".exe" if windows?
if File.exists?(py_test_path)
py_test = py_test_path
else
py_test = 'py.test'
end
end
def copy_source_to_env
py_setup = python_path + 'python setup.py build'
sh py_setup , :verbose => true
end
def handle(fun, dir, args)
base_task_name = Tasks.new.task_name(dir, args[:name])
browsers = args[:browsers] || ['ff']
browsers.each do |browser|
browser_data = BROWSERS[browser][:python]
deps = ["//py:test_env", "#{base_task_name}_#{browser}"] + (browser_data[:deps] || [])
task_name = "#{base_task_name}_#{browser}:run"
task task_name => deps do
copy_source_to_env
# Test file pattern has been specified in the pytest.ini file at project root dir
test_dir = ["#{Python::lib_dir}/selenium/test/selenium/webdriver/#{browser_data[:dir]}/"]
pytest_args = [pytest_path] + test_dir
ignores = "-ignore_#{browser_data[:ignore]}" if browser_data[:ignore]
ignores += " and " + ENV['method'] if ENV['method']
pytest_args += ["-k=\"" + ignores + "\""]
pytest_args += ["--junitxml=build/test_logs/python-#{browser}-#{Time.now.to_i}.xml"]
mkdir_p "build/test_logs"
sh pytest_args.join(' '), :verbose => true
end
end
#Also generate test with exactly this name, if only one browser specified
task "#{base_task_name}:run" => [ :"#{base_task_name}_#{browsers.first}:run" ] if browsers.length == 1
end
end
class VirtualEnv
def handle(fun, dir, args)
task Tasks.new.task_name(dir, args[:name]) do
dest = Platform.path_for(args[:dest])
pip_pkg = "pip install #{args[:packages].join(' ')}"
virtualenv = ["virtualenv", "--no-site-packages", " #{dest}"]
virtualenv += ["-p", ENV['pyversion']] if ENV['pyversion']
sh virtualenv.join(' '), :verbose => true do |ok, res|
unless ok
puts ""
puts "PYTHON DEPENDENCY ERROR: Virtualenv not found."
puts "Please run '[sudo] pip install virtualenv'"
puts ""
end
end
slash = Platform.dir_separator
python_dir = dest + slash + (windows? ? "Scripts" : "bin")
pip_install = python_dir + slash + pip_pkg
sh pip_install, :verbose => true
sh "#{python_dir}#{slash}python setup.py install", :verbose => true
end
end
end
class GenerateDocs < Tasks
def python_path
#This path should be passed through the py_env dep, rather than hard-coded
windows? ? "build\\python\\Scripts\\" : "build/python/bin/"
end
def handle(fun, dir, args)
task Tasks.new.task_name(dir, args[:name]) => args[:deps] do
source_folder = Platform.path_for args[:source_folder]
target_folder = Platform.path_for args[:target_folder]
sphinx_build = "#{python_path}sphinx-build"
sphinx_build = sphinx_build + ".exe" if windows?
sh "#{sphinx_build} -b html -d build/doctrees #{source_folder} #{target_folder}", :verbose => true
end
end
end
end
| 1 | 11,997 | Too much indentation here. Should match the lines above, which have four spaces. | SeleniumHQ-selenium | js |
@@ -15,14 +15,6 @@ BOOST_AUTO_TEST_CASE(test_incompatible_with_mld)
osrm::exception);
}
-BOOST_AUTO_TEST_CASE(test_incompatible_with_corech)
-{
- // Note - CH-only data can't be used with the CoreCH algorithm
- BOOST_CHECK_THROW(
- getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm", osrm::EngineConfig::Algorithm::CoreCH),
- osrm::exception);
-}
-
BOOST_AUTO_TEST_CASE(test_incompatible_with_ch)
{
// Can't use the CH algorithm with MLD data | 1 | #include <boost/test/test_case_template.hpp>
#include <boost/test/unit_test.hpp>
#include "fixture.hpp"
#include "osrm/exception.hpp"
BOOST_AUTO_TEST_SUITE(table)
BOOST_AUTO_TEST_CASE(test_incompatible_with_mld)
{
// Can't use the MLD algorithm with CH data
BOOST_CHECK_THROW(
getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm", osrm::EngineConfig::Algorithm::MLD),
osrm::exception);
}
BOOST_AUTO_TEST_CASE(test_incompatible_with_corech)
{
// Note - CH-only data can't be used with the CoreCH algorithm
BOOST_CHECK_THROW(
getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm", osrm::EngineConfig::Algorithm::CoreCH),
osrm::exception);
}
BOOST_AUTO_TEST_CASE(test_incompatible_with_ch)
{
// Can't use the CH algorithm with MLD data
BOOST_CHECK_THROW(
getOSRM(OSRM_TEST_DATA_DIR "/mld/monaco.osrm", osrm::EngineConfig::Algorithm::CH),
osrm::exception);
}
BOOST_AUTO_TEST_SUITE_END()
| 1 | 22,954 | Same here we still need this test to make sure the fallback works. | Project-OSRM-osrm-backend | cpp |
@@ -0,0 +1,8 @@
+using System;
+namespace MvvmCross.iOS.Views.Presenters.Attributes
+{
+ public interface IMvxOverridePresentationAttribute
+ {
+ MvxBasePresentationAttribute OverridePresentationAttribute();
+ }
+} | 1 | 1 | 12,726 | I'm wondering if we can actually base this on a `IMvxPresentationAttribute` instead of the base one. | MvvmCross-MvvmCross | .cs |
|
@@ -430,7 +430,7 @@ export function unmount(vnode, parentVNode, skipRemove) {
}
}
- r.base = r._parentDom = null;
+ r.base = r._parentDom = r._vnode = vnode._component = null;
}
if ((r = vnode._children)) { | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component } from '../component';
import { Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {Element | Text} oldDom The current attached DOM
* element any new dom elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} [isHydrating] Whether or not we are in hydration
*/
export function diff(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
) {
let tmp,
newType = newVNode.type;
// When passing through createElement it assigns the object
	// constructor as undefined. This is to prevent JSON-injection.
if (newVNode.constructor !== undefined) return null;
if ((tmp = options._diff)) tmp(newVNode);
try {
outer: if (typeof newType === 'function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp
? provider
? provider.props.value
: tmp._defaultValue
: context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
} else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
} else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState == null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps != null) {
if (c._nextState == c.state) {
c._nextState = assign({}, c._nextState);
}
assign(
c._nextState,
newType.getDerivedStateFromProps(newProps, c._nextState)
);
}
oldProps = c.props;
oldState = c.state;
// Invoke pre-render lifecycle methods
if (isNew) {
if (
newType.getDerivedStateFromProps == null &&
c.componentWillMount != null
) {
c.componentWillMount();
}
if (c.componentDidMount != null) {
c._renderCallbacks.push(c.componentDidMount);
}
} else {
if (
newType.getDerivedStateFromProps == null &&
newProps !== oldProps &&
c.componentWillReceiveProps != null
) {
c.componentWillReceiveProps(newProps, cctx);
}
if (
!c._force &&
c.shouldComponentUpdate != null &&
c.shouldComponentUpdate(newProps, c._nextState, cctx) === false
) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) {
newVNode._children[tmp]._parent = newVNode;
}
}
break outer;
}
if (c.componentWillUpdate != null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
if (c.componentDidUpdate != null) {
c._renderCallbacks.push(() => {
c.componentDidUpdate(oldProps, oldState, snapshot);
});
}
}
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if ((tmp = options._render)) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment =
tmp != null && tmp.type == Fragment && tmp.key == null;
newVNode._children = isTopLevelFragment ? tmp.props.children : tmp;
if (c.getChildContext != null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate != null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
);
c.base = newVNode._dom;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
c._force = false;
} else {
newVNode._dom = diffElementNodes(
oldVNode._dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
);
}
if ((tmp = options.diffed)) tmp(newVNode);
} catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
/**
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {import('../internal').VNode} root
*/
export function commitRoot(commitQueue, root) {
if (options._commit) options._commit(root, commitQueue);
commitQueue.some(c => {
try {
commitQueue = c._renderCallbacks;
c._renderCallbacks = [];
commitQueue.some(cb => {
cb.call(c);
});
} catch (e) {
options._catchError(e, c._vnode);
}
});
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(
dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type === 'svg' || isSvg;
if (dom == null && excessDomChildren != null) {
for (i = 0; i < excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (
child != null &&
(newVNode.type === null
? child.nodeType === 3
: child.localName === newVNode.type)
) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom == null) {
if (newVNode.type === null) {
return document.createTextNode(newProps);
}
dom = isSvg
? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type)
: document.createElement(
newVNode.type,
newProps.is && { is: newProps.is }
);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type === null) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
}
if (oldProps !== newProps && dom.data != newProps) {
dom.data = newProps;
}
} else if (newVNode !== oldVNode) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (oldProps === EMPTY_OBJ) {
oldProps = {};
for (let i = 0; i < dom.attributes.length; i++) {
oldProps[dom.attributes[i].name] = dom.attributes[i].value;
}
}
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not changed between re-render
if (!newHtml || !oldHtml || newHtml.__html != oldHtml.__html) {
dom.innerHTML = (newHtml && newHtml.__html) || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
newVNode._children = newVNode.props.children;
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(
dom,
newVNode,
oldVNode,
context,
newVNode.type === 'foreignObject' ? false : isSvg,
excessDomChildren,
commitQueue,
EMPTY_OBJ,
isHydrating
);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (
'value' in newProps &&
newProps.value !== undefined &&
newProps.value !== dom.value
) {
dom.value = newProps.value == null ? '' : newProps.value;
}
if (
'checked' in newProps &&
newProps.checked !== undefined &&
newProps.checked !== dom.checked
) {
dom.checked = newProps.checked;
}
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
export function applyRef(ref, value, vnode) {
try {
if (typeof ref == 'function') ref(value);
else ref.current = value;
} catch (e) {
options._catchError(e, vnode);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if ((r = vnode.ref)) {
if (!r.current || r.current === vnode._dom) applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom) != null;
}
// Must be set to `undefined` to properly clean up `_nextDom`
// for which `null` is a valid value. See comment in `create-element.js`
vnode._dom = vnode._nextDom = undefined;
if ((r = vnode._component) != null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
} catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if ((r = vnode._children)) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom != null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
| 1 | 15,244 | could we switch to `undefined` here? | preactjs-preact | js |
@@ -94,6 +94,7 @@ public class SyncManager {
this.smartStore = smartStore;
this.restClient = restClient;
SyncState.setupSyncsSoupIfNeeded(smartStore);
+ SyncState.cleanupSyncsSoupIfNeeded(smartStore);
}
/** | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.manager;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.analytics.EventBuilderHelper;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.rest.ApiVersionStrings;
import com.salesforce.androidsdk.rest.RestClient;
import com.salesforce.androidsdk.rest.RestRequest;
import com.salesforce.androidsdk.rest.RestResponse;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
import com.salesforce.androidsdk.smartsync.app.Features;
import com.salesforce.androidsdk.smartsync.app.SmartSyncSDKManager;
import com.salesforce.androidsdk.smartsync.target.AdvancedSyncUpTarget;
import com.salesforce.androidsdk.smartsync.target.SyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SyncUpTarget;
import com.salesforce.androidsdk.smartsync.util.SmartSyncLogger;
import com.salesforce.androidsdk.smartsync.util.SyncOptions;
import com.salesforce.androidsdk.smartsync.util.SyncState;
import com.salesforce.androidsdk.smartsync.util.SyncState.MergeMode;
import com.salesforce.androidsdk.util.JSONObjectHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* Sync Manager
*/
public class SyncManager {
// Constants
private static final int UNCHANGED = -1;
private static final String TAG = "SyncManager";
// For user agent
private static final String SMART_SYNC = "SmartSync";
// Static member
private static Map<String, SyncManager> INSTANCES = new HashMap<String, SyncManager>();
// Members
private Set<Long> runningSyncIds = new HashSet<Long>();
public final String apiVersion;
private final ExecutorService threadPool = Executors.newFixedThreadPool(1);
private SmartStore smartStore;
private RestClient restClient;
/**
* Private constructor
*
* @param smartStore
*/
private SyncManager(SmartStore smartStore, RestClient restClient) {
apiVersion = ApiVersionStrings.getVersionNumber(SalesforceSDKManager.getInstance().getAppContext());
this.smartStore = smartStore;
this.restClient = restClient;
SyncState.setupSyncsSoupIfNeeded(smartStore);
}
/**
* Returns the instance of this class associated with current user.
*
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance() {
return getInstance(null, null);
}
/**
* Returns the instance of this class associated with this user account.
*
* @param account User account.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account) {
return getInstance(account, null);
}
/**
* Returns the instance of this class associated with this user and community.
* Sync manager returned is ready to use.
*
* @param account User account.
* @param communityId Community ID.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId) {
return getInstance(account, communityId, null);
}
/**
* Returns the instance of this class associated with this user, community and smartstore.
*
     * @param account User account. Pass null to use the current user.
* @param communityId Community ID. Pass null if not applicable
* @param smartStore SmartStore instance. Pass null to use current user default smartstore.
* @return Instance of this class.
*/
public static synchronized SyncManager getInstance(UserAccount account, String communityId, SmartStore smartStore) {
if (account == null) {
account = SmartSyncSDKManager.getInstance().getUserAccountManager().getCachedCurrentUser();
}
if (smartStore == null) {
smartStore = SmartSyncSDKManager.getInstance().getSmartStore(account, communityId);
}
String uniqueId = (account != null ? account.getUserId() : "") + ":"
+ smartStore.getDatabase().getPath();
SyncManager instance = INSTANCES.get(uniqueId);
if (instance == null) {
RestClient restClient = null;
/*
* If account is still null, there is no user logged in, which means, the default
* RestClient should be set to the unauthenticated RestClient instance.
*/
if (account == null) {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekUnauthenticatedRestClient();
} else {
restClient = SalesforceSDKManager.getInstance().getClientManager().peekRestClient(account);
}
instance = new SyncManager(smartStore, restClient);
INSTANCES.put(uniqueId, instance);
}
SalesforceSDKManager.getInstance().registerUsedAppFeature(Features.FEATURE_SMART_SYNC);
return instance;
}
/**
* Resets all the sync managers
*/
public static synchronized void reset() {
for (SyncManager syncManager : INSTANCES.values()) {
syncManager.threadPool.shutdownNow();
}
INSTANCES.clear();
}
/**
* Resets the sync managers for this user account
*
* @param account User account.
*/
public static synchronized void reset(UserAccount account) {
if (account != null) {
Set<String> keysToRemove = new HashSet<>();
for (String key : INSTANCES.keySet()) {
if (key.startsWith(account.getUserId())) {
keysToRemove.add(key);
SyncManager syncManager = INSTANCES.get(key);
syncManager.threadPool.shutdownNow();
}
}
// NB: keySet returns a Set view of the keys contained in this map.
// The set is backed by the map, so changes to the map are reflected in the set, and vice-versa.
INSTANCES.keySet().removeAll(keysToRemove);
}
}
/**
* Get details of a sync by id
*
* @param syncId
* @return
* @throws JSONException
*/
public SyncState getSyncStatus(long syncId) throws JSONException {
return SyncState.byId(smartStore, syncId);
}
/**
* Get details of a sync by name
*
* @param name
* @return
* @throws JSONException
*/
public SyncState getSyncStatus(String name) throws JSONException {
return SyncState.byName(smartStore, name);
}
/**
* Return true if there is a sync with the given name
*
* @param name
* @return
*/
public boolean hasSyncWithName(String name) {
return SyncState.hasSyncWithName(smartStore, name);
}
/**
* Delete sync by id
*
* @param syncId
* @return
* @throws JSONException
*/
public void deleteSync(long syncId) throws JSONException {
SyncState.deleteSync(smartStore, syncId);
}
/**
* Delete sync by name
*
* @param name
* @return
* @throws JSONException
*/
public void deleteSync(String name) throws JSONException {
SyncState.deleteSync(smartStore, name);
}
/**
* Create and run a sync down that will overwrite any modified records
*
* @param target
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, String soupName, SyncUpdateCallback callback) throws JSONException {
SyncOptions options = SyncOptions.optionsForSyncDown(MergeMode.OVERWRITE);
return syncDown(target, options, soupName, callback);
}
/**
* Create and run a sync down without a name
*
* @param target
* @param options
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
return syncDown(target, options, soupName, null, callback);
}
/**
* Create and run a sync down
*
* @param target
* @param options
* @param soupName
* @param syncName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncDown(SyncDownTarget target, SyncOptions options, String soupName, String syncName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = createSyncDown(target, options, soupName, syncName);
SmartSyncLogger.d(TAG, "syncDown called", sync);
runSync(sync, callback);
return sync;
}
/**
* Create a sync down
*
* @param target
* @param options
* @param soupName
* @param syncName
* @return
* @throws JSONException
*/
public SyncState createSyncDown(SyncDownTarget target, SyncOptions options, String soupName, String syncName) throws JSONException {
return SyncState.createSyncDown(smartStore, target, options, soupName, syncName);
}
/**
* Re-run sync but only fetch new/modified records
*
* @param syncId
* @param callback
* @throws JSONException
*/
public SyncState reSync(long syncId, SyncUpdateCallback callback) throws JSONException {
if (runningSyncIds.contains(syncId)) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": still running");
}
SyncState sync = SyncState.byId(smartStore, syncId);
if (sync == null) {
throw new SmartSyncException("Cannot run reSync:" + syncId + ": no sync found");
}
sync.setTotalSize(-1);
SmartSyncLogger.d(TAG, "reSync called", sync);
runSync(sync, callback);
return sync;
}
/**
* Re-run sync but only fetch new/modified records
*
* @param syncName
* @param callback
* @throws JSONException
*/
public SyncState reSync(String syncName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = getSyncStatus(syncName);
if (sync == null) {
throw new SmartSyncException("Cannot run reSync:" + syncName + ": no sync found");
}
return reSync(sync.getId(), callback);
}
/**
* Run a sync
*
* @param sync
* @param callback
*/
public void runSync(final SyncState sync, final SyncUpdateCallback callback) {
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
switch (sync.getType()) {
case syncDown:
syncDown(sync, callback);
break;
case syncUp:
syncUp(sync, callback);
break;
}
updateSync(sync, SyncState.Status.DONE, 100, callback);
} catch (RestClient.RefreshTokenRevokedException re) {
SmartSyncLogger.e(TAG, "Exception thrown in runSync", re);
// Do not do anything - let the logout go through!
} catch (Exception e) {
SmartSyncLogger.e(TAG, "Exception thrown in runSync", e);
//Set error message to sync state
sync.setError(e.getMessage());
// Update status to failed
updateSync(sync, SyncState.Status.FAILED, UNCHANGED, callback);
}
}
});
}
/**
* Create and run a sync up without a name
*
* @param target
* @param options
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncUp(SyncUpTarget target, SyncOptions options, String soupName, SyncUpdateCallback callback) throws JSONException {
return syncUp(target, options, soupName, null, callback);
}
/**
* Create and run a sync up
*
* @param target
* @param options
* @param soupName
* @param callback
* @return
* @throws JSONException
*/
public SyncState syncUp(SyncUpTarget target, SyncOptions options, String soupName, String syncName, SyncUpdateCallback callback) throws JSONException {
SyncState sync = createSyncUp(target, options, soupName, syncName);
SmartSyncLogger.d(TAG, "syncUp called", sync);
runSync(sync, callback);
return sync;
}
/**
* Create a sync up
*
* @param target
* @param options
* @param soupName
* @param syncName
* @return
* @throws JSONException
*/
public SyncState createSyncUp(SyncUpTarget target, SyncOptions options, String soupName, String syncName) throws JSONException {
return SyncState.createSyncUp(smartStore, target, options, soupName, syncName);
}
/**
* Removes local copies of records that have been deleted on the server
* or do not match the query results on the server anymore.
*
* @param syncId Sync ID.
* @throws JSONException
* @throws IOException
*/
public void cleanResyncGhosts(final long syncId) throws JSONException, IOException {
cleanResyncGhosts(syncId, null);
}
/**
* Removes local copies of records that have been deleted on the server
* or do not match the query results on the server anymore.
*
* @param syncId
* @param callback Callback to get clean resync ghosts completion status.
* @throws JSONException
* @throws IOException
*/
public void cleanResyncGhosts(final long syncId, final CleanResyncGhostsCallback callback) throws JSONException {
if (runningSyncIds.contains(syncId)) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": still running");
}
final SyncState sync = SyncState.byId(smartStore, syncId);
if (sync == null) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": no sync found");
}
if (sync.getType() != SyncState.Type.syncDown) {
throw new SmartSyncException("Cannot run cleanResyncGhosts:" + syncId + ": wrong type:" + sync.getType());
}
SmartSyncLogger.d(TAG, "cleanResyncGhosts called", sync);
final String soupName = sync.getSoupName();
final SyncDownTarget target = (SyncDownTarget) sync.getTarget();
// Ask target to clean up ghosts
threadPool.execute(new Runnable() {
@Override
public void run() {
try {
final int localIdSize = target.cleanGhosts(SyncManager.this, soupName, syncId);
final JSONObject attributes = new JSONObject();
if (localIdSize > 0) {
try {
attributes.put("numRecords", localIdSize);
attributes.put("syncId", sync.getId());
attributes.put("syncTarget", target.getClass().getName());
EventBuilderHelper.createAndStoreEventSync("cleanResyncGhosts", null, TAG, attributes);
} catch (JSONException e) {
SmartSyncLogger.e(TAG, "Unexpected JSON error for cleanResyncGhosts sync tag: " + sync.getId(), e);
}
}
if (callback != null) {
callback.onSuccess(localIdSize);
}
} catch (Exception e) {
SmartSyncLogger.e(TAG, "Exception thrown cleaning resync ghosts", e);
if (callback != null) {
callback.onError(e);
}
}
}
});
}
/**
* Update sync with new status, progress, totalSize
*
* @param sync
* @param status
* @param progress pass -1 to keep the current value
* @param callback
*/
private void updateSync(SyncState sync, SyncState.Status status, int progress, SyncUpdateCallback callback) {
try {
sync.setStatus(status);
if (progress != UNCHANGED) {
sync.setProgress(progress);
}
switch (status) {
case NEW:
break;
case RUNNING:
runningSyncIds.add(sync.getId());
break;
case DONE:
case FAILED:
int totalSize = sync.getTotalSize();
final JSONObject attributes = new JSONObject();
try {
if (totalSize > 0) {
attributes.put("numRecords", totalSize);
}
attributes.put("syncId", sync.getId());
attributes.put("syncTarget", sync.getTarget().getClass().getName());
attributes.put(EventBuilderHelper.START_TIME, sync.getStartTime());
attributes.put(EventBuilderHelper.END_TIME, sync.getEndTime());
} catch (JSONException e) {
SmartSyncLogger.e(TAG, "Exception thrown while building attributes", e);
}
EventBuilderHelper.createAndStoreEvent(sync.getType().name(), null, TAG, attributes);
runningSyncIds.remove(sync.getId());
break;
}
sync.save(smartStore);
} catch (JSONException e) {
SmartSyncLogger.e(TAG, "Unexpected JSON error for sync: " + sync.getId(), e);
} catch (SmartStoreException e) {
SmartSyncLogger.e(TAG, "Unexpected smart store error for sync: " + sync.getId(), e);
} finally {
callback.onUpdate(sync);
}
}
private void syncUp(SyncState sync, SyncUpdateCallback callback) throws Exception {
final SyncUpTarget target = (SyncUpTarget) sync.getTarget();
final List<String> dirtyRecordIds = new ArrayList<>(target.getIdsOfRecordsToSyncUp(this, sync.getSoupName()));
sync.setTotalSize(dirtyRecordIds.size());
if (target instanceof AdvancedSyncUpTarget) {
advancedSyncUp(sync, callback, dirtyRecordIds);
} else {
syncUp(sync, callback, dirtyRecordIds);
}
}
private void advancedSyncUp(SyncState sync, SyncUpdateCallback callback, List<String> dirtyRecordIds) throws JSONException, IOException {
final String soupName = sync.getSoupName();
final SyncUpTarget target = (SyncUpTarget) sync.getTarget();
final SyncOptions options = sync.getOptions();
int totalSize = dirtyRecordIds.size();
int maxBatchSize = ((AdvancedSyncUpTarget) target).getMaxBatchSize();
List<JSONObject> batch = new ArrayList<>();
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
for (int i=0; i<totalSize; i++) {
JSONObject record = target.getFromLocalStore(this, soupName, dirtyRecordIds.get(i));
if (shouldSyncUpRecord(target, record, options)) {
batch.add(record);
}
// Process batch if max batch size reached or at the end of dirtyRecordIds
if (batch.size() == maxBatchSize || i == totalSize - 1) {
((AdvancedSyncUpTarget) target).syncUpRecords(this, batch, options.getFieldlist(), options.getMergeMode(), sync.getSoupName());
batch.clear();
}
// Updating status
int progress = (i + 1) * 100 / totalSize;
if (progress < 100) {
updateSync(sync, SyncState.Status.RUNNING, progress, callback);
}
}
}
private void syncUp(SyncState sync, SyncUpdateCallback callback, List<String> dirtyRecordIds) throws JSONException, IOException {
final String soupName = sync.getSoupName();
final SyncUpTarget target = (SyncUpTarget) sync.getTarget();
final SyncOptions options = sync.getOptions();
int totalSize = dirtyRecordIds.size();
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
int i = 0;
for (final String id : dirtyRecordIds) {
JSONObject record = target.getFromLocalStore(this, soupName, id);
if (shouldSyncUpRecord(target, record, options)) {
syncUpOneRecord(target, soupName, record, options);
}
// Updating status
int progress = (i + 1) * 100 / totalSize;
if (progress < 100) {
updateSync(sync, SyncState.Status.RUNNING, progress, callback);
}
// Incrementing i
i++;
}
}
private boolean shouldSyncUpRecord(SyncUpTarget target, JSONObject record, SyncOptions options) throws IOException, JSONException {
/*
* Checks if we are attempting to sync up a record that has been updated
* on the server AFTER the client's last sync down. If the merge mode
* passed in tells us to leave the record alone under these
* circumstances, we will do nothing and return here.
*/
if (options.getMergeMode() == MergeMode.LEAVE_IF_CHANGED &&
!target.isNewerThanServer(this, record)) {
// Nothing to do for this record
SmartSyncLogger.d(TAG, "syncUpOneRecord: Record not synched since client does not have the latest from server", record);
return false;
}
else {
return true;
}
}
private void syncUpOneRecord(SyncUpTarget target, String soupName,
JSONObject record, SyncOptions options) throws JSONException, IOException {
SmartSyncLogger.d(TAG, "syncUpOneRecord called", record);
// Do we need to do a create, update or delete
boolean locallyDeleted = target.isLocallyDeleted(record);
boolean locallyCreated = target.isLocallyCreated(record);
boolean locallyUpdated = target.isLocallyUpdated(record);
Action action = null;
if (locallyDeleted)
action = Action.delete;
else if (locallyCreated)
action = Action.create;
else if (locallyUpdated)
action = Action.update;
if (action == null) {
// Nothing to do for this record
return;
}
// Create/update/delete record on server and update smartstore
String recordServerId;
int statusCode;
switch (action) {
case create:
recordServerId = target.createOnServer(this, record, options.getFieldlist());
// Success
if (recordServerId != null) {
record.put(target.getIdFieldName(), recordServerId);
target.cleanAndSaveInLocalStore(this, soupName, record);
}
// Failure
else {
target.saveRecordToLocalStoreWithLastError(this, soupName, record);
}
break;
case delete:
statusCode = (locallyCreated
? HttpURLConnection.HTTP_NOT_FOUND // if locally created it can't exist on the server - we don't need to actually do the deleteOnServer call
: target.deleteOnServer(this, record));
// Success
if (RestResponse.isSuccess(statusCode) || statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
target.deleteFromLocalStore(this, soupName, record);
}
// Failure
else {
target.saveRecordToLocalStoreWithLastError(this, soupName, record);
}
break;
case update:
statusCode = target.updateOnServer(this, record, options.getFieldlist());
// Success
if (RestResponse.isSuccess(statusCode)) {
target.cleanAndSaveInLocalStore(this, soupName, record);
}
// Handling remotely deleted records
else if (statusCode == HttpURLConnection.HTTP_NOT_FOUND) {
if (options.getMergeMode() == MergeMode.OVERWRITE) {
recordServerId = target.createOnServer(this, record, options.getFieldlist());
if (recordServerId != null) {
record.put(target.getIdFieldName(), recordServerId);
target.cleanAndSaveInLocalStore(this, soupName, record);
}
}
else {
// Leave local record alone
}
}
// Failure
else {
target.saveRecordToLocalStoreWithLastError(this, soupName, record);
}
break;
}
}
private void syncDown(SyncState sync, SyncUpdateCallback callback) throws Exception {
String soupName = sync.getSoupName();
SyncDownTarget target = (SyncDownTarget) sync.getTarget();
MergeMode mergeMode = sync.getMergeMode();
long maxTimeStamp = sync.getMaxTimeStamp();
JSONArray records = target.startFetch(this, maxTimeStamp);
int countSaved = 0;
int totalSize = target.getTotalSize();
sync.setTotalSize(totalSize);
updateSync(sync, SyncState.Status.RUNNING, 0, callback);
final String idField = sync.getTarget().getIdFieldName();
// Get ids of records to leave alone
Set<String> idsToSkip = null;
if (mergeMode == MergeMode.LEAVE_IF_CHANGED) {
idsToSkip = target.getIdsToSkip(this, soupName);
}
while (records != null) {
// Figure out records to save
JSONArray recordsToSave = idsToSkip == null ? records : removeWithIds(records, idsToSkip, idField);
// Save to smartstore.
target.saveRecordsToLocalStore(this, soupName, recordsToSave, sync.getId());
countSaved += records.length();
maxTimeStamp = Math.max(maxTimeStamp, target.getLatestModificationTimeStamp(records));
// Update sync status.
if (countSaved < totalSize) {
updateSync(sync, SyncState.Status.RUNNING, countSaved*100 / totalSize, callback);
}
// Fetch next records, if any.
records = target.continueFetch(this);
}
sync.setMaxTimeStamp(maxTimeStamp);
}
private JSONArray removeWithIds(JSONArray records, Set<String> idsToSkip, String idField) throws JSONException {
JSONArray arr = new JSONArray();
for (int i = 0; i < records.length(); i++) {
JSONObject record = records.getJSONObject(i);
// Keep ?
String id = JSONObjectHelper.optString(record, idField);
if (id == null || !idsToSkip.contains(id)) {
arr.put(record);
}
}
return arr;
}
/**
* Send request after adding user-agent header that says SmartSync
* @param restRequest
* @return
* @throws IOException
*/
public RestResponse sendSyncWithSmartSyncUserAgent(RestRequest restRequest) throws IOException {
SmartSyncLogger.d(TAG, "sendSyncWithSmartSyncUserAgent called with request: ", restRequest);
RestResponse restResponse = restClient.sendSync(restRequest, new HttpAccess.UserAgentInterceptor(SalesforceSDKManager.getInstance().getUserAgent(SMART_SYNC)));
return restResponse;
}
/**
* @return SmartStore used by this SyncManager
*/
public SmartStore getSmartStore() {
return smartStore;
}
/**
* Enum for action
*
*/
public enum Action {
create,
update,
delete
}
/**
* Exception thrown by smart sync manager
*
*/
public static class SmartSyncException extends RuntimeException {
public SmartSyncException(String message) {
super(message);
}
public SmartSyncException(Throwable e) {
super(e);
}
private static final long serialVersionUID = 1L;
}
/**
* Sets the rest client to be used.
*
* @param restClient
*/
public void setRestClient(RestClient restClient) {
this.restClient = restClient;
}
/**
* @return rest client in use
*/
public RestClient getRestClient() {
return this.restClient;
}
/**
     * Callback to get sync status updates
*/
public interface SyncUpdateCallback {
void onUpdate(SyncState sync);
}
/**
* Callback to get clean resync ghosts completion status
*/
public interface CleanResyncGhostsCallback {
/**
* Called when clean resync ghosts completes successfully
* @param numRecords Number of local ghosts found (and removed)
*/
void onSuccess(int numRecords);
/**
* Called when clean resync ghosts fails with an error
* @param e Error
*/
void onError(Exception e);
}
}
| 1 | 17,343 | The cleanup call | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -288,10 +288,11 @@ define(['apphost', 'globalize', 'connectionManager', 'itemHelper', 'appRouter',
icon: 'album'
});
}
-
- if (options.openArtist !== false && item.ArtistItems && item.ArtistItems.length) {
+ // Show Album Artist by default, as a song can have multiple artists, which specific one would this option refer to?
+ // Although some albums can have multiple artists, it's not as common as songs.
+ if (options.openArtist !== false && item.AlbumArtists && item.AlbumArtists.length) {
commands.push({
- name: globalize.translate('ViewArtist'),
+ name: globalize.translate('ViewAlbumArtist'),
id: 'artist',
icon: 'person'
}); | 1 | define(['apphost', 'globalize', 'connectionManager', 'itemHelper', 'appRouter', 'playbackManager', 'loading', 'appSettings', 'browser', 'actionsheet'], function (appHost, globalize, connectionManager, itemHelper, appRouter, playbackManager, loading, appSettings, browser, actionsheet) {
'use strict';
function getCommands(options) {
var item = options.item;
var user = options.user;
var canPlay = playbackManager.canPlay(item);
var restrictOptions = (browser.operaTv || browser.web0s) && !user.Policy.IsAdministrator;
var commands = [];
if (canPlay && item.MediaType !== 'Photo') {
if (options.play !== false) {
commands.push({
name: globalize.translate('Play'),
id: 'resume',
icon: 'play_arrow'
});
}
if (options.playAllFromHere && item.Type !== 'Program' && item.Type !== 'TvChannel') {
commands.push({
name: globalize.translate('PlayAllFromHere'),
id: 'playallfromhere',
icon: 'play_arrow'
});
}
}
if (playbackManager.canQueue(item)) {
if (options.queue !== false) {
commands.push({
name: globalize.translate('AddToPlayQueue'),
id: 'queue',
icon: 'playlist_add'
});
}
if (options.queue !== false) {
commands.push({
name: globalize.translate('PlayNext'),
id: 'queuenext',
icon: 'playlist_add'
});
}
//if (options.queueAllFromHere) {
// commands.push({
// name: globalize.translate("QueueAllFromHere"),
// id: "queueallfromhere"
// });
//}
}
if (item.IsFolder || item.Type === 'MusicArtist' || item.Type === 'MusicGenre') {
if (item.CollectionType !== 'livetv') {
if (options.shuffle !== false) {
commands.push({
name: globalize.translate('Shuffle'),
id: 'shuffle',
icon: 'shuffle'
});
}
}
}
if (item.MediaType === 'Audio' || item.Type === 'MusicAlbum' || item.Type === 'MusicArtist' || item.Type === 'MusicGenre') {
if (options.instantMix !== false && !itemHelper.isLocalItem(item)) {
commands.push({
name: globalize.translate('InstantMix'),
id: 'instantmix',
icon: 'explore'
});
}
}
if (commands.length) {
commands.push({
divider: true
});
}
if (!restrictOptions) {
if (itemHelper.supportsAddingToCollection(item)) {
commands.push({
name: globalize.translate('AddToCollection'),
id: 'addtocollection',
icon: 'playlist_add'
});
}
if (itemHelper.supportsAddingToPlaylist(item) && options.playlist !== false) {
commands.push({
name: globalize.translate('AddToPlaylist'),
id: 'addtoplaylist',
icon: 'playlist_add'
});
}
}
if ((item.Type === 'Timer') && user.Policy.EnableLiveTvManagement && options.cancelTimer !== false) {
commands.push({
name: globalize.translate('CancelRecording'),
id: 'canceltimer',
icon: 'cancel'
});
}
if ((item.Type === 'Recording' && item.Status === 'InProgress') && user.Policy.EnableLiveTvManagement && options.cancelTimer !== false) {
commands.push({
name: globalize.translate('CancelRecording'),
id: 'canceltimer',
icon: 'cancel'
});
}
if ((item.Type === 'SeriesTimer') && user.Policy.EnableLiveTvManagement && options.cancelTimer !== false) {
commands.push({
name: globalize.translate('CancelSeries'),
id: 'cancelseriestimer',
icon: 'cancel'
});
}
if (item.CanDelete && options.deleteItem !== false) {
if (item.Type === 'Playlist' || item.Type === 'BoxSet') {
commands.push({
name: globalize.translate('Delete'),
id: 'delete',
icon: 'delete'
});
} else {
commands.push({
name: globalize.translate('DeleteMedia'),
id: 'delete',
icon: 'delete'
});
}
}
        // Books are promoted to a major download button and are therefore excluded from the context menu
if ((item.CanDownload && appHost.supports('filedownload')) && item.Type !== 'Book') {
commands.push({
name: globalize.translate('Download'),
id: 'download',
icon: 'file_download'
});
commands.push({
name: globalize.translate('CopyStreamURL'),
id: 'copy-stream',
icon: 'content_copy'
});
}
if (commands.length) {
commands.push({
divider: true
});
}
var canEdit = itemHelper.canEdit(user, item);
if (canEdit) {
if (options.edit !== false && item.Type !== 'SeriesTimer') {
var text = (item.Type === 'Timer' || item.Type === 'SeriesTimer') ? globalize.translate('Edit') : globalize.translate('EditMetadata');
commands.push({
name: text,
id: 'edit',
icon: 'edit'
});
}
}
if (itemHelper.canEditImages(user, item)) {
if (options.editImages !== false) {
commands.push({
name: globalize.translate('EditImages'),
id: 'editimages',
icon: 'image'
});
}
}
if (canEdit) {
if (item.MediaType === 'Video' && item.Type !== 'TvChannel' && item.Type !== 'Program' && item.LocationType !== 'Virtual' && !(item.Type === 'Recording' && item.Status !== 'Completed')) {
if (options.editSubtitles !== false) {
commands.push({
name: globalize.translate('EditSubtitles'),
id: 'editsubtitles',
icon: 'closed_caption'
});
}
}
}
if (options.identify !== false) {
if (itemHelper.canIdentify(user, item)) {
commands.push({
name: globalize.translate('Identify'),
id: 'identify',
icon: 'edit'
});
}
}
if (item.MediaSources) {
if (options.moremediainfo !== false) {
commands.push({
name: globalize.translate('MoreMediaInfo'),
id: 'moremediainfo',
icon: 'info'
});
}
}
if (item.Type === 'Program' && options.record !== false) {
if (item.TimerId) {
commands.push({
name: globalize.translate('ManageRecording'),
id: 'record',
icon: 'fiber_manual_record'
});
}
}
if (item.Type === 'Program' && options.record !== false) {
if (!item.TimerId) {
commands.push({
name: globalize.translate('Record'),
id: 'record',
icon: 'fiber_manual_record'
});
}
}
if (itemHelper.canRefreshMetadata(item, user)) {
commands.push({
name: globalize.translate('RefreshMetadata'),
id: 'refresh',
icon: 'refresh'
});
}
if (item.PlaylistItemId && options.playlistId) {
commands.push({
name: globalize.translate('RemoveFromPlaylist'),
id: 'removefromplaylist',
icon: 'remove'
});
}
if (options.collectionId) {
commands.push({
name: globalize.translate('RemoveFromCollection'),
id: 'removefromcollection',
icon: 'remove'
});
}
if (!restrictOptions) {
if (options.share === true) {
if (itemHelper.canShare(item, user)) {
commands.push({
name: globalize.translate('Share'),
id: 'share',
icon: 'share'
});
}
}
}
if (options.sync !== false) {
if (itemHelper.canSync(user, item)) {
commands.push({
name: globalize.translate('Sync'),
id: 'sync',
icon: 'sync'
});
}
}
if (options.openAlbum !== false && item.AlbumId && item.MediaType !== 'Photo') {
commands.push({
name: globalize.translate('ViewAlbum'),
id: 'album',
icon: 'album'
});
}
if (options.openArtist !== false && item.ArtistItems && item.ArtistItems.length) {
commands.push({
name: globalize.translate('ViewArtist'),
id: 'artist',
icon: 'person'
});
}
return commands;
}
function getResolveFunction(resolve, id, changed, deleted) {
return function () {
resolve({
command: id,
updated: changed,
deleted: deleted
});
};
}
function executeCommand(item, id, options) {
var itemId = item.Id;
var serverId = item.ServerId;
var apiClient = connectionManager.getApiClient(serverId);
return new Promise(function (resolve, reject) {
switch (id) {
case 'addtocollection':
require(['collectionEditor'], function (collectionEditor) {
new collectionEditor().show({
items: [itemId],
serverId: serverId
}).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'addtoplaylist':
require(['playlistEditor'], function (playlistEditor) {
new playlistEditor().show({
items: [itemId],
serverId: serverId
}).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'download':
require(['fileDownloader'], function (fileDownloader) {
var downloadHref = apiClient.getItemDownloadUrl(itemId);
fileDownloader.download([{
url: downloadHref,
itemId: itemId,
serverId: serverId,
title: item.Name,
filename: item.Path.replace(/^.*[\\\/]/, '')
}]);
getResolveFunction(getResolveFunction(resolve, id), id)();
});
break;
case 'copy-stream':
var downloadHref = apiClient.getItemDownloadUrl(itemId);
var textAreaCopy = function () {
var textArea = document.createElement('textarea');
textArea.value = downloadHref;
document.body.appendChild(textArea);
textArea.focus();
textArea.select();
if (document.execCommand('copy')) {
require(['toast'], function (toast) {
toast(globalize.translate('CopyStreamURLSuccess'));
});
} else {
prompt(globalize.translate('CopyStreamURL'), downloadHref);
}
document.body.removeChild(textArea);
};
/* eslint-disable-next-line compat/compat */
if (navigator.clipboard === undefined) {
textAreaCopy();
} else {
/* eslint-disable-next-line compat/compat */
navigator.clipboard.writeText(downloadHref).then(function () {
require(['toast'], function (toast) {
toast(globalize.translate('CopyStreamURLSuccess'));
});
}).catch(function () {
textAreaCopy();
});
}
getResolveFunction(resolve, id)();
break;
case 'editsubtitles':
require(['subtitleEditor'], function (subtitleEditor) {
subtitleEditor.show(itemId, serverId).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'edit':
editItem(apiClient, item).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
break;
case 'editimages':
require(['imageEditor'], function (imageEditor) {
imageEditor.show({
itemId: itemId,
serverId: serverId
}).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'identify':
require(['itemIdentifier'], function (itemIdentifier) {
itemIdentifier.show(itemId, serverId).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'moremediainfo':
require(['itemMediaInfo'], function (itemMediaInfo) {
itemMediaInfo.show(itemId, serverId).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'refresh':
refresh(apiClient, item);
getResolveFunction(resolve, id)();
break;
case 'open':
appRouter.showItem(item);
getResolveFunction(resolve, id)();
break;
case 'play':
play(item, false);
getResolveFunction(resolve, id)();
break;
case 'resume':
play(item, true);
getResolveFunction(resolve, id)();
break;
case 'queue':
play(item, false, true);
getResolveFunction(resolve, id)();
break;
case 'queuenext':
play(item, false, true, true);
getResolveFunction(resolve, id)();
break;
case 'record':
require(['recordingCreator'], function (recordingCreator) {
recordingCreator.show(itemId, serverId).then(getResolveFunction(resolve, id, true), getResolveFunction(resolve, id));
});
break;
case 'shuffle':
playbackManager.shuffle(item);
getResolveFunction(resolve, id)();
break;
case 'instantmix':
playbackManager.instantMix(item);
getResolveFunction(resolve, id)();
break;
case 'delete':
deleteItem(apiClient, item).then(getResolveFunction(resolve, id, true, true), getResolveFunction(resolve, id));
break;
case 'share':
navigator.share({
title: item.Name,
text: item.Overview,
url: `${apiClient.serverAddress()}/web/index.html#!/${appRouter.getRouteUrl(item)}`
});
break;
case 'album':
appRouter.showItem(item.AlbumId, item.ServerId);
getResolveFunction(resolve, id)();
break;
case 'artist':
appRouter.showItem(item.ArtistItems[0].Id, item.ServerId);
getResolveFunction(resolve, id)();
break;
case 'playallfromhere':
getResolveFunction(resolve, id)();
break;
case 'queueallfromhere':
getResolveFunction(resolve, id)();
break;
case 'removefromplaylist':
apiClient.ajax({
url: apiClient.getUrl('Playlists/' + options.playlistId + '/Items', {
EntryIds: [item.PlaylistItemId].join(',')
}),
type: 'DELETE'
}).then(function () {
getResolveFunction(resolve, id, true)();
});
break;
case 'removefromcollection':
apiClient.ajax({
type: 'DELETE',
url: apiClient.getUrl('Collections/' + options.collectionId + '/Items', {
Ids: [item.Id].join(',')
})
}).then(function () {
getResolveFunction(resolve, id, true)();
});
break;
case 'canceltimer':
deleteTimer(apiClient, item, resolve, id);
break;
case 'cancelseriestimer':
deleteSeriesTimer(apiClient, item, resolve, id);
break;
default:
reject();
break;
}
});
}
function deleteTimer(apiClient, item, resolve, command) {
require(['recordingHelper'], function (recordingHelper) {
var timerId = item.TimerId || item.Id;
recordingHelper.cancelTimerWithConfirmation(timerId, item.ServerId).then(function () {
getResolveFunction(resolve, command, true)();
});
});
}
function deleteSeriesTimer(apiClient, item, resolve, command) {
require(['recordingHelper'], function (recordingHelper) {
recordingHelper.cancelSeriesTimerWithConfirmation(item.Id, item.ServerId).then(function () {
getResolveFunction(resolve, command, true)();
});
});
}
function play(item, resume, queue, queueNext) {
var method = queue ? (queueNext ? 'queueNext' : 'queue') : 'play';
var startPosition = 0;
if (resume && item.UserData && item.UserData.PlaybackPositionTicks) {
startPosition = item.UserData.PlaybackPositionTicks;
}
if (item.Type === 'Program') {
playbackManager[method]({
ids: [item.ChannelId],
startPositionTicks: startPosition,
serverId: item.ServerId
});
} else {
playbackManager[method]({
items: [item],
startPositionTicks: startPosition
});
}
}
function editItem(apiClient, item) {
return new Promise(function (resolve, reject) {
var serverId = apiClient.serverInfo().Id;
if (item.Type === 'Timer') {
require(['recordingEditor'], function (recordingEditor) {
recordingEditor.show(item.Id, serverId).then(resolve, reject);
});
} else if (item.Type === 'SeriesTimer') {
require(['seriesRecordingEditor'], function (recordingEditor) {
recordingEditor.show(item.Id, serverId).then(resolve, reject);
});
} else {
require(['metadataEditor'], function (metadataEditor) {
metadataEditor.show(item.Id, serverId).then(resolve, reject);
});
}
});
}
function deleteItem(apiClient, item) {
return new Promise(function (resolve, reject) {
require(['deleteHelper'], function (deleteHelper) {
deleteHelper.deleteItem({
item: item,
navigate: false
}).then(function () {
resolve(true);
}, reject);
});
});
}
function refresh(apiClient, item) {
require(['refreshDialog'], function (refreshDialog) {
new refreshDialog({
itemIds: [item.Id],
serverId: apiClient.serverInfo().Id,
mode: item.Type === 'CollectionFolder' ? 'scan' : null
}).show();
});
}
function show(options) {
var commands = getCommands(options);
if (!commands.length) {
return Promise.reject();
}
return actionsheet.show({
items: commands,
positionTo: options.positionTo,
resolveOnClick: ['share']
}).then(function (id) {
return executeCommand(options.item, id, options);
});
}
return {
getCommands: getCommands,
show: show
};
});
| 1 | 16,134 | I think "View artist" is a bit more standard and expected. Or even "Go to artist" to take the Spotify terminology as-is. | jellyfin-jellyfin-web | js |
@@ -196,6 +196,9 @@ module Bolt
# This works on deeply nested data structures composed of Hashes, Arrays, and
# and plain-old data types (int, string, etc).
def unwrap_sensitive_args(arguments)
+ # Skip this if Puppet isn't loaded
+ return arguments unless defined?(Puppet::Pops::Types::PSensitiveType::Sensitive)
+
case arguments
when Array
# iterate over the array, unwrapping all elements | 1 | # frozen_string_literal: true
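The guard added above relies on Ruby's `defined?` keyword, which returns nil for a constant that has never been loaded rather than raising NameError, so the helper can fall back to passing arguments through untouched when Bolt is used without Puppet. A minimal standalone sketch of the same pattern (the SafeUnwrap module name and sample value are illustrative, not part of Bolt):

module SafeUnwrap
  # Unwrap Puppet Sensitive values when Puppet is loaded; otherwise return
  # the value unchanged instead of raising NameError.
  def self.unwrap(value)
    return value unless defined?(Puppet::Pops::Types::PSensitiveType::Sensitive)

    value.is_a?(Puppet::Pops::Types::PSensitiveType::Sensitive) ? value.unwrap : value
  end
end

SafeUnwrap.unwrap('plain') # => "plain" even when Puppet has not been required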
require 'logging'
module Bolt
module Transport
# This class provides the default behavior for Transports. A Transport is
# responsible for uploading files and running commands, scripts, and tasks
# on Targets.
#
# Bolt executes work on the Transport in "batches". To do that, it calls
# the batches() method, which is responsible for dividing the list of
# Targets into batches according to how it wants to handle them. It will
# then call Transport#batch_task, or the corresponding method for another
# operation, passing a list of Targets. The Transport returns a list of
# Bolt::Result objects, one per Target. Each batch is executed on a
# separate thread, controlled by the `concurrency` setting, so many batches
# may be running in parallel.
#
# The default batch implementation splits the list of Targets into batches
# of 1. It then calls run_task(), or a corresponding method for other
# operations, passing in the single Target.
#
# Most Transport implementations, like the SSH and WinRM transports, don't
# need to do their own batching, since they only operate on a single Target
# at a time. Those Transports can implement the run_task() and related
# methods, which will automatically handle running many Targets in
# parallel, and will handle publishing start and finish events for each
# Target.
#
# Transports that need their own batching, like the Orch transport, can
# instead override the batches() method to split Targets into sets that can
# be executed together, and override the batch_task() and related methods
# to execute a batch of nodes. In that case, those Transports should accept
# a block argument and call it with a :node_start event for each Target
# before executing, and a :node_result event for each Target after
# execution.
class Base
STDIN_METHODS = %w[both stdin].freeze
ENVIRONMENT_METHODS = %w[both environment].freeze
attr_reader :logger
# Returns options this transport supports
def self.options
raise NotImplementedError, "self.options() must be implemented by the transport class"
end
def self.validate(_options)
raise NotImplementedError, "self.validate() must be implemented by the transport class"
end
def initialize
@logger = Logging.logger[self]
end
def with_events(target, callback)
callback&.call(type: :node_start, target: target)
result = begin
yield
rescue StandardError => ex
Bolt::Result.from_exception(target, ex)
end
callback&.call(type: :node_result, result: result)
result
end
def filter_options(target, options)
if target.options['run-as']
options.reject { |k, _v| k == '_run_as' }
else
options
end
end
# Transform a parameter map to an environment variable map, with parameter names prefixed
# with 'PT_' and values transformed to JSON unless they're strings.
def envify_params(params)
params.each_with_object({}) do |(k, v), h|
v = v.to_json unless v.is_a?(String)
h["PT_#{k}"] = v
end
end
# Raises an error if more than one target was given in the batch.
#
# The default implementations of batch_* strictly assume the transport is
# using the default batch size of 1. This method ensures that is the
# case and raises an error if it's not.
def assert_batch_size_one(method, targets)
if targets.length > 1
message = "#{self.class.name} must implement #{method} to support batches (got #{targets.length} nodes)"
raise NotImplementedError, message
end
end
# Runs the given task on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_task(targets, task, arguments, options = {}, &callback)
assert_batch_size_one("batch_task()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running task run '#{task}' on #{target.uri}" }
run_task(target, task, arguments, filter_options(target, options))
end
end
# Runs the given command on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_command(targets, command, options = {}, &callback)
assert_batch_size_one("batch_command()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug("Running command '#{command}' on #{target.uri}")
run_command(target, command, filter_options(target, options))
end
end
# Runs the given script on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_script(targets, script, arguments, options = {}, &callback)
assert_batch_size_one("batch_script()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running script '#{script}' on #{target.uri}" }
run_script(target, script, arguments, filter_options(target, options))
end
end
# Uploads the given source file to the destination location on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_upload(targets, source, destination, options = {}, &callback)
assert_batch_size_one("batch_upload()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Uploading: '#{source}' to #{destination} on #{target.uri}" }
upload(target, source, destination, filter_options(target, options))
end
end
# Split the given list of targets into a list of batches. The default
# implementation returns single-node batches.
#
# Transports may override this method, and the corresponding batch_*
# methods, to implement their own batch processing.
def batches(targets)
targets.map { |target| [target] }
end
def from_api?(task)
if task.respond_to? :file
unless task.file.nil?
return true
end
end
false
end
# Transports should override this method with their own implementation of running a command.
def run_command(*_args)
raise NotImplementedError, "run_command() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a script.
def run_script(*_args)
raise NotImplementedError, "run_script() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a task.
def run_task(*_args)
raise NotImplementedError, "run_task() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of file upload.
def upload(*_args)
raise NotImplementedError, "upload() must be implemented by the transport class"
end
# Unwraps any Sensitive data in an arguments Hash, so the plain-text is passed
# to the Task/Script.
#
# This works on deeply nested data structures composed of Hashes, Arrays,
# and plain-old data types (int, string, etc).
def unwrap_sensitive_args(arguments)
case arguments
when Array
# iterate over the array, unwrapping all elements
arguments.map { |x| unwrap_sensitive_args(x) }
when Hash
# iterate over the arguments hash and unwrap all keys and values
arguments.each_with_object({}) { |(k, v), h|
h[unwrap_sensitive_args(k)] = unwrap_sensitive_args(v)
}
when Puppet::Pops::Types::PSensitiveType::Sensitive
# this value is Sensitive, unwrap it
unwrap_sensitive_args(arguments.unwrap)
else
# unknown data type, just return it
arguments
end
end
end
end
end
| 1 | 9,440 | It might make sense to refactor this later so that we use a Bolt-native type to hide sensitive values. | puppetlabs-bolt | rb |
@@ -30,3 +30,7 @@ from extensive_tm_test_base import ExtensiveTemporalMemoryTest
class ExtensiveTemporalMemoryTestCPP(ExtensiveTemporalMemoryTest, unittest.TestCase):
def getTMClass(self):
return nupic.bindings.algorithms.TemporalMemory
+
+
+if __name__ == "__main__":
+ unittest.main() | 1 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import nupic.bindings.algorithms
from extensive_tm_test_base import ExtensiveTemporalMemoryTest
class ExtensiveTemporalMemoryTestCPP(ExtensiveTemporalMemoryTest, unittest.TestCase):
def getTMClass(self):
return nupic.bindings.algorithms.TemporalMemory
| 1 | 21,381 | We were trying to move away from this to force people to always run tests under py.test. | numenta-nupic | py |
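For reference, a minimal sketch of the py.test-style invocation the reviewer refers to. The file name below is hypothetical and pytest is assumed to be installed; with discovery handled by the runner (or by the bare CLI call), the __main__ block in the test module is not needed:

# run the module through py.test instead of executing it directly:
#   py.test extensive_temporal_memory_test_cpp.py -v
# or, if a programmatic entry point is still wanted:
import sys

import pytest

if __name__ == "__main__":
    # pytest.main() returns an exit code that sys.exit() can propagate
    sys.exit(pytest.main([__file__, "-v"]))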
@@ -933,6 +933,8 @@ from selenium.webdriver.common.keys import Keys
browser = self._check_platform()
+ body.append(self._get_selenium_options())
+
if browser == 'firefox':
body.extend(self._get_firefox_options() + self._get_firefox_profile() + [self._get_firefox_webdriver()])
| 1 | """
Copyright 2018 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ast
import math
import re
import string
from collections import OrderedDict
from urllib import parse
import astunparse
from bzt import TaurusConfigError, TaurusInternalException
from bzt.engine import Scenario
from bzt.requests_model import HTTPRequest, HierarchicRequestParser, TransactionBlock, \
SetVariables, IncludeScenarioBlock
from bzt.utils import iteritems, dehumanize_time, ensure_is_dict
from .ast_helpers import ast_attr, ast_call, gen_empty_line_stmt, gen_store, gen_subscript
from .jmeter_functions import JMeterExprCompiler
def normalize_class_name(text):
allowed_chars = "%s%s%s" % (string.digits, string.ascii_letters, '_')
split_separator = re.split(r'[\-_]', text)
return ''.join([capitalize_class_name(part, allowed_chars) for part in split_separator])
def capitalize_class_name(text, allowed_chars):
return filter_string(text, allowed_chars).capitalize()
def filter_string(text, allowed_chars):
return ''.join(c for c in text if c in allowed_chars)
def normalize_method_name(text):
allowed_chars = "%s%s%s" % (string.digits, string.ascii_letters, '- ')
return filter_string(text, allowed_chars).replace(' ', '_').replace('-', '_')
def create_class_name(label):
return 'TestAPI' if label.startswith('autogenerated') else 'Test%s' % normalize_class_name(label)
def create_method_name(label):
return 'test_requests' if label.startswith('autogenerated') else normalize_method_name(label)
class ApiritifScriptGenerator(object):
BYS = {
'xpath': "XPATH",
'css': "CSS_SELECTOR",
'name': "NAME",
'id': "ID",
'linktext': "LINK_TEXT"
}
TO_BYS = {
'byxpath': "xpath",
'bycss': "css",
'byname': "name",
'byid': "id",
'bylinktext': "linktext",
'byelement': "byelement",
'byshadow': "shadow"
}
ACTION_CHAINS = {
'doubleclick': "double_click",
'contextclick': "context_click",
'mousedown': "click_and_hold",
'mouseup': "release",
'mousemove': "move_to_element",
'mouseover': "move_to_element",
'mouseout': "move_to_element_with_offset"
}
ACTIONS = "|".join(['click', 'doubleClick', 'contextClick', 'mouseDown', 'mouseUp', 'mouseMove', 'mouseOut',
'mouseOver', 'select', 'wait', 'keys', 'pauseFor', 'clear', 'assert',
'assertText', 'assertValue', 'assertDialog', 'answerDialog', 'submit',
'close', 'script', 'editcontent',
'switch', 'switchFrame', 'go', 'echo', 'type', 'element', 'drag',
'storeText', 'storeValue', 'store', 'open', 'screenshot', 'rawCode',
'resize', 'maximize', 'alert', 'waitFor'
])
ACTIONS_WITH_WAITER = ['go', 'click', 'doubleclick', 'contextclick', 'drag', 'select', 'type', 'script']
EXECUTION_BLOCKS = "|".join(['if', 'loop', 'foreach'])
# Python AST docs: https://greentreesnakes.readthedocs.io/en/latest/
IMPORTS = """import os
import re
from %s import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
"""
BY_TAGS = ("byName", "byID", "byCSS", "byXPath", "byLinkText", "byElement", "byShadow")
COMMON_TAGS = ("Cookies", "Title", "Window", "Eval", "ByIdx", "String")
EXTENDED_LOG_TAG = ("logStart", "logEnd", "log")
ACCESS_TARGET = 'target'
ACCESS_PLAIN = 'plain'
SUPPORTED_BLOCKS = (HTTPRequest, TransactionBlock, SetVariables, IncludeScenarioBlock)
def __init__(self, scenario, label, wdlog=None, executor=None,
ignore_unknown_actions=False, generate_markers=None,
capabilities=None, wd_addr=None, test_mode="selenium", generate_external_logging=None):
self.scenario = scenario
self.selenium_extras = set()
self.data_sources = list(scenario.get_data_sources())
self.executor = executor
self.label = label
self.log = self.scenario.engine.log.getChild(self.__class__.__name__)
self.tree = None
self.verbose = False
self.expr_compiler = JMeterExprCompiler(parent_log=self.log)
self.service_methods = []
self.remote_address = wd_addr
self.capabilities = capabilities or {}
self.window_size = None
self.wdlog = wdlog
self.browser = None
self.appium = False
self.ignore_unknown_actions = ignore_unknown_actions
self.generate_markers = generate_markers
self.generate_external_logging = generate_external_logging
self.test_mode = test_mode
def _parse_action_params(self, expr, name):
res = expr.match(name)
if not res:
msg = "Unsupported action: %s" % name
if self.ignore_unknown_actions:
self.log.warning(msg)
return
else:
raise TaurusConfigError(msg)
atype = res.group(1).lower()
tag = res.group(2).lower() if res.group(2) else ""
selector = None
if len(res.groups()) > 3:
selector = res.group(4)
return atype, tag, selector
@staticmethod
def _trim_quotes(selector):
if selector.startswith('"') and selector.endswith('"'):
selector = selector[1:-1]
elif selector.startswith("'") and selector.endswith("'"):
selector = selector[1:-1]
return selector
def _parse_string_action(self, name, param):
tags = "|".join(self.BY_TAGS + self.COMMON_TAGS)
all_actions = self.ACTIONS + "|" + "|".join(self.EXTENDED_LOG_TAG) + "|" + self.EXECUTION_BLOCKS
expr = re.compile(r"^(%s)(%s)?(\(([\S\s]*)\))?$" % (all_actions, tags), re.IGNORECASE)
atype, tag, selector = self._parse_action_params(expr, name)
value = None
selectors = []
if selector:
selector = self._trim_quotes(selector)
else:
selector = ""
# Need to shuffle the variables so that both versions of the action syntax produce the
# same output; this is unfortunately cumbersome because the param/value can appear in
# different places:
# action_name(selector): param
# action_name(param)
# action_name(value): param, e.g. storeString(value): var_name
if selector:
if tag in self.TO_BYS.keys():
tag_name = self.TO_BYS[tag]
selectors = [{tag_name: selector}]
elif param is None:
param = selector
else:
value = selector
if atype == "drag":
# param should be e.g. elementByXPath(/xpath)
element_action = self._parse_action(param)
selectors = (selectors, element_action[4])
elif atype == "switchframe":
# for switchFrameByName we need to get the param
param = selector
elif atype == "waitfor":
value = param
args = selector.rsplit(",", 1)
if len(args) != 2:
raise TaurusConfigError("Incorrect amount of arguments (%s) for waitFor (2 expected)." % len(args))
param = args[1].strip()
selectors = [{self.TO_BYS[tag]: self._trim_quotes(args[0].strip())}]
elif atype in ['answerdialog', 'assertdialog']:
param, value = value, param
return atype, tag, param, value, selectors
def _parse_dict_action(self, action_config):
name = action_config["type"]
selectors = []
if action_config.get("locators"):
selectors = action_config.get("locators")
if action_config.get("element"):
selectors.extend(self._gen_selector_byelement(action_config))
if action_config.get("shadow"):
selectors = [{"shadow" : action_config.get("shadow")}]
if action_config.get("source") and action_config.get("target"):
source = action_config.get("source")
target = action_config.get("target")
if self._is_foreach_element(source):
source = self._gen_selector_byelement(source[0])
if self._is_foreach_element(target):
target = self._gen_selector_byelement(target[0])
selectors = (source, target)
param = action_config["param"]
value = action_config["value"]
tags = "|".join(self.COMMON_TAGS) + "|ByName" # ByName is needed in switchFrameByName
expr = re.compile("^(%s)(%s)?$" % (self.ACTIONS, tags), re.IGNORECASE)
action_params = self._parse_action_params(expr, name)
return action_params[0], action_params[1], param, value, selectors
@staticmethod
def _gen_selector_byelement(config):
return [{"byelement": config.get("element")}]
def _parse_action(self, action_config):
if isinstance(action_config, str):
name = action_config
param = None
elif isinstance(action_config, dict):
if action_config.get("type"):
return self._parse_dict_action(action_config)
block = self._get_execution_block(action_config)
if len(block) == 1:
name, param = (block[0], action_config.get(block[0]))
else:
name, param = next(iteritems(action_config))
else:
raise TaurusConfigError("Unsupported value for action: %s" % action_config)
return self._parse_string_action(name, param)
def _get_execution_block(self, action_config):
# get the list of execution blocks in this action if there are any or empty list
return list(set(action_config.keys()).intersection(self.EXECUTION_BLOCKS.split("|")))
@staticmethod
def _is_foreach_element(locators):
# action performed in foreach loop on element
return len(locators) == 1 and (locators[0].get("byelement") or locators[0].get("element"))
@staticmethod
def _is_shadow_locator(locators):
return len(locators) == 1 and locators[0].get("shadow")
def _gen_dynamic_locator(self, var_w_locator, locators):
if self._is_foreach_element(locators):
return ast.Name(id=locators[0].get("byelement"))
el = self._get_byelement(locators)
target = el if el else "self.driver"
method = "%s.find_element" % target
if self._is_shadow_locator(locators):
self.selenium_extras.add("find_element_by_shadow")
return ast_call(
func=ast_attr("find_element_by_shadow"),
args=[
ast.Str(locators[0].get("shadow"), kind="")
]
)
return ast_call(
func=ast_attr(method),
args=[
gen_subscript(var_w_locator, 0),
gen_subscript(var_w_locator, 1)
])
def _gen_ast_locators_dict(self, locators):
args = []
for loc in locators:
locator_type = list(loc.keys())[0]
locator_value = loc[locator_type]
args.append(ast.Dict([ast.Str(locator_type, kind="")], [self._gen_expr(locator_value)]))
return args
def _gen_loc_method_call(self, method, var_name, locators, parent_el=None):
args = [ast.List(elts=self._gen_ast_locators_dict(locators))]
if parent_el:
args.append(ast.Name(id=parent_el))
return ast.Assign(
targets=[ast.Name(id=var_name, ctx=ast.Store(), kind="")],
value=ast_call(func=method,
args=args))
def _gen_get_locator_call(self, var_name, locators):
# don't generate 'get_locator' for byElement action or shadow locator
if self._is_foreach_element(locators) or self._is_shadow_locator(locators):
return []
parent_el = self._get_byelement(locators)
locs = [l for l in locators if not l.get("byelement")] # remove the byelement locator from the list
return self._gen_loc_method_call("get_locator", var_name, locs, parent_el)
def _get_byelement(self, locators):
for loc in locators:
el = loc.get("byelement")
if el:
return el
return None
def _gen_get_elements_call(self, var_name, locators):
return self._gen_loc_method_call("get_elements", var_name, locators)
def _gen_locator(self, tag, selector):
return ast_call(
func=ast_attr("self.driver.find_element"),
args=[
ast_attr("By.%s" % self.BYS[tag]),
self._gen_expr(selector)])
def _gen_window_mngr(self, atype, param):
elements = []
if atype == "switch":
method = "switch_window"
self.selenium_extras.add(method)
elements.append(ast_call(
func=ast_attr(method),
args=[self._gen_expr(param)]))
elif atype == "resize":
if not re.compile(r"\d+,\d+").match(param):
if re.compile(r"\d+, \d+").match(param):
param = param.replace(', ', ',')
else:
return elements
x, y = param.split(",")
elements.append(ast_call(
func=ast_attr("self.driver.set_window_size"),
args=[self._gen_expr(x), self._gen_expr(y)]))
elif atype == "maximize":
args = []
elements.append(ast_call(
func=ast_attr("self.driver.maximize_window"),
args=args))
elif atype == "open":
method = "open_window"
self.selenium_extras.add(method)
elements.append(ast_call(
func=ast_attr(method),
args=[ast.Str(param, kind="")]
))
elif atype == "close":
method = "close_window"
self.selenium_extras.add(method)
args = []
if param:
args.append(self._gen_expr(param))
elements.append(ast_call(
func=ast_attr(method),
args=args))
return elements
def _gen_frame_mngr(self, tag, selector):
method = "switch_frame"
self.selenium_extras.add(method)
elements = [] # todo: byid/byidx disambiguation?
if tag == "byidx" or selector.startswith("index=") or selector in ["relative=top", "relative=parent"]:
if tag == "byidx":
selector = "index=%s" % selector
elements.append(ast_call(
func=ast_attr(method),
args=[ast.Str(selector, kind="")]))
else:
if not tag:
tag = "name" # if tag is not present default it to name
elif tag.startswith('by'):
tag = tag[2:] # remove the 'by' prefix
elements.append(ast_call(
func=ast_attr(method),
args=[self._gen_locator(tag, selector)]))
return elements
def _gen_chain_mngr(self, atype, selectors):
elements = []
if atype in self.ACTION_CHAINS:
elements.append(self._gen_get_locator_call("var_loc_chain", selectors))
locator = self._gen_dynamic_locator("var_loc_chain", selectors)
operator = ast_attr(fields=(
ast_call(func="ActionChains", args=[ast_attr("self.driver")]),
self.ACTION_CHAINS[atype.lower()]))
args = [locator, ast.Num(-10, kind=""), ast.Num(-10, kind="")] if atype == "mouseout" else [locator]
elements.append(ast_call(
func=ast_attr(
fields=(
ast_call(
func=operator,
args=args),
"perform"))))
elif atype == "drag":
if not selectors or not selectors[0]:
raise TaurusConfigError("Can not generate action for 'drag'. Source is empty.")
if not selectors[1]:
raise TaurusConfigError("Can not generate action for 'drag'. Target is empty.")
source = selectors[0]
target = selectors[1]
elements = [self._gen_get_locator_call("source", source),
self._gen_get_locator_call("target", target)]
operator = ast_attr(
fields=(
ast_call(
func="ActionChains",
args=[ast_attr("self.driver")]),
"drag_and_drop"))
elements.append(ast_call(
func=ast_attr(
fields=(
ast_call(
func=operator,
args=[self._gen_dynamic_locator("source", source),
self._gen_dynamic_locator("target", target)]),
"perform"))))
return elements
def _gen_assert_store_mngr(self, atype, tag, name, value, selectors):
elements = []
if not name:
raise TaurusConfigError("Missing param for %s action." % atype)
if tag == 'title':
if atype.startswith('assert'):
elements.append(ast_call(
func=ast_attr("self.assertEqual"),
args=[ast_attr("self.driver.title"), self._gen_expr(name)]))
else:
elements.append(gen_store(
name=name.strip(),
value=self._gen_expr(ast_attr("self.driver.title"))))
elif atype == 'store' and tag == 'string':
elements.append(gen_store(
name=name.strip(),
value=self._gen_expr(value.strip())))
elif atype == 'assert' and tag == 'eval':
elements.append(ast_call(
func=ast_attr("self.assertTrue"),
args=[self._gen_eval_js_expression(name), ast.Str(name, kind="")]))
elif atype == 'store' and tag == 'eval':
elements.append(
gen_store(
name=name.strip(),
value=self._gen_eval_js_expression(value))
)
else:
target = None
if atype in ["asserttext", "storetext"]:
target = "innerText"
elif atype in ["assertvalue", "storevalue"]:
target = "value"
if target:
elements.append(self._gen_get_locator_call("var_loc_as", selectors))
locator_attr = ast_call(
func=ast_attr(
fields=(
self._gen_dynamic_locator("var_loc_as", selectors),
"get_attribute")),
args=[ast.Str(target, kind="")])
if atype.startswith("assert"):
elements.append(ast_call(
func=ast_attr(fields="self.assertEqual"),
args=[
ast_call(
func=ast_attr(
fields=(
self._gen_expr(locator_attr),
"strip"))),
ast_call(
func=ast_attr(
fields=(
self._gen_expr(name),
"strip")))]))
elif atype.startswith('store'):
elements.append(gen_store(
name=name.strip(),
value=self._gen_expr(locator_attr)))
return elements
def _gen_keys_mngr(self, atype, param, selectors):
elements = []
args = []
action = None
elements.append(self._gen_get_locator_call("var_loc_keys", selectors))
if atype == "click":
action = "click"
elif atype == "submit":
action = "submit"
elif atype in ["keys", "type"]:
if atype == "type":
elements.append(ast_call(
func=ast_attr(
fields=(
self._gen_dynamic_locator("var_loc_keys", selectors),
"clear"))))
action = "send_keys"
if isinstance(param, str) and param.startswith("KEY_"):
args = [ast_attr("Keys.%s" % param.split("KEY_")[1])]
else:
args = [self._gen_expr(str(param))]
if action:
elements.append(ast_call(
func=ast_attr(
fields=(
self._gen_dynamic_locator("var_loc_keys", selectors),
action)),
args=args))
return elements
def _gen_edit_mngr(self, param, locators):
if not param:
raise TaurusConfigError("Missing param for editContent action.")
var_name = "var_edit_content"
elements = [self._gen_get_locator_call(var_name, locators)]
locator = self._gen_dynamic_locator(var_name, locators)
tag = gen_subscript(var_name, 0)
selector = gen_subscript(var_name, 1)
if self._is_foreach_element(locators):
el = locators[0].get("byelement")
exc_msg = "The element '%s' (tag name: '%s', text: '%s') is not a contenteditable element"
exc_args = [ast.Str(el, kind=""), ast_attr(el + ".tag_name"), ast_attr(el + ".text")]
elif self._is_shadow_locator(locators):
el = locators[0].get("shadow")
exc_msg = "The element (shadow: '%s') is not a contenteditable element"
exc_args = [ast.Str(el, kind="")]
else:
exc_msg = "The element (%s: %r) is not a contenteditable element"
exc_args = [tag, selector]
exc_type = ast_call(
func="NoSuchElementException",
args=[
ast.BinOp(
left=ast.Str(exc_msg, kind=""),
op=ast.Mod(),
right=ast.Tuple(elts=exc_args))
]
)
raise_kwargs = {
"exc": exc_type,
"cause": None}
body = ast.Expr(ast_call(func=ast_attr("self.driver.execute_script"),
args=[
ast.BinOp(
left=ast.Str("arguments[0].innerHTML = '%s';", kind=""),
op=ast.Mod(),
right=self._gen_expr(param.strip())),
locator]))
element = ast.If(
test=ast_call(
func=ast_attr(
fields=(locator, "get_attribute")),
args=[ast.Str("contenteditable", kind="")]),
body=[body],
orelse=[ast.Raise(**raise_kwargs)])
elements.append(element)
return elements
def _gen_screenshot_mngr(self, param):
elements = []
if param:
elements.append(ast_call(
func=ast_attr("self.driver.save_screenshot"),
args=[self._gen_expr(param)]))
else:
elements.append(ast.Assign(
targets=[ast.Name(id="filename")],
value=ast_call(
func=ast_attr("os.path.join"),
args=[
ast_call(
func=ast_attr("os.getenv"),
args=[ast.Str('TAURUS_ARTIFACTS_DIR', kind="")]),
ast.BinOp(
left=ast.Str('screenshot-%d.png', kind=""),
op=ast.Mod(),
right=ast.BinOp(
left=ast_call(func="time"),
op=ast.Mult(),
right=ast.Num(1000, kind="")))])))
elements.append(ast_call(
func=ast_attr("self.driver.save_screenshot"),
args=[ast.Name(id="filename")]))
return elements
def _gen_alert(self, param):
elements = []
switch, args = "self.driver.switch_to.alert.", []
if param == "OK":
elements.append(ast_call(
func=ast_attr(switch + "accept"),
args=args))
elif param == "Dismiss":
elements.append(ast_call(
func=ast_attr(switch + "dismiss"),
args=args))
return elements
def _gen_sleep_mngr(self, param):
elements = [ast_call(
func="sleep",
args=[ast.Num(dehumanize_time(param), kind="")])]
return elements
def _gen_select_mngr(self, param, selectors):
elements = [self._gen_get_locator_call("var_loc_select", selectors), ast_call(
func=ast_attr(
fields=(
ast_call(func="Select", args=[self._gen_dynamic_locator("var_loc_select", selectors)]),
"select_by_visible_text")),
args=[self._gen_expr(param)])]
return elements
def _gen_action(self, action_config):
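"""Translate one parsed action config into the AST statements that implement it, dispatching on the action type."""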
action = self._parse_action(action_config)
if action:
atype, tag, param, value, selectors = action
else:
atype = tag = param = value = selectors = None
action_elements = []
if atype == "log":
action_elements.append(
ast_call(func=ast_attr("apiritif.external_log"), args=[self._gen_expr(param.strip())]))
elif tag == "window":
action_elements.extend(self._gen_window_mngr(atype, param))
elif atype == "switchframe":
action_elements.extend(self._gen_frame_mngr(tag, param))
elif atype in self.ACTION_CHAINS or atype == "drag":
action_elements.extend(self._gen_chain_mngr(atype, selectors))
elif atype == "select":
action_elements.extend(self._gen_select_mngr(param, selectors))
elif atype == 'assertdialog':
action_elements.extend(self._gen_assert_dialog(param, value))
elif atype == 'answerdialog':
action_elements.extend(self._gen_answer_dialog(param, value))
elif atype is not None and (atype.startswith("assert") or atype.startswith("store")):
action_elements.extend(self._gen_assert_store_mngr(atype, tag, param, value, selectors))
elif atype in ("click", "type", "keys", "submit"):
action_elements.extend(self._gen_keys_mngr(atype, param, selectors))
elif atype == 'echo' and tag == 'string':
if len(param) > 0 and not selectors:
action_elements.append(ast_call(
func="print",
args=[self._gen_expr(param.strip())]))
elif atype == "script" and tag == "eval":
escaped_param = self._escape_js_blocks(param)
action_elements.append(ast_call(func=ast_attr("self.driver.execute_script"),
args=[self._gen_expr(escaped_param)]))
elif atype == "rawcode":
action_elements.append(ast.parse(param))
elif atype == 'go':
if param:
action_elements.append(ast_call(func=ast_attr("self.driver.get"),
args=[self._gen_expr(param.strip())]))
action_elements.append(self._gen_replace_dialogs())
elif atype == "editcontent":
action_elements.extend(self._gen_edit_mngr(param, selectors))
elif atype.startswith('wait'):
action_elements.extend(self._gen_wait_for(atype, param, value, selectors))
elif atype == 'pausefor':
action_elements.extend(self._gen_sleep_mngr(param))
elif atype == 'clear' and tag == 'cookies':
action_elements.append(ast_call(
func=ast_attr("self.driver.delete_all_cookies")))
elif atype == 'screenshot':
action_elements.extend(self._gen_screenshot_mngr(param))
elif atype == 'alert':
action_elements.extend(self._gen_alert(param))
elif atype == 'if':
action_elements.append(self._gen_condition_mngr(param, action_config))
elif atype == 'loop':
action_elements.append(self._gen_loop_mngr(action_config))
elif atype == 'foreach':
action_elements.append(self._gen_foreach_mngr(action_config))
if not action_elements and not self.ignore_unknown_actions:
raise TaurusInternalException("Could not build code for action: %s" % action_config)
if atype.lower() in self.ACTIONS_WITH_WAITER:
action_elements.append(ast_call(func=ast_attr("waiter"), args=[]))
return [ast.Expr(element) for element in action_elements]
def _gen_foreach_mngr(self, action_config):
self.selenium_extras.add("get_elements")
exc = TaurusConfigError("Foreach loop must contain locators and do")
elements = []
locators = action_config.get('locators', exc)
body = []
for action in action_config.get('do', exc):
body = body + self._gen_action(action)
body_list = []
# filter out empty AST expressions that cause empty lines in the generated code
for item in body:
if isinstance(item.value, list):
if len(item.value) > 0:
body_list.append(item)
else:
body_list.append(item)
elements.append(self._gen_get_elements_call("elements", locators))
elements.append(
ast.For(target=ast.Name(id=action_config.get('foreach'), ctx=ast.Store()), iter=ast.Name(id="elements"),
body=body_list,
orelse=[]))
return elements
def _gen_wait_for(self, atype, param, value, selectors):
self.selenium_extras.add("wait_for")
supported_conds = ["present", "visible", "clickable", "notpresent", "notvisible", "notclickable"]
if not atype.endswith("for"):
self.log.warning("Wait command is deprecated and will be removed soon. Use waitFor instead.")
exc = TaurusConfigError("wait action requires timeout in scenario: \n%s" % self.scenario)
timeout = dehumanize_time(self.scenario.get("timeout", exc))
if not param:
param = "present"
else:
if not value:
value = 10 # if timeout value is not present set it by default to 10s
timeout = dehumanize_time(value)
if param.lower() not in supported_conds:
raise TaurusConfigError("Invalid condition in %s: '%s'. Supported conditions are: %s." %
(atype, param, ", ".join(supported_conds)))
return [ast_call(func="wait_for",
args=[ast.Str(param, kind=""),
ast.List(elts=self._gen_ast_locators_dict(selectors)),
ast.Num(timeout, kind="")])]
def _gen_answer_dialog(self, type, value):
if type not in ['alert', 'prompt', 'confirm']:
raise TaurusConfigError("answerDialog type must be one of the following: 'alert', 'prompt' or 'confirm'")
if type == 'confirm' and str(value).lower() not in ['#ok', '#cancel']:
raise TaurusConfigError("answerDialog of type confirm must have value either '#Ok' or '#Cancel'")
if type == 'alert' and str(value).lower() != '#ok':
raise TaurusConfigError("answerDialog of type alert must have value '#Ok'")
dlg_method = "dialogs_answer_on_next_%s" % type
self.selenium_extras.add(dlg_method)
return [ast_call(func=ast_attr(dlg_method), args=[ast.Str(value, kind="")])]
def _gen_assert_dialog(self, type, value):
if type not in ['alert', 'prompt', 'confirm']:
raise TaurusConfigError("assertDialog type must be one of the following: 'alert', 'prompt' or 'confirm'")
elements = []
dlg_method = "dialogs_get_next_%s" % type
self.selenium_extras.add(dlg_method)
elements.append(ast.Assign(targets=[ast.Name(id='dialog', ctx=ast.Store())],
value=ast_call(
func=ast_attr(dlg_method))))
elements.append(ast_call(
func=ast_attr("self.assertIsNotNone"),
args=[ast.Name(id='dialog'), ast.Str("No dialog of type %s appeared" % type, kind="")]))
elements.append(ast_call(
func=ast_attr("self.assertEqual"),
args=[ast.Name(id='dialog'), ast.Str(value, kind=""), ast.Str("Dialog message didn't match", kind="")]))
return elements
def _gen_replace_dialogs(self):
"""
Generates the call to DialogsManager to replace dialogs
"""
method = "dialogs_replace"
self.selenium_extras.add(method)
return [
gen_empty_line_stmt(),
ast_call(
func=ast_attr(method))
]
@staticmethod
def _convert_to_number(arg):
if isinstance(arg, str) and arg.isdigit():
return int(arg)
return arg
def _gen_loop_mngr(self, action_config):
extra_method = "get_loop_range"
self.selenium_extras.add(extra_method)
exc = TaurusConfigError("Loop must contain start, end and do")
start = self._convert_to_number(action_config.get('start', exc))
end = self._convert_to_number(action_config.get('end', exc))
step = self._convert_to_number(action_config.get('step')) or 1
elements = []
body = [
ast.Assign(
targets=[self._gen_expr("${%s}" % action_config['loop'])],
value=ast_call(func=ast_attr("str"), args=[ast.Name(id=action_config['loop'])]))
]
actions = action_config.get('do', exc)
if len(actions) == 0:
raise exc
for action in actions:
body.append(self._gen_action(action))
range_args = [self.expr_compiler.gen_expr(start),
self.expr_compiler.gen_expr(end),
self.expr_compiler.gen_expr(step)]
elements.append(
ast.For(target=ast.Name(id=action_config.get('loop'),
ctx=ast.Store()),
iter=ast_call(func=ast_attr(extra_method),
args=range_args),
body=body,
orelse=[]))
return elements
def _gen_eval_js_expression(self, js_expr):
return ast_call(func=ast_attr("self.driver.execute_script"), args=[self._gen_expr("return %s;" % js_expr)])
def _gen_condition_mngr(self, param, action_config):
if not action_config.get('then'):
raise TaurusConfigError("Missing then branch in if statement")
test = ast.Assign(targets=[ast.Name(id='test', ctx=ast.Store())],
value=self._gen_eval_js_expression(param))
body = []
for action in action_config.get('then'):
body.append(self._gen_action(action))
orelse = []
if action_config.get('else'):
for action in action_config.get('else'):
orelse.append(self._gen_action(action))
return [test,
[ast.If(
test=[ast.Name(id='test')],
body=body,
orelse=orelse)]]
def _check_platform(self):
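"""Work out the effective browser name, handling 'browser-platform' pairs, remote WebDriver addresses and Appium (mobile) setups."""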
mobile_browsers = ["chrome", "safari"]
mobile_platforms = ["android", "ios"]
browser = self.capabilities.get("browserName", "")
browser = self.scenario.get("browser", browser)
browser = browser.lower() # todo: should we take the browser name as-is (without lowercasing)?
local_browsers = ["firefox", "chrome", "ie", "opera"] + mobile_browsers
browser_platform = None
if browser:
browser_split = browser.split("-")
browser = browser_split[0]
if len(browser_split) > 1:
browser_platform = browser_split[1]
if self.remote_address:
if browser and browser != "remote":
msg = "Forcing browser to Remote, because of remote WebDriver address, use '%s' as browserName"
self.log.warning(msg % browser)
self.capabilities["browserName"] = browser
browser = "remote"
if self.generate_markers is None: # if not set by user - set to true
self.generate_markers = True
elif browser in mobile_browsers and browser_platform in mobile_platforms:
self.appium = True
self.remote_address = "http://localhost:4723/wd/hub"
self.capabilities["platformName"] = browser_platform
self.capabilities["browserName"] = browser
browser = "remote" # Force to use remote web driver
elif not browser:
browser = "firefox"
elif browser not in local_browsers: # browser isn't supported
raise TaurusConfigError("Unsupported browser name: %s" % browser)
return browser
def _get_scenario_timeout(self):
return dehumanize_time(self.scenario.get("timeout", "30s"))
def _gen_webdriver(self):
self.log.debug("Generating setUp test method")
body = [ast.Assign(targets=[ast_attr("self.driver")], value=ast_attr("None"))]
browser = self._check_platform()
if browser == 'firefox':
body.extend(self._get_firefox_options() + self._get_firefox_profile() + [self._get_firefox_webdriver()])
elif browser == 'chrome':
body.extend(self._get_chrome_options() + [self._get_chrome_webdriver()])
elif browser == 'remote':
if 'firefox' == self.capabilities.get('browserName'):
body.append(self._get_firefox_options())
else:
empty_options = ast.Assign(targets=[ast_attr("options")], value=ast_attr("None"))
body.append(empty_options)
body.append(self._get_remote_webdriver())
else:
body.append(ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.%s" % browser)))) # todo bring 'browser' to correct case
body.append(self._get_timeout())
body.extend(self._get_extra_mngrs())
return body
def _get_timeout(self):
return ast.Expr(
ast_call(
func=ast_attr("self.driver.implicitly_wait"),
args=[ast_attr("timeout")]))
def _get_extra_mngrs(self):
mngrs = []
mgr = "WindowManager"
if mgr in self.selenium_extras:
mngrs.append(ast.Assign(
targets=[ast_attr("self.wnd_mng")],
value=ast_call(
func=ast.Name(id=mgr))))
mgr = "FrameManager"
if mgr in self.selenium_extras:
mngrs.append(ast.Assign(
targets=[ast_attr("self.frm_mng")],
value=ast_call(
func=ast.Name(id=mgr))))
return mngrs
def _get_headless_setup(self):
if self.scenario.get("headless", False):
self.log.info("Headless mode works only with Selenium 3.8.0+, be sure to have it installed")
return [ast.Expr(
ast_call(func=ast_attr("options.set_headless")))]
else:
return []
def _get_firefox_options(self):
firefox_options = [
ast.Assign(
targets=[ast.Name(id="options")],
value=ast_call(
func=ast_attr("webdriver.FirefoxOptions")))]
return firefox_options + self._get_headless_setup()
def _get_chrome_options(self):
chrome_options = [
ast.Assign(
targets=[ast.Name(id="options")],
value=ast_call(
func=ast_attr("webdriver.ChromeOptions"))),
ast.Expr(
ast_call(
func=ast_attr("options.add_argument"),
args=[ast.Str("%s" % "--no-sandbox", kind="")])),
ast.Expr(
ast_call(
func=ast_attr("options.add_argument"),
args=[ast.Str("%s" % "--disable-dev-shm-usage", kind="")])),
ast.Expr(
ast_call(
func=ast_attr("options.set_capability"),
args=[ast.Str("unhandledPromptBehavior", kind=""), ast.Str("ignore", kind="")]))]
return chrome_options + self._get_headless_setup()
def _get_firefox_profile(self):
return [
ast.Assign(
targets=[ast.Name(id="profile")],
value=ast_call(func=ast_attr("webdriver.FirefoxProfile"))),
ast.Expr(ast_call(
func=ast_attr("profile.set_preference"),
args=[ast.Str("webdriver.log.file", kind=""), ast.Str(self.wdlog, kind="")])),
ast.Expr(
ast_call(
func=ast_attr("options.set_capability"),
args=[ast.Str("unhandledPromptBehavior", kind=""), ast.Str("ignore", kind="")]))]
def _get_firefox_webdriver(self):
return ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Firefox"),
args=[ast.Name(id="profile")],
keywords=[ast.keyword(
arg="options",
value=ast.Name(id="options"))]))
def _get_chrome_webdriver(self):
return ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Chrome"),
keywords=[
ast.keyword(
arg="service_log_path",
value=ast.Str(self.wdlog, kind="")),
ast.keyword(
arg="options",
value=ast.Name(id="options"))]))
def _get_remote_webdriver(self):
keys = sorted(self.capabilities.keys())
values = [self.capabilities[key] for key in keys]
return ast.Assign(
targets=[ast_attr("self.driver")],
value=ast_call(
func=ast_attr("webdriver.Remote"),
keywords=[
ast.keyword(
arg="command_executor",
value=ast.Str(self.remote_address, kind="")),
ast.keyword(
arg="desired_capabilities",
value=ast.Dict(
keys=[ast.Str(key, kind="") for key in keys],
values=[ast.Str(value, kind="") for value in values])),
ast.keyword(
arg="options",
value=ast.Name(id="options"))]))
@staticmethod
def _gen_impl_wait(timeout):
return ast.Expr(
ast_call(
func=ast_attr("self.driver.implicitly_wait"),
args=[ast.Num(dehumanize_time(timeout), kind="")]))
def _gen_module(self):
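"""Assemble the top-level module AST; imports are prepended last because their contents depend on what class generation recorded."""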
stmts = []
if self.verbose:
stmts.extend(self._gen_logging())
stmts.extend(self._gen_data_source_readers())
stmts.append(self._gen_classdef())
stmts = self._gen_imports() + stmts # todo: order is important (with classdef) because of self.appium setup
return ast.Module(body=stmts)
def _gen_imports(self):
imports = [
ast.Import(names=[ast.alias(name='logging', asname=None)]),
ast.Import(names=[ast.alias(name='random', asname=None)]),
ast.Import(names=[ast.alias(name='string', asname=None)]),
ast.Import(names=[ast.alias(name='sys', asname=None)]),
ast.Import(names=[ast.alias(name='unittest', asname=None)]),
ast.ImportFrom(
module="time",
names=[
ast.alias(name="time", asname=None),
ast.alias(name="sleep", asname=None)],
level=0),
gen_empty_line_stmt(),
ast.Import(names=[ast.alias(name='apiritif', asname=None)]), # or "from apiritif import http, utils"?
gen_empty_line_stmt()]
if self.test_mode == "selenium":
if self.appium:
source = "appium"
else:
source = "selenium"
imports.append(ast.parse(self.IMPORTS % source).body)
self.selenium_extras.add("get_locator")
self.selenium_extras.add("waiter")
extra_names = [ast.alias(name=name, asname=None) for name in self.selenium_extras]
imports.append(
ast.ImportFrom(
module="bzt.resources.selenium_extras",
names=extra_names,
level=0))
return imports
def _gen_data_source_readers(self):
readers = []
for idx, source in enumerate(self.data_sources, start=1):
keywords = []
if "fieldnames" in source:
fieldnames = ast.keyword()
fieldnames.arg = "fieldnames"
str_names = source.get("fieldnames").split(",")
fieldnames.value = ast.List(elts=[ast.Str(s=fname, kind="") for fname in str_names])
keywords.append(fieldnames)
if "loop" in source:
loop = ast.keyword()
loop.arg = "loop"
loop.value = ast.Name(id=source.get("loop"))
keywords.append(loop)
if "quoted" in source:
quoted = ast.keyword()
quoted.arg = "quoted"
quoted.value = ast.Name(id=source.get("quoted"))
keywords.append(quoted)
if "delimiter" in source:
delimiter = ast.keyword()
delimiter.arg = "delimiter"
delimiter.value = ast.Str(s=source.get("delimiter"), kind="")
keywords.append(delimiter)
if "encoding" in source:
encoding = ast.keyword()
encoding.arg = "encoding"
encoding.value = ast.Str(s=source.get("encoding"), kind="")
keywords.append(encoding)
csv_file = self.scenario.engine.find_file(source["path"])
reader = ast.Assign(
targets=[ast.Name(id="reader_%s" % idx)],
value=ast_call(
func=ast_attr("apiritif.CSVReaderPerThread"),
args=[ast.Str(s=csv_file, kind="")],
keywords=keywords))
readers.append(reader)
if readers:
readers.append(gen_empty_line_stmt())
return readers
def _gen_classdef(self):
class_body = [self._gen_test_methods()]
class_body = [self._gen_class_setup()] + class_body # order is important for selenium_extras set
if self.test_mode == "selenium":
class_body.append(self._gen_class_teardown())
return ast.ClassDef(
name=create_class_name(self.label),
bases=[ast_attr("unittest.TestCase")],
body=class_body,
keywords=[],
starargs=None,
kwargs=None,
decorator_list=[])
def _gen_class_setup(self):
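"""Generate setUp(): default vars, data source readers, HTTP target or webdriver init, optional handlers and the apiritif thread-store call."""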
data_sources = [self._gen_default_vars()]
for idx in range(len(self.data_sources)):
data_sources.append(ast.Expr(ast_call(func=ast_attr("reader_%s.read_vars" % (idx + 1)))))
for idx in range(len(self.data_sources)):
extend_vars = ast_call(
func=ast_attr("self.vars.update"),
args=[ast_call(
func=ast_attr("reader_%s.get_vars" % (idx + 1)))])
data_sources.append(ast.Expr(extend_vars))
if self.test_mode == "apiritif":
target_init = self._gen_api_target()
else:
target_init = self._gen_webdriver()
handlers = []
if self.generate_markers:
func_name = "add_flow_markers"
self.selenium_extras.add(func_name)
handlers.append(ast.Expr(ast_call(func=func_name)))
if self.generate_external_logging:
self.selenium_extras.add("add_logging_handlers")
handlers.append(ast.Expr(ast_call(func="add_logging_handlers")))
stored_vars = {
"timeout": "timeout",
"func_mode": str(self.executor.engine.is_functional_mode())}
if target_init:
if self.test_mode == "selenium":
stored_vars["driver"] = "self.driver"
stored_vars["windows"] = "{}"
has_ds = bool(list(self.scenario.get_data_sources()))
stored_vars['scenario_name'] = [ast.Str(self.label, kind="")]
if has_ds:
stored_vars['data_sources'] = str(has_ds)
store_call = ast_call(
func=ast_attr("apiritif.put_into_thread_store"),
keywords=[ast.keyword(arg=key, value=ast_attr(stored_vars[key])) for key in stored_vars],
args=[])
store_block = [ast.Expr(store_call)]
timeout_setup = [ast.Expr(ast.Assign(
targets=[ast_attr("timeout")],
value=ast.Num(self._get_scenario_timeout(), kind="")))]
setup = ast.FunctionDef(
name="setUp",
args=[ast_attr("self")],
body=data_sources + timeout_setup + target_init + handlers + store_block,
decorator_list=[])
return [setup, gen_empty_line_stmt()]
def _gen_class_teardown(self):
body = [
ast.If(
test=ast_attr("self.driver"),
body=ast.Expr(ast_call(func=ast_attr("self.driver.quit"))), orelse=[])]
return ast.FunctionDef(name="tearDown", args=[ast_attr("self")], body=body, decorator_list=[])
def _gen_test_methods(self):
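"""Create one numbered helper method per supported block plus a master test method that calls them in order."""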
methods = []
slave_methods_names = []
requests = self.scenario.get_requests(parser=HierarchicRequestParser, require_url=False)
number_of_digits = int(math.log10(len(requests))) + 1
for index, request in enumerate(requests, start=1):
if not isinstance(request, self.SUPPORTED_BLOCKS):
msg = "Apiritif script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, request.NAME)
continue
# convert top-level http request to transaction
if isinstance(request, HTTPRequest):
request = TransactionBlock(
name=request.label,
requests=[request],
include_timers=[],
config=request.config,
scenario=request.scenario)
if isinstance(request, TransactionBlock):
body = [self._gen_transaction(request)]
label = create_method_name(request.label[:40])
elif isinstance(request, IncludeScenarioBlock):
body = [self._gen_transaction(request)]
label = create_method_name(request.scenario_name)
elif isinstance(request, SetVariables):
body = self._gen_set_vars(request)
label = request.config.get("label", "set_variables")
else:
return
counter = str(index).zfill(number_of_digits)
method_name = '_' + counter + '_' + label
if isinstance(request, SetVariables):
self.service_methods.append(label) # for sample excluding
methods.append(self._gen_test_method(method_name, body))
slave_methods_names.append(method_name)
methods.append(self._gen_master_test_method(slave_methods_names))
return methods
def _gen_set_vars(self, request):
res = []
for name in sorted(request.mapping.keys()):
res.append(ast.Assign(
targets=[self._gen_expr("${%s}" % name)],
value=ast.Str(s="%s" % request.mapping[name], kind="")))
return res
def _gen_master_test_method(self, slave_method_names):
if not slave_method_names:
raise TaurusConfigError("Supported trasactions not found, test is empty")
body = []
for slave_name in slave_method_names:
body.append(ast.Expr(ast_call(func=ast_attr("self." + slave_name))))
name = 'test_' + create_method_name(self.label)
return self._gen_test_method(name=name, body=body)
@staticmethod
def _gen_test_method(name, body):
# 'test_01_get_posts'
return ast.FunctionDef(
name=name,
args=[ast.Name(id='self', ctx=ast.Param())],
body=body,
decorator_list=[])
def _gen_expr(self, value):
return self.expr_compiler.gen_expr(value)
@staticmethod
def _escape_js_blocks(value): # escapes plain { with {{
for block in re.finditer(r"(?<!\$){.*}", value):
start, end = block.start(), block.end()
line = "{" + value[start:end] + "}"
value = value[:start] + line + value[end:]
return value
def _gen_target_setup(self, key, value):
return ast.Expr(ast_call(
func=ast_attr("self.target.%s" % key),
args=[self._gen_expr(value)]))
def _access_method(self):
keepalive = self.scenario.get("keepalive", None)
default_address = self.scenario.get("default-address", None)
store_cookie = self.scenario.get("store-cookie", None)
if default_address is not None or keepalive or store_cookie:
return ApiritifScriptGenerator.ACCESS_TARGET
else:
return ApiritifScriptGenerator.ACCESS_PLAIN
def _gen_api_target(self):
keepalive = self.scenario.get("keepalive", None)
base_path = self.scenario.get("base-path", None)
auto_assert_ok = self.scenario.get("auto-assert-ok", True)
store_cookie = self.scenario.get("store-cookie", None)
timeout = self.scenario.get("timeout", None)
follow_redirects = self.scenario.get("follow-redirects", True)
if keepalive is None:
keepalive = True
if store_cookie is None:
store_cookie = True
target = []
if self._access_method() == ApiritifScriptGenerator.ACCESS_TARGET:
target.extend([
self._init_target(),
self._gen_target_setup('keep_alive', keepalive),
self._gen_target_setup('auto_assert_ok', auto_assert_ok),
self._gen_target_setup('use_cookies', store_cookie),
self._gen_target_setup('allow_redirects', follow_redirects),
])
if base_path:
target.append(self._gen_target_setup('base_path', base_path))
if timeout is not None:
target.append(self._gen_target_setup('timeout', dehumanize_time(timeout)))
target.append(gen_empty_line_stmt())
return target
def _init_target(self):
# todo: allow empty address in apiritif (HTTPTarget.__init__)
default_address = self.scenario.get("default-address", "")
target_call = ast_call(
func=ast_attr("apiritif.http.target"),
args=[self._gen_expr(default_address)])
target = ast.Assign(
targets=[ast_attr("self.target")],
value=target_call)
return target
def _extract_named_args(self, req):
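"""Collect the keyword arguments (timeout, redirects, headers, body variants, certificate) for the generated HTTP call."""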
named_args = OrderedDict()
no_target = self._access_method() != ApiritifScriptGenerator.ACCESS_TARGET
if req.timeout is not None:
named_args['timeout'] = dehumanize_time(req.timeout)
elif "timeout" in self.scenario and no_target:
named_args['timeout'] = dehumanize_time(self.scenario.get("timeout"))
follow_redirects = req.priority_option('follow-redirects', None)
if follow_redirects is not None:
named_args['allow_redirects'] = follow_redirects
headers = {}
headers.update(self.scenario.get("headers"))
headers.update(req.headers)
if headers:
named_args['headers'] = self._gen_expr(headers)
merged_headers = dict([(key.lower(), value) for key, value in iteritems(headers)])
content_type = merged_headers.get("content-type")
if content_type == 'application/json' and isinstance(req.body, (dict, list)): # json request body
named_args['json'] = self._gen_expr(req.body)
elif req.method.lower() == "get" and isinstance(req.body, dict): # request URL params (?a=b&c=d)
named_args['params'] = self._gen_expr(req.body)
elif isinstance(req.body, dict): # form data
named_args['data'] = self._gen_expr(list(iteritems(req.body)))
elif isinstance(req.body, str):
named_args['data'] = self._gen_expr(req.body)
elif req.body:
msg = "Cannot handle 'body' option of type %s: %s"
raise TaurusConfigError(msg % (type(req.body), req.body))
cert = self.scenario.get("certificate")
cert_pass = self.scenario.get("passphrase", None)
if cert:
named_args['encrypted_cert'] = (self.executor.engine.find_file(cert), cert_pass)
if cert_pass and not cert:
self.log.warning("Passphrase was found, but certificate is missing!")
return named_args
# generate transactions recursively
def _gen_transaction(self, trans_conf, transaction_class="apiritif.smart_transaction"):
body = []
if isinstance(trans_conf, IncludeScenarioBlock):
included = self.executor.get_scenario(trans_conf.scenario_name)
included_requests = included.get_requests(parser=HierarchicRequestParser,
require_url=False)
trans_conf = TransactionBlock(
name=trans_conf.scenario_name,
requests=included_requests,
include_timers=[],
config=included.data,
scenario=included)
for request in trans_conf.requests:
if isinstance(request, TransactionBlock) or isinstance(request, IncludeScenarioBlock):
body.append(self._gen_transaction(request, transaction_class="apiritif.transaction"))
elif isinstance(request, SetVariables):
body.append(self._gen_set_vars(request))
else:
body.append(self._gen_http_request(request))
# if self.test_mode == "selenium": # todo: remove it?
# transaction_class += "_logged"
transaction = ast.With(
context_expr=ast_call(
func=ast_attr(transaction_class),
args=[self._gen_expr(trans_conf.label)]),
optional_vars=None,
body=body)
return transaction
def _gen_http_request(self, req):
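"""Generate statements for one request: driver.get() plus actions and assertions in selenium mode, or an apiritif HTTP call with assertions and extractors, followed by optional think time."""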
lines = []
think_time = dehumanize_time(req.get_think_time())
if req.url:
if self.test_mode == "selenium":
if req.timeout:
lines.append(self._gen_impl_wait(req.timeout))
default_address = self.scenario.get("default-address")
parsed_url = parse.urlparse(req.url)
if default_address and not parsed_url.netloc:
url = default_address + req.url
else:
url = req.url
lines.append(ast.Expr(
ast_call(
func=ast_attr("self.driver.get"),
args=[self._gen_expr(url)])))
lines.append(self._gen_replace_dialogs())
else:
method = req.method.lower()
named_args = self._extract_named_args(req)
if self._access_method() == ApiritifScriptGenerator.ACCESS_TARGET:
requestor = ast_attr("self.target")
else:
requestor = ast_attr("apiritif.http")
keywords = [ast.keyword(
arg=name,
value=self._gen_expr(value)) for name, value in iteritems(named_args)]
lines.append(ast.Assign(
targets=[ast.Name(id="response")],
value=ast_call(
func=ast_attr((requestor, method)),
args=[self._gen_expr(req.url)],
keywords=keywords)))
elif "actions" not in req.config:
self.log.warning("'url' and/or 'actions' are mandatory for request but not found: '%s'", req.config)
return [ast.Pass()]
if self.test_mode == "selenium":
for action in req.config.get("actions"):
action_lines = self._gen_action(action)
if self.generate_external_logging:
action_lines = self._gen_log_start(action) + action_lines + self._gen_log_end(action)
lines.extend(action_lines)
if "assert" in req.config:
lines.append(ast.Assign(
targets=[ast.Name(id="body")],
value=ast_attr("self.driver.page_source")))
for assert_config in req.config.get("assert"):
lines.extend(self._gen_sel_assertion(assert_config))
else:
lines.extend(self._gen_assertions(req))
lines.extend(self._gen_jsonpath_assertions(req))
lines.extend(self._gen_xpath_assertions(req))
lines.extend(self._gen_extractors(req))
if think_time:
lines.append(ast.Expr(
ast_call(
func=ast_attr("sleep"),
args=[self._gen_expr(think_time)])))
return lines
def _gen_log_start(self, action):
return self._gen_action("log('start: %s')" % action)
def _gen_log_end(self, action):
return self._gen_action("log('end: %s')" % action)
def _gen_sel_assertion(self, assertion_config):
self.log.debug("Generating assertion, config: %s", assertion_config)
assertion_elements = []
if isinstance(assertion_config, str):
assertion_config = {"contains": [assertion_config]}
for val in assertion_config["contains"]:
regexp = assertion_config.get("regexp", True)
reverse = assertion_config.get("not", False)
subject = assertion_config.get("subject", "body")
if subject != "body":
raise TaurusConfigError("Only 'body' subject supported ")
assert_message = "'%s' " % val
if not reverse:
assert_message += 'not '
assert_message += 'found in BODY'
if regexp:
if reverse:
method = "self.assertEqual"
else:
method = "self.assertNotEqual"
assertion_elements.append(
ast.Assign(
targets=[ast.Name(id="re_pattern")],
value=ast_call(
func=ast_attr("re.compile"),
args=[ast.Str(val, kind="")])))
assertion_elements.append(ast.Expr(
ast_call(
func=ast_attr(method),
args=[
ast.Num(0, kind=""),
ast_call(
func=ast.Name(id="len"),
args=[ast_call(
func=ast_attr("re.findall"),
args=[ast.Name(id="re_pattern"), ast.Name(id="body")])]),
ast.Str("Assertion: %s" % assert_message, kind="")])))
else:
if reverse:
method = "self.assertNotIn"
else:
method = "self.assertIn"
assertion_elements.append(
ast.Expr(
ast_call(
func=ast_attr(method),
args=[
ast.Str(val, kind=""),
ast.Name(id="body"),
ast.Str("Assertion: %s" % assert_message, kind="")])))
return assertion_elements
def _gen_default_vars(self):
variables = self.scenario.get("variables")
names = sorted(variables.keys())
values = [variables[name] for name in names]
return ast.Assign(
targets=[ast_attr("self.vars")],
value=ast.Dict(
keys=[self._gen_expr(name) for name in names],
values=[self._gen_expr(val) for val in values]))
def _gen_assertions(self, request):
stmts = []
assertions = request.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
subject = assertion.get("subject", Scenario.FIELD_BODY)
if subject in (Scenario.FIELD_BODY, Scenario.FIELD_HEADERS):
for member in assertion["contains"]:
func_table = {
(Scenario.FIELD_BODY, False, False): "assert_in_body",
(Scenario.FIELD_BODY, False, True): "assert_not_in_body",
(Scenario.FIELD_BODY, True, False): "assert_regex_in_body",
(Scenario.FIELD_BODY, True, True): "assert_regex_not_in_body",
(Scenario.FIELD_HEADERS, False, False): "assert_in_headers",
(Scenario.FIELD_HEADERS, False, True): "assert_not_in_headers",
(Scenario.FIELD_HEADERS, True, False): "assert_regex_in_headers",
(Scenario.FIELD_HEADERS, True, True): "assert_regex_not_in_headers",
}
method = func_table[(subject, assertion.get('regexp', True), assertion.get('not', False))]
stmts.append(ast.Expr(
ast_call(
func=ast_attr("response.%s" % method),
args=[self._gen_expr(member)])))
elif subject == Scenario.FIELD_RESP_CODE:
for member in assertion["contains"]:
method = "assert_status_code" if not assertion.get('not', False) else "assert_not_status_code"
stmts.append(ast.Expr(
ast_call(
func=ast_attr("response.%s" % method),
args=[self._gen_expr(member)])))
return stmts
def _gen_jsonpath_assertions(self, request):
stmts = []
jpath_assertions = request.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
query = assertion.get('jsonpath', exc)
expected = assertion.get('expected-value', None)
method = "assert_not_jsonpath" if assertion.get('invert', False) else "assert_jsonpath"
stmts.append(ast.Expr(
ast_call(
func=ast_attr("response.%s" % method),
args=[self._gen_expr(query)],
keywords=[ast.keyword(arg="expected_value", value=self._gen_expr(expected))])))
return stmts
def _gen_xpath_assertions(self, request):
stmts = []
jpath_assertions = request.config.get("assert-xpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
query = assertion.get('xpath', exc)
parser_type = 'html' if assertion.get('use-tolerant-parser', True) else 'xml'
validate = assertion.get('validate-xml', False)
method = "assert_not_xpath" if assertion.get('invert', False) else "assert_xpath"
stmts.append(ast.Expr(
ast_call(
func=ast_attr("response.%s" % method),
args=[self._gen_expr(query)],
keywords=[ast.keyword(arg="parser_type", value=self._gen_expr(parser_type)),
ast.keyword(arg="validate", value=self._gen_expr(validate))])))
return stmts
def _gen_extractors(self, request):
stmts = []
jextractors = request.config.get("extract-jsonpath")
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
stmts.append(ast.Assign(
targets=[self.expr_compiler.gen_var_accessor(varname, ast.Store())],
value=ast_call(
func=ast_attr("response.extract_jsonpath"),
args=[self._gen_expr(cfg['jsonpath']), self._gen_expr(cfg.get('default', 'NOT_FOUND'))])))
extractors = request.config.get("extract-regexp")
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
# TODO: support non-'body' value of 'subject'
stmts.append(ast.Assign(
targets=[self.expr_compiler.gen_var_accessor(varname, ast.Store())],
value=ast_call(
func=ast_attr("response.extract_regex"),
args=[self._gen_expr(cfg['regexp']), self._gen_expr(cfg.get('default', 'NOT_FOUND'))])))
# TODO: css/jquery extractor?
xpath_extractors = request.config.get("extract-xpath")
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
parser_type = 'html' if cfg.get('use-tolerant-parser', True) else 'xml'
validate = cfg.get('validate-xml', False)
stmts.append(ast.Assign(
targets=[self.expr_compiler.gen_var_accessor(varname, ast.Store())],
value=ast_call(
func=ast_attr("response.extract_xpath"),
args=[self._gen_expr(cfg['xpath'])],
keywords=[ast.keyword(arg="default", value=cfg.get('default', 'NOT_FOUND')),
ast.keyword(arg="parser_type", value=parser_type),
ast.keyword(arg="validate", value=validate)])))
return stmts
def _build_tree(self):
mod = self._gen_module()
mod.lineno = 0
mod.col_offset = 0
mod = ast.fix_missing_locations(mod)
return mod
def build_source_code(self):
self.tree = self._build_tree()
def save(self, filename):
with open(filename, 'wt', encoding='utf8') as fds:
fds.write("# coding=utf-8\n")
fds.write(astunparse.unparse(self.tree))
def _gen_logging(self):
set_log = ast.Assign(
targets=[ast.Name(id="log")],
value=ast_call(
func=ast_attr("logging.getLogger"),
args=[ast.Str(s="apiritif.http", kind="")]))
add_handler = ast_call(
func=ast_attr("log.addHandler"),
args=[ast_call(
func=ast_attr("logging.StreamHandler"),
args=[ast_attr("sys.stdout")])])
set_level = ast_call(
func=ast_attr("log.setLevel"),
args=[ast_attr("logging.DEBUG")])
return [set_log, gen_empty_line_stmt(), add_handler,
gen_empty_line_stmt(), set_level, gen_empty_line_stmt()]
| 1 | 15,848 | 1 - create browser specific options class: - chromeoptions - firefoxoptions - argsoptions | Blazemeter-taurus | py |
@@ -681,7 +681,9 @@ class ProxyListenerS3(ProxyListener):
# fix content-type: https://github.com/localstack/localstack/issues/618
# https://github.com/localstack/localstack/issues/549
- if 'text/html' in response.headers.get('Content-Type', ''):
+ # https://github.com/localstack/localstack/issues/854
+ if 'text/html' in response.headers.get('Content-Type', '') \
+ and not response_content_str.startswith('<!doctype html'):
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
reset_content_length = True | 1 | import re
import logging
import json
import uuid
import base64
import codecs
import xmltodict
import collections
import botocore.config
import six
import datetime
import dateutil.parser
from six import iteritems
from six.moves.urllib import parse as urlparse
from botocore.client import ClientError
from requests.models import Response, Request
from localstack import config
from localstack.config import HOSTNAME, HOSTNAME_EXTERNAL
from localstack.utils import persistence
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
short_uid, timestamp, TIMESTAMP_FORMAT_MILLIS, to_str, to_bytes, clone, md5, get_service_protocol)
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_responses import requests_response
from localstack.services.s3 import multipart_content
from localstack.services.generic_proxy import ProxyListener
# mappings for S3 bucket notifications
S3_NOTIFICATIONS = {}
# mappings for bucket CORS settings
BUCKET_CORS = {}
# mappings for bucket lifecycle settings
BUCKET_LIFECYCLE = {}
# set up logger
LOGGER = logging.getLogger(__name__)
# XML namespace constants
XMLNS_S3 = 'http://s3.amazonaws.com/doc/2006-03-01/'
# list of destination types for bucket notifications
NOTIFICATION_DESTINATION_TYPES = ('Queue', 'Topic', 'CloudFunction', 'LambdaFunction')
# response header overrides the client may request
ALLOWED_HEADER_OVERRIDES = {
'response-content-type': 'Content-Type',
'response-content-language': 'Content-Language',
'response-expires': 'Expires',
'response-cache-control': 'Cache-Control',
'response-content-disposition': 'Content-Disposition',
'response-content-encoding': 'Content-Encoding',
}
def event_type_matches(events, action, api_method):
""" check whether any of the event types in `events` matches the
given `action` and `api_method`, and return the first match. """
for event in events:
regex = event.replace('*', '[^:]*')
action_string = 's3:%s:%s' % (action, api_method)
match = re.match(regex, action_string)
if match:
return match
return False
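def _example_event_type_matches():
    """Illustrative sketch only (not part of the original module): a "*" wildcard in
    the subscribed event name covers any API method of that action."""
    assert event_type_matches(['s3:ObjectCreated:*'], 'ObjectCreated', 'Put')
    assert not event_type_matches(['s3:ObjectCreated:*'], 'ObjectRemoved', 'Delete')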
def filter_rules_match(filters, object_path):
""" check whether the given object path matches all of the given filters """
filters = filters or {}
s3_filter = _get_s3_filter(filters)
for rule in s3_filter.get('FilterRule', []):
if rule['Name'] == 'prefix':
if not prefix_with_slash(object_path).startswith(prefix_with_slash(rule['Value'])):
return False
elif rule['Name'] == 'suffix':
if not object_path.endswith(rule['Value']):
return False
else:
LOGGER.warning('Unknown filter name: "%s"' % rule['Name'])
return True
def _get_s3_filter(filters):
return filters.get('S3Key', filters.get('Key', {}))
def prefix_with_slash(s):
return s if s[0] == '/' else '/%s' % s
def get_event_message(event_name, bucket_name, file_name='testfile.txt', version_id=None, file_size=1024):
# Based on: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
return {
'Records': [{
'eventVersion': '2.0',
'eventSource': 'aws:s3',
'awsRegion': aws_stack.get_region(),
'eventTime': timestamp(format=TIMESTAMP_FORMAT_MILLIS),
'eventName': event_name,
'userIdentity': {
'principalId': 'AIDAJDPLRKLG7UEXAMPLE'
},
'requestParameters': {
'sourceIPAddress': '127.0.0.1' # TODO determine real source IP
},
'responseElements': {
'x-amz-request-id': short_uid(),
'x-amz-id-2': 'eftixk72aD6Ap51TnqcoF8eFidJG9Z/2' # Amazon S3 host that processed the request
},
's3': {
's3SchemaVersion': '1.0',
'configurationId': 'testConfigRule',
'bucket': {
'name': bucket_name,
'ownerIdentity': {
'principalId': 'A3NL1KOZZKExample'
},
'arn': 'arn:aws:s3:::%s' % bucket_name
},
'object': {
'key': file_name,
'size': file_size,
'eTag': 'd41d8cd98f00b204e9800998ecf8427e',
'versionId': version_id,
'sequencer': '0055AED6DCD90281E5'
}
}
}]
}
def queue_url_for_arn(queue_arn):
sqs_client = aws_stack.connect_to_service('sqs')
parts = queue_arn.split(':')
return sqs_client.get_queue_url(QueueName=parts[5],
QueueOwnerAWSAccountId=parts[4])['QueueUrl']
def send_notifications(method, bucket_name, object_path, version_id):
for bucket, b_cfg in iteritems(S3_NOTIFICATIONS):
if bucket == bucket_name:
action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
if action == 'ObjectCreated' and method == 'POST':
api_method = 'CompleteMultipartUpload'
else:
api_method = {'PUT': 'Put', 'POST': 'Post', 'DELETE': 'Delete'}[method]
event_name = '%s:%s' % (action, api_method)
if (event_type_matches(b_cfg['Event'], action, api_method) and
filter_rules_match(b_cfg.get('Filter'), object_path)):
# send notification
message = get_event_message(
event_name=event_name, bucket_name=bucket_name,
file_name=urlparse.urlparse(object_path[1:]).path,
version_id=version_id
)
message = json.dumps(message)
if b_cfg.get('Queue'):
sqs_client = aws_stack.connect_to_service('sqs')
try:
queue_url = queue_url_for_arn(b_cfg['Queue'])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s' %
(bucket_name, b_cfg['Queue'], e))
if b_cfg.get('Topic'):
sns_client = aws_stack.connect_to_service('sns')
try:
sns_client.publish(TopicArn=b_cfg['Topic'], Message=message, Subject='Amazon S3 Notification')
except Exception:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SNS topic "%s".' %
(bucket_name, b_cfg['Topic']))
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = b_cfg.get('CloudFunction') or b_cfg.get('LambdaFunction')
if lambda_function_config:
# make sure we don't run into a socket timeout
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service('lambda', config=connection_config)
try:
lambda_client.invoke(FunctionName=lambda_function_config,
InvocationType='Event', Payload=message)
except Exception:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to Lambda function "%s".' %
(bucket_name, lambda_function_config))
if not filter(lambda x: b_cfg.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning('Neither of %s defined for S3 notification.' %
'/'.join(NOTIFICATION_DESTINATION_TYPES))
def get_cors(bucket_name):
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
cors = BUCKET_CORS.get(bucket_name)
if not cors:
cors = {
'CORSConfiguration': []
}
body = xmltodict.unparse(cors)
response._content = body
response.status_code = 200
return response
def set_cors(bucket_name, cors):
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
if not isinstance(cors, dict):
cors = xmltodict.parse(cors)
BUCKET_CORS[bucket_name] = cors
response.status_code = 200
return response
def delete_cors(bucket_name):
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
BUCKET_CORS.pop(bucket_name, {})
response.status_code = 200
return response
def append_cors_headers(bucket_name, request_method, request_headers, response):
cors = BUCKET_CORS.get(bucket_name)
if not cors:
return
origin = request_headers.get('Origin', '')
rules = cors['CORSConfiguration']['CORSRule']
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
# add allow-origin header
allowed_methods = rule.get('AllowedMethod', [])
if request_method in allowed_methods:
allowed_origins = rule.get('AllowedOrigin', [])
for allowed in allowed_origins:
if origin in allowed or re.match(allowed.replace('*', '.*'), origin):
response.headers['Access-Control-Allow-Origin'] = origin
if 'ExposeHeader' in rule:
expose_headers = rule['ExposeHeader']
response.headers['Access-Control-Expose-Headers'] = \
','.join(expose_headers) if isinstance(expose_headers, list) else expose_headers
break
def append_last_modified_headers(response, content=None):
"""Add Last-Modified header with current time
(if the response content is an XML containing <LastModified>, add that instead)"""
time_format = '%a, %d %b %Y %H:%M:%S GMT' # TimeFormat
try:
if content:
last_modified_str = re.findall(r'<LastModified>(.*)</LastModified>', content)
if last_modified_str:
last_modified_str = last_modified_str[0]
last_modified_time_format = dateutil.parser.parse(last_modified_str).strftime(time_format)
response.headers['Last-Modified'] = last_modified_time_format
except TypeError as err:
LOGGER.debug('No parsable content: %s' % err)
except ValueError as err:
LOGGER.error('Failed to parse LastModified: %s' % err)
except Exception as err:
LOGGER.error('Caught generic exception (parsing LastModified): %s' % err)
# if cannot parse any LastModified, just continue
try:
if response.headers.get('Last-Modified', '') == '':
response.headers['Last-Modified'] = datetime.datetime.now().strftime(time_format)
except Exception as err:
LOGGER.error('Caught generic exception (setting LastModified header): %s' % err)
def get_lifecycle(bucket_name):
lifecycle = BUCKET_LIFECYCLE.get(bucket_name)
if not lifecycle:
# TODO: check if bucket exists, otherwise return 404-like error
lifecycle = {
'LifecycleConfiguration': {}
}
body = xmltodict.unparse(lifecycle)
return requests_response(body)
def get_replication(bucket_name):
# TODO return actual value
# result = {
# 'Error': {
# 'Code': 'NoSuchReplicationConfiguration',
# 'Message': 'There is no replication configuration with that name.'
# }
# }
# content = xmltodict.unparse(result)
# return requests_response(content, status_code=404)
# see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETreplication.html
config = {}
result = {
'ReplicationConfiguration': config
}
body = xmltodict.unparse(result)
return requests_response(body)
def get_encryption(bucket_name):
# TODO return actual value
result = {
'ServerSideEncryptionConfiguration': {}
}
body = xmltodict.unparse(result)
return requests_response(body)
def set_lifecycle(bucket_name, lifecycle):
# TODO: check if bucket exists, otherwise return 404-like error
if isinstance(to_str(lifecycle), six.string_types):
lifecycle = xmltodict.parse(lifecycle)
BUCKET_LIFECYCLE[bucket_name] = lifecycle
response = Response()
response.status_code = 200
return response
def strip_chunk_signatures(data):
# For clients that use streaming v4 authentication, the request contains chunk signatures
# in the HTTP body (see example below) which we need to strip as moto cannot handle them
#
# 17;chunk-signature=6e162122ec4962bea0b18bc624025e6ae4e9322bdc632762d909e87793ac5921
# <payload data ...>
# 0;chunk-signature=927ab45acd82fc90a3c210ca7314d59fedc77ce0c914d79095f8cc9563cf2c70
data_new = re.sub(b'(^|\r\n)[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}(\r\n)(\r\n$)?', b'',
data, flags=re.MULTILINE | re.DOTALL)
return data_new
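def _example_strip_chunk_signatures():
    """Illustrative sketch only (not part of the original module): shows the intended
    effect of strip_chunk_signatures() on a made-up v4-chunked payload; the signature
    bytes are hypothetical - only their 64-hex-character shape matters."""
    sig = b'0' * 64
    chunked = (b'17;chunk-signature=' + sig + b'\r\n' +
               b'<payload data>\r\n' +
               b'0;chunk-signature=' + sig + b'\r\n\r\n')
    assert strip_chunk_signatures(chunked) == b'<payload data>'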
def bucket_exists(bucket_name):
"""Tests for the existence of the specified bucket. Returns the error code
if the bucket does not exist (200 if the bucket does exist).
"""
s3_client = aws_stack.connect_to_service('s3')
try:
s3_client.head_bucket(Bucket=bucket_name)
except ClientError as err:
error_code = err.response.get('Error').get('Code')
return False, error_code
return True, 200
def check_content_md5(data, headers):
actual = md5(strip_chunk_signatures(data))
expected = headers['Content-MD5']
try:
expected = to_str(codecs.encode(base64.b64decode(expected), 'hex'))
except Exception:
expected = '__invalid__'
if actual != expected:
result = {
'Error': {
'Code': 'InvalidDigest',
'Message': 'The Content-MD5 you specified was invalid'
}
}
content = xmltodict.unparse(result)
return requests_response(content, status_code=400)
def expand_redirect_url(starting_url, key, bucket):
""" Add key and bucket parameters to starting URL query string. """
parsed = urlparse.urlparse(starting_url)
query = collections.OrderedDict(urlparse.parse_qsl(parsed.query))
query.update([('key', key), ('bucket', bucket)])
redirect_url = urlparse.urlunparse((
parsed.scheme, parsed.netloc, parsed.path,
parsed.params, urlparse.urlencode(query), None))
return redirect_url
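def _example_expand_redirect_url():
    """Illustrative sketch only (not part of the original module): existing query
    parameters are preserved and key/bucket are appended; URL values are hypothetical."""
    url = expand_redirect_url('http://example.com/done?x=1', 'my-key', 'my-bucket')
    assert url == 'http://example.com/done?x=1&key=my-key&bucket=my-bucket'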
def get_bucket_name(path, headers):
parsed = urlparse.urlparse(path)
# try pick the bucket_name from the path
bucket_name = parsed.path.split('/')[1]
host = headers['host']
# is the hostname not starting a bucket name?
if host.startswith(HOSTNAME) or host.startswith(HOSTNAME_EXTERNAL):
return bucket_name
# matches the common endpoints like
# - '<bucket_name>.s3.<region>.amazonaws.com'
# - '<bucket_name>.s3-<region>.amazonaws.com.cn'
common_pattern = re.compile(r'^(.+)\.s3[.\-][a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches dualstack endpoints like
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com'
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com.cn'
dualstack_pattern = re.compile(r'^(.+)\.s3\.dualstack\.[a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches legacy endpoints like
# - '<bucket_name>.s3.amazonaws.com'
# - '<bucket_name>.s3-external-1.amazonaws.com.cn'
legacy_patterns = re.compile(r'^(.+)\.s3\.?(-external-1)?\.amazonaws\.com(\.[a-z]+)?$')
# if any of the above patterns match, the first captured group
# will be returned as the bucket name
for pattern in [common_pattern, dualstack_pattern, legacy_patterns]:
match = pattern.match(host)
if match:
bucket_name = match.groups()[0]
break
# we're either returning the original bucket_name,
# or a pattern matched the host and we're returning that name instead
return bucket_name
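def _example_get_bucket_name():
    """Illustrative sketch only (not part of the original module): shows how the host
    patterns above resolve bucket names, assuming a default config whose HOSTNAME does
    not match these amazonaws hostnames; hostnames and bucket names are hypothetical."""
    assert get_bucket_name('/some-key', {'host': 'my-bucket.s3.eu-west-1.amazonaws.com'}) == 'my-bucket'
    assert get_bucket_name('/some-key', {'host': 'my-bucket.s3.dualstack.us-east-1.amazonaws.com'}) == 'my-bucket'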
def handle_notification_request(bucket, method, data):
response = Response()
response.status_code = 200
response._content = ''
if method == 'GET':
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
'%sConfiguration' % dest: {
'Id': uuid.uuid4(),
dest: notif[dest],
'Event': notif['Event'],
'Filter': notif['Filter']
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += '</NotificationConfiguration>'
response._content = result
if method == 'PUT':
parsed = xmltodict.parse(data)
notif_config = parsed.get('NotificationConfiguration')
S3_NOTIFICATIONS.pop(bucket, None)
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get('%sConfiguration' % (dest))
if config:
events = config.get('Event')
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get('Filter', {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
s3_filter['FilterRule'] = [s3_filter['FilterRule']]
# create final details dict
notification_details = {
'Id': config.get('Id'),
'Event': events,
dest: config.get(dest),
'Filter': event_filter
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)
return response
class ProxyListenerS3(ProxyListener):
def is_s3_copy_request(self, headers, path):
return 'x-amz-copy-source' in headers or 'x-amz-copy-source' in path
def forward_request(self, method, path, data, headers):
# Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
# Note that all S3 clients using LocalStack need to enable path style addressing.
if 's3.amazonaws.com' not in headers.get('host', ''):
headers['host'] = 'localhost'
# check content md5 hash integrity if not a copy request
if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
response = check_content_md5(data, headers)
if response is not None:
return response
modified_data = None
# TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
if data and data.startswith(to_bytes('<')) and to_find in data:
modified_data = data.replace(to_find, to_bytes(''))
# If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
modified_data = strip_chunk_signatures(modified_data or data)
headers['content-length'] = headers.get('x-amz-decoded-content-length')
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
if method == 'POST':
original_data = modified_data or data
expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# If no content-type is provided, 'binary/octet-stream' should be used
# src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
if method == 'PUT' and not headers.get('content-type'):
headers['content-type'] = 'binary/octet-stream'
# persist this API call to disk
persistence.record('s3', method, path, data, headers)
# parse query params
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
bucket = path.split('/')[1]
query_map = urlparse.parse_qs(query, keep_blank_values=True)
if query == 'notification' or 'notification' in query_map:
# handle and return response for ?notification request
response = handle_notification_request(bucket, method, data)
return response
if query == 'cors' or 'cors' in query_map:
if method == 'GET':
return get_cors(bucket)
if method == 'PUT':
return set_cors(bucket, data)
if method == 'DELETE':
return delete_cors(bucket)
if query == 'lifecycle' or 'lifecycle' in query_map:
if method == 'GET':
return get_lifecycle(bucket)
if method == 'PUT':
return set_lifecycle(bucket, data)
if query == 'replication' or 'replication' in query_map:
if method == 'GET':
return get_replication(bucket)
if query == 'encryption' or 'encryption' in query_map:
if method == 'GET':
return get_encryption(bucket)
if modified_data is not None:
return Request(data=modified_data, headers=headers, method=method)
return True
def return_response(self, method, path, data, headers, response):
path = to_str(path)
method = to_str(method)
bucket_name = get_bucket_name(path, headers)
# No path-name based bucket name? Try host-based
hostname_parts = headers['host'].split('.')
if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
bucket_name = hostname_parts[0]
# POST requests to S3 may include a success_action_redirect field,
# which should be used to redirect a client to a new location.
key = None
if method == 'POST':
key, redirect_url = multipart_content.find_multipart_redirect_url(data, headers)
if key and redirect_url:
response.status_code = 303
response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))
parsed = urlparse.urlparse(path)
bucket_name_in_host = headers['host'].startswith(bucket_name)
should_send_notifications = all([
method in ('PUT', 'POST', 'DELETE'),
'/' in path[1:] or bucket_name_in_host,
# check if this is an actual put object request, because it could also be
# a put bucket request with a path like this: /bucket_name/
bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
self.is_query_allowable(method, parsed.query)
])
# get subscribers and send bucket notifications
if should_send_notifications:
# if we already have a good key, use it, otherwise examine the path
if key:
object_path = '/' + key
elif bucket_name_in_host:
object_path = parsed.path
else:
parts = parsed.path[1:].split('/', 1)
object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
version_id = response.headers.get('x-amz-version-id', None)
send_notifications(method, bucket_name, object_path, version_id)
# publish event for creation/deletion of buckets:
if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
else event_publisher.EVENT_S3_DELETE_BUCKET)
event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})
# fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
if method == 'PUT' and parsed.query == 'policy':
response._content = ''
response.status_code = 204
return response
if response:
reset_content_length = False
# append CORS headers to response
append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
append_last_modified_headers(response=response)
# Remove body from PUT response on presigned URL
# https://github.com/localstack/localstack/issues/1317
if method == 'PUT' and ('X-Amz-Security-Token=' in path or 'AWSAccessKeyId=' in path):
response._content = ''
reset_content_length = True
response_content_str = None
try:
response_content_str = to_str(response._content)
except Exception:
pass
# Honor response header overrides
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if method == 'GET':
query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
if param_name in query_map:
response.headers[header_name] = query_map[param_name][0]
# We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
# https://github.com/jserver/mock-s3/pull/9/files
# https://github.com/localstack/localstack/issues/183
# Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
if response_content_str and response_content_str.startswith('<'):
is_bytes = isinstance(response._content, six.binary_type)
append_last_modified_headers(response=response, content=response_content_str)
# un-pretty-print the XML
response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)
# update Location information in response payload
response._content = self._update_location(response._content, bucket_name)
# convert back to bytes
if is_bytes:
response._content = to_bytes(response._content)
# fix content-type: https://github.com/localstack/localstack/issues/618
# https://github.com/localstack/localstack/issues/549
if 'text/html' in response.headers.get('Content-Type', ''):
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
reset_content_length = True
# update content-length headers (fix https://github.com/localstack/localstack/issues/541)
if method == 'DELETE':
reset_content_length = True
if reset_content_length:
response.headers['content-length'] = len(response._content)
def _update_location(self, content, bucket_name):
host = config.HOSTNAME_EXTERNAL
if ':' not in host:
host = '%s:%s' % (host, config.PORT_S3)
return re.sub(r'<Location>\s*([a-zA-Z0-9\-]+)://[^/]+/([^<]+)\s*</Location>',
r'<Location>%s://%s/%s/\2</Location>' % (get_service_protocol(), host, bucket_name),
content, flags=re.MULTILINE)
@staticmethod
def is_query_allowable(method, query):
# Generally if there is a query (some/path/with?query) we don't want to send notifications
if not query:
return True
# Except we do want to notify on multipart and presigned url upload completion
elif (method == 'POST' and query.startswith('uploadId')) or \
('X-Amz-Credential' in query and 'X-Amz-Signature' in query):
return True
# instantiate listener
UPDATE_S3 = ProxyListenerS3()
| 1 | 10,269 | should be a case insensitive match though no? DOCTYPE and doctype are both widely used | localstack-localstack | py |
@@ -0,0 +1,15 @@
+class AccountWidgetsController < WidgetsController
+ before_action :set_account
+ before_action :render_gif_image
+ before_action :account_context, only: :index
+
+ def index
+ @widgets = AccountWidget.create_widgets(params[:account_id])
+ end
+
+ private
+
+ def set_account
+ @account = Account.from_param(params[:account_id]).first!
+ end
+end | 1 | 1 | 7,245 | We must have a `fail ParamNotFound` here for cases where `@account.nil?`. | blackducksoftware-ohloh-ui | rb |
|
@@ -144,6 +144,7 @@ namespace Datadog.Trace.Vendors.Newtonsoft.Json.Serialization
return property;
}
+#if !NETCOREAPP
private bool TryGetValue(string key, out JsonProperty item)
{
if (Dictionary == null) | 1 | //------------------------------------------------------------------------------
// <auto-generated />
// This file was automatically generated by the UpdateVendors tool.
//------------------------------------------------------------------------------
#region License
// Copyright (c) 2007 James Newton-King
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#endregion
using System;
using System.Collections.Generic;
using System.Text;
using System.Collections.ObjectModel;
using Datadog.Trace.Vendors.Newtonsoft.Json.Utilities;
using System.Globalization;
namespace Datadog.Trace.Vendors.Newtonsoft.Json.Serialization
{
/// <summary>
/// A collection of <see cref="JsonProperty"/> objects.
/// </summary>
internal class JsonPropertyCollection : KeyedCollection<string, JsonProperty>
{
private readonly Type _type;
private readonly List<JsonProperty> _list;
/// <summary>
/// Initializes a new instance of the <see cref="JsonPropertyCollection"/> class.
/// </summary>
/// <param name="type">The type.</param>
public JsonPropertyCollection(Type type)
: base(StringComparer.Ordinal)
{
ValidationUtils.ArgumentNotNull(type, "type");
_type = type;
// foreach over List<T> to avoid boxing the Enumerator
_list = (List<JsonProperty>)Items;
}
/// <summary>
/// When implemented in a derived class, extracts the key from the specified element.
/// </summary>
/// <param name="item">The element from which to extract the key.</param>
/// <returns>The key for the specified element.</returns>
protected override string GetKeyForItem(JsonProperty item)
{
return item.PropertyName;
}
/// <summary>
/// Adds a <see cref="JsonProperty"/> object.
/// </summary>
/// <param name="property">The property to add to the collection.</param>
public void AddProperty(JsonProperty property)
{
if (Contains(property.PropertyName))
{
// don't overwrite existing property with ignored property
if (property.Ignored)
{
return;
}
JsonProperty existingProperty = this[property.PropertyName];
bool duplicateProperty = true;
if (existingProperty.Ignored)
{
// remove ignored property so it can be replaced in collection
Remove(existingProperty);
duplicateProperty = false;
}
else
{
if (property.DeclaringType != null && existingProperty.DeclaringType != null)
{
if (property.DeclaringType.IsSubclassOf(existingProperty.DeclaringType)
|| (existingProperty.DeclaringType.IsInterface() && property.DeclaringType.ImplementInterface(existingProperty.DeclaringType)))
{
// current property is on a derived class and hides the existing
Remove(existingProperty);
duplicateProperty = false;
}
if (existingProperty.DeclaringType.IsSubclassOf(property.DeclaringType)
|| (property.DeclaringType.IsInterface() && existingProperty.DeclaringType.ImplementInterface(property.DeclaringType)))
{
// current property is hidden by the existing so don't add it
return;
}
if (_type.ImplementInterface(existingProperty.DeclaringType) && _type.ImplementInterface(property.DeclaringType))
{
// current property was already defined on another interface
return;
}
}
}
if (duplicateProperty)
{
throw new JsonSerializationException("A member with the name '{0}' already exists on '{1}'. Use the JsonPropertyAttribute to specify another name.".FormatWith(CultureInfo.InvariantCulture, property.PropertyName, _type));
}
}
Add(property);
}
/// <summary>
/// Gets the closest matching <see cref="JsonProperty"/> object.
/// First attempts to get an exact case match of <paramref name="propertyName"/> and then
/// a case insensitive match.
/// </summary>
/// <param name="propertyName">Name of the property.</param>
/// <returns>A matching property if found.</returns>
public JsonProperty GetClosestMatchProperty(string propertyName)
{
JsonProperty property = GetProperty(propertyName, StringComparison.Ordinal);
if (property == null)
{
property = GetProperty(propertyName, StringComparison.OrdinalIgnoreCase);
}
return property;
}
private bool TryGetValue(string key, out JsonProperty item)
{
if (Dictionary == null)
{
item = default;
return false;
}
return Dictionary.TryGetValue(key, out item);
}
/// <summary>
/// Gets a property by property name.
/// </summary>
/// <param name="propertyName">The name of the property to get.</param>
/// <param name="comparisonType">Type property name string comparison.</param>
/// <returns>A matching property if found.</returns>
public JsonProperty GetProperty(string propertyName, StringComparison comparisonType)
{
// KeyedCollection has an ordinal comparer
if (comparisonType == StringComparison.Ordinal)
{
if (TryGetValue(propertyName, out JsonProperty property))
{
return property;
}
return null;
}
for (int i = 0; i < _list.Count; i++)
{
JsonProperty property = _list[i];
if (string.Equals(propertyName, property.PropertyName, comparisonType))
{
return property;
}
}
return null;
}
}
}
| 1 | 17,643 | How come this change was needed? | DataDog-dd-trace-dotnet | .cs |
@@ -25,3 +25,13 @@ const (
UART_TX_PIN = 6
UART_RX_PIN = 8
)
+
+// ADC pins
+const (
+ ADC0 = 3
+ ADC1 = 4
+ ADC2 = 28
+ ADC3 = 29
+ ADC4 = 30
+ ADC5 = 31
+) | 1 | // +build nrf,pca10040
package machine
// LEDs on the PCA10040 (nRF52832 dev board)
const (
LED = LED1
LED1 = 17
LED2 = 18
LED3 = 19
LED4 = 20
)
// Buttons on the PCA10040 (nRF52832 dev board)
const (
BUTTON = BUTTON1
BUTTON1 = 13
BUTTON2 = 14
BUTTON3 = 15
BUTTON4 = 16
)
// UART pins for the PCA10040 (nRF52832 dev board)
const (
UART_TX_PIN = 6
UART_RX_PIN = 8
)
| 1 | 5,898 | Why are there only 6 pins here, while below it appears to have 8 ADC inputs? Are pin 2 and 5 used for something else on this board? | tinygo-org-tinygo | go |
@@ -224,7 +224,9 @@ func (gsf *GraphSyncFetcher) fetchRemainingTipsets(ctx context.Context, starting
// non-recursively
func (gsf *GraphSyncFetcher) fetchBlocks(ctx context.Context, cids []cid.Cid, targetPeer peer.ID) error {
selector := gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) {
- efsb.Insert("messages", gsf.ssb.Matcher())
+ efsb.Insert("messages", gsf.ssb.ExploreFields(func(messagesSelector selectorbuilder.ExploreFieldsSpecBuilder) {
+ messagesSelector.Insert("secpRoot", gsf.ssb.Matcher())
+ }))
efsb.Insert("messageReceipts", gsf.ssb.Matcher())
}).Node()
var wg sync.WaitGroup | 1 | package net
import (
"context"
"fmt"
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
bstore "github.com/ipfs/go-ipfs-blockstore"
logging "github.com/ipfs/go-log"
"github.com/ipld/go-ipld-prime"
ipldfree "github.com/ipld/go-ipld-prime/impl/free"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
selectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/clock"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/types"
)
var logGraphsyncFetcher = logging.Logger("net.graphsync_fetcher")
const (
// Timeout for a single graphsync request getting "stuck"
// -- if no more responses are received for a period greater than this,
// we will assume the request has hung-up and cancel it
progressTimeout = 10 * time.Second
)
// Fetcher defines an interface that may be used to fetch data from the network.
type Fetcher interface {
// FetchTipSets will only fetch TipSets that evaluate to `false` when passed to `done`,
// this includes the provided `ts`. The TipSet that evaluates to true when
// passed to `done` will be in the returned slice. The returns slice of TipSets is in Traversal order.
FetchTipSets(context.Context, types.TipSetKey, peer.ID, func(types.TipSet) (bool, error)) ([]types.TipSet, error)
}
// interface conformance check
var _ Fetcher = (*GraphSyncFetcher)(nil)
// GraphExchange is an interface wrapper to Graphsync so it can be stubbed in
// unit testing
type GraphExchange interface {
Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node) (<-chan graphsync.ResponseProgress, <-chan error)
}
type graphsyncFallbackPeerTracker interface {
List() []*types.ChainInfo
Self() peer.ID
}
// GraphSyncFetcher is used to fetch data over the network. It is implemented
// using a Graphsync exchange to fetch tipsets recursively
type GraphSyncFetcher struct {
exchange GraphExchange
validator consensus.SyntaxValidator
store bstore.Blockstore
ssb selectorbuilder.SelectorSpecBuilder
peerTracker graphsyncFallbackPeerTracker
systemClock clock.Clock
}
// NewGraphSyncFetcher returns a GraphsyncFetcher wired up to the input Graphsync exchange and
// attached local blockservice for reloading blocks in memory once they are returned
func NewGraphSyncFetcher(ctx context.Context, exchange GraphExchange, blockstore bstore.Blockstore,
bv consensus.SyntaxValidator, systemClock clock.Clock, pt graphsyncFallbackPeerTracker) *GraphSyncFetcher {
gsf := &GraphSyncFetcher{
store: blockstore,
validator: bv,
exchange: exchange,
ssb: selectorbuilder.NewSelectorSpecBuilder(ipldfree.NodeBuilder()),
peerTracker: pt,
systemClock: systemClock,
}
return gsf
}
// Graphsync can fetch a fixed number of tipsets from a remote peer recursively
// with a single request. We don't know until we get all of the response whether
// our final tipset was included in the response
//
// When fetching tipsets we try to balance performance for two competing cases:
// - an initial chain sync that is likely to fetch lots and lots of tipsets
// - a future update sync that is likely to fetch only a few
//
// To do this, the Graphsync fetcher starts fetching a single tipset at a time,
// then gradually ramps up to fetch lots of tipsets at once, up to a fixed limit
//
// The constants below determine the maximum number of tipsets fetched at once
// (maxRecursionDepth) and how fast the ramp up is (recursionMultipler)
const maxRecursionDepth = 64
const recursionMultiplier = 4
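// Illustrative sketch (not part of the original file): with the constants above, a
// chain sync whose rounds keep completing requests 1, then 4, then 16, and then up
// to 64 tipsets per round, e.g.:
//
//	depth := 1
//	for depth < maxRecursionDepth {
//		depth *= recursionMultiplier // 1 -> 4 -> 16 -> 64
//	}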
// FetchTipSets gets Tipsets starting from the given tipset key and continuing until
// the done function returns true or errors
//
// For now FetchTipSets operates in two parts:
// 1. It fetches relevant blocks through Graphsync, which writes them to the block store
// 2. It reads them from the block store and validates their syntax as blocks
// and constructs a tipset
// This does have a potentially unwanted side effect of writing blocks to the block store
// that later don't validate (bitswap actually does this as well)
//
// TODO: In the future, the blocks will be validated directly through graphsync as
// go-filecoin migrates to the same IPLD library used by go-graphsync (go-ipld-prime)
//
// See: https://github.com/filecoin-project/go-filecoin/issues/3175
func (gsf *GraphSyncFetcher) FetchTipSets(ctx context.Context, tsKey types.TipSetKey, originatingPeer peer.ID, done func(types.TipSet) (bool, error)) ([]types.TipSet, error) {
// We can run into issues if we fetch from an originatingPeer that we
// are not already connected to so we usually ignore this value.
// However if the originator is our own peer ID (i.e. this node mined
// the block) then we need to fetch from ourselves to retrieve it
fetchFromSelf := originatingPeer == gsf.peerTracker.Self()
rpf, err := newRequestPeerFinder(gsf.peerTracker, fetchFromSelf)
if err != nil {
return nil, err
}
// fetch initial tipset
startingTipset, err := gsf.fetchFirstTipset(ctx, tsKey, rpf)
if err != nil {
return nil, err
}
// fetch remaining tipsets recursively
return gsf.fetchRemainingTipsets(ctx, startingTipset, rpf, done)
}
func (gsf *GraphSyncFetcher) fetchFirstTipset(ctx context.Context, key types.TipSetKey, rpf *requestPeerFinder) (types.TipSet, error) {
blocksToFetch := key.ToSlice()
for {
peer := rpf.CurrentPeer()
logGraphsyncFetcher.Infof("fetching initial tipset %s from peer %s", key, peer)
err := gsf.fetchBlocks(ctx, blocksToFetch, peer)
if err != nil {
// A likely case is the peer doesn't have the tipset. When graphsync provides
// this status we should quiet this log.
logGraphsyncFetcher.Infof("request failed: %s", err)
}
var verifiedTip types.TipSet
verifiedTip, blocksToFetch, err = gsf.loadAndVerify(ctx, key)
if err != nil {
return types.UndefTipSet, err
}
if len(blocksToFetch) == 0 {
return verifiedTip, nil
}
logGraphsyncFetcher.Infof("incomplete fetch for initial tipset %s, trying new peer", key)
// Some of the blocks may have been fetched, but avoid tricksy optimization here and just
// request the whole bunch again. Graphsync internally will avoid redundant network requests.
err = rpf.FindNextPeer()
if err != nil {
return types.UndefTipSet, errors.Wrapf(err, "fetching tipset: %s", key)
}
}
}
func (gsf *GraphSyncFetcher) fetchRemainingTipsets(ctx context.Context, startingTipset types.TipSet, rpf *requestPeerFinder, done func(types.TipSet) (bool, error)) ([]types.TipSet, error) {
out := []types.TipSet{startingTipset}
isDone, err := done(startingTipset)
if err != nil {
return nil, err
}
// fetch remaining tipsets recursively
recursionDepth := 1
anchor := startingTipset // The tipset above the one we actually want to fetch.
for !isDone {
// Because a graphsync query always starts from a single CID,
// we fetch tipsets anchored from any block in the last (i.e. highest) tipset and
		// recursively fetch sets of parents.
childBlock := anchor.At(0)
peer := rpf.CurrentPeer()
logGraphsyncFetcher.Infof("fetching chain from height %d, block %s, peer %s, %d levels", childBlock.Height, childBlock.Cid(), peer, recursionDepth)
err := gsf.fetchBlocksRecursively(ctx, childBlock.Cid(), peer, recursionDepth)
if err != nil {
// something went wrong in a graphsync request, but we want to keep trying other peers, so
// just log error
logGraphsyncFetcher.Infof("request failed, trying another peer: %s", err)
}
var incomplete []cid.Cid
for i := 0; !isDone && i < recursionDepth; i++ {
tsKey, err := anchor.Parents()
if err != nil {
return nil, err
}
var verifiedTip types.TipSet
verifiedTip, incomplete, err = gsf.loadAndVerify(ctx, tsKey)
if err != nil {
return nil, err
}
if len(incomplete) == 0 {
out = append(out, verifiedTip)
isDone, err = done(verifiedTip)
if err != nil {
return nil, err
}
anchor = verifiedTip
} else {
logGraphsyncFetcher.Infof("incomplete fetch for tipset %s, trying new peer", tsKey)
err := rpf.FindNextPeer()
if err != nil {
return nil, errors.Wrapf(err, "fetching tipset: %s", tsKey)
}
break // Stop verifying, make another fetch
}
}
if len(incomplete) == 0 && recursionDepth < maxRecursionDepth {
recursionDepth *= recursionMultiplier
}
}
return out, nil
}
// fetchBlocks requests a single set of cids as individual blocks, fetching
// non-recursively
func (gsf *GraphSyncFetcher) fetchBlocks(ctx context.Context, cids []cid.Cid, targetPeer peer.ID) error {
selector := gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) {
efsb.Insert("messages", gsf.ssb.Matcher())
efsb.Insert("messageReceipts", gsf.ssb.Matcher())
}).Node()
var wg sync.WaitGroup
// Any of the multiple parallel requests might fail. Wait for all of them to complete, then
// return any error (in this case, the first one to be received).
var setAnyError sync.Once
var anyError error
for _, c := range cids {
requestCtx, requestCancel := context.WithCancel(ctx)
defer requestCancel()
requestChan, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: c}, selector)
wg.Add(1)
go func(requestChan <-chan graphsync.ResponseProgress, errChan <-chan error, cancelFunc func()) {
defer wg.Done()
err := gsf.consumeResponse(requestChan, errChan, cancelFunc)
if err != nil {
setAnyError.Do(func() {
anyError = err
})
}
}(requestChan, errChan, requestCancel)
}
wg.Wait()
return anyError
}
func (gsf *GraphSyncFetcher) consumeResponse(requestChan <-chan graphsync.ResponseProgress, errChan <-chan error, cancelFunc func()) error {
timer := gsf.systemClock.NewTimer(progressTimeout)
var anyError error
for errChan != nil || requestChan != nil {
select {
case err, ok := <-errChan:
if !ok {
errChan = nil
}
anyError = err
timer.Reset(progressTimeout)
case _, ok := <-requestChan:
if !ok {
requestChan = nil
}
timer.Reset(progressTimeout)
case <-timer.Chan():
cancelFunc()
}
}
return anyError
}
// fetchBlocksRecursively gets the blocks from recursionDepth ancestor tipsets
// starting from baseCid.
func (gsf *GraphSyncFetcher) fetchBlocksRecursively(ctx context.Context, baseCid cid.Cid, targetPeer peer.ID, recursionDepth int) error {
requestCtx, requestCancel := context.WithCancel(ctx)
defer requestCancel()
// recursive selector to fetch n sets of parent blocks
// starting from block matching base cid:
// - fetch all parent blocks, with messages/receipts
// - with exactly the first parent block, repeat again for its parents
// - continue up to recursion depth
selector := gsf.ssb.ExploreRecursive(recursionDepth, gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) {
efsb.Insert("parents", gsf.ssb.ExploreUnion(
gsf.ssb.ExploreAll(
gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) {
efsb.Insert("messages", gsf.ssb.Matcher())
efsb.Insert("messageReceipts", gsf.ssb.Matcher())
}),
),
gsf.ssb.ExploreIndex(0, gsf.ssb.ExploreRecursiveEdge()),
))
})).Node()
requestChan, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: baseCid}, selector)
return gsf.consumeResponse(requestChan, errChan, requestCancel)
}
// Loads the IPLD blocks for all blocks in a tipset, and checks for the presence of the
// message and receipt list structures in the store.
// Returns the tipset if complete. Otherwise it returns UndefTipSet and the CIDs of
// all blocks missing either their header, messages or receipts.
func (gsf *GraphSyncFetcher) loadAndVerify(ctx context.Context, key types.TipSetKey) (types.TipSet, []cid.Cid, error) {
// Load the block headers that exist.
incomplete := make(map[cid.Cid]struct{})
tip, err := gsf.loadTipHeaders(ctx, key, incomplete)
if err != nil {
return types.UndefTipSet, nil, err
}
err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete,
func(blk *types.Block) cid.Cid { return blk.Messages }, func(rawBlock blocks.Block) error {
messages, err := types.DecodeMessages(rawBlock.RawData())
if err != nil {
return errors.Wrapf(err, "fetched data (cid %s) was not a message collection", rawBlock.Cid().String())
}
if err := gsf.validator.ValidateMessagesSyntax(ctx, messages); err != nil {
return errors.Wrapf(err, "invalid messages for for message collection (cid %s)", rawBlock.Cid())
}
return nil
})
if err != nil {
return types.UndefTipSet, nil, err
}
err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete,
func(blk *types.Block) cid.Cid { return blk.MessageReceipts }, func(rawBlock blocks.Block) error {
receipts, err := types.DecodeReceipts(rawBlock.RawData())
if err != nil {
return errors.Wrapf(err, "fetched data (cid %s) was not a message receipt collection", rawBlock.Cid().String())
}
if err := gsf.validator.ValidateReceiptsSyntax(ctx, receipts); err != nil {
return errors.Wrapf(err, "invalid receipts for for receipt collection (cid %s)", rawBlock.Cid())
}
return nil
})
if err != nil {
return types.UndefTipSet, nil, err
}
if len(incomplete) > 0 {
incompleteArr := make([]cid.Cid, 0, len(incomplete))
for cid := range incomplete {
incompleteArr = append(incompleteArr, cid)
}
return types.UndefTipSet, incompleteArr, nil
}
return tip, nil, nil
}
// Loads and validates the block headers for a tipset. Returns the tipset if complete,
// else the cids of blocks which are not yet stored.
func (gsf *GraphSyncFetcher) loadTipHeaders(ctx context.Context, key types.TipSetKey, incomplete map[cid.Cid]struct{}) (types.TipSet, error) {
rawBlocks := make([]blocks.Block, 0, key.Len())
for it := key.Iter(); !it.Complete(); it.Next() {
hasBlock, err := gsf.store.Has(it.Value())
if err != nil {
return types.UndefTipSet, err
}
if !hasBlock {
incomplete[it.Value()] = struct{}{}
continue
}
rawBlock, err := gsf.store.Get(it.Value())
if err != nil {
return types.UndefTipSet, err
}
rawBlocks = append(rawBlocks, rawBlock)
}
// Validate the headers.
validatedBlocks, err := sanitizeBlocks(ctx, rawBlocks, gsf.validator)
if err != nil || len(validatedBlocks) == 0 {
return types.UndefTipSet, err
}
tip, err := types.NewTipSet(validatedBlocks...)
return tip, err
}
type getBlockComponentFn func(*types.Block) cid.Cid
type verifyComponentFn func(blocks.Block) error
// Loads and validates the block messages for a tipset. Returns the tipset if complete,
// else the cids of blocks which are not yet stored.
func (gsf *GraphSyncFetcher) loadAndVerifySubComponents(ctx context.Context,
tip types.TipSet,
incomplete map[cid.Cid]struct{},
getBlockComponent getBlockComponentFn,
verifyComponent verifyComponentFn) error {
subComponents := make([]blocks.Block, 0, tip.Len())
// Check that nested structures are also stored, recording any that are missing as incomplete.
for i := 0; i < tip.Len(); i++ {
blk := tip.At(i)
link := getBlockComponent(blk)
ok, err := gsf.store.Has(link)
if err != nil {
return err
}
if !ok {
incomplete[blk.Cid()] = struct{}{}
continue
}
rawBlock, err := gsf.store.Get(link)
if err != nil {
return err
}
subComponents = append(subComponents, rawBlock)
}
for _, rawBlock := range subComponents {
err := verifyComponent(rawBlock)
if err != nil {
return err
}
}
return nil
}
type requestPeerFinder struct {
peerTracker graphsyncFallbackPeerTracker
currentPeer peer.ID
triedPeers map[peer.ID]struct{}
}
func newRequestPeerFinder(peerTracker graphsyncFallbackPeerTracker, fetchFromSelf bool) (*requestPeerFinder, error) {
pri := &requestPeerFinder{
peerTracker: peerTracker,
triedPeers: make(map[peer.ID]struct{}),
}
// If the new cid triggering this request came from ourselves then
// the first peer to request from should be ourselves.
if fetchFromSelf {
pri.triedPeers[peerTracker.Self()] = struct{}{}
pri.currentPeer = peerTracker.Self()
return pri, nil
}
// Get a peer ID from the peer tracker
err := pri.FindNextPeer()
if err != nil {
return nil, err
}
return pri, nil
}
func (pri *requestPeerFinder) CurrentPeer() peer.ID {
return pri.currentPeer
}
func (pri *requestPeerFinder) FindNextPeer() error {
chains := pri.peerTracker.List()
for _, chain := range chains {
if _, tried := pri.triedPeers[chain.Peer]; !tried {
pri.triedPeers[chain.Peer] = struct{}{}
pri.currentPeer = chain.Peer
return nil
}
}
return fmt.Errorf("Unable to find any untried peers")
}
func sanitizeBlocks(ctx context.Context, unsanitized []blocks.Block, validator consensus.BlockSyntaxValidator) ([]*types.Block, error) {
var blocks []*types.Block
for _, u := range unsanitized {
block, err := types.DecodeBlock(u.RawData())
if err != nil {
return nil, errors.Wrapf(err, "fetched data (cid %s) was not a block", u.Cid().String())
}
if err := validator.ValidateSyntax(ctx, block); err != nil {
return nil, errors.Wrapf(err, "invalid block %s", block.Cid())
}
blocks = append(blocks, block)
}
return blocks, nil
}
| 1 | 21,792 | I think we need an issue to track that chain sync omits BLS messages, and then to fix and test it! | filecoin-project-venus | go |
@@ -1,7 +1,7 @@
C2::Application.configure do
config.action_mailer.preview_path = "#{Rails.root}/lib/mail_previews"
config.action_mailer.register_preview_interceptor :css_inline_styler
- config.action_mailer.asset_host = "http://localhost:5000"
+ config.action_mailer.asset_host = "http://2c429f18.ngrok.io" || "http://localhost:5000"
config.action_controller.perform_caching = false
config.action_mailer.delivery_method = :letter_opener_web
config.active_record.migration_error = :page_load | 1 | C2::Application.configure do
config.action_mailer.preview_path = "#{Rails.root}/lib/mail_previews"
config.action_mailer.register_preview_interceptor :css_inline_styler
config.action_mailer.asset_host = "http://localhost:5000"
config.action_controller.perform_caching = false
config.action_mailer.delivery_method = :letter_opener_web
config.active_record.migration_error = :page_load
config.active_support.deprecation = :log
config.assets.debug = true
config.cache_classes = false
config.consider_all_requests_local = true
config.eager_load = false
end
| 1 | 16,407 | what is this default about? I am not using ngrok atm so would prefer an env var. | 18F-C2 | rb |
@@ -93,6 +93,19 @@ return [
'backendSkin' => 'Backend\Skins\Standard',
+ /*
+ |--------------------------------------------------------------------------
+    | Determines if logging in to the backend should run UpdateManager
+ |--------------------------------------------------------------------------
+ |
+    | If set to true, the UpdateManager runs after logging in to the backend.
+    | It's recommended to set this value to 'false' in production environments
+    | because it clears the cache on every backend login.
+ |
+ */
+
+ 'runUpdateManagerAfterBackendLogin' => true,
+
/*
|--------------------------------------------------------------------------
| Determines which modules to load | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Specifies the default CMS theme.
|--------------------------------------------------------------------------
|
| This parameter value can be overridden by the CMS back-end settings.
|
*/
'activeTheme' => 'demo',
/*
|--------------------------------------------------------------------------
| Bleeding edge updates
|--------------------------------------------------------------------------
|
| If you are developing with October, it is important to have the latest
| code base. Set this value to 'true' to tell the platform to download
| and use the development copies of core files and plugins.
|
*/
'edgeUpdates' => false,
/*
|--------------------------------------------------------------------------
| Back-end URI prefix
|--------------------------------------------------------------------------
|
| Specifies the URL name used for accessing back-end pages.
| For example: backend -> http://localhost/backend
|
*/
'backendUri' => 'backend',
/*
|--------------------------------------------------------------------------
| Back-end force HTTPS security
|--------------------------------------------------------------------------
|
| Use this setting to force a secure protocol when accessing any back-end
| pages, including the authentication pages. If set to null, this setting
| is enabled when debug mode (app.debug) is disabled.
|
*/
'backendForceSecure' => null,
/*
|--------------------------------------------------------------------------
| Back-end login remember
|--------------------------------------------------------------------------
|
| Define live duration of backend sessions :
|
| true - session never expire (cookie expiration in 5 years)
|
| false - session have a limited time (see session.lifetime)
|
| null - The form login display a checkbox that allow user to choose
| wanted behavior
|
*/
'backendForceRemember' => true,
/*
|--------------------------------------------------------------------------
| Back-end timezone
|--------------------------------------------------------------------------
|
| This acts as the default setting for a back-end user's timezone. This can
| be changed by the user at any time using the backend preferences. All
| dates displayed in the back-end will be converted to this timezone.
|
*/
'backendTimezone' => 'UTC',
/*
|--------------------------------------------------------------------------
| Back-end Skin
|--------------------------------------------------------------------------
|
| Specifies the back-end skin to use.
|
*/
'backendSkin' => 'Backend\Skins\Standard',
/*
|--------------------------------------------------------------------------
| Determines which modules to load
|--------------------------------------------------------------------------
|
| Specify which modules should be registered when using the application.
|
*/
'loadModules' => ['System', 'Backend', 'Cms'],
/*
|--------------------------------------------------------------------------
| Prevents application updates
|--------------------------------------------------------------------------
|
| If using composer or git to download updates to the core files, set this
| value to 'true' to prevent the update gateway from trying to download
| these files again as part of the application update process. Plugins
| and themes will still be downloaded.
|
*/
'disableCoreUpdates' => false,
/*
|--------------------------------------------------------------------------
| Specific plugins to disable
|--------------------------------------------------------------------------
|
| Specify plugin codes which will always be disabled in the application.
|
*/
'disablePlugins' => [],
/*
|--------------------------------------------------------------------------
| Determines if the routing caching is enabled.
|--------------------------------------------------------------------------
|
| If the caching is enabled, the page URL map is saved in the cache. If a page
| URL was changed on the disk, the old URL value could be still saved in the cache.
| To update the cache the back-end Clear Cache feature should be used. It is recommended
| to disable the caching during the development, and enable it in the production mode.
|
*/
'enableRoutesCache' => false,
/*
|--------------------------------------------------------------------------
| Time to live for the URL map.
|--------------------------------------------------------------------------
|
| The URL map used in the CMS page routing process. By default
| the map is updated every time when a page is saved in the back-end or when the
| interval, in minutes, specified with the urlMapCacheTTL parameter expires.
|
*/
'urlCacheTtl' => 10,
/*
|--------------------------------------------------------------------------
| Time to live for parsed CMS objects.
|--------------------------------------------------------------------------
|
| Specifies the number of minutes the CMS object cache lives. After the interval
| is expired item are re-cached. Note that items are re-cached automatically when
| the corresponding template file is modified.
|
*/
'parsedPageCacheTTL' => 10,
/*
|--------------------------------------------------------------------------
| Determines if the asset caching is enabled.
|--------------------------------------------------------------------------
|
| If the caching is enabled, combined assets are cached. If a asset file
| is changed on the disk, the old file contents could be still saved in the cache.
| To update the cache the back-end Clear Cache feature should be used. It is recommended
| to disable the caching during the development, and enable it in the production mode.
|
*/
'enableAssetCache' => false,
/*
|--------------------------------------------------------------------------
| Determines if the asset minification is enabled.
|--------------------------------------------------------------------------
|
| If the minification is enabled, combined assets are compressed (minified).
| It is recommended to disable the minification during development, and
| enable it in production mode. If set to null, assets are minified
| when debug mode (app.debug) is disabled.
|
*/
'enableAssetMinify' => null,
/*
|--------------------------------------------------------------------------
| Check import timestamps when combining assets
|--------------------------------------------------------------------------
|
| If deep hashing is enabled, the combiner cache will be reset when a change
| is detected on imported files, in addition to those referenced directly.
| This will cause slower page performance. If set to null, deep hashing
| is used when debug mode (app.debug) is enabled.
|
*/
'enableAssetDeepHashing' => null,
/*
|--------------------------------------------------------------------------
| Public plugins path
|--------------------------------------------------------------------------
|
| Specifies the public plugins path relative to the application base URL,
| or you can specify a full URL path.
|
*/
'pluginsPath' => '/plugins',
/*
|--------------------------------------------------------------------------
| Public themes path
|--------------------------------------------------------------------------
|
| Specifies the public themes path relative to the application base URL,
| or you can specify a full URL path.
|
*/
'themesPath' => '/themes',
/*
|--------------------------------------------------------------------------
| Resource storage
|--------------------------------------------------------------------------
|
| Specifies the configuration for resource storage, such as media and
| upload files. These resources are used:
|
| media - generated by the media manager.
| uploads - generated by attachment model relationships.
|
| For each resource you can specify:
|
| disk - filesystem disk, as specified in filesystems.php config.
| folder - a folder prefix for storing all generated files inside.
| path - the public path relative to the application base URL,
| or you can specify a full URL path.
*/
'storage' => [
'uploads' => [
'disk' => 'local',
'folder' => 'uploads',
'path' => '/storage/app/uploads',
],
'media' => [
'disk' => 'local',
'folder' => 'media',
'path' => '/storage/app/media',
],
],
/*
|--------------------------------------------------------------------------
| Convert Line Endings
|--------------------------------------------------------------------------
|
| Determines if October should convert line endings from the windows style
| \r\n to the unix style \n.
|
*/
'convertLineEndings' => false,
/*
|--------------------------------------------------------------------------
| Linking policy
|--------------------------------------------------------------------------
|
| Controls how URL links are generated throughout the application.
|
| detect - detect hostname and use the current schema
| secure - detect hostname and force HTTPS schema
| insecure - detect hostname and force HTTP schema
| force - force hostname and schema using app.url config value
|
*/
'linkPolicy' => 'detect',
/*
|--------------------------------------------------------------------------
| Default permission mask
|--------------------------------------------------------------------------
|
| Specifies a default file and folder permission for newly created objects.
|
*/
'defaultMask' => ['file' => null, 'folder' => null],
/*
|--------------------------------------------------------------------------
| Safe mode
|--------------------------------------------------------------------------
|
| If safe mode is enabled, the PHP code section is disabled in the CMS
| for security reasons. If set to null, safe mode is enabled when
| debug mode (app.debug) is disabled.
|
*/
'enableSafeMode' => null,
/*
|--------------------------------------------------------------------------
| Cross Site Request Forgery (CSRF) Protection
|--------------------------------------------------------------------------
|
| If the CSRF protection is enabled, all "postback" requests are checked
| for a valid security token.
|
*/
'enableCsrfProtection' => true,
/*
|--------------------------------------------------------------------------
| Force bytecode invalidation
|--------------------------------------------------------------------------
|
| When using OPcache with opcache.validate_timestamps set to 0 or APC
| with apc.stat set to 0 and Twig cache enabled, clearing the template
| cache won't update the cache, set to true to get around this.
|
*/
'forceBytecodeInvalidation' => true,
/*
|--------------------------------------------------------------------------
| Twig Strict Variables
|--------------------------------------------------------------------------
|
| If strict_variables is disabled, Twig will silently ignore invalid
| variables (variables and or attributes/methods that do not exist) and
| replace them with a null value. When enabled, Twig throws an exception
| instead. If set to null, it is enabled when debug mode (app.debug) is
| enabled.
|
*/
'enableTwigStrictVariables' => false,
/*
|--------------------------------------------------------------------------
| Base Directory Restriction
|--------------------------------------------------------------------------
|
| Restricts loading backend template and config files to within the base
| directory of the application.
|
| WARNING: This should always be enabled for security reasons. However, in
| some cases you may need to disable this; for instance when developing
| plugins that are stored elsewhere in the filesystem for organizational
| reasons and then symlinked into the application plugins/ directory.
|
| NEVER have this disabled in production.
|
*/
'restrictBaseDir' => true,
];
| 1 | 15,278 | @Samuell1 Might be better to say "Automatically check for plugin updates on login". | octobercms-october | php |
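A sketch of how the docblock could read with the reviewer's suggested wording; the setting key itself is unchanged and the exact phrasing is an assumption:

    /*
    |--------------------------------------------------------------------------
    | Automatically check for plugin updates on login
    |--------------------------------------------------------------------------
    |
    | If true, the UpdateManager runs after each back-end login. It is
    | recommended to set this to false in production environments because
    | it clears the cache on every back-end login.
    |
    */

    'runUpdateManagerAfterBackendLogin' => true,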
@@ -1,4 +1,6 @@
+using System;
using System.Diagnostics;
+using System.Runtime.CompilerServices;
namespace Datadog.Trace.Util
{ | 1 | using System.Diagnostics;
namespace Datadog.Trace.Util
{
internal static class ProcessHelpers
{
/// <summary>
/// Wrapper around <see cref="Process.GetCurrentProcess"/> and <see cref="Process.ProcessName"/>
///
/// On .NET Framework the <see cref="Process"/> class is guarded by a
/// LinkDemand for FullTrust, so partial trust callers will throw an exception.
/// This exception is thrown when the caller method is being JIT compiled, NOT
/// when Process.GetCurrentProcess is called, so this wrapper method allows
/// us to catch the exception.
/// </summary>
/// <returns>Returns the name of the current process</returns>
public static string GetCurrentProcessName()
{
using (var currentProcess = Process.GetCurrentProcess())
{
return currentProcess.ProcessName;
}
}
/// <summary>
/// Wrapper around <see cref="Process.GetCurrentProcess"/> and its property accesses
///
/// On .NET Framework the <see cref="Process"/> class is guarded by a
/// LinkDemand for FullTrust, so partial trust callers will throw an exception.
/// This exception is thrown when the caller method is being JIT compiled, NOT
/// when Process.GetCurrentProcess is called, so this wrapper method allows
/// us to catch the exception.
/// </summary>
/// <param name="processName">The name of the current process</param>
/// <param name="machineName">The machine name of the current process</param>
/// <param name="processId">The ID of the current process</param>
public static void GetCurrentProcessInformation(out string processName, out string machineName, out int processId)
{
using (var currentProcess = Process.GetCurrentProcess())
{
processName = currentProcess.ProcessName;
machineName = currentProcess.MachineName;
processId = currentProcess.Id;
}
}
}
}
 | 1 | 18,231 | Thanks for the additional Process helper! Can we also cache the first `Process.GetCurrentProcess()` result in a static field so we don't have to repeatedly call it? It means we would also need to dispose it when the static `_runtimeMetricsWriter` instance is disposed. | DataDog-dd-trace-dotnet | .cs
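A rough sketch of the caching idea: keep the Process instance in a static field, reuse it for the property reads, and dispose it when the owning component shuts down. The class and method names here are assumptions (only Process.GetCurrentProcess and its properties come from the original file), and this simple form is not thread-safe:

using System.Diagnostics;

internal static class CachedProcess
{
    private static Process _currentProcess;

    // Lazily cache the Process so Process.GetCurrentProcess() is called once.
    private static Process Instance
    {
        get
        {
            if (_currentProcess == null)
            {
                _currentProcess = Process.GetCurrentProcess();
            }
            return _currentProcess;
        }
    }

    public static void GetCurrentProcessInformation(out string processName, out string machineName, out int processId)
    {
        processName = Instance.ProcessName;
        machineName = Instance.MachineName;
        processId = Instance.Id;
    }

    // Intended to be called when the owning component (e.g. the metrics writer) is disposed.
    public static void DisposeCachedProcess()
    {
        _currentProcess?.Dispose();
        _currentProcess = null;
    }
}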
@@ -73,6 +73,19 @@ func (*mockStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstc
return nil, nil, nil
}
+// TestDisableMetrics tests the StartMetricsSession will return immediately if
+// the metrics was disabled
+func TestDisableMetrics(t *testing.T) {
+ params := TelemetrySessionParams{
+ Cfg: &config.Config{
+ DisableMetrics: true,
+ DisableDockerHealthCheck: true,
+ },
+ }
+
+ StartMetricsSession(params)
+}
+
func TestFormatURL(t *testing.T) {
endpoint := "http://127.0.0.0.1/"
wsurl := formatURL(endpoint, testClusterArn, testInstanceArn) | 1 | // +build unit
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package tcshandler
import (
"errors"
"fmt"
"io"
"math/rand"
"net/url"
"strings"
"sync"
"testing"
"time"
"context"
"github.com/aws/amazon-ecs-agent/agent/api/mocks"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/tcs/client"
"github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
wsmock "github.com/aws/amazon-ecs-agent/agent/wsclient/mock/utils"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/docker/docker/api/types"
"github.com/golang/mock/gomock"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
)
const (
testTaskArn = "arn:aws:ecs:us-east-1:123:task/def"
testTaskDefinitionFamily = "task-def"
testClusterArn = "arn:aws:ecs:us-east-1:123:cluster/default"
testInstanceArn = "arn:aws:ecs:us-east-1:123:container-instance/abc"
testMessageId = "testMessageId"
testPublishMetricsInterval = 1 * time.Millisecond
)
type mockStatsEngine struct{}
var testCreds = credentials.NewStaticCredentials("test-id", "test-secret", "test-token")
var testCfg = &config.Config{
AcceptInsecureCert: true,
AWSRegion: "us-east-1",
}
func (*mockStatsEngine) GetInstanceMetrics() (*ecstcs.MetricsMetadata, []*ecstcs.TaskMetric, error) {
req := createPublishMetricsRequest()
return req.Metadata, req.TaskMetrics, nil
}
func (*mockStatsEngine) ContainerDockerStats(taskARN string, id string) (*types.Stats, error) {
return nil, fmt.Errorf("not implemented")
}
func (*mockStatsEngine) GetTaskHealthMetrics() (*ecstcs.HealthMetadata, []*ecstcs.TaskHealth, error) {
return nil, nil, nil
}
func TestFormatURL(t *testing.T) {
endpoint := "http://127.0.0.0.1/"
wsurl := formatURL(endpoint, testClusterArn, testInstanceArn)
parsed, err := url.Parse(wsurl)
if err != nil {
t.Fatal("Should be able to parse url")
}
if parsed.Path != "/ws" {
t.Fatal("Wrong path")
}
if parsed.Query().Get("cluster") != testClusterArn {
t.Fatal("Wrong cluster")
}
if parsed.Query().Get("containerInstance") != testInstanceArn {
t.Fatal("Wrong cluster")
}
}
func TestStartSession(t *testing.T) {
// Start test server.
closeWS := make(chan []byte)
server, serverChan, requestChan, serverErr, err := wsmock.GetMockServer(closeWS)
server.StartTLS()
defer server.Close()
if err != nil {
t.Fatal(err)
}
wait := &sync.WaitGroup{}
ctx, cancel := context.WithCancel(context.Background())
wait.Add(1)
go func() {
select {
case sErr := <-serverErr:
t.Error(sErr)
case <-ctx.Done():
}
wait.Done()
}()
defer func() {
closeSocket(closeWS)
close(serverChan)
}()
deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", context.Background())
// Start a session with the test server.
go startSession(server.URL, testCfg, testCreds, &mockStatsEngine{},
defaultHeartbeatTimeout, defaultHeartbeatJitter,
testPublishMetricsInterval, deregisterInstanceEventStream)
// startSession internally starts publishing metrics from the mockStatsEngine object.
time.Sleep(testPublishMetricsInterval)
// Read request channel to get the metric data published to the server.
request := <-requestChan
cancel()
wait.Wait()
go func() {
for {
select {
case <-requestChan:
}
}
}()
// Decode and verify the metric data.
payload, err := getPayloadFromRequest(request)
if err != nil {
t.Fatal("Error decoding payload: ", err)
}
// Decode and verify the metric data.
_, responseType, err := wsclient.DecodeData([]byte(payload), tcsclient.NewTCSDecoder())
if err != nil {
t.Fatal("error decoding data: ", err)
}
if responseType != "PublishMetricsRequest" {
t.Fatal("Unexpected responseType: ", responseType)
}
}
func TestSessionConnectionClosedByRemote(t *testing.T) {
// Start test server.
closeWS := make(chan []byte)
server, serverChan, _, serverErr, err := wsmock.GetMockServer(closeWS)
server.StartTLS()
defer server.Close()
if err != nil {
t.Fatal(err)
}
go func() {
serr := <-serverErr
if !websocket.IsCloseError(serr, websocket.CloseNormalClosure) {
t.Error(serr)
}
}()
sleepBeforeClose := 10 * time.Millisecond
go func() {
time.Sleep(sleepBeforeClose)
closeSocket(closeWS)
close(serverChan)
}()
ctx, cancel := context.WithCancel(context.Background())
deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", ctx)
deregisterInstanceEventStream.StartListening()
defer cancel()
// Start a session with the test server.
err = startSession(server.URL, testCfg, testCreds, &mockStatsEngine{},
defaultHeartbeatTimeout, defaultHeartbeatJitter,
testPublishMetricsInterval, deregisterInstanceEventStream)
if err == nil {
t.Error("Expected io.EOF on closed connection")
}
if err != io.EOF {
t.Error("Expected io.EOF on closed connection, got: ", err)
}
}
// TestConnectionInactiveTimeout tests the tcs client reconnect when it loses network
// connection or it's inactive for too long
func TestConnectionInactiveTimeout(t *testing.T) {
// Start test server.
closeWS := make(chan []byte)
server, _, requestChan, serverErr, err := wsmock.GetMockServer(closeWS)
server.StartTLS()
defer server.Close()
if err != nil {
t.Fatal(err)
}
go func() {
for {
select {
case <-requestChan:
}
}
}()
ctx, cancel := context.WithCancel(context.Background())
deregisterInstanceEventStream := eventstream.NewEventStream("Deregister_Instance", ctx)
deregisterInstanceEventStream.StartListening()
defer cancel()
// Start a session with the test server.
err = startSession(server.URL, testCfg, testCreds, &mockStatsEngine{},
50*time.Millisecond, 100*time.Millisecond,
testPublishMetricsInterval, deregisterInstanceEventStream)
// if we are not blocked here, then the test pass as it will reconnect in StartSession
assert.Error(t, err, "Close the connection should cause the tcs client return error")
assert.True(t, websocket.IsCloseError(<-serverErr, websocket.CloseAbnormalClosure),
"Read from closed connection should produce an io.EOF error")
closeSocket(closeWS)
}
func TestDiscoverEndpointAndStartSession(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockEcs := mock_api.NewMockECSClient(ctrl)
mockEcs.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return("", errors.New("error"))
err := startTelemetrySession(TelemetrySessionParams{ECSClient: mockEcs}, nil)
if err == nil {
t.Error("Expected error from startTelemetrySession when DiscoverTelemetryEndpoint returns error")
}
}
func getPayloadFromRequest(request string) (string, error) {
lines := strings.Split(request, "\r\n")
if len(lines) > 0 {
return lines[len(lines)-1], nil
}
return "", errors.New("Could not get payload")
}
// closeSocket tells the server to send a close frame. This lets us test
// what happens if the connection is closed by the remote server.
func closeSocket(ws chan<- []byte) {
ws <- websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
close(ws)
}
func createPublishMetricsRequest() *ecstcs.PublishMetricsRequest {
cluster := testClusterArn
ci := testInstanceArn
taskArn := testTaskArn
taskDefinitionFamily := testTaskDefinitionFamily
var fval float64
fval = rand.Float64()
var ival int64
ival = rand.Int63n(10)
ts := time.Now()
idle := false
messageId := testMessageId
return &ecstcs.PublishMetricsRequest{
Metadata: &ecstcs.MetricsMetadata{
Cluster: &cluster,
ContainerInstance: &ci,
Idle: &idle,
MessageId: &messageId,
},
TaskMetrics: []*ecstcs.TaskMetric{
{
ContainerMetrics: []*ecstcs.ContainerMetric{
{
CpuStatsSet: &ecstcs.CWStatsSet{
Max: &fval,
Min: &fval,
SampleCount: &ival,
Sum: &fval,
},
MemoryStatsSet: &ecstcs.CWStatsSet{
Max: &fval,
Min: &fval,
SampleCount: &ival,
Sum: &fval,
},
},
},
TaskArn: &taskArn,
TaskDefinitionFamily: &taskDefinitionFamily,
},
},
Timestamp: &ts,
}
}
 | 1 | 21,204 | Just wondering: what is the result of breaking the logic we test here? It seems that in that case we will just not return immediately, but I'm not sure whether the test will fail. | aws-amazon-ecs-agent | go
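One way the test could be made to fail if StartMetricsSession stopped returning immediately: run it in a goroutine and bound it with a timeout. This is only a sketch reusing the params value from the test above; the one-second deadline is an assumption:

	done := make(chan struct{})
	go func() {
		StartMetricsSession(params)
		close(done)
	}()
	select {
	case <-done:
		// Returned immediately, as expected when metrics are disabled.
	case <-time.After(time.Second):
		t.Error("StartMetricsSession should return immediately when metrics are disabled")
	}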
@@ -198,8 +198,9 @@ func TestInferModulePath(t *testing.T) {
t.Error(err)
}
}()
-
- pctx := newTestProcessContext(dir)
+ srcDir := filepath.Join(dir, "src")
+ os.Mkdir(srcDir, 0777)
+ pctx := newTestProcessContext(srcDir)
gopath, cleanup := tc.testGOPATH(dir)
defer cleanup()
pctx.env = []string{gopath} | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
func TestInit(t *testing.T) {
// TODO(light): Test cases:
// Wrong or missing arguments
// Empty directory exists
// Non-empty directory exists
t.Run("CorrectPreconditions", func(t *testing.T) {
dir, err := ioutil.TempDir("", testTempDirPrefix)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
ctx := context.Background()
pctx := newTestProcessContext(dir)
const projectName = "myspecialproject"
if err := run(ctx, pctx, []string{"init", "--module-path=example.com/foo/" + projectName, projectName}); err != nil {
t.Errorf("run returned error: %+v", err)
}
// Check that project directory exists.
projectDir := filepath.Join(dir, projectName)
if info, err := os.Stat(projectDir); err != nil {
t.Fatalf("stat project directory: %+v", err)
} else if !info.IsDir() {
t.Fatalf("%s is %v; want directory", projectDir, info.Mode())
}
// Dockerfile contains "# gocdk-image: myspecialproject" magic comment.
dockerfilePath := filepath.Join(projectDir, "Dockerfile")
dockerfileData, err := ioutil.ReadFile(dockerfilePath)
if err != nil {
t.Errorf("read Dockerfile: %+v", err)
} else {
const dockerfileComment = "# gocdk-image: " + projectName
if !containsLine(dockerfileData, dockerfileComment) {
t.Errorf("%s does not contain magic comment %q. Found:\n%s", dockerfilePath, dockerfileComment, dockerfileData)
}
}
// Contains a valid go.mod file.
goModList := exec.Command("go", "list", "-m", "-f", "{{with .Error}}{{.Err}}{{else}}OK{{end}}")
goModList.Dir = projectDir
if output, err := goModList.CombinedOutput(); err != nil {
t.Errorf("verifying module: go list returned error: %v. Output:\n%s", err, output)
} else if !bytes.Equal(output, []byte("OK\n")) {
t.Errorf("verifying module: unexpected output from go list (want \"OK\"):\n%s", output)
}
// Ensure that at least one file exists in dev biome with extension .tf.
devBiomePath := filepath.Join(projectDir, "biomes", "dev")
devBiomeContents, err := ioutil.ReadDir(devBiomePath)
if err != nil {
t.Error(err)
} else {
foundTF := false
var foundNames []string
for _, info := range devBiomeContents {
foundNames = append(foundNames, info.Name())
if filepath.Ext(info.Name()) == ".tf" {
foundTF = true
}
}
if !foundTF {
t.Errorf("%s contains %v; want to contain at least one \".tf\" file", devBiomePath, foundNames)
}
}
// dev biome contains settings we expect.
devBiomeJSONPath := filepath.Join(devBiomePath, "biome.json")
devBiomeJSONData, err := ioutil.ReadFile(devBiomeJSONPath)
if err != nil {
t.Error(err)
} else {
var cfg *biomeConfig
if err := json.Unmarshal(devBiomeJSONData, &cfg); err != nil {
t.Errorf("could not parse %s: %v", devBiomeJSONPath, err)
} else {
if cfg.ServeEnabled == nil || !*cfg.ServeEnabled || cfg.Launcher == nil || *cfg.Launcher != "local" {
t.Errorf("%s content = %s; want serve_enabled = true, launcher = \"local\"", devBiomeJSONPath, devBiomeJSONData)
}
}
}
// .gitignore contains *.tfvars.
const tfVarsIgnorePattern = "*.tfvars"
gitignorePath := filepath.Join(projectDir, ".gitignore")
gitignoreData, err := ioutil.ReadFile(gitignorePath)
if err != nil {
t.Errorf("read .gitignore: %+v", err)
} else {
if !containsLine(gitignoreData, tfVarsIgnorePattern) {
t.Errorf("%s does not contain %q. Found:\n%s", gitignorePath, tfVarsIgnorePattern, gitignoreData)
}
}
// .dockerignore contains *.tfvars.
dockerignorePath := filepath.Join(projectDir, ".dockerignore")
dockerignoreData, err := ioutil.ReadFile(dockerignorePath)
if err != nil {
t.Errorf("read .dockerignore: %+v", err)
} else {
if !containsLine(dockerignoreData, tfVarsIgnorePattern) {
t.Errorf("%s does not contain %q. Found:\n%s", dockerignorePath, tfVarsIgnorePattern, dockerignoreData)
}
}
// Running `go build` at top of project should succeed and produce a binary.
goBuild := exec.Command("go", "build")
goBuild.Dir = projectDir
if output, err := goBuild.CombinedOutput(); err != nil {
t.Errorf("go build returned error: %v. Output:\n%s", err, output)
} else if _, err := os.Stat(filepath.Join(projectDir, exeName(projectName))); err != nil {
t.Errorf("could not stat built binary: %v", err)
}
})
}
func TestInferModulePath(t *testing.T) {
testCases := []struct {
name string
testGOPATH func(string) (string, func())
wantErr bool
}{
{
"no GOPATH entry",
func(dir string) (string, func()) {
return "", func() {}
},
true,
},
{
"single GOPATH entry",
func(dir string) (string, func()) {
return "GOPATH=" + dir, func() {}
},
false,
},
{
"multiple GOPATH entries",
func(dir string) (string, func()) {
dir2, err := ioutil.TempDir("", testTempDirPrefix+"-2")
if err != nil {
t.Fatal(err)
}
cleanup := func() {
if err := os.RemoveAll(dir2); err != nil {
t.Error(err)
}
}
multiPath := "GOPATH=" + dir + string(filepath.ListSeparator) + dir2
return multiPath, cleanup
},
false,
},
}
ctx := context.Background()
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
dir, err := ioutil.TempDir("", testTempDirPrefix)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}()
pctx := newTestProcessContext(dir)
gopath, cleanup := tc.testGOPATH(dir)
defer cleanup()
pctx.env = []string{gopath}
err = run(ctx, pctx, []string{"init", "myspecialproject"})
if (err != nil) != tc.wantErr {
t.Errorf("got err %v but wantErr is %v", err, tc.wantErr)
}
})
}
}
func exeName(name string) string {
if runtime.GOOS == "windows" {
return name + ".exe"
}
return name
}
func containsLine(data []byte, want string) bool {
for _, line := range bytes.Split(data, []byte("\n")) {
if bytes.Equal(line, []byte(want)) {
return true
}
}
return false
}
const testTempDirPrefix = "gocdk-test"
// newTestProject creates a temporary project using "gocdk init" and
// returns a pctx with workdir set to the project directory, and a cleanup
// function.
func newTestProject(ctx context.Context) (*processContext, func(), error) {
dir, err := ioutil.TempDir("", testTempDirPrefix)
if err != nil {
return nil, nil, err
}
dir, err = filepath.EvalSymlinks(dir) // in case TMPDIR has a symlink like on darwin
if err != nil {
return nil, nil, err
}
cleanup := func() {
os.RemoveAll(dir)
}
pctx := newTestProcessContext(dir)
if err := run(ctx, pctx, []string{"init", "-m", "example.com/test", "--allow-existing-dir", dir}); err != nil {
cleanup()
return nil, nil, err
}
return pctx, cleanup, nil
}
| 1 | 18,495 | We should fail the test if this returns `err != nil`. | google-go-cloud | go |
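The reviewer's point expressed as code — check the os.Mkdir error instead of ignoring it (sketch based on the lines in the patch):

			srcDir := filepath.Join(dir, "src")
			if err := os.Mkdir(srcDir, 0777); err != nil {
				t.Fatal(err)
			}
			pctx := newTestProcessContext(srcDir)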
@@ -140,6 +140,9 @@ func (r *ReconcileClusterDeployment) reconcileExistingInstallingClusterInstall(c
}
updated = false
+ // Fun extra variable to keep track of whether we should increment metricProvisionFailedTerminal
+ // later; because we only want to do that if (we change that status and) the status update succeeds.
+ provisionFailedTerminal := false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ProvisionStoppedCondition,
stopped.Status, | 1 | package clusterdeployment
import (
"context"
"reflect"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hivecontractsv1alpha1 "github.com/openshift/hive/apis/hivecontracts/v1alpha1"
"github.com/openshift/hive/pkg/constants"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
)
func (r *ReconcileClusterDeployment) reconcileExistingInstallingClusterInstall(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
logger = logger.WithField("clusterinstall", ref.Name).WithField("gvk", gvk)
logger.Debug("reconciling existing clusterinstall")
ci := &hivecontractsv1alpha1.ClusterInstall{}
err := controllerutils.GetDuckType(context.TODO(), r.Client,
gvk,
types.NamespacedName{Namespace: cd.Namespace, Name: ref.Name},
ci)
if apierrors.IsNotFound(err) {
logger.Debug("cluster is not found, so skipping")
return reconcile.Result{}, nil
}
if err != nil {
logger.WithError(err).Error("failed to get the cluster install")
return reconcile.Result{}, err
}
specModified := false
statusModified := false
// copy the cluster metadata
if met := ci.Spec.ClusterMetadata; met != nil &&
met.InfraID != "" &&
met.ClusterID != "" &&
met.AdminKubeconfigSecretRef.Name != "" &&
met.AdminPasswordSecretRef.Name != "" {
if !reflect.DeepEqual(cd.Spec.ClusterMetadata, ci.Spec.ClusterMetadata) {
cd.Spec.ClusterMetadata = ci.Spec.ClusterMetadata
specModified = true
}
}
if cd.Status.InstallRestarts != ci.Status.InstallRestarts {
cd.Status.InstallRestarts = ci.Status.InstallRestarts
statusModified = true
}
conditions := cd.Status.Conditions
// copy the required conditions
requiredConditions := []string{
hivev1.ClusterInstallFailed,
hivev1.ClusterInstallCompleted,
hivev1.ClusterInstallStopped,
hivev1.ClusterInstallRequirementsMet,
}
for _, req := range requiredConditions {
cond := controllerutils.FindClusterInstallCondition(ci.Status.Conditions, req)
if cond == nil {
continue
}
updated := false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ClusterDeploymentConditionType("ClusterInstall"+cond.Type), // this transformation is part of the contract
cond.Status,
cond.Reason,
cond.Message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
}
// additionally copy failed to provision failed condition
failed := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallFailedClusterDeploymentCondition)
updated := false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ProvisionFailedCondition, // this transformation is part of the contract
failed.Status,
failed.Reason,
failed.Message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
// take actions based on the conditions
// like,
// update install started time when requirements met.
// update installed = true when completed
// update the installed timestamp when complete
requirementsMet := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallRequirementsMetClusterDeploymentCondition)
if requirementsMet.Status == corev1.ConditionTrue {
if !reflect.DeepEqual(cd.Status.InstallStartedTimestamp, &requirementsMet.LastTransitionTime) {
cd.Status.InstallStartedTimestamp = &requirementsMet.LastTransitionTime
statusModified = true
kickstartDuration := time.Since(ci.CreationTimestamp.Time)
logger.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to first provision seconds")
metricInstallDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
}
completed := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallCompletedClusterDeploymentCondition)
stopped := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallStoppedClusterDeploymentCondition)
reason := stopped.Reason
msg := stopped.Message
if stopped.Status == corev1.ConditionTrue && completed.Status == corev1.ConditionFalse {
// we must have reached the limit for retrying and therefore
// gave up with not completed
reason = installAttemptsLimitReachedReason
msg = "Install attempts limit reached"
}
updated = false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ProvisionStoppedCondition,
stopped.Status,
reason,
msg,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
completed = controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallCompletedClusterDeploymentCondition)
if completed.Status == corev1.ConditionTrue { // the cluster install is complete
cd.Spec.Installed = true
cd.Status.InstalledTimestamp = &completed.LastTransitionTime
specModified = true
statusModified = true
installStartTime := ci.CreationTimestamp
if cd.Status.InstallStartedTimestamp != nil {
installStartTime = *cd.Status.InstallStartedTimestamp // we expect that the install started when requirements met
}
installDuration := cd.Status.InstalledTimestamp.Sub(installStartTime.Time)
logger.WithField("duration", installDuration.Seconds()).Debug("install job completed")
metricInstallJobDuration.Observe(float64(installDuration.Seconds()))
metricCompletedInstallJobRestarts.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).
Observe(float64(cd.Status.InstallRestarts))
metricClustersInstalled.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
if r.protectedDelete {
// Set protected delete on for the ClusterDeployment.
// If the ClusterDeployment already has the ProtectedDelete annotation, do not overwrite it. This allows the
// user an opportunity to explicitly exclude a ClusterDeployment from delete protection at the time of
// creation of the ClusterDeployment.
if _, annotationPresent := cd.Annotations[constants.ProtectedDeleteAnnotation]; !annotationPresent {
initializeAnnotations(cd)
cd.Annotations[constants.ProtectedDeleteAnnotation] = "true"
specModified = true
}
}
}
if specModified {
if err := r.Update(context.TODO(), cd); err != nil {
logger.WithError(err).Error("failed to update the spec of clusterdeployment")
return reconcile.Result{}, err
}
}
if statusModified {
cd.Status.Conditions = conditions
if err := r.Status().Update(context.TODO(), cd); err != nil {
logger.WithError(err).Error("failed to update the status of clusterdeployment")
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func getClusterImageSetFromClusterInstall(client client.Client, cd *hivev1.ClusterDeployment) (string, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
ci := &hivecontractsv1alpha1.ClusterInstall{}
err := controllerutils.GetDuckType(context.TODO(), client,
gvk,
types.NamespacedName{Namespace: cd.Namespace, Name: ref.Name},
ci)
if err != nil {
return "", err
}
return ci.Spec.ImageSetRef.Name, nil
}
const clusterInstallIndexFieldName = "spec.clusterinstalls"
func indexClusterInstall(o client.Object) []string {
var res []string
cd := o.(*hivev1.ClusterDeployment)
if cd.Spec.ClusterInstallRef != nil {
res = append(res, cd.Spec.ClusterInstallRef.Name)
}
return res
}
func (r *ReconcileClusterDeployment) watchClusterInstall(gvk schema.GroupVersionKind, logger log.FieldLogger) error {
_, ok := r.watchingClusterInstall[gvk.String()]
if ok {
return nil
}
logger.WithField("gvk", gvk).Debug("adding cluster install watches")
obj := &unstructured.Unstructured{}
obj.SetGroupVersionKind(gvk)
err := r.watcher.Watch(&source.Kind{Type: obj}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
retval := []reconcile.Request{}
cdList := &hivev1.ClusterDeploymentList{}
if err := r.Client.List(context.TODO(), cdList,
client.MatchingFields{clusterInstallIndexFieldName: o.GetName()},
client.InNamespace(o.GetNamespace())); err != nil {
logger.WithError(err).Error("failed to list cluster deployment matching cluster install index")
return retval
}
for _, cd := range cdList.Items {
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Namespace: cd.Namespace,
Name: cd.Name,
}})
}
logger.WithField("retval", retval).Debug("trigger reconcile for clusterdeployments for cluster install objects")
return retval
}))
if err != nil {
return err
}
logger.WithField("gvk", gvk).Debug("added new watcher for cluster install")
r.watchingClusterInstall[gvk.String()] = struct{}{}
return nil
}
| 1 | 19,471 | What is the drawback of not having this flag as a gating condition to report the metric? | openshift-hive | go |
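For context, the pattern the extra flag enables looks roughly like this: the terminal-failure metric is only reported once the condition change has actually been persisted. The .Inc() call shape is an assumption about the metric type; only the variable names come from the patch and the surrounding file:

	if statusModified {
		cd.Status.Conditions = conditions
		if err := r.Status().Update(context.TODO(), cd); err != nil {
			logger.WithError(err).Error("failed to update the status of clusterdeployment")
			return reconcile.Result{}, err
		}
		if provisionFailedTerminal {
			metricProvisionFailedTerminal.Inc() // assumed to be a plain counter
		}
	}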
@@ -22,9 +22,14 @@ import (
)
func gracefullyStopProcess(pid int) error {
+ fmt.Printf("Graceful stop...")
err := syscall.Kill(pid, syscall.SIGINT)
if err != nil {
return fmt.Errorf("kill: %v", err)
}
return nil
}
+
+func getAppName() string {
+ return filepath.Base(os.Args[0])
+} | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package caddycmd
import (
"fmt"
"syscall"
)
func gracefullyStopProcess(pid int) error {
err := syscall.Kill(pid, syscall.SIGINT)
if err != nil {
return fmt.Errorf("kill: %v", err)
}
return nil
}
| 1 | 13,349 | `getProcessName()` will be less ambiguous, since Caddy has "apps" that it runs internally. | caddyserver-caddy | go |
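The suggested rename would be a one-line change with the same behavior (assuming the os and path/filepath imports added by the patch):

// getProcessName returns the executable name of the running process.
func getProcessName() string {
	return filepath.Base(os.Args[0])
}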
@@ -4741,6 +4741,7 @@ const char * const DMLFormats[] =
"<exec cmd=\"!DumpIL /i %s\">%s</exec>", // DML_IL
"<exec cmd=\"!DumpRCW -cw /d %s\">%s</exec>", // DML_ComWrapperRCW
"<exec cmd=\"!DumpCCW -cw /d %s\">%s</exec>", // DML_ComWrapperCCW
+ "<exec cmd=\"dps %s L2\">%s</exec>", // DML_TaggedMemory (hardcoded current size to 2 pointer sizes)
};
void ConvertToLower(__out_ecount(len) char *buffer, size_t len) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ==++==
//
//
// ==--==
#include "sos.h"
#include "disasm.h"
#include <dbghelp.h>
#include "corhdr.h"
#include "cor.h"
#include "dacprivate.h"
#include "sospriv.h"
#include "corerror.h"
#include "safemath.h"
#include <psapi.h>
#include <cordebug.h>
#include <xcordebug.h>
#include <metahost.h>
#include <mscoree.h>
#include <tchar.h>
#include "debugshim.h"
#include "gcinfo.h"
#ifndef STRESS_LOG
#define STRESS_LOG
#endif // STRESS_LOG
#define STRESS_LOG_READONLY
#include "stresslog.h"
#ifdef FEATURE_PAL
#include <sys/stat.h>
#include <dlfcn.h>
#endif // !FEATURE_PAL
#include "coreclrhost.h"
#include <set>
#include <string>
#if defined(__APPLE__)
#include <mach-o/dyld.h>
#endif
const char * const CorElementTypeName[ELEMENT_TYPE_MAX]=
{
#define TYPEINFO(e,ns,c,s,g,ia,ip,if,im,gv) c,
#include "cortypeinfo.h"
#undef TYPEINFO
};
const char * const CorElementTypeNamespace[ELEMENT_TYPE_MAX]=
{
#define TYPEINFO(e,ns,c,s,g,ia,ip,if,im,gv) ns,
#include "cortypeinfo.h"
#undef TYPEINFO
};
IXCLRDataProcess *g_clrData = NULL;
ISOSDacInterface *g_sos = NULL;
#ifndef IfFailRet
#define IfFailRet(EXPR) do { Status = (EXPR); if(FAILED(Status)) { return (Status); } } while (0)
#endif
// Max number of reverted rejit versions that !dumpmd and !ip2md will print
const UINT kcMaxRevertedRejitData = 10;
const UINT kcMaxTieredVersions = 10;
#ifndef FEATURE_PAL
// ensure we always allocate on the process heap
void* __cdecl operator new(size_t size) throw()
{ return HeapAlloc(GetProcessHeap(), 0, size); }
void __cdecl operator delete(void* pObj) throw()
{ HeapFree(GetProcessHeap(), 0, pObj); }
void* __cdecl operator new[](size_t size) throw()
{ return HeapAlloc(GetProcessHeap(), 0, size); }
void __cdecl operator delete[](void* pObj) throw()
{ HeapFree(GetProcessHeap(), 0, pObj); }
/**********************************************************************\
* Routine Description: *
* *
* This function is called to get the memory address given a symbol *
* name. It handles difference in symbol name between ntsd and *
* windbg. *
* *
\**********************************************************************/
DWORD_PTR GetValueFromExpression(___in __in_z const char *const instr)
{
_ASSERTE(g_pRuntime != nullptr);
LoadRuntimeSymbols();
std::string symbol;
symbol.append(GetRuntimeModuleName());
symbol.append("!");
symbol.append(instr);
ULONG64 dwAddr;
const char* str = symbol.c_str();
char name[256];
dwAddr = 0;
HRESULT hr = g_ExtSymbols->GetOffsetByName (str, &dwAddr);
if (SUCCEEDED(hr))
return (DWORD_PTR)dwAddr;
else if (hr == S_FALSE && dwAddr)
return (DWORD_PTR)dwAddr;
strcpy_s (name, _countof(name), str);
char *ptr;
if ((ptr = strstr (name, "__")) != NULL)
{
ptr[0] = ':';
ptr[1] = ':';
ptr += 2;
while ((ptr = strstr(ptr, "__")) != NULL)
{
ptr[0] = ':';
ptr[1] = ':';
ptr += 2;
}
dwAddr = 0;
hr = g_ExtSymbols->GetOffsetByName (name, &dwAddr);
if (SUCCEEDED(hr))
return (DWORD_PTR)dwAddr;
else if (hr == S_FALSE && dwAddr)
return (DWORD_PTR)dwAddr;
}
else if ((ptr = strstr (name, "::")) != NULL)
{
ptr[0] = '_';
ptr[1] = '_';
ptr += 2;
while ((ptr = strstr(ptr, "::")) != NULL)
{
ptr[0] = '_';
ptr[1] = '_';
ptr += 2;
}
dwAddr = 0;
hr = g_ExtSymbols->GetOffsetByName (name, &dwAddr);
if (SUCCEEDED(hr))
return (DWORD_PTR)dwAddr;
else if (hr == S_FALSE && dwAddr)
return (DWORD_PTR)dwAddr;
}
return 0;
}
#endif // FEATURE_PAL
void ReportOOM()
{
ExtOut("SOS Error: Out of memory\n");
}
BOOL IsDumpFile()
{
static int g_fDumpFile = -1;
if (g_fDumpFile == -1) {
ULONG Class;
ULONG Qualifier;
g_ExtControl->GetDebuggeeType(&Class,&Qualifier);
if (Qualifier >= DEBUG_DUMP_SMALL)
g_fDumpFile = 1;
else
g_fDumpFile = 0;
}
return g_fDumpFile != 0;
}
BOOL g_InMinidumpSafeMode = FALSE;
BOOL IsMiniDumpFileNODAC ()
{
#ifndef FEATURE_PAL
ULONG Class;
ULONG Qualifier;
g_ExtControl->GetDebuggeeType(&Class,&Qualifier);
if (Qualifier == DEBUG_DUMP_SMALL)
{
g_ExtControl->GetDumpFormatFlags(&Qualifier);
if ((Qualifier & DEBUG_FORMAT_USER_SMALL_FULL_MEMORY) == 0)
{
return TRUE;
}
}
#endif // FEATURE_PAL
return FALSE;
}
// We use this predicate to mean the smallest, most restrictive kind of
// minidump file. There is no heap dump, only that set of information
// gathered to make !clrstack, !threads, !help, !eeversion and !pe work.
BOOL IsMiniDumpFile ()
{
#ifndef FEATURE_PAL
// It is okay for this to be static, because although the debugger may debug multiple
// managed processes at once, I don't believe multiple dumpfiles of different
// types is a scenario to worry about.
if (IsMiniDumpFileNODAC())
{
// Beyond recognizing the dump type above, all we can rely on for this
// is a flag set by the user indicating they want a safe mode minidump
// experience. This is primarily for testing.
return g_InMinidumpSafeMode;
}
#endif // FEATURE_PAL
return FALSE;
}
ULONG DebuggeeType()
{
static ULONG Class = DEBUG_CLASS_UNINITIALIZED;
if (Class == DEBUG_CLASS_UNINITIALIZED) {
ULONG Qualifier;
g_ExtControl->GetDebuggeeType(&Class,&Qualifier);
}
return Class;
}
const WCHAR GetTargetDirectorySeparatorW()
{
if (IsWindowsTarget()) {
return W('\\');
}
else {
return W('/');
}
}
#ifndef FEATURE_PAL
// Check if a file exist
BOOL FileExist (const char *filename)
{
WIN32_FIND_DATA FindFileData;
HANDLE handle = FindFirstFile (filename, &FindFileData);
if (handle != INVALID_HANDLE_VALUE) {
FindClose (handle);
return TRUE;
}
else
return FALSE;
}
BOOL FileExist (const WCHAR *filename)
{
WIN32_FIND_DATAW FindFileData;
HANDLE handle = FindFirstFileW (filename, &FindFileData);
if (handle != INVALID_HANDLE_VALUE) {
FindClose (handle);
return TRUE;
}
else
return FALSE;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find out if a dll is bbt-ized *
* *
\**********************************************************************/
BOOL IsRetailBuild (size_t base)
{
IMAGE_DOS_HEADER DosHeader;
if (g_ExtData->ReadVirtual(TO_CDADDR(base), &DosHeader, sizeof(DosHeader), NULL) != S_OK)
return FALSE;
IMAGE_NT_HEADERS32 Header32;
if (g_ExtData->ReadVirtual(TO_CDADDR(base + DosHeader.e_lfanew), &Header32, sizeof(Header32), NULL) != S_OK)
return FALSE;
// If there is no COMHeader, this can not be managed code.
if (Header32.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG].VirtualAddress == 0)
return FALSE;
size_t debugDirAddr = base + Header32.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG].VirtualAddress;
size_t nSize = Header32.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_DEBUG].Size;
IMAGE_DEBUG_DIRECTORY debugDir;
size_t nbytes = 0;
while (nbytes < nSize) {
if (g_ExtData->ReadVirtual(TO_CDADDR(debugDirAddr+nbytes), &debugDir, sizeof(debugDir), NULL) != S_OK)
return FALSE;
if (debugDir.Type == 0xA) {
return TRUE;
}
nbytes += sizeof(debugDir);
}
return FALSE;
}
#endif // !FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* This function is called to read memory from the debugee's *
* address space. If the initial read fails, it attempts to read *
* only up to the edge of the page containing "offset". *
* *
\**********************************************************************/
BOOL SafeReadMemory (TADDR offset, PVOID lpBuffer, ULONG cb, PULONG lpcbBytesRead)
{
BOOL bRet = SUCCEEDED(g_ExtData->ReadVirtual(TO_CDADDR(offset), lpBuffer, cb, lpcbBytesRead));
if (!bRet)
{
cb = _min(cb, (ULONG)(NextOSPageAddress(offset) - offset));
bRet = SUCCEEDED(g_ExtData->ReadVirtual(TO_CDADDR(offset), lpBuffer, cb, lpcbBytesRead));
}
return bRet;
}
ULONG OSPageSize ()
{
static ULONG pageSize = 0;
if (pageSize == 0)
g_ExtControl->GetPageSize(&pageSize);
return pageSize;
}
size_t NextOSPageAddress (size_t addr)
{
size_t pageSize = OSPageSize();
return (addr+pageSize)&(~(pageSize-1));
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to get the address of MethodDesc *
* given an ip address *
* *
\**********************************************************************/
void IP2MethodDesc (DWORD_PTR IP, DWORD_PTR &methodDesc, JITTypes &jitType,
DWORD_PTR &gcinfoAddr)
{
CLRDATA_ADDRESS EIP = TO_CDADDR(IP);
DacpCodeHeaderData codeHeaderData;
methodDesc = NULL;
gcinfoAddr = NULL;
if (codeHeaderData.Request(g_sos, EIP) != S_OK)
{
return;
}
methodDesc = (DWORD_PTR) codeHeaderData.MethodDescPtr;
jitType = (JITTypes) codeHeaderData.JITType;
gcinfoAddr = (DWORD_PTR) codeHeaderData.GCInfo;
}
BOOL IsValueField (DacpFieldDescData *pFD)
{
return (pFD->Type == ELEMENT_TYPE_VALUETYPE);
}
void DisplayDataMember (DacpFieldDescData* pFD, DWORD_PTR dwAddr, BOOL fAlign=TRUE)
{
if (dwAddr > 0)
{
// we must have called this function for a "real" (non-zero size) data type
PREFIX_ASSUME(gElementTypeInfo[pFD->Type] != 0);
DWORD_PTR dwTmp = dwAddr;
bool bVTStatic = (pFD->bIsStatic && pFD->Type == ELEMENT_TYPE_VALUETYPE);
if (gElementTypeInfo[pFD->Type] != NO_SIZE || bVTStatic)
{
union Value
{
char ch;
short Short;
DWORD_PTR ptr;
int Int;
unsigned int UInt;
__int64 Int64;
unsigned __int64 UInt64;
float Float;
double Double;
} value;
ZeroMemory(&value, sizeof(value));
if (bVTStatic)
{
// static VTypes are boxed
moveBlock (value, dwTmp, gElementTypeInfo[ELEMENT_TYPE_CLASS]);
}
else
{
moveBlock (value, dwTmp, gElementTypeInfo[pFD->Type]);
}
switch (pFD->Type)
{
case ELEMENT_TYPE_I1:
// there's no ANSI conformant type specifier for
// signed char, so use the next best thing,
// signed short (sign extending)
if (fAlign)
ExtOut("%" POINTERSIZE "hd", (short)value.ch);
else
ExtOut("%d", value.ch);
break;
case ELEMENT_TYPE_I2:
if (fAlign)
ExtOut("%" POINTERSIZE "hd", value.Short);
else
ExtOut("%d", value.Short);
break;
case ELEMENT_TYPE_I4:
if (fAlign)
ExtOut("%" POINTERSIZE "d", value.Int);
else
ExtOut("%d", value.Int);
break;
case ELEMENT_TYPE_I8:
if (fAlign)
ExtOut("%" POINTERSIZE "I64d", value.Int64);
else
ExtOut("%I64d", value.Int64);
break;
case ELEMENT_TYPE_U1:
case ELEMENT_TYPE_BOOLEAN:
if (fAlign)
// there's no ANSI conformant type specifier for
// unsigned char, so use the next best thing,
// unsigned short, not extending the sign
ExtOut("%" POINTERSIZE "hu", (USHORT)value.Short);
else
ExtOut("%u", value.ch);
break;
case ELEMENT_TYPE_U2:
if (fAlign)
ExtOut("%" POINTERSIZE "hu", value.Short);
else
ExtOut("%u", value.Short);
break;
case ELEMENT_TYPE_U4:
if (fAlign)
ExtOut("%" POINTERSIZE "u", value.UInt);
else
ExtOut("%u", value.UInt);
break;
case ELEMENT_TYPE_U8:
if (fAlign)
ExtOut("%" POINTERSIZE "I64u", value.UInt64);
else
ExtOut("%I64u", value.UInt64);
break;
case ELEMENT_TYPE_I:
case ELEMENT_TYPE_U:
if (fAlign)
ExtOut("%" POINTERSIZE "p", SOS_PTR(value.ptr));
else
ExtOut("%p", SOS_PTR(value.ptr));
break;
case ELEMENT_TYPE_R4:
ExtOut("%f", value.Float);
break;
case ELEMENT_TYPE_R8:
ExtOut("%f", value.Double);
break;
case ELEMENT_TYPE_CHAR:
if (fAlign)
ExtOut("%" POINTERSIZE "hx", value.Short);
else
ExtOut("%x", value.Short);
break;
case ELEMENT_TYPE_VALUETYPE:
if (value.ptr)
DMLOut(DMLValueClass(pFD->MTOfType, dwTmp));
else
ExtOut("%p", SOS_PTR(0));
break;
default:
if (value.ptr)
DMLOut(DMLObject(value.ptr));
else
ExtOut("%p", SOS_PTR(0));
break;
}
}
else
{
if (pFD->Type == ELEMENT_TYPE_VALUETYPE)
DMLOut(DMLValueClass(pFD->MTOfType, dwTmp));
else
ExtOut("%p", SOS_PTR(0));
}
}
else
{
ExtOut("%" POINTERSIZE "s", " ");
}
}
void GetStaticFieldPTR(DWORD_PTR* pOutPtr, DacpDomainLocalModuleData* pDLMD, DacpMethodTableData* pMTD, DacpFieldDescData* pFDD, BYTE* pFlags = 0)
{
DWORD_PTR dwTmp;
if (pFDD->Type == ELEMENT_TYPE_VALUETYPE
|| pFDD->Type == ELEMENT_TYPE_CLASS)
{
dwTmp = (DWORD_PTR) pDLMD->pGCStaticDataStart + pFDD->dwOffset;
}
else
{
dwTmp = (DWORD_PTR) pDLMD->pNonGCStaticDataStart + pFDD->dwOffset;
}
*pOutPtr = 0;
if (pMTD->bIsDynamic)
{
ExtOut("dynamic statics NYI");
return;
}
else
{
if (pFlags && pMTD->bIsShared)
{
BYTE flags;
DWORD_PTR pTargetFlags = (DWORD_PTR) pDLMD->pClassData + RidFromToken(pMTD->cl) - 1;
move_xp (flags, pTargetFlags);
*pFlags = flags;
}
*pOutPtr = dwTmp;
}
return;
}
void GetDLMFlags(DacpDomainLocalModuleData* pDLMD, DacpMethodTableData* pMTD, BYTE* pFlags)
{
if (pMTD->bIsDynamic)
{
ExtOut("dynamic statics NYI");
return;
}
else
{
if (pFlags)
{
BYTE flags;
DWORD_PTR pTargetFlags = (DWORD_PTR) pDLMD->pClassData + RidFromToken(pMTD->cl) - 1;
move_xp (flags, pTargetFlags);
*pFlags = flags;
}
}
return;
}
void GetThreadStaticFieldPTR(DWORD_PTR* pOutPtr, DacpThreadLocalModuleData* pTLMD, DacpMethodTableData* pMTD, DacpFieldDescData* pFDD, BYTE* pFlags = 0)
{
DWORD_PTR dwTmp;
if (pFDD->Type == ELEMENT_TYPE_VALUETYPE
|| pFDD->Type == ELEMENT_TYPE_CLASS)
{
dwTmp = (DWORD_PTR) pTLMD->pGCStaticDataStart + pFDD->dwOffset;
}
else
{
dwTmp = (DWORD_PTR) pTLMD->pNonGCStaticDataStart + pFDD->dwOffset;
}
*pOutPtr = 0;
if (pMTD->bIsDynamic)
{
ExtOut("dynamic thread statics NYI");
return;
}
else
{
if (pFlags)
{
BYTE flags;
DWORD_PTR pTargetFlags = (DWORD_PTR) pTLMD->pClassData + RidFromToken(pMTD->cl) - 1;
move_xp (flags, pTargetFlags);
*pFlags = flags;
}
*pOutPtr = dwTmp;
}
return;
}
void DisplaySharedStatic(ULONG64 dwModuleDomainID, DacpMethodTableData* pMT, DacpFieldDescData *pFD)
{
DacpAppDomainStoreData adsData;
if (adsData.Request(g_sos)!=S_OK)
{
ExtOut("Unable to get AppDomain information\n");
}
ArrayHolder<CLRDATA_ADDRESS> pArray = new CLRDATA_ADDRESS[adsData.DomainCount];
if (pArray==NULL)
{
ReportOOM();
return;
}
if (g_sos->GetAppDomainList(adsData.DomainCount,pArray, NULL)!=S_OK)
{
ExtOut("Unable to get array of AppDomains\n");
return;
}
#if defined(_TARGET_WIN64_)
ExtOut(" >> Domain:Value ");
#else
ExtOut(" >> Domain:Value ");
#endif
// Skip the SystemDomain and SharedDomain
for (int i = 0; i < adsData.DomainCount ; i ++)
{
DacpAppDomainData appdomainData;
if (appdomainData.Request(g_sos,pArray[i])!=S_OK)
{
ExtOut("Unable to get AppDomain %lx\n",pArray[i]);
return;
}
DacpDomainLocalModuleData vDomainLocalModule;
if (g_sos->GetDomainLocalModuleDataFromAppDomain(appdomainData.AppDomainPtr, (int)dwModuleDomainID, &vDomainLocalModule) != S_OK)
{
// On .NET Core, dwModuleDomainID is the address of the DomainLocalModule.
if (vDomainLocalModule.Request(g_sos, dwModuleDomainID) != S_OK)
{
DMLOut(" %s:NotInit ", DMLDomain(pArray[i]));
continue;
}
}
DWORD_PTR dwTmp;
BYTE Flags = 0;
GetStaticFieldPTR(&dwTmp, &vDomainLocalModule , pMT, pFD, &Flags);
if ((Flags&1) == 0) {
// We have not initialized this yet.
DMLOut(" %s:NotInit ", DMLDomain(pArray[i]));
continue;
}
else if (Flags & 2) {
// We have not initialized this yet.
DMLOut(" %s:FailInit", DMLDomain(pArray[i]));
continue;
}
DMLOut(" %s:", DMLDomain(appdomainData.AppDomainPtr));
DisplayDataMember(pFD, dwTmp, FALSE);
}
ExtOut(" <<\n");
}
void DisplayThreadStatic (DacpModuleData* pModule, DacpMethodTableData* pMT, DacpFieldDescData *pFD, BOOL fIsShared)
{
SIZE_T dwModuleIndex = (SIZE_T)pModule->dwModuleIndex;
SIZE_T dwModuleDomainID = (SIZE_T)pModule->dwModuleID;
DacpThreadStoreData ThreadStore;
ThreadStore.Request(g_sos);
ExtOut(" >> Thread:Value");
CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
while (CurThread)
{
DacpThreadData vThread;
if (vThread.Request(g_sos, CurThread) != S_OK)
{
ExtOut(" error getting thread %p, aborting this field\n", SOS_PTR(CurThread));
return;
}
if (vThread.osThreadId != 0)
{
CLRDATA_ADDRESS appDomainAddr = vThread.domain;
// Get the DLM (we need this to check the ClassInit flags).
// It's annoying that we have to issue one request for
// domain-neutral modules and domain-specific modules.
DacpDomainLocalModuleData vDomainLocalModule;
if (fIsShared)
{
if (g_sos->GetDomainLocalModuleDataFromAppDomain(appDomainAddr, (int)dwModuleDomainID, &vDomainLocalModule) != S_OK)
{
// On .NET Core, dwModuleDomainID is the address of the DomainLocalModule.
if (vDomainLocalModule.Request(g_sos, dwModuleDomainID) != S_OK)
{
// Not initialized, go to next thread and continue looping
CurThread = vThread.nextThread;
continue;
}
}
}
else
{
if (g_sos->GetDomainLocalModuleDataFromModule(pMT->Module, &vDomainLocalModule) != S_OK)
{
// Not initialized, go to next thread
// and continue looping
CurThread = vThread.nextThread;
continue;
}
}
// Get the TLM
DacpThreadLocalModuleData vThreadLocalModule;
if (g_sos->GetThreadLocalModuleData(CurThread, (int)dwModuleIndex, &vThreadLocalModule) != S_OK)
{
// Not initialized, go to next thread
// and continue looping
CurThread = vThread.nextThread;
continue;
}
DWORD_PTR dwTmp;
BYTE Flags = 0;
GetThreadStaticFieldPTR(&dwTmp, &vThreadLocalModule, pMT, pFD, &Flags);
if ((Flags&4) == 0)
{
// Not allocated, go to next thread
// and continue looping
CurThread = vThread.nextThread;
continue;
}
Flags = 0;
GetDLMFlags(&vDomainLocalModule, pMT, &Flags);
if ((Flags&1) == 0)
{
// Not initialized, go to next thread
// and continue looping
CurThread = vThread.nextThread;
continue;
}
ExtOut(" %x:", vThread.osThreadId);
DisplayDataMember(pFD, dwTmp, FALSE);
}
// Go to next thread
CurThread = vThread.nextThread;
}
ExtOut(" <<\n");
}
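// Maps a CorElementType to a short display name, falling back to the
// CorElementTypeName table for types not special-cased below.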
const char * ElementTypeName(unsigned type)
{
switch (type) {
case ELEMENT_TYPE_PTR:
return "PTR";
break;
case ELEMENT_TYPE_BYREF:
return "BYREF";
break;
case ELEMENT_TYPE_VALUETYPE:
return "VALUETYPE";
break;
case ELEMENT_TYPE_CLASS:
return "CLASS";
break;
case ELEMENT_TYPE_VAR:
return "VAR";
break;
case ELEMENT_TYPE_ARRAY:
return "ARRAY";
break;
case ELEMENT_TYPE_FNPTR:
return "FNPTR";
break;
case ELEMENT_TYPE_SZARRAY:
return "SZARRAY";
break;
case ELEMENT_TYPE_MVAR:
return "MVAR";
break;
default:
if ((type >= _countof(CorElementTypeName)) || (CorElementTypeName[type] == NULL))
{
return "";
}
return CorElementTypeName[type];
break;
}
} // ElementTypeName
const char * ElementTypeNamespace(unsigned type)
{
if ((type >= _countof(CorElementTypeName)) || (CorElementTypeNamespace[type] == NULL))
{
return "";
}
return CorElementTypeNamespace[type];
}
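// Composes "<namespace>.<name>" for an element type, or just the name when the
// type has no namespace entry.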
void ComposeName_s(CorElementType Type, __out_ecount(capacity_buffer) LPSTR buffer, size_t capacity_buffer)
{
const char *p = ElementTypeNamespace(Type);
if ((p) && (*p != '\0'))
{
strcpy_s(buffer,capacity_buffer,p);
strcat_s(buffer,capacity_buffer,".");
strcat_s(buffer,capacity_buffer,ElementTypeName(Type));
}
else
{
strcpy_s(buffer,capacity_buffer,ElementTypeName(Type));
}
}
// NOTE: pszName is changed
// INPUT MAXCHARS RETURN
// HelloThere 5 ...re
// HelloThere 8 ...There
LPWSTR FormatTypeName (__out_ecount (maxChars) LPWSTR pszName, UINT maxChars)
{
UINT iStart = 0;
UINT iLen = (int) _wcslen(pszName);
if (iLen > maxChars)
{
iStart = iLen - maxChars;
UINT numDots = (maxChars < 3) ? maxChars : 3;
for (UINT i=0; i < numDots; i++)
pszName[iStart+i] = '.';
}
return pszName + iStart;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump all fields of a managed object. *
* dwStartAddr specifies the beginning memory address. *
* bFirst is used to avoid printing header every time. *
* *
\**********************************************************************/
void DisplayFields(CLRDATA_ADDRESS cdaMT, DacpMethodTableData *pMTD, DacpMethodTableFieldData *pMTFD, DWORD_PTR dwStartAddr, BOOL bFirst, BOOL bValueClass)
{
static DWORD numInstanceFields = 0;
if (bFirst)
{
ExtOutIndent();
ExtOut("%" POINTERSIZE "s %8s %8s %20s %2s %8s %" POINTERSIZE "s %s\n",
"MT", "Field", "Offset", "Type", "VT", "Attr", "Value", "Name");
numInstanceFields = 0;
}
BOOL fIsShared = pMTD->bIsShared;
if (pMTD->ParentMethodTable)
{
DacpMethodTableData vParentMethTable;
if (vParentMethTable.Request(g_sos,pMTD->ParentMethodTable) != S_OK)
{
ExtOut("Invalid parent MethodTable\n");
return;
}
DacpMethodTableFieldData vParentMethTableFields;
if (vParentMethTableFields.Request(g_sos,pMTD->ParentMethodTable) != S_OK)
{
ExtOut("Invalid parent EEClass\n");
return;
}
DisplayFields(pMTD->ParentMethodTable, &vParentMethTable, &vParentMethTableFields, dwStartAddr, FALSE, bValueClass);
}
DWORD numStaticFields = 0;
CLRDATA_ADDRESS dwAddr = pMTFD->FirstField;
DacpFieldDescData vFieldDesc;
// Get the module name
DacpModuleData module;
if (module.Request(g_sos, pMTD->Module)!=S_OK)
return;
ToRelease<IMetaDataImport> pImport = MDImportForModule(&module);
while (numInstanceFields < pMTFD->wNumInstanceFields
|| numStaticFields < pMTFD->wNumStaticFields)
{
if (IsInterrupt())
return;
ExtOutIndent ();
if ((vFieldDesc.Request(g_sos, dwAddr)!=S_OK) ||
(vFieldDesc.Type >= ELEMENT_TYPE_MAX))
{
ExtOut("Unable to display fields\n");
return;
}
dwAddr = vFieldDesc.NextField;
DWORD offset = vFieldDesc.dwOffset;
if(!((vFieldDesc.bIsThreadLocal || vFieldDesc.bIsContextLocal || fIsShared) && vFieldDesc.bIsStatic))
{
if (!bValueClass)
{
offset += sizeof(BaseObject);
}
}
DMLOut("%s %8x %8x ", DMLMethodTable(vFieldDesc.MTOfType),
TokenFromRid(vFieldDesc.mb, mdtFieldDef),
offset);
char ElementName[mdNameLen];
if ((vFieldDesc.Type == ELEMENT_TYPE_VALUETYPE ||
vFieldDesc.Type == ELEMENT_TYPE_CLASS) && vFieldDesc.MTOfType)
{
NameForMT_s((DWORD_PTR)vFieldDesc.MTOfType, g_mdName, mdNameLen);
ExtOut("%20.20S ", FormatTypeName(g_mdName, 20));
}
else
{
if (vFieldDesc.Type == ELEMENT_TYPE_CLASS && vFieldDesc.TokenOfType != mdTypeDefNil)
{
// Get the name from Metadata!!!
NameForToken_s(TokenFromRid(vFieldDesc.TokenOfType, mdtTypeDef), pImport, g_mdName, mdNameLen, false);
ExtOut("%20.20S ", FormatTypeName(g_mdName, 20));
}
else
{
// If ET type from signature is different from fielddesc, then the signature one is more descriptive.
// For example, E_T_STRING in field desc will be E_T_CLASS. In minidump's case, we won't have
// the method table for it.
ComposeName_s(vFieldDesc.Type != vFieldDesc.sigType ? vFieldDesc.sigType : vFieldDesc.Type, ElementName, sizeof(ElementName)/sizeof(ElementName[0]));
ExtOut("%20.20s ", ElementName);
}
}
ExtOut("%2s ", (IsElementValueType(vFieldDesc.Type)) ? "1" : "0");
if (vFieldDesc.bIsStatic && (vFieldDesc.bIsThreadLocal || vFieldDesc.bIsContextLocal))
{
numStaticFields ++;
if (fIsShared)
ExtOut("%8s %" POINTERSIZE "s", "shared", vFieldDesc.bIsThreadLocal ? "TLstatic" : "CLstatic");
else
ExtOut("%8s ", vFieldDesc.bIsThreadLocal ? "TLstatic" : "CLstatic");
NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
ExtOut(" %S\n", g_mdName);
if (IsMiniDumpFile())
{
ExtOut(" <no information>\n");
}
else
{
if (vFieldDesc.bIsThreadLocal)
{
DacpModuleData vModule;
if (vModule.Request(g_sos,pMTD->Module) == S_OK)
{
DisplayThreadStatic(&vModule, pMTD, &vFieldDesc, fIsShared);
}
}
else if (vFieldDesc.bIsContextLocal)
{
ExtOut("\nDisplay of context static variables is not implemented\n");
}
}
}
else if (vFieldDesc.bIsStatic)
{
numStaticFields ++;
if (fIsShared)
{
ExtOut("%8s %" POINTERSIZE "s", "shared", "static");
NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
ExtOut(" %S\n", g_mdName);
if (IsMiniDumpFile())
{
ExtOut(" <no information>\n");
}
else
{
DacpModuleData vModule;
if (vModule.Request(g_sos,pMTD->Module) == S_OK)
{
DisplaySharedStatic(vModule.dwModuleID, pMTD, &vFieldDesc);
}
}
}
else
{
ExtOut("%8s ", "static");
DacpDomainLocalModuleData vDomainLocalModule;
// The MethodTable isn't shared, so the module must not be loaded domain neutral. We can
// get the specific DomainLocalModule instance without needing to know the AppDomain in advance.
if (g_sos->GetDomainLocalModuleDataFromModule(pMTD->Module, &vDomainLocalModule) != S_OK)
{
ExtOut(" <no information>\n");
}
else
{
DWORD_PTR dwTmp;
GetStaticFieldPTR(&dwTmp, &vDomainLocalModule, pMTD, &vFieldDesc);
DisplayDataMember(&vFieldDesc, dwTmp);
NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
ExtOut(" %S\n", g_mdName);
}
}
}
else
{
numInstanceFields ++;
ExtOut("%8s ", "instance");
if (dwStartAddr > 0)
{
DWORD_PTR dwTmp = dwStartAddr + vFieldDesc.dwOffset + (bValueClass ? 0 : sizeof(BaseObject));
DisplayDataMember(&vFieldDesc, dwTmp);
}
else
{
ExtOut(" %8s", " ");
}
NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
ExtOut(" %S\n", g_mdName);
}
}
return;
}
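// Looks up a non-shared static field by type name, field name and element type,
// then reads its current value into *pValue. Shared method tables and
// thread-local/context-local statics are not supported and return E_NOTIMPL.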
HRESULT GetNonSharedStaticFieldValueFromName(
UINT64* pValue,
DWORD_PTR moduleAddr,
const char *typeName,
__in_z LPCWSTR wszFieldName,
CorElementType fieldType)
{
HRESULT hr = S_OK;
mdTypeDef mdType = 0;
GetInfoFromName(moduleAddr, typeName, &mdType);
if (mdType == 0)
{
return E_FAIL; // Failed to find type token
}
CLRDATA_ADDRESS cdaMethodTable = 0;
if (FAILED(hr = g_sos->GetMethodDescFromToken(moduleAddr, mdType, &cdaMethodTable)) ||
!IsValidToken(moduleAddr, mdType) ||
cdaMethodTable == 0)
{
return FAILED(hr) ? hr : E_FAIL; // Invalid type token or type is not loaded yet
}
DacpMethodTableData vMethodTable;
if ((hr = vMethodTable.Request(g_sos, cdaMethodTable)) != S_OK)
{
return FAILED(hr) ? hr : E_FAIL; // Failed to get method table data
}
if (vMethodTable.bIsShared)
{
ExtOut(" %s: %s\n", "Method table is shared (not implemented)", typeName);
return E_NOTIMPL;
}
DacpMethodTableFieldData vMethodTableFields;
if (FAILED(hr = vMethodTableFields.Request(g_sos, cdaMethodTable)))
{
return hr; // Failed to get field data
}
DacpModuleData vModule;
if ((hr = vModule.Request(g_sos, vMethodTable.Module)) != S_OK)
{
return FAILED(hr) ? hr : E_FAIL; // Failed to get module data
}
DacpDomainLocalModuleData vDomainLocalModule;
if ((hr = g_sos->GetDomainLocalModuleDataFromModule(vMethodTable.Module, &vDomainLocalModule)) != S_OK)
{
return FAILED(hr) ? hr : E_FAIL; // Failed to get domain local module data
}
ToRelease<IMetaDataImport> pImport = MDImportForModule(&vModule);
CLRDATA_ADDRESS cdaField = vMethodTableFields.FirstField;
DacpFieldDescData vFieldDesc;
bool found = false;
for (DWORD staticFieldIndex = 0; staticFieldIndex < vMethodTableFields.wNumStaticFields; )
{
if ((hr = vFieldDesc.Request(g_sos, cdaField)) != S_OK || vFieldDesc.Type >= ELEMENT_TYPE_MAX)
{
return FAILED(hr) ? hr : E_FAIL; // Failed to get member field desc
}
cdaField = vFieldDesc.NextField;
if (!vFieldDesc.bIsStatic)
{
continue;
}
++staticFieldIndex;
if (vFieldDesc.Type != fieldType)
{
continue;
}
if (FAILED(hr = NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false)))
{
return hr; // Failed to get member field name
}
if (_wcscmp(g_mdName, wszFieldName) != 0)
{
continue;
}
if (vFieldDesc.bIsThreadLocal || vFieldDesc.bIsContextLocal)
{
ExtOut(" %s: %s.%S\n", "Static field is thread-local or context-local (not implemented)", typeName, wszFieldName);
return E_NOTIMPL;
}
found = true;
break;
}
if (!found)
{
return E_FAIL; // Static field not found
}
DWORD_PTR pValueAddr = 0;
GetStaticFieldPTR(&pValueAddr, &vDomainLocalModule, &vMethodTable, &vFieldDesc);
if (pValueAddr == 0)
{
return E_FAIL; // Failed to get static field address
}
UINT64 value = 0;
if (FAILED(MOVEBLOCK(value, pValueAddr, gElementTypeInfo[fieldType])))
{
return E_FAIL; // Failed to read static field
}
*pValue = value;
return S_OK;
}
// Return value: -1 = error,
// 0 = field not found,
// > 0 = offset to field from objAddr
int GetObjFieldOffset(CLRDATA_ADDRESS cdaObj, __in_z LPCWSTR wszFieldName, BOOL bFirst)
{
TADDR mt = NULL;
if FAILED(GetMTOfObject(TO_TADDR(cdaObj), &mt))
return -1;
return GetObjFieldOffset(cdaObj, TO_CDADDR(mt), wszFieldName, bFirst);
}
// Return value: -1 = error,
// 0 = field not found,
// > 0 = offset to field from objAddr
int GetObjFieldOffset(CLRDATA_ADDRESS cdaObj, CLRDATA_ADDRESS cdaMT, __in_z LPCWSTR wszFieldName,
BOOL bFirst/*=TRUE*/, DacpFieldDescData* pDacpFieldDescData/*=NULL*/)
{
#define EXITPOINT(EXPR) do { if(!(EXPR)) { return -1; } } while (0)
DacpObjectData objData;
DacpMethodTableData dmtd;
DacpMethodTableFieldData vMethodTableFields;
DacpFieldDescData vFieldDesc;
DacpModuleData module;
static DWORD numInstanceFields = 0; // Static due to recursion visiting parents
if (bFirst)
{
numInstanceFields = 0;
}
EXITPOINT(objData.Request(g_sos, cdaObj) == S_OK);
EXITPOINT(dmtd.Request(g_sos, cdaMT) == S_OK);
if (dmtd.ParentMethodTable)
{
DWORD retVal = GetObjFieldOffset (cdaObj, dmtd.ParentMethodTable,
wszFieldName, FALSE, pDacpFieldDescData);
if (retVal != 0)
{
// return in case of error or success.
// Fall through for field-not-found.
return retVal;
}
}
EXITPOINT (vMethodTableFields.Request(g_sos,cdaMT) == S_OK);
EXITPOINT (module.Request(g_sos,dmtd.Module) == S_OK);
CLRDATA_ADDRESS dwAddr = vMethodTableFields.FirstField;
ToRelease<IMetaDataImport> pImport = MDImportForModule(&module);
while (numInstanceFields < vMethodTableFields.wNumInstanceFields)
{
EXITPOINT (vFieldDesc.Request(g_sos, dwAddr) == S_OK);
if (!vFieldDesc.bIsStatic)
{
DWORD offset = vFieldDesc.dwOffset + sizeof(BaseObject);
NameForToken_s (TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
if (_wcscmp (wszFieldName, g_mdName) == 0)
{
if (pDacpFieldDescData != NULL)
{
*pDacpFieldDescData = vFieldDesc;
}
return offset;
}
numInstanceFields ++;
}
dwAddr = vFieldDesc.NextField;
}
// Field name not found...
return 0;
#undef EXITPOINT
}
// Return value: -1 = error
// -2 = not found
// >= 0 = offset to field from cdaValue
int GetValueFieldOffset(CLRDATA_ADDRESS cdaMT, __in_z LPCWSTR wszFieldName, DacpFieldDescData* pDacpFieldDescData)
{
#define EXITPOINT(EXPR) do { if(!(EXPR)) { return -1; } } while (0)
const int NOT_FOUND = -2;
DacpMethodTableData dmtd;
DacpMethodTableFieldData vMethodTableFields;
DacpFieldDescData vFieldDesc;
DacpModuleData module;
static DWORD numInstanceFields = 0; // Static due to recursion visiting parents
numInstanceFields = 0;
EXITPOINT(vMethodTableFields.Request(g_sos, cdaMT) == S_OK);
EXITPOINT(dmtd.Request(g_sos, cdaMT) == S_OK);
EXITPOINT(module.Request(g_sos, dmtd.Module) == S_OK);
if (dmtd.ParentMethodTable)
{
DWORD retVal = GetValueFieldOffset(dmtd.ParentMethodTable, wszFieldName, pDacpFieldDescData);
if (retVal != (DWORD)NOT_FOUND)
{
// Return in case of error or success. Fall through for field-not-found.
return retVal;
}
}
CLRDATA_ADDRESS dwAddr = vMethodTableFields.FirstField;
ToRelease<IMetaDataImport> pImport = MDImportForModule(&module);
while (numInstanceFields < vMethodTableFields.wNumInstanceFields)
{
EXITPOINT(vFieldDesc.Request(g_sos, dwAddr) == S_OK);
if (!vFieldDesc.bIsStatic)
{
NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
if (_wcscmp(wszFieldName, g_mdName) == 0)
{
if (pDacpFieldDescData != NULL)
{
*pDacpFieldDescData = vFieldDesc;
}
return vFieldDesc.dwOffset;
}
numInstanceFields++;
}
dwAddr = vFieldDesc.NextField;
}
// Field name not found...
return NOT_FOUND;
#undef EXITPOINT
}
// Returns an AppDomain address if AssemblyPtr is loaded into that domain only. Otherwise
// returns NULL
CLRDATA_ADDRESS IsInOneDomainOnly(CLRDATA_ADDRESS AssemblyPtr)
{
CLRDATA_ADDRESS appDomain = NULL;
DacpAppDomainStoreData adstore;
if (adstore.Request(g_sos) != S_OK)
{
ExtOut("Unable to get appdomain store\n");
return NULL;
}
size_t AllocSize;
if (!ClrSafeInt<size_t>::multiply(sizeof(CLRDATA_ADDRESS), adstore.DomainCount, AllocSize))
{
ReportOOM();
return NULL;
}
ArrayHolder<CLRDATA_ADDRESS> pArray = new CLRDATA_ADDRESS[adstore.DomainCount];
if (pArray==NULL)
{
ReportOOM();
return NULL;
}
if (g_sos->GetAppDomainList(adstore.DomainCount, pArray, NULL)!=S_OK)
{
ExtOut ("Failed to get appdomain list\n");
return NULL;
}
for (int i = 0; i < adstore.DomainCount; i++)
{
if (IsInterrupt())
return NULL;
DacpAppDomainData dadd;
if (dadd.Request(g_sos, pArray[i]) != S_OK)
{
ExtOut ("Unable to get AppDomain %p\n", SOS_PTR(pArray[i]));
return NULL;
}
if (dadd.AssemblyCount)
{
size_t AssemblyAllocSize;
if (!ClrSafeInt<size_t>::multiply(sizeof(CLRDATA_ADDRESS), dadd.AssemblyCount, AssemblyAllocSize))
{
ReportOOM();
return NULL;
}
ArrayHolder<CLRDATA_ADDRESS> pAsmArray = new CLRDATA_ADDRESS[dadd.AssemblyCount];
if (pAsmArray==NULL)
{
ReportOOM();
return NULL;
}
if (g_sos->GetAssemblyList(dadd.AppDomainPtr,dadd.AssemblyCount,pAsmArray, NULL)!=S_OK)
{
ExtOut("Unable to get array of Assemblies\n");
return NULL;
}
for (LONG n = 0; n < dadd.AssemblyCount; n ++)
{
if (IsInterrupt())
return NULL;
if (AssemblyPtr == pAsmArray[n])
{
if (appDomain != NULL)
{
// We have found more than one AppDomain that loaded this
// assembly, we must return NULL.
return NULL;
}
appDomain = dadd.AppDomainPtr;
}
}
}
}
return appDomain;
}
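// Resolves the AppDomain that owns a MethodTable. For assemblies parented to the
// shared domain this falls back to IsInOneDomainOnly, so NULL may be returned
// when the owning domain cannot be determined uniquely.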
CLRDATA_ADDRESS GetAppDomainForMT(CLRDATA_ADDRESS mtPtr)
{
DacpMethodTableData mt;
if (mt.Request(g_sos, mtPtr) != S_OK)
{
return NULL;
}
DacpModuleData module;
if (module.Request(g_sos, mt.Module) != S_OK)
{
return NULL;
}
DacpAssemblyData assembly;
if (assembly.Request(g_sos, module.Assembly) != S_OK)
{
return NULL;
}
DacpAppDomainStoreData adstore;
if (adstore.Request(g_sos) != S_OK)
{
return NULL;
}
return (assembly.ParentDomain == adstore.sharedDomain) ?
IsInOneDomainOnly(assembly.AssemblyPtr) :
assembly.ParentDomain;
}
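// Resolves the AppDomain of an object; see the comments in the body for the
// heuristics used when the owning assembly lives in the shared domain.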
CLRDATA_ADDRESS GetAppDomain(CLRDATA_ADDRESS objPtr)
{
CLRDATA_ADDRESS appDomain = NULL;
DacpObjectData objData;
if (objData.Request(g_sos,objPtr) != S_OK)
{
return NULL;
}
// First check eeclass->module->assembly->domain.
// Then check the object flags word
// finally, search threads for a reference to the object, and look at the thread context.
DacpMethodTableData mt;
if (mt.Request(g_sos,objData.MethodTable) != S_OK)
{
return NULL;
}
DacpModuleData module;
if (module.Request(g_sos,mt.Module) != S_OK)
{
return NULL;
}
DacpAssemblyData assembly;
if (assembly.Request(g_sos,module.Assembly) != S_OK)
{
return NULL;
}
DacpAppDomainStoreData adstore;
if (adstore.Request(g_sos) != S_OK)
{
return NULL;
}
if (assembly.ParentDomain == adstore.sharedDomain)
{
sos::Object obj(TO_TADDR(objPtr));
ULONG value = 0;
if (!obj.TryGetHeader(value))
{
return NULL;
}
DWORD adIndex = (value >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
if ( ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) != 0) || adIndex==0)
{
// No AppDomainID information. We'll make use of a heuristic.
// If the assembly is in the shared domain, we can report it as
// being in domain X if the only other domain that has the assembly
// loaded is domain X.
appDomain = IsInOneDomainOnly(assembly.AssemblyPtr);
if (appDomain == NULL && ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) != 0))
{
if ((value & BIT_SBLK_IS_HASHCODE) == 0)
{
UINT index = value & MASK_SYNCBLOCKINDEX;
// We have a syncblock, the appdomain ID may be in there.
DacpSyncBlockData syncBlockData;
if (syncBlockData.Request(g_sos,index) == S_OK)
{
appDomain = syncBlockData.appDomainPtr;
}
}
}
}
else if ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
{
size_t AllocSize;
if (!ClrSafeInt<size_t>::multiply(sizeof(CLRDATA_ADDRESS), adstore.DomainCount, AllocSize))
{
return NULL;
}
// we know we have a non-zero adIndex. Find the appdomain.
ArrayHolder<CLRDATA_ADDRESS> pArray = new CLRDATA_ADDRESS[adstore.DomainCount];
if (pArray==NULL)
{
return NULL;
}
if (g_sos->GetAppDomainList(adstore.DomainCount, pArray, NULL)!=S_OK)
{
return NULL;
}
for (int i = 0; i < adstore.DomainCount; i++)
{
DacpAppDomainData dadd;
if (dadd.Request(g_sos, pArray[i]) != S_OK)
{
return NULL;
}
if (dadd.dwId == adIndex)
{
appDomain = pArray[i];
break;
}
}
}
}
else
{
appDomain = assembly.ParentDomain;
}
return appDomain;
}
HRESULT FileNameForModule (DWORD_PTR pModuleAddr, __out_ecount (MAX_LONGPATH) WCHAR *fileName)
{
DacpModuleData ModuleData;
fileName[0] = L'\0';
HRESULT hr = ModuleData.Request(g_sos, TO_CDADDR(pModuleAddr));
if (SUCCEEDED(hr))
{
hr = FileNameForModule(&ModuleData,fileName);
}
return hr;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find the file name given a Module. *
* *
\**********************************************************************/
// fileName should be at least MAX_LONGPATH
HRESULT FileNameForModule(const DacpModuleData* const pModuleData, __out_ecount(MAX_LONGPATH) WCHAR* fileName)
{
fileName[0] = W('\0');
HRESULT hr = S_OK;
CLRDATA_ADDRESS dwAddr = pModuleData->File;
if (dwAddr == 0)
{
        // TODO: Handle dynamic modules, which have no backing file.
return E_NOTIMPL;
}
CLRDATA_ADDRESS base = 0;
hr = g_sos->GetPEFileBase(dwAddr, &base);
if (SUCCEEDED(hr))
{
hr = g_sos->GetPEFileName(dwAddr, MAX_LONGPATH, fileName, NULL);
if (SUCCEEDED(hr) && fileName[0] != W('\0'))
return hr; // done
#ifndef FEATURE_PAL
        // Try the image base address
if (base)
{
hr = DllsName((ULONG_PTR)base, fileName);
if (SUCCEEDED(hr) && fileName[0] != W('\0'))
return hr; // done
}
#endif // !FEATURE_PAL
}
ToRelease<IXCLRDataModule> pModule;
if (SUCCEEDED(g_sos->GetModule(pModuleData->Address, &pModule)))
{
ULONG32 nameLen = 0;
hr = pModule->GetFileName(MAX_LONGPATH, &nameLen, fileName);
}
return hr;
}
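// Dumps the class loader, security descriptor and module list (with file names)
// of an assembly.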
void AssemblyInfo(DacpAssemblyData *pAssembly)
{
ExtOut("ClassLoader: %p\n", SOS_PTR(pAssembly->ClassLoader));
if ((ULONG64)pAssembly->AssemblySecDesc != NULL)
ExtOut("SecurityDescriptor: %p\n", SOS_PTR(pAssembly->AssemblySecDesc));
ExtOut(" Module\n");
ArrayHolder<CLRDATA_ADDRESS> Modules = new CLRDATA_ADDRESS[pAssembly->ModuleCount];
    if (Modules == NULL)
    {
        ReportOOM();
        return;
    }
    if (g_sos->GetAssemblyModuleList(pAssembly->AssemblyPtr, pAssembly->ModuleCount, Modules, NULL) != S_OK)
    {
        ExtOut("Unable to get array of Modules\n");
        return;
    }
for (UINT n = 0; n < pAssembly->ModuleCount; n++)
{
if (IsInterrupt())
{
return;
}
CLRDATA_ADDRESS ModuleAddr = Modules[n];
DMLOut(" %s " WIN86_8SPACES, DMLModule(ModuleAddr));
DacpModuleData moduleData;
if (moduleData.Request(g_sos, ModuleAddr) == S_OK)
{
WCHAR fileName[MAX_LONGPATH];
FileNameForModule (&moduleData, fileName);
if (fileName[0])
{
ExtOut("%S\n", fileName);
}
else
{
ExtOut("%S\n", (moduleData.bIsReflection) ? W("Dynamic Module") : W("Unknown Module"));
}
}
else
{
ExtOut("Request module data FAILED\n");
}
}
}
const char *GetStageText(DacpAppDomainDataStage stage)
{
switch(stage)
{
case STAGE_CREATING:
return "CREATING";
case STAGE_READYFORMANAGEDCODE:
return "READYFORMANAGEDCODE";
case STAGE_ACTIVE:
return "ACTIVE";
case STAGE_OPEN:
return "OPEN";
case STAGE_UNLOAD_REQUESTED:
return "UNLOAD_REQUESTED";
case STAGE_EXITING:
return "EXITING";
case STAGE_EXITED:
return "EXITED";
case STAGE_FINALIZING:
return "FINALIZING";
case STAGE_FINALIZED:
return "FINALIZED";
case STAGE_HANDLETABLE_NOACCESS:
return "HANDLETABLE_NOACCESS";
case STAGE_CLEARED:
return "CLEARED";
case STAGE_COLLECTED:
return "COLLECTED";
case STAGE_CLOSED:
return "CLOSED";
}
return "UNKNOWN";
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to dump the contents of a domain. *
* *
\**********************************************************************/
void DomainInfo (DacpAppDomainData *pDomain)
{
ExtOut("LowFrequencyHeap: %p\n", SOS_PTR(pDomain->pLowFrequencyHeap));
ExtOut("HighFrequencyHeap: %p\n", SOS_PTR(pDomain->pHighFrequencyHeap));
ExtOut("StubHeap: %p\n", SOS_PTR(pDomain->pStubHeap));
ExtOut("Stage: %s\n", GetStageText(pDomain->appDomainStage));
if ((ULONG64)pDomain->AppSecDesc != NULL)
ExtOut("SecurityDescriptor: %p\n", SOS_PTR(pDomain->AppSecDesc));
ExtOut("Name: ");
if (g_sos->GetAppDomainName(pDomain->AppDomainPtr, mdNameLen, g_mdName, NULL)!=S_OK)
{
ExtOut("Error getting AppDomain friendly name\n");
}
else
{
ExtOut("%S\n", (g_mdName[0] != L'\0') ? g_mdName : W("None"));
}
if (pDomain->AssemblyCount == 0)
return;
ArrayHolder<CLRDATA_ADDRESS> pArray = new CLRDATA_ADDRESS[pDomain->AssemblyCount];
if (pArray==NULL)
{
ReportOOM();
return;
}
if (g_sos->GetAssemblyList(pDomain->AppDomainPtr,pDomain->AssemblyCount,pArray, NULL)!=S_OK)
{
ExtOut("Unable to get array of Assemblies\n");
return;
}
LONG n;
for (n = 0; n < pDomain->AssemblyCount; n ++)
{
if (IsInterrupt())
return;
if (n != 0)
ExtOut("\n");
DMLOut("Assembly: %s", DMLAssembly(pArray[n]));
DacpAssemblyData assemblyData;
if (assemblyData.Request(g_sos, pArray[n], pDomain->AppDomainPtr) == S_OK)
{
if (assemblyData.isDynamic)
ExtOut(" (Dynamic)");
ExtOut(" [");
if (g_sos->GetAssemblyName(pArray[n], mdNameLen, g_mdName, NULL) == S_OK)
ExtOut("%S", g_mdName);
ExtOut("]\n");
AssemblyInfo(&assemblyData);
}
}
ExtOut("\n");
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find the name of a MethodDesc using *
* metadata API. *
* *
\**********************************************************************/
BOOL NameForMD_s (DWORD_PTR pMD, __out_ecount (capacity_mdName) WCHAR *mdName, size_t capacity_mdName)
{
mdName[0] = L'\0';
CLRDATA_ADDRESS StartAddr = TO_CDADDR(pMD);
DacpMethodDescData MethodDescData;
    // Skip the validation when debugging a minidump, where this request can
    // legitimately fail; we do not have an EEJitManager to validate against anyway.
    //
if (!IsMiniDumpFile() && MethodDescData.Request(g_sos,StartAddr) != S_OK)
{
ExtOut("%p is not a MethodDesc\n", SOS_PTR(StartAddr));
return FALSE;
}
if (g_sos->GetMethodDescName(StartAddr, mdNameLen, mdName, NULL) != S_OK)
{
wcscpy_s(mdName, capacity_mdName, W("UNKNOWN"));
return FALSE;
}
return TRUE;
}
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find the name of a MethodTable using *
* metadata API. *
* *
\**********************************************************************/
BOOL NameForMT_s(DWORD_PTR MTAddr, __out_ecount (capacity_mdName) WCHAR *mdName, size_t capacity_mdName)
{
HRESULT hr = g_sos->GetMethodTableName(TO_CDADDR(MTAddr), (ULONG32)capacity_mdName, mdName, NULL);
return SUCCEEDED(hr);
}
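// Builds a display name for a method table (using the component method table for
// arrays, and "Free" for the free-object method table). Returns a new[]'d buffer
// that the caller must delete[], or NULL on failure.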
WCHAR *CreateMethodTableName(TADDR mt, TADDR cmt)
{
bool array = false;
WCHAR *res = NULL;
if (mt == sos::MethodTable::GetFreeMT())
{
res = new WCHAR[5];
wcscpy_s(res, 5, W("Free"));
return res;
}
if (mt == sos::MethodTable::GetArrayMT() && cmt != NULL)
{
mt = cmt;
array = true;
}
unsigned int needed = 0;
HRESULT hr = g_sos->GetMethodTableName(mt, 0, NULL, &needed);
// If failed, we will return null.
if (SUCCEEDED(hr))
{
// +2 for [], if we need it.
res = new WCHAR[needed+2];
hr = g_sos->GetMethodTableName(mt, needed, res, NULL);
if (FAILED(hr))
{
delete [] res;
res = NULL;
}
else if (array)
{
res[needed-1] = '[';
res[needed] = ']';
res[needed+1] = 0;
}
}
return res;
}
/**********************************************************************\
* Routine Description: *
* *
* Return TRUE if str2 is a trailing substring (file-name suffix) *
* of str1, i.e. the two strings name the same file. *
* *
\**********************************************************************/
BOOL IsSameModuleName (const char *str1, const char *str2)
{
if (strlen (str1) < strlen (str2))
return FALSE;
const char *ptr1 = str1 + strlen(str1)-1;
const char *ptr2 = str2 + strlen(str2)-1;
while (ptr2 >= str2)
{
#ifndef FEATURE_PAL
if (tolower(*ptr1) != tolower(*ptr2))
#else
if (*ptr1 != *ptr2)
#endif
{
return FALSE;
}
ptr2--;
ptr1--;
}
if (ptr1 >= str1 && *ptr1 != GetTargetDirectorySeparatorW() && *ptr1 != ':')
{
return FALSE;
}
return TRUE;
}
/**********************************************************************\
* Routine Description: *
* *
* Return TRUE if moduleAddr is the address of a module. *
* *
\**********************************************************************/
BOOL IsModule (DWORD_PTR moduleAddr)
{
DacpModuleData module;
return (module.Request(g_sos, TO_CDADDR(moduleAddr))==S_OK);
}
/**********************************************************************\
* Routine Description: *
* *
* Return TRUE if value is the address of a MethodTable. *
* A successful DAC request for the MethodTable data is considered sufficient.
* *
\**********************************************************************/
BOOL IsMethodTable (DWORD_PTR value)
{
DacpMethodTableData mtabledata;
if (mtabledata.Request(g_sos, TO_CDADDR(value))!=S_OK)
{
return FALSE;
}
return TRUE;
}
/**********************************************************************\
* Routine Description: *
* *
* Return TRUE if value is the address of a MethodDesc. *
* A successful DAC request for the MethodDesc data is considered sufficient.
* *
\**********************************************************************/
BOOL IsMethodDesc (DWORD_PTR value)
{
// Just by retrieving one successfully from the DAC, we know we have a MethodDesc.
DacpMethodDescData MethodDescData;
if (MethodDescData.Request(g_sos, TO_CDADDR(value)) != S_OK)
{
return FALSE;
}
return TRUE;
}
DacpUsefulGlobalsData g_special_usefulGlobals;
BOOL IsObjectArray (DacpObjectData *pData)
{
if (pData->ObjectType == OBJ_ARRAY)
return g_special_usefulGlobals.ArrayMethodTable == pData->MethodTable;
return FALSE;
}
BOOL IsObjectArray (DWORD_PTR obj)
{
DWORD_PTR mtAddr = NULL;
if (SUCCEEDED(GetMTOfObject(obj, &mtAddr)))
return TO_TADDR(g_special_usefulGlobals.ArrayMethodTable) == mtAddr;
return FALSE;
}
BOOL IsStringObject (size_t obj)
{
DWORD_PTR mtAddr = NULL;
if (SUCCEEDED(GetMTOfObject(obj, &mtAddr)))
return TO_TADDR(g_special_usefulGlobals.StringMethodTable) == mtAddr;
return FALSE;
}
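// Walks the parent method table chain and returns TRUE if any ancestor's name
// matches baseString.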
BOOL IsDerivedFrom(CLRDATA_ADDRESS mtObj, __in_z LPCWSTR baseString)
{
DacpMethodTableData dmtd;
CLRDATA_ADDRESS walkMT = mtObj;
while (walkMT != NULL)
{
if (dmtd.Request(g_sos, walkMT) != S_OK)
{
break;
}
NameForMT_s(TO_TADDR(walkMT), g_mdName, mdNameLen);
if (_wcscmp(baseString, g_mdName) == 0)
{
return TRUE;
}
walkMT = dmtd.ParentMethodTable;
}
return FALSE;
}
BOOL IsDerivedFrom(CLRDATA_ADDRESS mtObj, DWORD_PTR modulePtr, mdTypeDef typeDef)
{
DacpMethodTableData dmtd;
for (CLRDATA_ADDRESS walkMT = mtObj;
walkMT != NULL && dmtd.Request(g_sos, walkMT) == S_OK;
walkMT = dmtd.ParentMethodTable)
{
if (dmtd.Module == modulePtr && dmtd.cl == typeDef)
{
return TRUE;
}
}
return FALSE;
}
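// Attempts to recover the target MethodDesc of a delegate object by reading its
// _methodPtrAux/_methodPtr fields and mapping the code address back to a MethodDesc.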
BOOL TryGetMethodDescriptorForDelegate(CLRDATA_ADDRESS delegateAddr, CLRDATA_ADDRESS* pMD)
{
if (!sos::IsObject(delegateAddr, false))
{
return FALSE;
}
sos::Object delegateObj = TO_TADDR(delegateAddr);
for (int i = 0; i < 2; i++)
{
int offset;
if ((offset = GetObjFieldOffset(delegateObj.GetAddress(), delegateObj.GetMT(), i == 0 ? W("_methodPtrAux") : W("_methodPtr"))) != 0)
{
CLRDATA_ADDRESS methodPtr;
MOVE(methodPtr, delegateObj.GetAddress() + offset);
if (methodPtr != NULL)
{
if (g_sos->GetMethodDescPtrFromIP(methodPtr, pMD) == S_OK)
{
return TRUE;
}
DacpCodeHeaderData codeHeaderData;
if (codeHeaderData.Request(g_sos, methodPtr) == S_OK)
{
*pMD = codeHeaderData.MethodDescPtr;
return TRUE;
}
}
}
}
return FALSE;
}
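// Validates that objAddr looks like a real object on the GC heap and, if so,
// prints the location, the object address and its type name (plus string
// contents or array element type when available).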
void DumpStackObjectsOutput(const char *location, DWORD_PTR objAddr, BOOL verifyFields)
{
// rule out pointers that are outside of the gc heap.
if (g_snapshot.GetHeap(objAddr) == NULL)
return;
DacpObjectData objectData;
if (objectData.Request(g_sos, TO_CDADDR(objAddr)) != S_OK)
return;
if (sos::IsObject(objAddr, verifyFields != FALSE)
&& !sos::MethodTable::IsFreeMT(TO_TADDR(objectData.MethodTable)))
{
DMLOut("%-" POINTERSIZE "s %s ", location, DMLObject(objAddr));
if (g_sos->GetObjectClassName(TO_CDADDR(objAddr), mdNameLen, g_mdName, NULL)==S_OK)
{
ExtOut("%S", g_mdName);
if (IsStringObject(objAddr))
{
ExtOut(" ");
StringObjectContent(objAddr, FALSE, 40);
}
else if (IsObjectArray(objAddr) &&
(g_sos->GetMethodTableName(objectData.ElementTypeHandle, mdNameLen, g_mdName, NULL) == S_OK))
{
ExtOut(" ");
ExtOut("(%S[])", g_mdName);
}
}
else
{
ExtOut("<unknown type>");
}
ExtOut("\n");
}
}
void DumpStackObjectsOutput(DWORD_PTR ptr, DWORD_PTR objAddr, BOOL verifyFields)
{
char location[64];
sprintf_s(location, 64, "%p", (DWORD_PTR *)ptr);
DumpStackObjectsOutput(location, objAddr, verifyFields);
}
void DumpStackObjectsInternal(size_t StackTop, size_t StackBottom, BOOL verifyFields)
{
for (DWORD_PTR ptr = StackTop; ptr <= StackBottom; ptr += sizeof(DWORD_PTR))
{
if (IsInterrupt())
return;
DWORD_PTR objAddr;
move_xp(objAddr, ptr);
DumpStackObjectsOutput(ptr, objAddr, verifyFields);
}
}
void DumpRegObjectHelper(const char *regName, BOOL verifyFields)
{
DWORD_PTR reg;
#ifdef FEATURE_PAL
    if (FAILED(g_ExtRegisters->GetValueByName(regName, &reg)))
return;
#else
DEBUG_VALUE value;
ULONG IREG;
if (FAILED(g_ExtRegisters->GetIndexByName(regName, &IREG)) ||
FAILED(g_ExtRegisters->GetValue(IREG, &value)))
return;
#if defined(SOS_TARGET_X86) || defined(SOS_TARGET_ARM)
reg = (DWORD_PTR) value.I32;
#elif defined(SOS_TARGET_AMD64) || defined(SOS_TARGET_ARM64)
reg = (DWORD_PTR) value.I64;
#else
#error Unsupported target
#endif
#endif // FEATURE_PAL
DumpStackObjectsOutput(regName, reg, verifyFields);
}
void DumpStackObjectsHelper (
TADDR StackTop,
TADDR StackBottom,
BOOL verifyFields)
{
ExtOut(g_targetMachine->GetDumpStackObjectsHeading());
LPCSTR* regs;
unsigned int cnt;
    g_targetMachine->GetGCRegisters(&regs, &cnt);
for (size_t i = 0; i < cnt; ++i)
DumpRegObjectHelper(regs[i], verifyFields);
    // Make certain StackTop is pointer-size aligned:
DumpStackObjectsInternal(StackTop & ~ALIGNCONST, StackBottom, verifyFields);
}
void AddToModuleList(DWORD_PTR * &moduleList, int &numModule, int &maxList,
DWORD_PTR dwModuleAddr)
{
int i;
for (i = 0; i < numModule; i ++)
{
if (moduleList[i] == dwModuleAddr)
break;
}
if (i == numModule)
{
moduleList[numModule] = dwModuleAddr;
numModule ++;
if (numModule == maxList)
{
int listLength = 0;
if (!ClrSafeInt<int>::multiply(maxList, 2, listLength))
{
ExtOut("<integer overflow>\n");
numModule = 0;
ControlC = 1;
return;
}
DWORD_PTR *list = new DWORD_PTR [listLength];
if (list == NULL)
{
numModule = 0;
ControlC = 1;
return;
}
memcpy (list, moduleList, maxList * sizeof(PVOID));
delete[] moduleList;
moduleList = list;
maxList *= 2;
}
}
}
BOOL IsFusionLoadedModule (LPCSTR fusionName, LPCSTR mName)
{
// The fusion name will be in this format:
// <module name>, Version=<version>, Culture=<culture>, PublicKeyToken=<token>
// If fusionName up to the comma matches mName (case insensitive),
// we consider that a match was found.
LPCSTR commaPos = strchr (fusionName, ',');
if (commaPos)
{
// verify that fusionName and mName match up to a comma.
while (*fusionName != ',')
{
if (*mName == '\0')
{
return FALSE;
}
#ifndef FEATURE_PAL
if (tolower(*fusionName) != tolower(*mName))
#else
if (*fusionName != *mName)
#endif
{
return FALSE;
}
fusionName++;
mName++;
}
return TRUE;
}
return FALSE;
}
BOOL DebuggerModuleNamesMatch (CLRDATA_ADDRESS PEFileAddr, ___in __in_z LPSTR mName)
{
// Another way to see if a module is the same is
// to accept that mName may be the debugger's name for
// a loaded module. We can get the debugger's name for
// the module we are looking at right now, and compare
// it with mName, if they match exactly, we can add
// the module to the list.
if (PEFileAddr)
{
CLRDATA_ADDRESS pebase = 0;
if (g_sos->GetPEFileBase(PEFileAddr, &pebase) == S_OK)
{
if (pebase)
{
ULONG Index;
ULONG64 base;
if (g_ExtSymbols->GetModuleByOffset(pebase, 0, &Index, &base) == S_OK)
{
CHAR ModuleName[MAX_LONGPATH+1];
if (g_ExtSymbols->GetModuleNames(Index, base, NULL, 0, NULL, ModuleName,
MAX_LONGPATH, NULL, NULL, 0, NULL) == S_OK)
{
if (_stricmp (ModuleName, mName) == 0)
{
return TRUE;
}
}
}
}
}
}
return FALSE;
}
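// Returns a new[]'d array of module addresses whose name matches mName (or all
// modules when mName is NULL), searching every AppDomain including the system and
// shared domains. The caller owns the returned array; NULL is returned on failure.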
DWORD_PTR *ModuleFromName(__in_opt LPSTR mName, int *numModule)
{
if (numModule == NULL)
return NULL;
DWORD_PTR *moduleList = NULL;
*numModule = 0;
HRESULT hr;
DacpAppDomainStoreData adsData;
if ((hr = adsData.Request(g_sos)) != S_OK)
{
ExtDbgOut("DacpAppDomainStoreData.Request FAILED %08x\n", hr);
return NULL;
}
ArrayHolder<CLRDATA_ADDRESS> pAssemblyArray = NULL;
ArrayHolder<CLRDATA_ADDRESS> pModules = NULL;
int arrayLength = 0;
int numSpecialDomains = (adsData.sharedDomain != NULL) ? 2 : 1;
if (!ClrSafeInt<int>::addition(adsData.DomainCount, numSpecialDomains, arrayLength))
{
ExtOut("<integer overflow>\n");
return NULL;
}
ArrayHolder<CLRDATA_ADDRESS> pArray = new CLRDATA_ADDRESS[arrayLength];
if (pArray == NULL)
{
ReportOOM();
return NULL;
}
pArray[0] = adsData.systemDomain;
if (adsData.sharedDomain != NULL)
{
pArray[1] = adsData.sharedDomain;
}
if ((hr = g_sos->GetAppDomainList(adsData.DomainCount, pArray.GetPtr() + numSpecialDomains, NULL)) != S_OK)
{
ExtOut("Unable to get array of AppDomains: %08x\n", hr);
return NULL;
}
    // List all domains
size_t AllocSize;
int maxList = arrayLength; // account for system and shared domains
if (maxList <= 0 || !ClrSafeInt<size_t>::multiply(maxList, sizeof(PVOID), AllocSize))
{
ExtOut("<integer overflow>\n");
return NULL;
}
moduleList = new DWORD_PTR[maxList];
if (moduleList == NULL)
{
ReportOOM();
return NULL;
}
ArrayHolder<char> fileName = new char[MAX_LONGPATH];
// Search all domains to find a module
for (int n = 0; n < adsData.DomainCount+numSpecialDomains; n++)
{
if (IsInterrupt())
{
ExtOut("<interrupted>\n");
goto Failure;
}
DacpAppDomainData appDomain;
if (FAILED(hr = appDomain.Request(g_sos, pArray[n])))
{
// Don't print a failure message here, there is a very normal case when checking
// for modules after clr is loaded but before any AppDomains or assemblies are created
// for example:
// >sxe ld:clr
// >g
// ...
// ModLoad: runtime dll
// >!bpmd Foo.dll Foo.Bar
            // we will correctly report that whatever module was being looked for is not loaded yet
ExtDbgOut("DacpAppDomainData.Request FAILED %08x\n", hr);
goto Failure;
}
if (appDomain.AssemblyCount)
{
pAssemblyArray = new CLRDATA_ADDRESS[appDomain.AssemblyCount];
if (pAssemblyArray==NULL)
{
ReportOOM();
goto Failure;
}
if (FAILED(hr = g_sos->GetAssemblyList(appDomain.AppDomainPtr, appDomain.AssemblyCount, pAssemblyArray, NULL)))
{
ExtOut("Unable to get array of Assemblies for the given AppDomain: %08x\n", hr);
goto Failure;
}
for (int nAssem = 0; nAssem < appDomain.AssemblyCount; nAssem ++)
{
if (IsInterrupt())
{
ExtOut("<interrupted>\n");
goto Failure;
}
DacpAssemblyData assemblyData;
if (FAILED(hr = assemblyData.Request(g_sos, pAssemblyArray[nAssem])))
{
ExtOut("Failed to request assembly: %08x\n", hr);
goto Failure;
}
pModules = new CLRDATA_ADDRESS[assemblyData.ModuleCount];
if (FAILED(hr = g_sos->GetAssemblyModuleList(assemblyData.AssemblyPtr, assemblyData.ModuleCount, pModules, NULL)))
{
ExtOut("Failed to get the modules for the given assembly: %08x\n", hr);
goto Failure;
}
for (UINT nModule = 0; nModule < assemblyData.ModuleCount; nModule++)
{
if (IsInterrupt())
{
ExtOut("<interrupted>\n");
goto Failure;
}
CLRDATA_ADDRESS ModuleAddr = pModules[nModule];
DacpModuleData ModuleData;
if (FAILED(hr = ModuleData.Request(g_sos, ModuleAddr)))
{
ExtDbgOut("Failed to request module data from assembly at %p %08x\n", ModuleAddr, hr);
continue;
}
if (mName != NULL)
{
ArrayHolder<WCHAR> moduleName = new WCHAR[MAX_LONGPATH];
FileNameForModule(&ModuleData, moduleName);
int bytesWritten = WideCharToMultiByte(CP_ACP, 0, moduleName, -1, fileName, MAX_LONGPATH, NULL, NULL);
_ASSERTE(bytesWritten > 0);
}
if ((mName == NULL) ||
IsSameModuleName(fileName, mName) ||
DebuggerModuleNamesMatch(ModuleData.File, mName) ||
IsFusionLoadedModule(fileName, mName))
{
AddToModuleList(moduleList, *numModule, maxList, (DWORD_PTR)ModuleAddr);
}
}
pModules = NULL;
}
pAssemblyArray = NULL;
}
}
return moduleList;
// We do not want to return a half-constructed list. Instead, we return NULL on a failure.
Failure:
delete [] moduleList;
return NULL;
}
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* Retrieve module base associated with the IXCLRDataModule *
* instance passed in, and the extent type requested. *
* *
\**********************************************************************/
HRESULT GetClrModuleImages(__in IXCLRDataModule* module, __in CLRDataModuleExtentType desiredType, __out PULONG64 pBase, __out PULONG64 pSize)
{
CLRDATA_ENUM enumExtents;
HRESULT hr;
_ASSERTE(pBase != nullptr);
_ASSERTE(pSize != nullptr);
*pBase = 0;
*pSize = 0;
if (FAILED(hr = module->StartEnumExtents(&enumExtents)))
{
return hr;
}
CLRDATA_MODULE_EXTENT extent;
while (module->EnumExtent(&enumExtents, &extent) == S_OK)
{
if ((desiredType == CLRDATA_MODULE_OTHER) || (desiredType == extent.type))
{
ULONG64 moduleBase;
if (FAILED(hr = g_ExtSymbols->GetModuleByOffset(extent.base, 0, nullptr, &moduleBase)))
{
if (desiredType == CLRDATA_MODULE_PE_FILE)
{
*pBase = extent.base;
*pSize = extent.length;
hr = S_OK;
}
break;
}
DEBUG_MODULE_PARAMETERS params;
            if (FAILED(hr = g_ExtSymbols->GetModuleParameters(1, &moduleBase, 0, &params)))
{
break;
}
*pBase = moduleBase;
*pSize = params.Size;
hr = S_OK;
break;
}
}
module->EndEnumExtents(enumExtents);
return hr;
}
#endif // FEATURE_PAL
/**********************************************************************\
* Routine Description: *
* *
* Find the IXCLRDataModule instance for the base address. *
* *
\**********************************************************************/
HRESULT GetModuleFromAddress(___in CLRDATA_ADDRESS peAddress, ___out IXCLRDataModule** ppModule)
{
*ppModule = nullptr;
int numModule;
ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(NULL, &numModule);
if (moduleList != nullptr)
{
for (int i = 0; i < numModule; i++)
{
ToRelease<IXCLRDataModule> module;
HRESULT hr = g_sos->GetModule(moduleList[i], &module);
if (FAILED(hr)) {
return hr;
}
ULONG32 flags;
if ((hr = module->GetFlags(&flags)) != S_OK) {
continue;
}
if (flags != CLRDATA_MODULE_DEFAULT) {
continue;
}
DacpGetModuleData moduleData;
hr = moduleData.Request(module);
if (FAILED(hr)) {
#ifdef FEATURE_PAL
return hr;
#else
hr = GetClrModuleImages(module, CLRDATA_MODULE_PE_FILE, &moduleData.LoadedPEAddress, &moduleData.LoadedPESize);
if (FAILED(hr))
{
return hr;
}
#endif
}
if (peAddress == moduleData.LoadedPEAddress)
{
*ppModule = module.Detach();
return S_OK;
}
}
}
return E_INVALIDARG;
}
/**********************************************************************\
* Routine Description: *
* *
* Find the EE data given a name. *
* *
\**********************************************************************/
void GetInfoFromName(DWORD_PTR ModulePtr, const char* name, mdTypeDef* retMdTypeDef)
{
DWORD_PTR ignoredModuleInfoRet = NULL;
if (retMdTypeDef)
*retMdTypeDef = 0;
ToRelease<IMetaDataImport> pImport = MDImportForModule (ModulePtr);
if (pImport == 0)
return;
static WCHAR wszName[MAX_CLASSNAME_LENGTH];
size_t n;
size_t length = strlen (name);
for (n = 0; n <= length; n ++)
wszName[n] = name[n];
// First enumerate methods. We're taking advantage of the DAC's
// CLRDataModule::EnumMethodDefinitionByName which can parse
// method names (whether in nested classes, or explicit interface
// method implementations).
ToRelease<IXCLRDataModule> ModuleDefinition;
if (g_sos->GetModule(ModulePtr, &ModuleDefinition) == S_OK)
{
CLRDATA_ENUM h;
if (ModuleDefinition->StartEnumMethodDefinitionsByName(wszName, 0, &h) == S_OK)
{
IXCLRDataMethodDefinition *pMeth = NULL;
BOOL fStatus = FALSE;
while (ModuleDefinition->EnumMethodDefinitionByName(&h, &pMeth) == S_OK)
{
if (fStatus && !retMdTypeDef)
ExtOut("-----------------------\n");
mdTypeDef token;
if (pMeth->GetTokenAndScope(&token, NULL) == S_OK)
{
GetInfoFromModule(ModulePtr, token, retMdTypeDef ? &ignoredModuleInfoRet : NULL);
fStatus = TRUE;
}
pMeth->Release();
}
ModuleDefinition->EndEnumMethodDefinitionsByName(h);
if (fStatus)
return;
}
}
// Now look for types, type members and fields
mdTypeDef cl;
mdToken tkEnclose = mdTokenNil;
WCHAR *pName;
WCHAR *pHead = wszName;
while ( ((pName = _wcschr (pHead,L'+')) != NULL) ||
((pName = _wcschr (pHead,L'/')) != NULL)) {
pName[0] = L'\0';
if (FAILED(pImport->FindTypeDefByName(pHead,tkEnclose,&tkEnclose)))
return;
pHead = pName+1;
}
pName = pHead;
// @todo: Handle Nested classes correctly.
if (SUCCEEDED (pImport->FindTypeDefByName (pName, tkEnclose, &cl)))
{
if (retMdTypeDef)
*retMdTypeDef = cl;
GetInfoFromModule(ModulePtr, cl, retMdTypeDef ? &ignoredModuleInfoRet : NULL);
return;
}
// See if it is a method
WCHAR *pwzMethod;
if ((pwzMethod = _wcsrchr(pName, L'.')) == NULL)
return;
if (pwzMethod[-1] == L'.')
pwzMethod --;
pwzMethod[0] = L'\0';
pwzMethod ++;
// @todo: Handle Nested classes correctly.
if (SUCCEEDED(pImport->FindTypeDefByName (pName, tkEnclose, &cl)))
{
if (retMdTypeDef)
*retMdTypeDef = cl;
mdMethodDef token;
ULONG cTokens;
HCORENUM henum = NULL;
// is Member?
henum = NULL;
if (SUCCEEDED (pImport->EnumMembersWithName (&henum, cl, pwzMethod,
&token, 1, &cTokens))
&& cTokens == 1)
{
if (!retMdTypeDef) ExtOut("Member (mdToken token) of\n");
GetInfoFromModule(ModulePtr, cl, retMdTypeDef ? &ignoredModuleInfoRet : NULL);
return;
}
// is Field?
henum = NULL;
if (SUCCEEDED (pImport->EnumFieldsWithName (&henum, cl, pwzMethod,
&token, 1, &cTokens))
&& cTokens == 1)
{
if (!retMdTypeDef) ExtOut("Field (mdToken token) of\n");
GetInfoFromModule(ModulePtr, cl, retMdTypeDef ? &ignoredModuleInfoRet : NULL);
return;
}
}
}
/**********************************************************************\
* Routine Description: *
* *
* Find the EE data given a token. *
* *
\**********************************************************************/
DWORD_PTR GetMethodDescFromModule(DWORD_PTR ModuleAddr, ULONG token)
{
if (TypeFromToken(token) != mdtMethodDef)
return NULL;
CLRDATA_ADDRESS md = 0;
if (FAILED(g_sos->GetMethodDescFromToken(ModuleAddr, token, &md)))
{
return NULL;
}
else if (0 == md)
{
        // a NULL return value means the MethodDesc is not loaded yet
return MD_NOT_YET_LOADED;
}
else if ( !IsMethodDesc((DWORD_PTR)md))
{
return NULL;
}
return (DWORD_PTR)md;
}
/**********************************************************************\
* Routine Description: *
* *
* Find the MethodDefinitions given a name. *
* *
\**********************************************************************/
HRESULT GetMethodDefinitionsFromName(TADDR ModulePtr, IXCLRDataModule* mod, const char *name, IXCLRDataMethodDefinition **ppOut, int numMethods, int *numMethodsNeeded)
{
if (name == NULL)
return E_FAIL;
size_t n;
size_t length = strlen (name);
for (n = 0; n <= length; n ++)
g_mdName[n] = name[n];
CLRDATA_ENUM h;
int methodCount = 0;
if (mod->StartEnumMethodDefinitionsByName(g_mdName, 0, &h) == S_OK)
{
IXCLRDataMethodDefinition *pMeth = NULL;
while (mod->EnumMethodDefinitionByName(&h, &pMeth) == S_OK)
{
methodCount++;
pMeth->Release();
}
mod->EndEnumMethodDefinitionsByName(h);
}
if(numMethodsNeeded != NULL)
*numMethodsNeeded = methodCount;
if(ppOut == NULL)
return S_OK;
if(numMethods > methodCount)
numMethods = methodCount;
if (methodCount > 0)
{
if (mod->StartEnumMethodDefinitionsByName(g_mdName, 0, &h) == S_OK)
{
IXCLRDataMethodDefinition *pMeth = NULL;
for (int i = 0; i < numMethods && mod->EnumMethodDefinitionByName(&h, &pMeth) == S_OK; i++)
{
ppOut[i] = pMeth;
}
mod->EndEnumMethodDefinitionsByName(h);
}
}
return S_OK;
}
/**********************************************************************\
* Routine Description: *
* *
* Find the EE data given a name. *
* *
\**********************************************************************/
HRESULT GetMethodDescsFromName(TADDR ModulePtr, IXCLRDataModule* mod, const char *name, DWORD_PTR **pOut,int *numMethods)
{
if (name == NULL || pOut == NULL || numMethods == NULL)
return E_FAIL;
*pOut = NULL;
*numMethods = 0;
size_t n;
size_t length = strlen (name);
for (n = 0; n <= length; n ++)
g_mdName[n] = name[n];
CLRDATA_ENUM h;
int methodCount = 0;
if (mod->StartEnumMethodDefinitionsByName(g_mdName, 0, &h) == S_OK)
{
IXCLRDataMethodDefinition *pMeth = NULL;
while (mod->EnumMethodDefinitionByName(&h, &pMeth) == S_OK)
{
methodCount++;
pMeth->Release();
}
mod->EndEnumMethodDefinitionsByName(h);
}
if (methodCount > 0)
{
*pOut = new TADDR[methodCount];
if (*pOut==NULL)
{
ReportOOM();
return E_OUTOFMEMORY;
}
*numMethods = methodCount;
if (mod->StartEnumMethodDefinitionsByName(g_mdName, 0, &h) == S_OK)
{
int i = 0;
IXCLRDataMethodDefinition *pMeth = NULL;
while (mod->EnumMethodDefinitionByName(&h, &pMeth) == S_OK)
{
                mdTypeDef token;
                if (pMeth->GetTokenAndScope(&token, NULL) != S_OK)
                {
                    // Could not get the token; treat this as a lookup failure below.
                    (*pOut)[i] = NULL;
                }
                else
                {
                    (*pOut)[i] = GetMethodDescFromModule(ModulePtr, token);
                }
                if ((*pOut)[i] == NULL)
                {
                    *numMethods = 0;
                    return E_FAIL;
                }
i++;
pMeth->Release();
}
mod->EndEnumMethodDefinitionsByName(h);
}
}
return S_OK;
}
/**********************************************************************\
* Routine Description: *
* *
* Find the EE data given a token. *
* *
\**********************************************************************/
void GetInfoFromModule (DWORD_PTR ModuleAddr, ULONG token, DWORD_PTR *ret)
{
switch (TypeFromToken(token))
{
case mdtMethodDef:
break;
case mdtTypeDef:
break;
case mdtTypeRef:
break;
case mdtFieldDef:
break;
default:
ExtOut("This token type is not supported\n");
return;
break;
}
CLRDATA_ADDRESS md = 0;
if (FAILED(g_sos->GetMethodDescFromToken(ModuleAddr, token, &md)) || !IsValidToken (ModuleAddr, token))
{
ExtOut("<invalid module token>\n");
return;
}
if (ret != NULL)
{
*ret = (DWORD_PTR)md;
return;
}
ExtOut("Token: %p\n", SOS_PTR(token));
switch (TypeFromToken(token))
{
case mdtFieldDef:
{
NameForToken_s(ModuleAddr, token, g_mdName, mdNameLen);
ExtOut("Field name: %S\n", g_mdName);
break;
}
case mdtMethodDef:
{
if (md)
{
DMLOut("MethodDesc: %s\n", DMLMethodDesc(md));
            // Easiest to get the full parameterized method name from GetMethodDescName
if (g_sos->GetMethodDescName(md, mdNameLen, g_mdName, NULL) != S_OK)
{
// Fall back to just method name without parameters..
NameForToken_s(ModuleAddr, token, g_mdName, mdNameLen);
}
}
else
{
ExtOut("MethodDesc: <not loaded yet>\n");
NameForToken_s(ModuleAddr, token, g_mdName, mdNameLen);
}
ExtOut("Name: %S\n", g_mdName);
// Nice to have a little more data
if (md)
{
DacpMethodDescData MethodDescData;
if (MethodDescData.Request(g_sos, md) == S_OK)
{
if (MethodDescData.bHasNativeCode)
{
DMLOut("JITTED Code Address: %s\n", DMLIP(MethodDescData.NativeCodeAddr));
}
else
{
#ifndef FEATURE_PAL
if (IsDMLEnabled())
DMLOut("Not JITTED yet. Use <exec cmd=\"!bpmd -md %p\">!bpmd -md %p</exec> to break on run.\n",
SOS_PTR(md), SOS_PTR(md));
else
ExtOut("Not JITTED yet. Use !bpmd -md %p to break on run.\n", SOS_PTR(md));
#else
ExtOut("Not JITTED yet. Use 'bpmd -md %p' to break on run.\n", SOS_PTR(md));
#endif
}
}
else
{
ExtOut ("<Error getting MethodDesc information>\n");
}
}
else
{
ExtOut("Not JITTED yet.\n");
}
break;
}
case mdtTypeDef:
case mdtTypeRef:
{
if (md)
{
DMLOut("MethodTable: %s\n", DMLMethodTable(md));
DacpMethodTableData mtabledata;
if (mtabledata.Request(g_sos, md) == S_OK)
{
DMLOut("EEClass: %s\n", DMLClass(mtabledata.Class));
}
else
{
ExtOut("EEClass: <error getting EEClass>\n");
}
}
else
{
ExtOut("MethodTable: <not loaded yet>\n");
ExtOut("EEClass: <not loaded yet>\n");
}
NameForToken_s(ModuleAddr, token, g_mdName, mdNameLen);
ExtOut("Name: %S\n", g_mdName);
break;
}
default:
break;
}
return;
}
BOOL IsMTForFreeObj(DWORD_PTR pMT)
{
return (pMT == g_special_usefulGlobals.FreeMethodTable);
}
const char *EHTypeName(EHClauseType et)
{
if (et == EHFault)
return "FAULT";
else if (et == EHFinally)
return "FINALLY";
else if (et == EHFilter)
return "FILTER";
else if (et == EHTyped)
return "TYPED";
else
return "UNKNOWN";
}
// 2.x version
void DumpTieredNativeCodeAddressInfo_2x(struct DacpTieredVersionData_2x * pTieredVersionData, const UINT cTieredVersionData)
{
ExtOut("Code Version History:\n");
for(int i = cTieredVersionData - 1; i >= 0; --i)
{
const char *descriptor = NULL;
switch(pTieredVersionData[i].TieredInfo)
{
case DacpTieredVersionData_2x::TIERED_UNKNOWN:
default:
descriptor = "Unknown Tier";
break;
case DacpTieredVersionData_2x::NON_TIERED:
descriptor = "Non-Tiered";
break;
case DacpTieredVersionData_2x::TIERED_0:
descriptor = "Tier 0";
break;
case DacpTieredVersionData_2x::TIERED_1:
descriptor = "Tier 1";
break;
}
DMLOut(" CodeAddr: %s (%s)\n", DMLIP(pTieredVersionData[i].NativeCodeAddr), descriptor);
ExtOut(" NativeCodeVersion: %p\n", SOS_PTR(pTieredVersionData[i].NativeCodeVersionNodePtr));
}
}
void DumpTieredNativeCodeAddressInfo(struct DacpTieredVersionData * pTieredVersionData, const UINT cTieredVersionData,
ULONG rejitID, CLRDATA_ADDRESS ilAddr, CLRDATA_ADDRESS ilNodeAddr)
{
ExtOut(" ILCodeVersion: %p\n", SOS_PTR(ilNodeAddr));
ExtOut(" ReJIT ID: %d\n", rejitID);
DMLOut(" IL Addr: %s\n", DMLIL(ilAddr));
if (IsRuntimeVersionAtLeast(3)) {
for(int i = cTieredVersionData - 1; i >= 0; --i)
{
const char *descriptor = NULL;
switch(pTieredVersionData[i].OptimizationTier)
{
case DacpTieredVersionData::OptimizationTier_Unknown:
default:
descriptor = "Unknown Tier";
break;
case DacpTieredVersionData::OptimizationTier_MinOptJitted:
descriptor = "MinOptJitted";
break;
case DacpTieredVersionData::OptimizationTier_Optimized:
descriptor = "Optimized";
break;
case DacpTieredVersionData::OptimizationTier_QuickJitted:
descriptor = "QuickJitted";
break;
case DacpTieredVersionData::OptimizationTier_OptimizedTier1:
descriptor = "OptimizedTier1";
break;
case DacpTieredVersionData::OptimizationTier_ReadyToRun:
descriptor = "ReadyToRun";
break;
}
DMLOut(" CodeAddr: %s (%s)\n", DMLIP(pTieredVersionData[i].NativeCodeAddr), descriptor);
ExtOut(" NativeCodeVersion: %p\n", SOS_PTR(pTieredVersionData[i].NativeCodeVersionNodePtr));
}
}
else {
DumpTieredNativeCodeAddressInfo_2x((DacpTieredVersionData_2x*)pTieredVersionData, cTieredVersionData);
}
}
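// Prints the IL and native code version information for a single ReJIT revision
// of a method.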
void DumpRejitData(CLRDATA_ADDRESS pMethodDesc, DacpReJitData * pReJitData)
{
int rejitID = (int)pReJitData->rejitID;
CLRDATA_ADDRESS ilAddr = 0;
CLRDATA_ADDRESS ilNodeAddr = 0;
struct DacpReJitData2 rejitData;
ReleaseHolder<ISOSDacInterface7> sos7;
if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface7), &sos7)) &&
SUCCEEDED(sos7->GetReJITInformation(pMethodDesc,
rejitID,
&rejitData)))
{
ilAddr = rejitData.il;
ilNodeAddr = rejitData.ilCodeVersionNodePtr;
}
struct DacpTieredVersionData codeAddrs[kcMaxTieredVersions];
int cCodeAddrs;
ReleaseHolder<ISOSDacInterface5> sos5;
if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface5), &sos5)) &&
SUCCEEDED(sos5->GetTieredVersions(pMethodDesc,
rejitID,
codeAddrs,
kcMaxTieredVersions,
&cCodeAddrs)))
{
DumpTieredNativeCodeAddressInfo(codeAddrs, cCodeAddrs, rejitID, ilAddr, ilNodeAddr);
}
}
// For !ip2md requests, this function helps us ensure that rejitted version corresponding
// to the specified IP always gets dumped. It may have already been dumped if it was the
// current rejit version (which is always dumped) or one of the reverted versions that we
// happened to dump before we clipped their number down to kcRejitDataRevertedMax.
BOOL ShouldDumpRejitDataRequested(DacpMethodDescData * pMethodDescData, DacpReJitData * pRevertedRejitData, UINT cRevertedRejitData)
{
if (pMethodDescData->rejitDataRequested.rejitID == 0)
return FALSE;
if (pMethodDescData->rejitDataRequested.rejitID == pMethodDescData->rejitDataCurrent.rejitID)
return FALSE;
for (ULONG i=0; i < cRevertedRejitData; i++)
{
if (pMethodDescData->rejitDataRequested.rejitID == pRevertedRejitData[i].rejitID)
return FALSE;
}
return TRUE;
}
void DumpAllRejitDataIfNecessary(DacpMethodDescData * pMethodDescData, DacpReJitData * pRevertedRejitData, UINT cRevertedRejitData)
{
// If there's no rejit info to output, then skip
if ((pMethodDescData->rejitDataCurrent.rejitID == 0) &&
(pMethodDescData->rejitDataRequested.rejitID == 0) &&
(cRevertedRejitData == 0))
{
return;
}
// Dump reverted rejit infos
for (ULONG i=0; i < cRevertedRejitData; i++)
{
DumpRejitData(pMethodDescData->MethodDescPtr, &pRevertedRejitData[i]);
}
// For !ip2md, ensure we dump the rejit version corresponding to the specified IP
// (if not already dumped)
if (ShouldDumpRejitDataRequested(pMethodDescData, pRevertedRejitData, cRevertedRejitData))
DumpRejitData(pMethodDescData->MethodDescPtr, &pMethodDescData->rejitDataRequested);
// If we maxed out the reverted versions we dumped, let user know there may be more
if (cRevertedRejitData == kcMaxRevertedRejitData)
ExtOut(" (... possibly more reverted versions ...)\n");
}
void DumpMDInfoFromMethodDescData(DacpMethodDescData * pMethodDescData, DacpReJitData * pRevertedRejitData, UINT cRevertedRejitData, BOOL fStackTraceFormat)
{
static WCHAR wszNameBuffer[1024]; // should be large enough
BOOL bFailed = FALSE;
if (g_sos->GetMethodDescName(pMethodDescData->MethodDescPtr, 1024, wszNameBuffer, NULL) != S_OK)
{
wcscpy_s(wszNameBuffer, _countof(wszNameBuffer), W("UNKNOWN"));
bFailed = TRUE;
}
if (!fStackTraceFormat)
{
ExtOut("Method Name: %S\n", wszNameBuffer);
DacpMethodTableData mtdata;
if (SUCCEEDED(mtdata.Request(g_sos, pMethodDescData->MethodTablePtr)))
{
DMLOut("Class: %s\n", DMLClass(mtdata.Class));
}
DMLOut("MethodTable: %s\n", DMLMethodTable(pMethodDescData->MethodTablePtr));
ExtOut("mdToken: %p\n", SOS_PTR(pMethodDescData->MDToken));
DMLOut("Module: %s\n", DMLModule(pMethodDescData->ModulePtr));
ExtOut("IsJitted: %s\n", pMethodDescData->bHasNativeCode ? "yes" : "no");
DMLOut("Current CodeAddr: %s\n", DMLIP(pMethodDescData->NativeCodeAddr));
int rejitID = (int)pMethodDescData->rejitDataCurrent.rejitID;
CLRDATA_ADDRESS ilAddr = 0;
CLRDATA_ADDRESS ilNodeAddr = 0;
ExtOut("Version History:\n");
struct DacpReJitData2 rejitData;
ReleaseHolder<ISOSDacInterface7> sos7;
if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface7), &sos7)))
{
if SUCCEEDED(sos7->GetReJITInformation(pMethodDescData->MethodDescPtr,
rejitID,
&rejitData))
{
ilAddr = rejitData.il;
ilNodeAddr = rejitData.ilCodeVersionNodePtr;
}
int pendingRejitID;
struct DacpReJitData2 pendingRejitData;
if (sos7->GetPendingReJITID(pMethodDescData->MethodDescPtr, &pendingRejitID) == S_OK &&
SUCCEEDED(sos7->GetReJITInformation(pMethodDescData->MethodDescPtr, pendingRejitID, &pendingRejitData)))
{
// Special case, there is no jitted code yet but still need to output the IL information
ExtOut(" ILCodeVersion: %p (pending)\n", SOS_PTR(pendingRejitData.ilCodeVersionNodePtr));
ExtOut(" ReJIT ID: %d\n", pendingRejitID);
DMLOut(" IL Addr: %s\n", DMLIL(pendingRejitData.il));
}
}
struct DacpTieredVersionData codeAddrs[kcMaxTieredVersions];
int cCodeAddrs;
ReleaseHolder<ISOSDacInterface5> sos5;
if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface5), &sos5)) &&
SUCCEEDED(sos5->GetTieredVersions(pMethodDescData->MethodDescPtr,
rejitID,
codeAddrs,
kcMaxTieredVersions,
&cCodeAddrs)))
{
DumpTieredNativeCodeAddressInfo(codeAddrs, cCodeAddrs, rejitID, ilAddr, ilNodeAddr);
}
DumpAllRejitDataIfNecessary(pMethodDescData, pRevertedRejitData, cRevertedRejitData);
}
else
{
if (!bFailed)
{
ExtOut("%S", wszNameBuffer);
}
else
{
// Only clutter the display with module/token for cases where we
// can't get the MethodDesc name for some reason.
DMLOut("Unknown MethodDesc (Module %s, mdToken %08x)",
DMLModule(pMethodDescData->ModulePtr),
pMethodDescData->MDToken);
}
}
}
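// Fetches the MethodDesc data (plus any reverted ReJIT versions) for the given
// address from the DAC and prints it via DumpMDInfoFromMethodDescData.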
void DumpMDInfo(DWORD_PTR dwMethodDescAddr, CLRDATA_ADDRESS dwRequestedIP /* = 0 */, BOOL fStackTraceFormat /* = FALSE */)
{
DacpMethodDescData MethodDescData;
DacpReJitData revertedRejitData[kcMaxRevertedRejitData];
ULONG cNeededRevertedRejitData;
if (g_sos->GetMethodDescData(
TO_CDADDR(dwMethodDescAddr),
dwRequestedIP,
&MethodDescData,
_countof(revertedRejitData),
revertedRejitData,
&cNeededRevertedRejitData) != S_OK)
{
ExtOut("%p is not a MethodDesc\n", SOS_PTR(dwMethodDescAddr));
return;
}
DumpMDInfoFromMethodDescData(&MethodDescData, revertedRejitData, cNeededRevertedRejitData, fStackTraceFormat);
}
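// Builds a list of appdomain addresses: the system domain, the shared domain (when
// present) and every domain returned by GetAppDomainList. The caller owns the array.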
void GetDomainList (DWORD_PTR *&domainList, int &numDomain)
{
DacpAppDomainStoreData adsData;
numDomain = 0;
if (adsData.Request(g_sos)!=S_OK)
{
return;
}
// Do prefast integer checks before the malloc.
size_t AllocSize;
LONG DomainAllocCount;
LONG NumExtraDomains = (adsData.sharedDomain != NULL) ? 2 : 1;
if (!ClrSafeInt<LONG>::addition(adsData.DomainCount, NumExtraDomains, DomainAllocCount) ||
!ClrSafeInt<size_t>::multiply(DomainAllocCount, sizeof(PVOID), AllocSize) ||
(domainList = new DWORD_PTR[DomainAllocCount]) == NULL)
{
return;
}
domainList[numDomain++] = (DWORD_PTR) adsData.systemDomain;
if (adsData.sharedDomain != NULL)
{
domainList[numDomain++] = (DWORD_PTR) adsData.sharedDomain;
}
CLRDATA_ADDRESS *pArray = new CLRDATA_ADDRESS[adsData.DomainCount];
if (pArray==NULL)
{
return;
}
if (g_sos->GetAppDomainList(adsData.DomainCount, pArray, NULL)!=S_OK)
{
delete [] pArray;
return;
}
for (int n=0;n<adsData.DomainCount;n++)
{
if (IsInterrupt())
break;
domainList[numDomain++] = (DWORD_PTR) pArray[n];
}
delete [] pArray;
}
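// Walks the thread store and returns an array with the address of every managed
// Thread object. The caller owns the returned array.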
HRESULT GetThreadList(DWORD_PTR **threadList, int *numThread)
{
_ASSERTE(threadList != NULL);
_ASSERTE(numThread != NULL);
if (threadList == NULL || numThread == NULL)
{
return E_FAIL;
}
*numThread = 0;
DacpThreadStoreData ThreadStore;
if ( ThreadStore.Request(g_sos) != S_OK)
{
ExtOut("Failed to request threads from the thread store.");
return E_FAIL;
}
*threadList = new DWORD_PTR[ThreadStore.threadCount];
if (*threadList == NULL)
{
ReportOOM();
return E_OUTOFMEMORY;
}
CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
while (CurThread != NULL)
{
if (IsInterrupt())
return S_FALSE;
DacpThreadData Thread;
if (Thread.Request(g_sos, CurThread) != S_OK)
{
ExtOut("Failed to request Thread at %p\n", SOS_PTR(CurThread));
return E_FAIL;
}
(*threadList)[(*numThread)++] = (DWORD_PTR)CurThread;
CurThread = Thread.nextThread;
}
return S_OK;
}
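// Returns the managed Thread whose OS thread id matches the debugger's current
// thread, or NULL if the current thread has no managed counterpart.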
CLRDATA_ADDRESS GetCurrentManagedThread ()
{
DacpThreadStoreData ThreadStore;
ThreadStore.Request(g_sos);
ULONG Tid;
g_ExtSystem->GetCurrentThreadSystemId(&Tid);
CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
while (CurThread)
{
DacpThreadData Thread;
if (Thread.Request(g_sos, CurThread) != S_OK)
{
return NULL;
}
if (Thread.osThreadId == Tid)
{
return CurThread;
}
CurThread = Thread.nextThread;
}
return NULL;
}
void ReloadSymbolWithLineInfo()
{
_ASSERTE(g_pRuntime != nullptr);
#ifndef FEATURE_PAL
static BOOL bLoadSymbol = FALSE;
if (!bLoadSymbol)
{
ULONG Options;
g_ExtSymbols->GetSymbolOptions(&Options);
if (!(Options & SYMOPT_LOAD_LINES))
{
g_ExtSymbols->AddSymbolOptions(SYMOPT_LOAD_LINES);
if (SUCCEEDED(g_ExtSymbols->GetModuleByModuleName(MSCOREE_SHIM_A, 0, NULL, NULL)))
{
g_ExtSymbols->Reload("/f" MSCOREE_SHIM_A);
}
std::string reloadCommand;
reloadCommand.append("/f ");
reloadCommand.append(GetRuntimeDllName());
g_ExtSymbols->Reload(reloadCommand.c_str());
}
// reload mscoree.pdb and clrjit.pdb to get line info
bLoadSymbol = TRUE;
}
#endif
}
// Return 1 if the function is our stub
// Return MethodDesc if the function is managed
// Otherwise return 0
size_t FunctionType (size_t EIP)
{
ULONG64 base = 0;
ULONG ulLoaded, ulUnloaded, ulIndex;
// Get the number of loaded and unloaded modules
if (FAILED(g_ExtSymbols->GetNumberModules(&ulLoaded, &ulUnloaded)))
return 0;
if (SUCCEEDED(g_ExtSymbols->GetModuleByOffset(TO_CDADDR(EIP), 0, &ulIndex, &base)) && base != 0)
{
if (ulIndex < ulLoaded)
{
IMAGE_DOS_HEADER DosHeader;
if (g_ExtData->ReadVirtual(TO_CDADDR(base), &DosHeader, sizeof(DosHeader), NULL) != S_OK)
return 0;
IMAGE_NT_HEADERS Header;
if (g_ExtData->ReadVirtual(TO_CDADDR(base + DosHeader.e_lfanew), &Header, sizeof(Header), NULL) != S_OK)
return 0;
// If there is no COMHeader, this can not be managed code.
if (Header.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_COMHEADER].VirtualAddress == 0)
return 0;
IMAGE_COR20_HEADER ComPlusHeader;
if (g_ExtData->ReadVirtual(TO_CDADDR(base + Header.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_COMHEADER].VirtualAddress),
&ComPlusHeader, sizeof(ComPlusHeader), NULL) != S_OK)
return 0;
// If there is no Precompiled image info, it can not be prejit code
if (ComPlusHeader.ManagedNativeHeader.VirtualAddress == 0) {
return 0;
}
}
}
CLRDATA_ADDRESS dwStartAddr = TO_CDADDR(EIP);
CLRDATA_ADDRESS pMD;
if (g_sos->GetMethodDescPtrFromIP(dwStartAddr, &pMD) != S_OK)
{
return 1;
}
return (size_t) pMD;
}
//
// Returns true if the runtime's major version (the logical product version, like 2.1,
// 3.0 or 5.x) matches the one passed in. Currently only major versions of 3 or 5 are supported.
//
bool IsRuntimeVersion(DWORD major)
{
VS_FIXEDFILEINFO fileInfo;
if (SUCCEEDED(g_pRuntime->GetEEVersion(&fileInfo, nullptr, 0)))
{
return IsRuntimeVersion(fileInfo, major);
}
return false;
}
bool IsRuntimeVersion(VS_FIXEDFILEINFO& fileInfo, DWORD major)
{
switch (major)
{
case 5:
return HIWORD(fileInfo.dwFileVersionMS) == 5;
case 3:
return HIWORD(fileInfo.dwFileVersionMS) == 4 && LOWORD(fileInfo.dwFileVersionMS) == 700;
default:
_ASSERTE(FALSE);
break;
}
return false;
}
bool IsRuntimeVersionAtLeast(DWORD major)
{
VS_FIXEDFILEINFO fileInfo;
if (SUCCEEDED(g_pRuntime->GetEEVersion(&fileInfo, nullptr, 0)))
{
return IsRuntimeVersionAtLeast(fileInfo, major);
}
return false;
}
bool IsRuntimeVersionAtLeast(VS_FIXEDFILEINFO& fileInfo, DWORD major)
{
switch (major)
{
case 3:
if (HIWORD(fileInfo.dwFileVersionMS) == 4 && LOWORD(fileInfo.dwFileVersionMS) == 700)
{
return true;
}
// fall through
case 5:
if (HIWORD(fileInfo.dwFileVersionMS) >= 5)
{
return true;
}
// fall through
break;
default:
_ASSERTE(FALSE);
break;
}
return false;
}
// Returns true if there is a change in the data structures that SOS depends on like
// stress log structs (StressMsg, StressLogChunck, ThreadStressLog, etc), exception
// stack traces (StackTraceElement), the PredefinedTlsSlots enums, etc.
bool CheckBreakingRuntimeChange(int* pVersion)
{
bool result = false;
// Assume version 1 if no ISOSDacInterface9 (runtimes < 5.0)
int version = 1;
if (g_sos != nullptr)
{
ReleaseHolder<ISOSDacInterface9> sos9;
if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface9), &sos9)))
{
if (SUCCEEDED(sos9->GetBreakingChangeVersion(&version)))
{
if (version > SOS_BREAKING_CHANGE_VERSION)
{
ExtWarn("WARNING: SOS needs to be upgraded for this version of the runtime. Some commands may not work correctly.\n");
ExtWarn("For more information see https://go.microsoft.com/fwlink/?linkid=2135652\n");
ExtWarn("\n");
result = true;
}
}
}
}
if (pVersion != nullptr)
{
*pVersion = version;
}
return result;
}
#ifndef FEATURE_PAL
BOOL GetSOSVersion(VS_FIXEDFILEINFO *pFileInfo)
{
_ASSERTE(pFileInfo);
WCHAR wszFullPath[MAX_LONGPATH];
DWORD cchFullPath = GetModuleFileNameW(g_hInstance, wszFullPath, _countof(wszFullPath));
DWORD dwHandle = 0;
DWORD infoSize = GetFileVersionInfoSizeW(wszFullPath, &dwHandle);
if (infoSize)
{
ArrayHolder<BYTE> pVersionInfo = new BYTE[infoSize];
if (pVersionInfo)
{
if (GetFileVersionInfoW(wszFullPath, NULL, infoSize, pVersionInfo))
{
VS_FIXEDFILEINFO *pTmpFileInfo = NULL;
UINT uLen = 0;
if (VerQueryValue(pVersionInfo, "\\", (LPVOID *) &pTmpFileInfo, &uLen))
{
if (pFileInfo->dwFileVersionMS == (DWORD)-1) {
return FALSE;
}
*pFileInfo = *pTmpFileInfo; // Copy the info
return TRUE;
}
}
}
}
return FALSE;
}
#endif // !FEATURE_PAL
size_t ObjectSize(DWORD_PTR obj,BOOL fIsLargeObject)
{
DWORD_PTR dwMT;
MOVE(dwMT, obj);
return ObjectSize(obj, dwMT, FALSE, fIsLargeObject);
}
size_t ObjectSize(DWORD_PTR obj, DWORD_PTR mt, BOOL fIsValueClass, BOOL fIsLargeObject)
{
BOOL bContainsPointers;
size_t size = 0;
if (!GetSizeEfficient(obj, mt, fIsLargeObject, size, bContainsPointers))
{
return 0;
}
return size;
}
// This takes an array of values and sets every non-printable character
// to be a period.
void Flatten(__out_ecount(len) char *data, unsigned int len)
{
for (unsigned int i = 0; i < len; ++i)
if (data[i] < 32 || data[i] > 126)
data[i] = '.';
data[len] = 0;
}
void CharArrayContent(TADDR pos, ULONG num, bool widechar)
{
if (!pos || num <= 0)
return;
if (widechar)
{
ArrayHolder<WCHAR> data = new WCHAR[num+1];
if (!data)
{
ReportOOM();
return;
}
ULONG readLen = 0;
if (!SafeReadMemory(pos, data, num<<1, &readLen))
return;
Flatten(data.GetPtr(), readLen >> 1);
ExtOut("%S", data.GetPtr());
}
else
{
ArrayHolder<char> data = new char[num+1];
if (!data)
{
ReportOOM();
return;
}
ULONG readLen = 0;
if (!SafeReadMemory(pos, data, num, &readLen))
return;
_ASSERTE(readLen <= num);
Flatten(data, readLen);
ExtOut("%s", data.GetPtr());
}
}
void StringObjectContent(size_t obj, BOOL fLiteral, const int length)
{
DacpObjectData objData;
if (objData.Request(g_sos, TO_CDADDR(obj))!=S_OK)
{
ExtOut("<Invalid Object>");
return;
}
strobjInfo stInfo { 0, 0 };
if (MOVE(stInfo, obj) != S_OK)
{
ExtOut ("Error getting string data\n");
return;
}
if (objData.Size > 0x200000 || stInfo.m_StringLength > 0x200000)
{
ExtOut ("<String is invalid or too large to print>\n");
return;
}
ArrayHolder<WCHAR> pwszBuf = new WCHAR[stInfo.m_StringLength+1];
if (pwszBuf == NULL)
{
return;
}
DWORD_PTR dwAddr = (DWORD_PTR)pwszBuf.GetPtr();
if (g_sos->GetObjectStringData(TO_CDADDR(obj), stInfo.m_StringLength+1, pwszBuf, NULL)!=S_OK)
{
ExtOut("<Invalid Object>");
return;
}
if (!fLiteral)
{
pwszBuf[stInfo.m_StringLength] = L'\0';
ExtOut ("%S", pwszBuf.GetPtr());
}
else
{
ULONG32 count = stInfo.m_StringLength;
WCHAR buffer[256];
WCHAR out[512];
while (count)
{
DWORD toRead = 255;
if (count < toRead)
toRead = count;
ULONG bytesRead;
wcsncpy_s(buffer,_countof(buffer),(LPWSTR) dwAddr, toRead);
bytesRead = toRead*sizeof(WCHAR);
DWORD wcharsRead = bytesRead/2;
buffer[wcharsRead] = L'\0';
ULONG j,k=0;
for (j = 0; j < wcharsRead; j ++)
{
if (_iswprint (buffer[j])) {
out[k] = buffer[j];
k ++;
}
else
{
out[k++] = L'\\';
switch (buffer[j]) {
case L'\n':
out[k++] = L'n';
break;
case L'\0':
out[k++] = L'0';
break;
case L'\t':
out[k++] = L't';
break;
case L'\v':
out[k++] = L'v';
break;
case L'\b':
out[k++] = L'b';
break;
case L'\r':
out[k++] = L'r';
break;
case L'\f':
out[k++] = L'f';
break;
case L'\a':
out[k++] = L'a';
break;
case L'\\':
break;
case L'\?':
out[k++] = L'?';
break;
default:
out[k++] = L'?';
break;
}
}
}
out[k] = L'\0';
ExtOut ("%S", out);
count -= wcharsRead;
dwAddr += bytesRead;
}
}
}
#ifdef _TARGET_WIN64_
#include <limits.h>
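// Parses a hexadecimal string (with or without a leading 0x) into a 64-bit value.
// Returns _UI64_MAX if more than 16 hex digits are supplied (overflow).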
__int64 str64hex(const char *ptr)
{
__int64 value = 0;
unsigned char nCount = 0;
if(ptr==NULL)
return 0;
// Ignore leading 0x if present
if (*ptr=='0' && toupper(*(ptr+1))=='X') {
ptr = ptr + 2;
}
while (1) {
char digit;
if (isdigit(*ptr)) {
digit = *ptr - '0';
} else if (isalpha(*ptr)) {
digit = (((char)toupper(*ptr)) - 'A') + 10;
if (digit >= 16) {
break; // terminate
}
} else {
break;
}
if (nCount>15) {
return _UI64_MAX; // would be an overflow
}
value = value << 4;
value |= digit;
ptr++;
nCount++;
}
return value;
}
#endif // _TARGET_WIN64_
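// Converts the argument text between ptr and end into a value according to its
// ARGTYPE: COSTRING allocates a copy of the string (which the caller must free),
// COHEX parses hex, and everything else is parsed as decimal.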
BOOL GetValueForCMD (const char *ptr, const char *end, ARGTYPE type, size_t *value)
{
if (type == COSTRING) {
// Allocate memory for the length of the string. Whitespace terminates
// User must free the string data.
char *pszValue = NULL;
size_t dwSize = (end - ptr);
pszValue= new char[dwSize+1];
if (pszValue == NULL)
{
return FALSE;
}
strncpy_s(pszValue,dwSize+1,ptr,dwSize); // _TRUNCATE
*value = (size_t) pszValue;
} else {
char *last;
if (type == COHEX) {
#ifdef _TARGET_WIN64_
*value = str64hex(ptr);
#else
*value = strtoul(ptr,&last,16);
#endif
}
else {
#ifdef _TARGET_WIN64_
*value = _atoi64(ptr);
#else
*value = strtoul(ptr,&last,10);
#endif
}
#ifdef _TARGET_WIN64_
last = (char *) ptr;
// Ignore leading 0x if present
if (*last=='0' && toupper(*(last+1))=='X') {
last = last + 2;
}
while (isdigit(*last) || (toupper(*last)>='A' && toupper(*last)<='F')) {
last++;
}
#endif
if (last != end) {
return FALSE;
}
}
return TRUE;
}
void SetValueForCMD (void *vptr, ARGTYPE type, size_t value)
{
switch (type) {
case COBOOL:
*(BOOL*)vptr = (BOOL) value;
break;
case COSIZE_T:
case COSTRING:
case COHEX:
*(SIZE_T*)vptr = value;
break;
}
}
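// Parses a SOS command line: options (prefixed with '-', or '/' on Windows) are
// matched against the 'option' table, positional arguments are stored into 'arg',
// and quoted arguments ('...') may contain spaces. Returns FALSE on a parse error.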
BOOL GetCMDOption(const char *string, CMDOption *option, size_t nOption,
CMDValue *arg, size_t maxArg, size_t *nArg)
{
const char *end;
const char *ptr = string;
BOOL endofOption = FALSE;
for (size_t n = 0; n < nOption; n ++)
{
if (IsInterrupt())
return FALSE;
option[n].hasSeen = FALSE;
}
if (nArg) {
*nArg = 0;
}
while (ptr[0] != '\0')
{
if (IsInterrupt())
return FALSE;
// skip any space
if (isspace (ptr[0])) {
while (isspace (ptr[0]))
{
if (IsInterrupt())
return FALSE;
ptr ++;
}
continue;
}
end = ptr;
// Arguments can be quoted with ". We'll remove the quotes and
// allow spaces to exist in the string.
BOOL bQuotedArg = FALSE;
if (ptr[0] == '\'' && ptr[1] != '-')
{
bQuotedArg = TRUE;
// skip quote
ptr++;
end++;
while (end[0] != '\'' && end[0] != '\0')
{
if (IsInterrupt())
return FALSE;
end ++;
}
if (end[0] != '\'')
{
                // Error, there was a start quote but no end quote
ExtOut ("Missing quote in %s\n", ptr);
return FALSE;
}
}
else // whitespace terminates
{
while (!isspace(end[0]) && end[0] != '\0')
{
if (IsInterrupt())
return FALSE;
end ++;
}
}
#ifndef FEATURE_PAL
if (ptr[0] != '-' && ptr[0] != '/') {
#else
if (ptr[0] != '-') {
#endif
if (maxArg == 0) {
ExtOut ("Incorrect argument: %s\n", ptr);
return FALSE;
}
endofOption = TRUE;
if (*nArg >= maxArg) {
ExtOut ("Incorrect argument: %s\n", ptr);
return FALSE;
}
size_t value;
if (!GetValueForCMD (ptr,end,arg[*nArg].type,&value)) {
char oldChar = *end;
*(char *)end = '\0';
value = (size_t)GetExpression (ptr);
*(char *)end = oldChar;
/*
It is silly to do this, what if 0 is a valid expression for
the command?
if (value == 0) {
ExtOut ("Invalid argument: %s\n", ptr);
return FALSE;
}
*/
}
SetValueForCMD (arg[*nArg].vptr, arg[*nArg].type, value);
(*nArg) ++;
}
else if (endofOption) {
ExtOut ("Wrong option: %s\n", ptr);
return FALSE;
}
else {
char buffer[80];
if (end-ptr > 79) {
ExtOut ("Invalid option %s\n", ptr);
return FALSE;
}
strncpy_s (buffer,_countof(buffer), ptr, end-ptr);
size_t n;
for (n = 0; n < nOption; n ++)
{
if (IsInterrupt())
return FALSE;
if (_stricmp (buffer, option[n].name) == 0) {
if (option[n].hasSeen) {
ExtOut ("Invalid option: option specified multiple times: %s\n", buffer);
return FALSE;
}
option[n].hasSeen = TRUE;
if (option[n].hasValue) {
// skip any space
ptr = end;
if (isspace (ptr[0])) {
while (isspace (ptr[0]))
{
if (IsInterrupt())
return FALSE;
ptr ++;
}
}
if (ptr[0] == '\0') {
ExtOut ("Missing value for option %s\n", buffer);
return FALSE;
}
end = ptr;
while (!isspace(end[0]) && end[0] != '\0')
{
if (IsInterrupt())
return FALSE;
end ++;
}
size_t value;
if (!GetValueForCMD (ptr,end,option[n].type,&value)) {
char oldChar = *end;
*(char *)end = '\0';
value = (size_t)GetExpression (ptr);
*(char *)end = oldChar;
}
SetValueForCMD (option[n].vptr,option[n].type,value);
}
else {
SetValueForCMD (option[n].vptr,option[n].type,TRUE);
}
break;
}
}
if (n == nOption) {
ExtOut ("Unknown option: %s\n", buffer);
return FALSE;
}
}
ptr = end;
if (bQuotedArg)
{
ptr++;
}
}
return TRUE;
}
ReadVirtualCache g_special_rvCacheSpace;
ReadVirtualCache *rvCache = &g_special_rvCacheSpace;
void ResetGlobals(void)
{
// There are some globals used in SOS that exist for efficiency in one command,
// but should be reset because the next execution of an SOS command could be on
// another managed process. Reset them to a default state here, as this command
// is called on every SOS entry point.
g_sos->GetUsefulGlobals(&g_special_usefulGlobals);
g_special_mtCache.Clear();
g_special_rvCacheSpace.Clear();
Output::ResetIndent();
}
//---------------------------------------------------------------------------------------
//
// Loads private DAC interface, and points g_clrData to it.
//
// Return Value:
// HRESULT indicating success or failure
//
HRESULT LoadClrDebugDll(void)
{
_ASSERTE(g_pRuntime != nullptr);
HRESULT hr = g_pRuntime->GetClrDataProcess(&g_clrData);
if (FAILED(hr))
{
#ifdef FEATURE_PAL
return hr;
#else
// Fail if ExtensionApis wasn't initialized because we are hosted under dotnet-dump
if (Ioctl == nullptr) {
return hr;
}
// Try getting the DAC interface from dbgeng if the above fails on Windows
WDBGEXTS_CLR_DATA_INTERFACE Query;
Query.Iid = &__uuidof(IXCLRDataProcess);
if (!Ioctl(IG_GET_CLR_DATA_INTERFACE, &Query, sizeof(Query))) {
return hr;
}
g_clrData = (IXCLRDataProcess*)Query.Iface;
g_clrData->Flush();
#endif
}
else
{
g_clrData->AddRef();
g_clrData->Flush();
}
hr = g_clrData->QueryInterface(__uuidof(ISOSDacInterface), (void**)&g_sos);
if (FAILED(hr))
{
g_sos = NULL;
return hr;
}
return S_OK;
}
/// <summary>
/// Loads the runtime module symbols for the commands like dumplog that
/// lookup runtime symbols. This is done on-demand because it takes a
/// long time under windbg/cdb and not needed for most commands.
/// </summary>
void LoadRuntimeSymbols()
{
_ASSERTE(g_pRuntime != nullptr);
#ifndef FEATURE_PAL
ULONG64 moduleAddress = g_pRuntime->GetModuleAddress();
DEBUG_MODULE_PARAMETERS params;
HRESULT hr = g_ExtSymbols->GetModuleParameters(1, &moduleAddress, 0, ¶ms);
if (SUCCEEDED(hr))
{
if (params.SymbolType == SymDeferred)
{
PCSTR runtimeDllName = ::GetRuntimeDllName();
std::string reloadCommand;
reloadCommand.append("/f ");
reloadCommand.append(runtimeDllName);
g_ExtSymbols->Reload(reloadCommand.c_str());
g_ExtSymbols->GetModuleParameters(1, &moduleAddress, 0, ¶ms);
if (params.SymbolType != SymPdb && params.SymbolType != SymDia)
{
ExtOut("Symbols for %s not loaded. Some SOS commands may not work.\n", runtimeDllName);
}
}
}
#endif
}
typedef enum
{
GC_HEAP_INVALID = 0,
GC_HEAP_WKS = 1,
GC_HEAP_SVR = 2
} GC_HEAP_TYPE;
/**********************************************************************\
* Routine Description: *
* *
* This function is called to find out if runtime is server build *
* *
\**********************************************************************/
DacpGcHeapData *g_pHeapData = NULL;
DacpGcHeapData g_HeapData;
BOOL InitializeHeapData()
{
if (g_pHeapData == NULL)
{
if (g_HeapData.Request(g_sos) != S_OK)
{
return FALSE;
}
g_pHeapData = &g_HeapData;
}
return TRUE;
}
BOOL IsServerBuild()
{
return InitializeHeapData() ? g_pHeapData->bServerMode : FALSE;
}
UINT GetMaxGeneration()
{
return InitializeHeapData() ? g_pHeapData->g_max_generation : 0;
}
UINT GetGcHeapCount()
{
return InitializeHeapData() ? g_pHeapData->HeapCount : 0;
}
BOOL GetGcStructuresValid()
{
    // We don't want to use the cached HeapData, because whether the GC structures
    // are valid can change as the program runs.
DacpGcHeapData heapData;
HRESULT hr;
if ((hr = heapData.Request(g_sos)) != S_OK)
{
ExtOut("GetGcStructuresValid: request heap data FAILED %08x\n", hr);
return FALSE;
}
return heapData.bGcStructuresValid;
}
void GetAllocContextPtrs(AllocInfo *pallocInfo)
{
// gets the allocation contexts for all threads. This provides information about how much of
// the current allocation quantum has been allocated and the heap to which the quantum belongs.
// The allocation quantum is a fixed size chunk of zeroed memory from which allocations will come
// until it's filled. Each managed thread has its own allocation context.
pallocInfo->num = 0;
pallocInfo->array = NULL;
// get the thread store (See code:ClrDataAccess::RequestThreadStoreData for details)
DacpThreadStoreData ThreadStore;
if ( ThreadStore.Request(g_sos) != S_OK)
{
return;
}
int numThread = ThreadStore.threadCount;
if (numThread)
{
pallocInfo->array = new needed_alloc_context[numThread];
if (pallocInfo->array == NULL)
{
return;
}
}
// get details for each thread in the thread store
CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
while (CurThread != NULL)
{
if (IsInterrupt())
return;
DacpThreadData Thread;
// Get information about the thread (we're getting the values of several of the
// fields of the Thread instance from the target) See code:ClrDataAccess::RequestThreadData for
// details
if (Thread.Request(g_sos, CurThread) != S_OK)
{
return;
}
if (Thread.allocContextPtr != 0)
{
// get a list of all the allocation contexts
int j;
for (j = 0; j < pallocInfo->num; j ++)
{
if (pallocInfo->array[j].alloc_ptr == (BYTE *) Thread.allocContextPtr)
break;
}
if (j == pallocInfo->num)
{
pallocInfo->num ++;
pallocInfo->array[j].alloc_ptr = (BYTE *) Thread.allocContextPtr;
pallocInfo->array[j].alloc_limit = (BYTE *) Thread.allocContextLimit;
}
}
CurThread = Thread.nextThread;
}
}
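// Reads target memory through a small read-ahead cache. Requests larger than the
// cache go straight to ReadVirtual; otherwise the cache is refilled whenever the
// requested range is not already covered by it.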
HRESULT ReadVirtualCache::Read(TADDR address, PVOID buffer, ULONG bufferSize, PULONG lpcbBytesRead)
{
// address can be any random ULONG64, as it can come from VerifyObjectMember(), and this
// can pass random pointer values in case of GC heap corruption
if (bufferSize == 0)
return S_OK;
if (bufferSize > CACHE_SIZE)
{
// Don't even try with the cache
return g_ExtData->ReadVirtual(TO_CDADDR(address), buffer, bufferSize, lpcbBytesRead);
}
if (!m_cacheValid || (address < m_startCache) || (address > (m_startCache + m_cacheSize - bufferSize)))
{
ULONG cbBytesRead = 0;
m_cacheValid = FALSE;
m_startCache = address;
// Avoid an int overflow
if (m_startCache + CACHE_SIZE < m_startCache)
m_startCache = (TADDR)(-CACHE_SIZE);
HRESULT hr = g_ExtData->ReadVirtual(TO_CDADDR(m_startCache), m_cache, CACHE_SIZE, &cbBytesRead);
if (hr != S_OK)
{
return hr;
}
m_cacheSize = cbBytesRead;
m_cacheValid = TRUE;
}
// If the address is within the cache, copy the cached memory to the input buffer
LONG_PTR cacheOffset = address - m_startCache;
if (cacheOffset >= 0 && cacheOffset < CACHE_SIZE)
{
int size = _min(bufferSize, m_cacheSize);
memcpy(buffer, (LPVOID)(m_cache + cacheOffset), size);
if (lpcbBytesRead != NULL)
{
*lpcbBytesRead = size;
}
}
else
{
return E_FAIL;
}
return S_OK;
}
HRESULT GetMTOfObject(TADDR obj, TADDR *mt)
{
if (!mt)
return E_POINTER;
// Read the MethodTable and if we succeed, get rid of the mark bits.
HRESULT hr = rvCache->Read(obj, mt, sizeof(TADDR), NULL);
if (SUCCEEDED(hr))
*mt &= ~3;
return hr;
}
#ifndef FEATURE_PAL
StressLogMem::~StressLogMem ()
{
MemRange * range = list;
while (range)
{
MemRange * temp = range->next;
delete range;
range = temp;
}
}
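// Walks the per-thread stress logs starting at stressLogAddr and records the memory
// range of every StressLogChunk, so IsInStressLog can answer containment queries.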
bool StressLogMem::Init (ULONG64 stressLogAddr, IDebugDataSpaces* memCallBack)
{
size_t ThreadStressLogAddr = NULL;
HRESULT hr = memCallBack->ReadVirtual(UL64_TO_CDA(stressLogAddr + offsetof (StressLog, logs)),
&ThreadStressLogAddr, sizeof (ThreadStressLogAddr), 0);
if (hr != S_OK)
{
return false;
}
while(ThreadStressLogAddr != NULL)
{
size_t ChunkListHeadAddr = NULL;
hr = memCallBack->ReadVirtual(TO_CDADDR(ThreadStressLogAddr + ThreadStressLog::OffsetOfListHead ()),
&ChunkListHeadAddr, sizeof (ChunkListHeadAddr), 0);
if (hr != S_OK || ChunkListHeadAddr == NULL)
{
return false;
}
size_t StressLogChunkAddr = ChunkListHeadAddr;
do
{
AddRange (StressLogChunkAddr, sizeof (StressLogChunk));
hr = memCallBack->ReadVirtual(TO_CDADDR(StressLogChunkAddr + offsetof (StressLogChunk, next)),
&StressLogChunkAddr, sizeof (StressLogChunkAddr), 0);
if (hr != S_OK)
{
return false;
}
if (StressLogChunkAddr == NULL)
{
return true;
}
} while (StressLogChunkAddr != ChunkListHeadAddr);
hr = memCallBack->ReadVirtual(TO_CDADDR(ThreadStressLogAddr + ThreadStressLog::OffsetOfNext ()),
&ThreadStressLogAddr, sizeof (ThreadStressLogAddr), 0);
if (hr != S_OK)
{
return false;
}
}
return true;
}
bool StressLogMem::IsInStressLog (ULONG64 addr)
{
MemRange * range = list;
while (range)
{
if (range->InRange (addr))
return true;
range = range->next;
}
return false;
}
#endif // !FEATURE_PAL
unsigned int Output::g_bSuppressOutput = 0;
unsigned int Output::g_Indent = 0;
bool Output::g_bDbgOutput = false;
bool Output::g_bDMLExposed = false;
unsigned int Output::g_DMLEnable = 0;
template <class T, int count, int size> const int StaticData<T, count, size>::Count = count;
template <class T, int count, int size> const int StaticData<T, count, size>::Size = size;
StaticData<char, 4, 1024> CachedString::cache;
CachedString::CachedString()
: mPtr(0), mRefCount(0), mIndex(~0), mSize(cache.Size)
{
Create();
}
CachedString::CachedString(const CachedString &rhs)
: mPtr(0), mRefCount(0), mIndex(~0), mSize(cache.Size)
{
Copy(rhs);
}
CachedString::~CachedString()
{
Clear();
}
const CachedString &CachedString::operator=(const CachedString &rhs)
{
Clear();
Copy(rhs);
return *this;
}
void CachedString::Copy(const CachedString &rhs)
{
if (rhs.IsOOM())
{
SetOOM();
}
else
{
mPtr = rhs.mPtr;
mIndex = rhs.mIndex;
mSize = rhs.mSize;
if (rhs.mRefCount)
{
mRefCount = rhs.mRefCount;
(*mRefCount)++;
}
else
{
            // We only create the ref count the first time the string is copied, so
            // we initialize it to 2 (the original plus this copy).
mRefCount = rhs.mRefCount = new unsigned int(2);
if (!mRefCount)
SetOOM();
}
}
}
void CachedString::Clear()
{
if (!mRefCount || --*mRefCount == 0)
{
if (mIndex == -1)
{
if (mPtr)
delete [] mPtr;
}
else if (mIndex >= 0 && mIndex < cache.Count)
{
cache.InUse[mIndex] = false;
}
if (mRefCount)
delete mRefCount;
}
mPtr = 0;
mIndex = ~0;
mRefCount = 0;
mSize = cache.Size;
}
void CachedString::Create()
{
mIndex = -1;
mRefCount = 0;
// First try to find a string in the cache to use.
for (int i = 0; i < cache.Count; ++i)
if (!cache.InUse[i])
{
cache.InUse[i] = true;
mPtr = cache.Data[i];
mIndex = i;
break;
}
// We did not find a string to use, so we'll create a new one.
if (mIndex == -1)
{
mPtr = new char[cache.Size];
if (!mPtr)
SetOOM();
}
}
void CachedString::SetOOM()
{
Clear();
mIndex = -2;
}
void CachedString::Allocate(int size)
{
Clear();
mPtr = new char[size];
if (mPtr)
{
mSize = size;
mIndex = -1;
}
else
{
SetOOM();
}
}
size_t CountHexCharacters(CLRDATA_ADDRESS val)
{
size_t ret = 0;
while (val)
{
val >>= 4;
ret++;
}
return ret;
}
// SOS is single threaded so a global buffer doesn't need any locking
char g_printBuffer[8192];
//---------------------------------------------------------------------
// Because of the debuggers and hosts SOS now runs on, output formatting always
// happens with the C++ runtime functions and not dbgeng. This means the special
// dbgeng formatting characters are not supported: %N, %I, %ma, %mu, %msa, %msu,
// %y, %ly; and %p takes an architecture-size pointer (size_t) instead of always
// a 64-bit one.
//---------------------------------------------------------------------
HRESULT
OutputVaList(
ULONG mask,
PCSTR format,
va_list args)
{
int length = _vsnprintf_s((char* const)&g_printBuffer, sizeof(g_printBuffer), _TRUNCATE, format, args);
if (length > 0)
{
return g_ExtControl->OutputVaList(mask, (char* const)&g_printBuffer, args);
}
return E_FAIL;
}
HRESULT
ControlledOutputVaList(
ULONG outputControl,
ULONG mask,
PCSTR format,
va_list args)
{
int length = _vsnprintf_s((char* const)&g_printBuffer, sizeof(g_printBuffer), _TRUNCATE, format, args);
if (length > 0)
{
return g_ExtControl->ControlledOutputVaList(outputControl, mask, (char* const)&g_printBuffer, args);
}
return E_FAIL;
}
HRESULT
OutputText(
ULONG mask,
PCSTR format,
...)
{
va_list args;
va_start (args, format);
HRESULT result = OutputVaList(mask, format, args);
va_end (args);
return result;
}
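// Outputs 'count' spaces, emitting a fixed 64-character blank string in chunks
// rather than one character at a time.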
void WhitespaceOut(int count)
{
static const int FixedIndentWidth = 0x40;
static const char FixedIndentString[FixedIndentWidth+1] =
" ";
if (count <= 0)
return;
int mod = count & 0x3F;
count &= ~0x3F;
if (mod > 0)
OutputText(DEBUG_OUTPUT_NORMAL, "%.*s", mod, FixedIndentString);
for ( ; count > 0; count -= FixedIndentWidth)
OutputText(DEBUG_OUTPUT_NORMAL, FixedIndentString);
}
void DMLOut(PCSTR format, ...)
{
if (Output::IsOutputSuppressed())
return;
va_list args;
va_start(args, format);
ExtOutIndent();
if (IsDMLEnabled() && !Output::IsDMLExposed())
{
ControlledOutputVaList(DEBUG_OUTCTL_AMBIENT_DML, DEBUG_OUTPUT_NORMAL, format, args);
}
else
{
OutputVaList(DEBUG_OUTPUT_NORMAL, format, args);
}
va_end(args);
}
void IfDMLOut(PCSTR format, ...)
{
if (Output::IsOutputSuppressed() || !IsDMLEnabled())
return;
va_list args;
va_start(args, format);
ExtOutIndent();
g_ExtControl->ControlledOutputVaList(DEBUG_OUTCTL_AMBIENT_DML, DEBUG_OUTPUT_NORMAL, format, args);
va_end(args);
}
void ExtOut(PCSTR Format, ...)
{
if (Output::IsOutputSuppressed())
return;
va_list Args;
va_start(Args, Format);
ExtOutIndent();
OutputVaList(DEBUG_OUTPUT_NORMAL, Format, Args);
va_end(Args);
}
void ExtWarn(PCSTR Format, ...)
{
if (Output::IsOutputSuppressed())
return;
va_list Args;
va_start(Args, Format);
OutputVaList(DEBUG_OUTPUT_WARNING, Format, Args);
va_end(Args);
}
void ExtErr(PCSTR Format, ...)
{
va_list Args;
va_start(Args, Format);
OutputVaList(DEBUG_OUTPUT_ERROR, Format, Args);
va_end(Args);
}
/// <summary>
/// Internal trace output for extensions library
/// </summary>
void TraceError(PCSTR format, ...)
{
if (Output::g_bDbgOutput)
{
va_list args;
va_start(args, format);
OutputVaList(DEBUG_OUTPUT_ERROR, format, args);
va_end(args);
}
}
void ExtDbgOut(PCSTR Format, ...)
{
if (Output::g_bDbgOutput)
{
va_list Args;
va_start(Args, Format);
ExtOutIndent();
OutputVaList(DEBUG_OUTPUT_NORMAL, Format, Args);
va_end(Args);
}
}
const char * const DMLFormats[] =
{
NULL, // DML_None (do not use)
"<exec cmd=\"!DumpMT /d %s\">%s</exec>", // DML_MethodTable
"<exec cmd=\"!DumpMD /d %s\">%s</exec>", // DML_MethodDesc
"<exec cmd=\"!DumpClass /d %s\">%s</exec>", // DML_EEClass
"<exec cmd=\"!DumpModule /d %s\">%s</exec>", // DML_Module
"<exec cmd=\"!U /d %s\">%s</exec>", // DML_IP
"<exec cmd=\"!DumpObj /d %s\">%s</exec>", // DML_Object
"<exec cmd=\"!DumpDomain /d %s\">%s</exec>", // DML_Domain
"<exec cmd=\"!DumpAssembly /d %s\">%s</exec>", // DML_Assembly
"<exec cmd=\"~~[%s]s\">%s</exec>", // DML_ThreadID
"<exec cmd=\"!DumpVC /d %s %s\">%s</exec>", // DML_ValueClass
"<exec cmd=\"!DumpHeap /d -mt %s\">%s</exec>", // DML_DumpHeapMT
"<exec cmd=\"!ListNearObj /d %s\">%s</exec>", // DML_ListNearObj
"<exec cmd=\"!ThreadState %s\">%s</exec>", // DML_ThreadState
"<exec cmd=\"!PrintException /d %s\">%s</exec>",// DML_PrintException
"<exec cmd=\"!DumpRCW /d %s\">%s</exec>", // DML_RCWrapper
"<exec cmd=\"!DumpCCW /d %s\">%s</exec>", // DML_CCWrapper
"<exec cmd=\"!ClrStack -i %S %d\">%S</exec>", // DML_ManagedVar
"<exec cmd=\"!DumpAsync -addr %s -tasks -completed -fields -stacks -roots\">%s</exec>", // DML_Async
"<exec cmd=\"!DumpIL /i %s\">%s</exec>", // DML_IL
"<exec cmd=\"!DumpRCW -cw /d %s\">%s</exec>", // DML_ComWrapperRCW
"<exec cmd=\"!DumpCCW -cw /d %s\">%s</exec>", // DML_ComWrapperCCW
};
void ConvertToLower(__out_ecount(len) char *buffer, size_t len)
{
for (size_t i = 0; i < len && buffer[i]; ++i)
buffer[i] = (char)tolower(buffer[i]);
}
/* Build a hex display of addr.
*/
int GetHex(CLRDATA_ADDRESS addr, __out_ecount(len) char *out, size_t len, bool fill)
{
int count = sprintf_s(out, len, fill ? "%p" : "%x", (size_t)addr);
ConvertToLower(out, len);
return count;
}
CachedString Output::BuildHexValue(CLRDATA_ADDRESS addr, FormatType type, bool fill)
{
CachedString ret;
if (ret.IsOOM())
{
ReportOOM();
return ret;
}
if (IsDMLEnabled())
{
char hex[POINTERSIZE_BYTES*2 + 1];
GetHex(addr, hex, _countof(hex), fill);
sprintf_s(ret, ret.GetStrLen(), DMLFormats[type], hex, hex);
}
else
{
GetHex(addr, ret, ret.GetStrLen(), fill);
}
return ret;
}
CachedString Output::BuildVCValue(CLRDATA_ADDRESS mt, CLRDATA_ADDRESS addr, FormatType type, bool fill)
{
_ASSERTE(type == DML_ValueClass);
CachedString ret;
if (ret.IsOOM())
{
ReportOOM();
return ret;
}
if (IsDMLEnabled())
{
char hexaddr[POINTERSIZE_BYTES*2 + 1];
char hexmt[POINTERSIZE_BYTES*2 + 1];
GetHex(addr, hexaddr, _countof(hexaddr), fill);
GetHex(mt, hexmt, _countof(hexmt), fill);
sprintf_s(ret, ret.GetStrLen(), DMLFormats[type], hexmt, hexaddr, hexaddr);
}
else
{
GetHex(addr, ret, ret.GetStrLen(), fill);
}
return ret;
}
CachedString Output::BuildManagedVarValue(__in_z LPCWSTR expansionName, ULONG frame, __in_z LPCWSTR simpleName, FormatType type)
{
_ASSERTE(type == DML_ManagedVar);
CachedString ret;
if (ret.IsOOM())
{
ReportOOM();
return ret;
}
// calculate the number of digits in frame (this assumes base-10 display of frames)
int numFrameDigits = 0;
if (frame > 0)
{
ULONG tempFrame = frame;
while (tempFrame > 0)
{
++numFrameDigits;
tempFrame /= 10;
}
}
else
{
numFrameDigits = 1;
}
size_t totalStringLength = strlen(DMLFormats[type]) + _wcslen(expansionName) + numFrameDigits + _wcslen(simpleName) + 1;
if (totalStringLength > ret.GetStrLen())
{
ret.Allocate(static_cast<int>(totalStringLength));
if (ret.IsOOM())
{
ReportOOM();
return ret;
}
}
if (IsDMLEnabled())
{
sprintf_s(ret, ret.GetStrLen(), DMLFormats[type], expansionName, frame, simpleName);
}
else
{
sprintf_s(ret, ret.GetStrLen(), "%S", simpleName);
}
return ret;
}
CachedString Output::BuildManagedVarValue(__in_z LPCWSTR expansionName, ULONG frame, int indexInArray, FormatType type)
{
WCHAR indexString[24];
swprintf_s(indexString, _countof(indexString), W("[%d]"), indexInArray);
return BuildManagedVarValue(expansionName, frame, indexString, type);
}
EnableDMLHolder::EnableDMLHolder(BOOL enable)
: mEnable(enable)
{
#ifndef FEATURE_PAL
// If the user has not requested that we use DML, it's still possible that
// they have instead specified ".prefer_dml 1". If enable is false,
// we will check here for .prefer_dml. Since this class is only used once
// per command issued to SOS, this should only check the setting once per
// sos command issued.
if (!mEnable && Output::g_DMLEnable <= 0)
{
ULONG opts;
HRESULT hr = g_ExtControl->GetEngineOptions(&opts);
mEnable = SUCCEEDED(hr) && (opts & DEBUG_ENGOPT_PREFER_DML) == DEBUG_ENGOPT_PREFER_DML;
}
if (mEnable)
{
Output::g_DMLEnable++;
}
#endif // FEATURE_PAL
}
EnableDMLHolder::~EnableDMLHolder()
{
#ifndef FEATURE_PAL
if (mEnable)
Output::g_DMLEnable--;
#endif
}
bool IsDMLEnabled()
{
return IsInitializedByDbgEng() && Output::g_DMLEnable > 0;
}
NoOutputHolder::NoOutputHolder(BOOL bSuppress)
: mSuppress(bSuppress)
{
if (mSuppress)
Output::g_bSuppressOutput++;
}
NoOutputHolder::~NoOutputHolder()
{
if (mSuppress)
Output::g_bSuppressOutput--;
}
//
// Code to support mapping RVAs to managed code line numbers.
//
//
// Retrieves the IXCLRDataMethodInstance* instance associated with the
// passed in native offset.
HRESULT
GetClrMethodInstance(
___in ULONG64 NativeOffset,
___out IXCLRDataMethodInstance** Method)
{
HRESULT Status;
CLRDATA_ENUM MethEnum;
Status = g_clrData->StartEnumMethodInstancesByAddress(NativeOffset, NULL, &MethEnum);
if (Status == S_OK)
{
Status = g_clrData->EnumMethodInstanceByAddress(&MethEnum, Method);
g_clrData->EndEnumMethodInstancesByAddress(MethEnum);
}
// Any alternate success is a true failure here.
return (Status == S_OK || FAILED(Status)) ? Status : E_NOINTERFACE;
}
//
// Enumerates over the IL address map associated with the passed in
// managed method, and returns the highest non-epilog offset.
HRESULT
GetLastMethodIlOffset(
___in IXCLRDataMethodInstance* Method,
___out PULONG32 MethodOffs)
{
HRESULT Status;
CLRDATA_IL_ADDRESS_MAP MapLocal[16];
CLRDATA_IL_ADDRESS_MAP* Map = MapLocal;
ULONG32 MapCount = _countof(MapLocal);
ULONG32 MapNeeded;
ULONG32 HighestOffset;
for (;;)
{
if ((Status = Method->GetILAddressMap(MapCount, &MapNeeded, Map)) != S_OK)
{
return Status;
}
if (MapNeeded <= MapCount)
{
break;
}
// Need more map entries.
if (Map != MapLocal)
{
// Already went around and the answer changed,
// which should not be possible.
delete[] Map;
return E_UNEXPECTED;
}
Map = new CLRDATA_IL_ADDRESS_MAP[MapNeeded];
if (!Map)
{
return E_OUTOFMEMORY;
}
MapCount = MapNeeded;
}
HighestOffset = 0;
for (size_t i = 0; i < MapNeeded; i++)
{
if (Map[i].ilOffset != (ULONG32)CLRDATA_IL_OFFSET_NO_MAPPING &&
Map[i].ilOffset != (ULONG32)CLRDATA_IL_OFFSET_PROLOG &&
Map[i].ilOffset != (ULONG32)CLRDATA_IL_OFFSET_EPILOG &&
Map[i].ilOffset > HighestOffset)
{
HighestOffset = Map[i].ilOffset;
}
}
if (Map != MapLocal)
{
delete[] Map;
}
*MethodOffs = HighestOffset;
return S_OK;
}
//
// Convert a native offset (possibly already associated with a managed
// method identified by the passed in IXCLRDataMethodInstance) to a
// triplet (ImageInfo, MethodToken, MethodOffset) that can be used to
// represent an "IL offset".
HRESULT
ConvertNativeToIlOffset(
___in ULONG64 nativeOffset,
___in BOOL bAdjustOffsetForLineNumber,
___out IXCLRDataModule** ppModule,
___out mdMethodDef* methodToken,
___out PULONG32 methodOffs)
{
ToRelease<IXCLRDataMethodInstance> pMethodInst(NULL);
HRESULT Status;
if ((Status = GetClrMethodInstance(nativeOffset, &pMethodInst)) != S_OK)
{
ExtDbgOut("ConvertNativeToIlOffset(%p): GetClrMethodInstance FAILED %08x\n", nativeOffset, Status);
return Status;
}
if (bAdjustOffsetForLineNumber)
{
CLRDATA_ADDRESS startAddr;
if (pMethodInst->GetRepresentativeEntryAddress(&startAddr) == S_OK)
{
if (nativeOffset >= (startAddr + g_targetMachine->StackWalkIPAdjustOffset()))
{
nativeOffset -= g_targetMachine->StackWalkIPAdjustOffset();
}
}
}
if ((Status = pMethodInst->GetILOffsetsByAddress(nativeOffset, 1, NULL, methodOffs)) != S_OK)
{
ExtDbgOut("ConvertNativeToIlOffset(%p): GetILOffsetsByAddress FAILED %08x\n", nativeOffset, Status);
*methodOffs = 0;
}
else
{
switch((LONG)*methodOffs)
{
case CLRDATA_IL_OFFSET_NO_MAPPING:
return E_NOINTERFACE;
case CLRDATA_IL_OFFSET_PROLOG:
// Treat all of the prologue as part of
// the first source line.
*methodOffs = 0;
break;
case CLRDATA_IL_OFFSET_EPILOG:
// Back up until we find the last real
// IL offset.
if ((Status = GetLastMethodIlOffset(pMethodInst, methodOffs)) != S_OK)
{
return Status;
}
break;
}
}
return pMethodInst->GetTokenAndScope(methodToken, ppModule);
}
// Based on a native offset, passed in the first argument this function
// identifies the corresponding source file name and line number.
HRESULT
GetLineByOffset(
___in ULONG64 nativeOffset,
___out ULONG *pLinenum,
__out_ecount(cchFileName) WCHAR* pwszFileName,
___in ULONG cchFileName,
___in BOOL bAdjustOffsetForLineNumber /* = FALSE */)
{
HRESULT Status = S_OK;
ULONG32 methodToken;
ULONG32 methodOffs;
// Find the image, method token and IL offset that correspond to "nativeOffset"
ToRelease<IXCLRDataModule> pModule(NULL);
IfFailRet(ConvertNativeToIlOffset(nativeOffset, bAdjustOffsetForLineNumber, &pModule, &methodToken, &methodOffs));
ToRelease<IMetaDataImport> pMDImport(NULL);
Status = pModule->QueryInterface(IID_IMetaDataImport, (LPVOID *) &pMDImport);
if (FAILED(Status))
{
ExtDbgOut("GetLineByOffset(%p): QueryInterface(IID_IMetaDataImport) FAILED %08x\n", nativeOffset, Status);
}
SymbolReader symbolReader;
IfFailRet(symbolReader.LoadSymbols(pMDImport, pModule));
return symbolReader.GetLineByILOffset(methodToken, methodOffs, pLinenum, pwszFileName, cchFileName);
}
void TableOutput::ReInit(int numColumns, int defaultColumnWidth, Alignment alignmentDefault, int indent, int padding)
{
Clear();
mColumns = numColumns;
mDefaultWidth = defaultColumnWidth;
mIndent = indent;
mPadding = padding;
mCurrCol = 0;
mDefaultAlign = alignmentDefault;
}
void TableOutput::SetWidths(int columns, ...)
{
SOS_Assert(columns > 0);
SOS_Assert(columns <= mColumns);
AllocWidths();
va_list list;
va_start(list, columns);
for (int i = 0; i < columns; ++i)
mWidths[i] = va_arg(list, int);
va_end(list);
}
void TableOutput::SetColWidth(int col, int width)
{
SOS_Assert(col >= 0 && col < mColumns);
SOS_Assert(width >= 0);
AllocWidths();
mWidths[col] = width;
}
void TableOutput::SetColAlignment(int col, Alignment align)
{
SOS_Assert(col >= 0 && col < mColumns);
if (!mAlignments)
{
mAlignments = new Alignment[mColumns];
for (int i = 0; i < mColumns; ++i)
mAlignments[i] = mDefaultAlign;
}
mAlignments[col] = align;
}
void TableOutput::Clear()
{
if (mAlignments)
{
delete [] mAlignments;
mAlignments = 0;
}
if (mWidths)
{
delete [] mWidths;
mWidths = 0;
}
}
void TableOutput::AllocWidths()
{
if (!mWidths)
{
mWidths = new int[mColumns];
for (int i = 0; i < mColumns; ++i)
mWidths[i] = mDefaultWidth;
}
}
int TableOutput::GetColumnWidth(int col)
{
SOS_Assert(col < mColumns);
if (mWidths)
return mWidths[col];
return mDefaultWidth;
}
Alignment TableOutput::GetColAlign(int col)
{
SOS_Assert(col < mColumns);
if (mAlignments)
return mAlignments[col];
return mDefaultAlign;
}
const char *TableOutput::GetWhitespace(int amount)
{
static char WhiteSpace[256] = "";
static int count = 0;
if (count == 0)
{
count = _countof(WhiteSpace);
for (int i = 0; i < count-1; ++i)
WhiteSpace[i] = ' ';
WhiteSpace[count-1] = 0;
}
SOS_Assert(amount < count);
return &WhiteSpace[count-amount-1];
}
void TableOutput::OutputBlankColumns(int col)
{
if (col < mCurrCol)
{
ExtOut("\n");
mCurrCol = 0;
}
int whitespace = 0;
for (int i = mCurrCol; i < col; ++i)
whitespace += GetColumnWidth(i) + mPadding;
ExtOut(GetWhitespace(whitespace));
}
void TableOutput::OutputIndent()
{
if (mIndent)
ExtOut(GetWhitespace(mIndent));
}
#ifndef FEATURE_PAL
PEOffsetMemoryReader::PEOffsetMemoryReader(TADDR moduleBaseAddress) :
m_moduleBaseAddress(moduleBaseAddress),
m_refCount(1)
{}
HRESULT __stdcall PEOffsetMemoryReader::QueryInterface(REFIID riid, VOID** ppInterface)
{
if(riid == __uuidof(IDiaReadExeAtOffsetCallback))
{
*ppInterface = static_cast<IDiaReadExeAtOffsetCallback*>(this);
AddRef();
return S_OK;
}
else if(riid == __uuidof(IUnknown))
{
*ppInterface = static_cast<IUnknown*>(this);
AddRef();
return S_OK;
}
else
{
return E_NOINTERFACE;
}
}
ULONG __stdcall PEOffsetMemoryReader::AddRef()
{
return InterlockedIncrement((volatile LONG *) &m_refCount);
}
ULONG __stdcall PEOffsetMemoryReader::Release()
{
ULONG count = InterlockedDecrement((volatile LONG *) &m_refCount);
if(count == 0)
{
delete this;
}
return count;
}
// IDiaReadExeAtOffsetCallback implementation
HRESULT __stdcall PEOffsetMemoryReader::ReadExecutableAt(DWORDLONG fileOffset, DWORD cbData, DWORD* pcbData, BYTE data[])
{
return SafeReadMemory(m_moduleBaseAddress + fileOffset, data, cbData, pcbData) ? S_OK : E_FAIL;
}
PERvaMemoryReader::PERvaMemoryReader(TADDR moduleBaseAddress) :
m_moduleBaseAddress(moduleBaseAddress),
m_refCount(1)
{}
HRESULT __stdcall PERvaMemoryReader::QueryInterface(REFIID riid, VOID** ppInterface)
{
if(riid == __uuidof(IDiaReadExeAtRVACallback))
{
*ppInterface = static_cast<IDiaReadExeAtRVACallback*>(this);
AddRef();
return S_OK;
}
else if(riid == __uuidof(IUnknown))
{
*ppInterface = static_cast<IUnknown*>(this);
AddRef();
return S_OK;
}
else
{
return E_NOINTERFACE;
}
}
ULONG __stdcall PERvaMemoryReader::AddRef()
{
return InterlockedIncrement((volatile LONG *) &m_refCount);
}
ULONG __stdcall PERvaMemoryReader::Release()
{
ULONG count = InterlockedDecrement((volatile LONG *) &m_refCount);
if(count == 0)
{
delete this;
}
return count;
}
// IDiaReadExeAtOffsetCallback implementation
HRESULT __stdcall PERvaMemoryReader::ReadExecutableAtRVA(DWORD relativeVirtualAddress, DWORD cbData, DWORD* pcbData, BYTE data[])
{
return SafeReadMemory(m_moduleBaseAddress + relativeVirtualAddress, data, cbData, pcbData) ? S_OK : E_FAIL;
}
#endif // FEATURE_PAL
static void AddAssemblyName(WString& methodOutput, CLRDATA_ADDRESS mdesc)
{
DacpMethodDescData mdescData;
if (SUCCEEDED(mdescData.Request(g_sos, mdesc)))
{
DacpModuleData dmd;
if (SUCCEEDED(dmd.Request(g_sos, mdescData.ModulePtr)))
{
ToRelease<IXCLRDataModule> pModule;
if (SUCCEEDED(g_sos->GetModule(mdescData.ModulePtr, &pModule)))
{
ArrayHolder<WCHAR> wszFileName = new WCHAR[MAX_LONGPATH + 1];
ULONG32 nameLen = 0;
if (SUCCEEDED(pModule->GetFileName(MAX_LONGPATH, &nameLen, wszFileName)))
{
if (wszFileName[0] != W('\0'))
{
WCHAR *pJustName = _wcsrchr(wszFileName, GetTargetDirectorySeparatorW());
if (pJustName == NULL)
pJustName = wszFileName - 1;
methodOutput += (pJustName + 1);
methodOutput += W("!");
}
}
}
}
}
}
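// Formats a description of an explicit Frame at frameAddr: "[FrameName: address]"
// followed by the associated method name when one can be resolved (optionally
// prefixed with its assembly name).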
WString GetFrameFromAddress(TADDR frameAddr, IXCLRDataStackWalk *pStackWalk, BOOL bAssemblyName)
{
TADDR vtAddr;
MOVE(vtAddr, frameAddr);
WString frameOutput;
frameOutput += W("[");
if (SUCCEEDED(g_sos->GetFrameName(TO_CDADDR(vtAddr), mdNameLen, g_mdName, NULL)))
frameOutput += g_mdName;
else
frameOutput += W("Frame");
frameOutput += WString(W(": ")) + Pointer(frameAddr) + W("] ");
// Print the frame's associated function info, if it has any.
CLRDATA_ADDRESS mdesc = 0;
if (SUCCEEDED(g_sos->GetMethodDescPtrFromFrame(frameAddr, &mdesc)))
{
if (SUCCEEDED(g_sos->GetMethodDescName(mdesc, mdNameLen, g_mdName, NULL)))
{
if (bAssemblyName)
{
AddAssemblyName(frameOutput, mdesc);
}
frameOutput += g_mdName;
}
else
{
frameOutput += W("<unknown method>");
}
}
else if (pStackWalk)
{
// The Frame did not have direct function info, so try to get the method instance
// (in this case a MethodDesc), and read the name from it.
ToRelease<IXCLRDataFrame> frame;
if (SUCCEEDED(pStackWalk->GetFrame(&frame)))
{
ToRelease<IXCLRDataMethodInstance> methodInstance;
if (SUCCEEDED(frame->GetMethodInstance(&methodInstance)))
{
// GetName can return S_FALSE if mdNameLen is not large enough. However we are already
// passing a pretty big buffer in. If this returns S_FALSE (meaning the buffer is too
// small) then we should not output it anyway.
if (methodInstance->GetName(0, mdNameLen, NULL, g_mdName) == S_OK)
frameOutput += g_mdName;
}
}
}
return frameOutput;
}
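// Resolves an instruction pointer to a readable method name, optionally appending
// the assembly name, the displacement from the start of the native code, and the
// source file and line number when symbols are available.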
WString MethodNameFromIP(CLRDATA_ADDRESS ip, BOOL bSuppressLines, BOOL bAssemblyName, BOOL bDisplacement, BOOL bAdjustIPForLineNumber)
{
ULONG linenum;
WString methodOutput;
CLRDATA_ADDRESS mdesc = 0;
if (FAILED(g_sos->GetMethodDescPtrFromIP(ip, &mdesc)))
{
methodOutput = W("<unknown>");
}
else
{
DacpMethodDescData mdescData;
if (SUCCEEDED(g_sos->GetMethodDescName(mdesc, mdNameLen, g_mdName, NULL)))
{
if (bAssemblyName)
{
AddAssemblyName(methodOutput, mdesc);
}
methodOutput += g_mdName;
if (bDisplacement)
{
if (SUCCEEDED(mdescData.Request(g_sos, mdesc)))
{
ULONG64 disp = (ip - mdescData.NativeCodeAddr);
if (disp)
{
methodOutput += W(" + ");
methodOutput += Decimal(disp);
}
}
}
}
else if (SUCCEEDED(mdescData.Request(g_sos, mdesc)))
{
DacpModuleData dmd;
BOOL bModuleNameWorked = FALSE;
ULONG64 addrInModule = ip;
if (SUCCEEDED(dmd.Request(g_sos, mdescData.ModulePtr)))
{
CLRDATA_ADDRESS peFileBase = 0;
if (SUCCEEDED(g_sos->GetPEFileBase(dmd.File, &peFileBase)))
{
if (peFileBase)
{
addrInModule = peFileBase;
}
}
}
ULONG Index;
ULONG64 moduleBase;
if (SUCCEEDED(g_ExtSymbols->GetModuleByOffset(UL64_TO_CDA(addrInModule), 0, &Index, &moduleBase)))
{
ArrayHolder<char> szModuleName = new char[MAX_LONGPATH+1];
if (SUCCEEDED(g_ExtSymbols->GetModuleNames(Index, moduleBase, NULL, 0, NULL, szModuleName, MAX_LONGPATH, NULL, NULL, 0, NULL)))
{
MultiByteToWideChar (CP_ACP, 0, szModuleName, MAX_LONGPATH, g_mdName, _countof(g_mdName));
methodOutput += g_mdName;
methodOutput += W("!");
}
}
methodOutput += W("<unknown method>");
}
else
{
methodOutput = W("<unknown>");
}
ArrayHolder<WCHAR> wszFileName = new WCHAR[MAX_LONGPATH];
if (!bSuppressLines &&
SUCCEEDED(GetLineByOffset(TO_CDADDR(ip), &linenum, wszFileName, MAX_LONGPATH, bAdjustIPForLineNumber)))
{
methodOutput += WString(W(" [")) + wszFileName + W(" @ ") + Decimal(linenum) + W("]");
}
}
return methodOutput;
}
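// Enumerates the GC stack references (and, when requested, the enumeration errors)
// for the thread with the given OS thread id. The caller owns the returned arrays.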
HRESULT GetGCRefs(ULONG osID, SOSStackRefData **ppRefs, unsigned int *pRefCnt, SOSStackRefError **ppErrors, unsigned int *pErrCount)
{
if (ppRefs == NULL || pRefCnt == NULL)
return E_POINTER;
if (pErrCount)
*pErrCount = 0;
*pRefCnt = 0;
unsigned int count = 0;
ToRelease<ISOSStackRefEnum> pEnum;
if (FAILED(g_sos->GetStackReferences(osID, &pEnum)) || FAILED(pEnum->GetCount(&count)))
{
ExtOut("Failed to enumerate GC references.\n");
return E_FAIL;
}
*ppRefs = new SOSStackRefData[count];
if (FAILED(pEnum->Next(count, *ppRefs, pRefCnt)))
{
ExtOut("Failed to enumerate GC references.\n");
return E_FAIL;
}
SOS_Assert(count == *pRefCnt);
    // Enumerate errors found. Any bad HRESULT received while enumerating errors is NOT a fatal error.
// Hence we return S_FALSE if we encounter one.
if (ppErrors && pErrCount)
{
ToRelease<ISOSStackRefErrorEnum> pErrors;
if (FAILED(pEnum->EnumerateErrors(&pErrors)))
{
ExtOut("Failed to enumerate GC reference errors.\n");
return S_FALSE;
}
if (FAILED(pErrors->GetCount(&count)))
{
ExtOut("Failed to enumerate GC reference errors.\n");
return S_FALSE;
}
*ppErrors = new SOSStackRefError[count];
if (FAILED(pErrors->Next(count, *ppErrors, pErrCount)))
{
ExtOut("Failed to enumerate GC reference errors.\n");
*pErrCount = 0;
return S_FALSE;
}
SOS_Assert(count == *pErrCount);
}
return S_OK;
}
InternalFrameManager::InternalFrameManager() : m_cInternalFramesActual(0), m_iInternalFrameCur(0) {}
HRESULT InternalFrameManager::Init(ICorDebugThread3 * pThread3)
{
_ASSERTE(pThread3 != NULL);
return pThread3->GetActiveInternalFrames(
_countof(m_rgpInternalFrame2),
&m_cInternalFramesActual,
&(m_rgpInternalFrame2[0]));
}
HRESULT InternalFrameManager::PrintPrecedingInternalFrames(ICorDebugFrame * pFrame)
{
HRESULT Status;
for (; m_iInternalFrameCur < m_cInternalFramesActual; m_iInternalFrameCur++)
{
BOOL bIsCloser = FALSE;
IfFailRet(m_rgpInternalFrame2[m_iInternalFrameCur]->IsCloserToLeaf(pFrame, &bIsCloser));
if (!bIsCloser)
{
// Current internal frame is now past pFrame, so we're done
return S_OK;
}
IfFailRet(PrintCurrentInternalFrame());
}
// Exhausted list of internal frames. Done!
return S_OK;
}
HRESULT InternalFrameManager::PrintCurrentInternalFrame()
{
_ASSERTE(m_iInternalFrameCur < m_cInternalFramesActual);
HRESULT Status;
CORDB_ADDRESS address;
IfFailRet(m_rgpInternalFrame2[m_iInternalFrameCur]->GetAddress(&address));
ToRelease<ICorDebugInternalFrame> pInternalFrame;
IfFailRet(m_rgpInternalFrame2[m_iInternalFrameCur]->QueryInterface(IID_ICorDebugInternalFrame, (LPVOID *) &pInternalFrame));
CorDebugInternalFrameType type;
IfFailRet(pInternalFrame->GetFrameType(&type));
LPCSTR szFrameType = NULL;
switch(type)
{
default:
szFrameType = "Unknown internal frame.";
break;
case STUBFRAME_M2U:
szFrameType = "Managed to Unmanaged transition";
break;
case STUBFRAME_U2M:
szFrameType = "Unmanaged to Managed transition";
break;
case STUBFRAME_APPDOMAIN_TRANSITION:
szFrameType = "AppDomain transition";
break;
case STUBFRAME_LIGHTWEIGHT_FUNCTION:
szFrameType = "Lightweight function";
break;
case STUBFRAME_FUNC_EVAL:
szFrameType = "Function evaluation";
break;
case STUBFRAME_INTERNALCALL:
szFrameType = "Internal call";
break;
case STUBFRAME_CLASS_INIT:
szFrameType = "Class initialization";
break;
case STUBFRAME_EXCEPTION:
szFrameType = "Exception";
break;
case STUBFRAME_SECURITY:
szFrameType = "Security";
break;
case STUBFRAME_JIT_COMPILATION:
szFrameType = "JIT Compilation";
break;
}
DMLOut("%p %s ", SOS_PTR(address), SOS_PTR(0));
ExtOut("[%s: %p]\n", szFrameType, SOS_PTR(address));
return S_OK;
}
#ifdef FEATURE_PAL
struct MemoryRegion
{
private:
uint64_t m_startAddress;
uint64_t m_endAddress;
CLRDATA_ADDRESS m_peFile;
BYTE* m_metadataMemory;
volatile LONG m_busy;
HRESULT CacheMetadata()
{
if (m_metadataMemory == nullptr)
{
HRESULT hr;
CLRDATA_ADDRESS baseAddress;
if (FAILED(hr = g_sos->GetPEFileBase(m_peFile, &baseAddress))) {
return hr;
}
ArrayHolder<WCHAR> imagePath = new WCHAR[MAX_LONGPATH];
if (FAILED(hr = g_sos->GetPEFileName(m_peFile, MAX_LONGPATH, imagePath.GetPtr(), NULL))) {
return hr;
}
IMAGE_DOS_HEADER DosHeader;
if (FAILED(hr = g_ExtData->ReadVirtual(baseAddress, &DosHeader, sizeof(DosHeader), NULL))) {
return hr;
}
IMAGE_NT_HEADERS Header;
if (FAILED(hr = g_ExtData->ReadVirtual(baseAddress + DosHeader.e_lfanew, &Header, sizeof(Header), NULL))) {
return hr;
}
// If there is no COMHeader, this can not be managed code.
if (Header.OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_COMHEADER].VirtualAddress == 0) {
return E_ACCESSDENIED;
}
ULONG32 imageSize = Header.OptionalHeader.SizeOfImage;
ULONG32 timeStamp = Header.FileHeader.TimeDateStamp;
ULONG32 bufferSize = (ULONG32)Size();
ArrayHolder<BYTE> buffer = new NOTHROW BYTE[bufferSize];
if (buffer == nullptr) {
return E_OUTOFMEMORY;
}
ULONG32 actualSize = 0;
if (FAILED(hr = GetMetadataLocator(imagePath, timeStamp, imageSize, nullptr, 0, 0, bufferSize, buffer, &actualSize))) {
return hr;
}
m_metadataMemory = buffer.Detach();
}
return S_OK;
}
public:
MemoryRegion(uint64_t start, uint64_t end, CLRDATA_ADDRESS peFile) :
m_startAddress(start),
m_endAddress(end),
m_peFile(peFile),
m_metadataMemory(nullptr),
m_busy(0)
{
}
uint64_t StartAddress() const { return m_startAddress; }
uint64_t EndAddress() const { return m_endAddress; }
uint64_t Size() const { return m_endAddress - m_startAddress; }
CLRDATA_ADDRESS const PEFile() { return m_peFile; }
bool operator<(const MemoryRegion& rhs) const
{
return (m_startAddress < rhs.m_startAddress) && (m_endAddress <= rhs.m_startAddress);
}
// Returns true if "rhs" is wholly contained in this one
bool Contains(const MemoryRegion& rhs) const
{
return (m_startAddress <= rhs.m_startAddress) && (m_endAddress >= rhs.m_endAddress);
}
HRESULT ReadMetadata(CLRDATA_ADDRESS address, ULONG32 bufferSize, BYTE* buffer)
{
_ASSERTE((m_startAddress <= address) && (m_endAddress >= (address + bufferSize)));
HRESULT hr = E_ACCESSDENIED;
// Skip in-memory and dynamic modules or if CacheMetadata failed
if (m_peFile != 0)
{
if (InterlockedIncrement(&m_busy) == 1)
{
// Attempt to get the assembly metadata from local file or by downloading from a symbol server
hr = CacheMetadata();
if (FAILED(hr)) {
                    // If we can't get the metadata for the assembly, mark this region to always fail.
m_peFile = 0;
}
}
InterlockedDecrement(&m_busy);
}
if (FAILED(hr)) {
return hr;
}
// Read the memory from the cached metadata blob
_ASSERTE(m_metadataMemory != nullptr);
uint64_t offset = address - m_startAddress;
memcpy(buffer, m_metadataMemory + offset, bufferSize);
return S_OK;
}
void Dispose()
{
if (m_metadataMemory != nullptr)
{
delete[] m_metadataMemory;
m_metadataMemory = nullptr;
}
}
};
std::set<MemoryRegion> g_metadataRegions;
bool g_metadataRegionsPopulated = false;
void FlushMetadataRegions()
{
for (const MemoryRegion& region : g_metadataRegions)
{
const_cast<MemoryRegion&>(region).Dispose();
}
g_metadataRegions.clear();
g_metadataRegionsPopulated = false;
}
void PopulateMetadataRegions()
{
g_metadataRegions.clear();
// Only populate the metadata regions if core dump
if (IsDumpFile())
{
int numModule;
ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(NULL, &numModule);
if (moduleList != nullptr)
{
for (int i = 0; i < numModule; i++)
{
DacpModuleData moduleData;
if (SUCCEEDED(moduleData.Request(g_sos, moduleList[i])))
{
if (moduleData.metadataStart != 0)
{
MemoryRegion region(moduleData.metadataStart, moduleData.metadataStart + moduleData.metadataSize, moduleData.File);
g_metadataRegions.insert(region);
#ifdef DUMP_METADATA_INFO
ArrayHolder<WCHAR> name = new WCHAR[MAX_LONGPATH];
name[0] = '\0';
if (moduleData.File != 0)
{
g_sos->GetPEFileName(moduleData.File, MAX_LONGPATH, name.GetPtr(), NULL);
}
ExtOut("%016x %016x %016x %S\n", moduleData.metadataStart, moduleData.metadataStart + moduleData.metadataSize, moduleData.metadataSize, name.GetPtr());
#endif
}
}
}
}
else
{
ExtDbgOut("PopulateMetadataRegions ModuleFromName returns null\n");
}
}
}
HRESULT GetMetadataMemory(CLRDATA_ADDRESS address, ULONG32 bufferSize, BYTE* buffer)
{
// Populate the metadata memory region map
if (!g_metadataRegionsPopulated)
{
g_metadataRegionsPopulated = true;
PopulateMetadataRegions();
}
// Check if the memory address is in a metadata memory region
MemoryRegion region(address, address + bufferSize, 0);
const auto& found = g_metadataRegions.find(region);
if (found != g_metadataRegions.end() && found->Contains(region)) {
return const_cast<MemoryRegion&>(*found).ReadMetadata(address, bufferSize, buffer);
}
return E_ACCESSDENIED;
}
#endif // FEATURE_PAL
| 1 | 14,148 | Is there any way to not hardcode this given we know the size in bytes? The public API has the flexibility of being a Span. Should we maybe not do anything printing DML? The runtime allocates the buffer, but it's a "scratch memory" area, The delegate gets it passed in and they decide how to use it. Also, how is DML used if the feature is for macOS support? | dotnet-diagnostics | cpp |
@@ -64,6 +64,13 @@ nebula::cpp2::ErrorCode ScanEdgeProcessor::checkAndBuildContexts(const cpp2::Sca
std::vector<cpp2::EdgeProp> returnProps = {*req.return_columns_ref()};
ret = handleEdgeProps(returnProps);
buildEdgeColName(returnProps);
+ ret = buildFilter(req, [](const cpp2::ScanEdgeRequest& r) -> const std::string* {
+ if (r.filter_ref().has_value()) {
+ return r.get_filter();
+ } else {
+ return nullptr;
+ }
+ });
return ret;
}
| 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "storage/query/ScanEdgeProcessor.h"
#include "common/utils/NebulaKeyUtils.h"
#include "storage/StorageFlags.h"
#include "storage/exec/QueryUtils.h"
namespace nebula {
namespace storage {
ProcessorCounters kScanEdgeCounters;
void ScanEdgeProcessor::process(const cpp2::ScanEdgeRequest& req) {
if (executor_ != nullptr) {
executor_->add([req, this]() { this->doProcess(req); });
} else {
doProcess(req);
}
}
void ScanEdgeProcessor::doProcess(const cpp2::ScanEdgeRequest& req) {
spaceId_ = req.get_space_id();
enableReadFollower_ = req.get_enable_read_from_follower();
limit_ = req.get_limit();
auto retCode = getSpaceVidLen(spaceId_);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
for (auto& p : req.get_parts()) {
pushResultCode(retCode, p.first);
}
onFinished();
return;
}
this->planContext_ = std::make_unique<PlanContext>(
this->env_, spaceId_, this->spaceVidLen_, this->isIntId_, req.common_ref());
retCode = checkAndBuildContexts(req);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
for (auto& p : req.get_parts()) {
pushResultCode(retCode, p.first);
}
onFinished();
return;
}
if (!FLAGS_query_concurrently) {
runInSingleThread(req);
} else {
runInMultipleThread(req);
}
}
nebula::cpp2::ErrorCode ScanEdgeProcessor::checkAndBuildContexts(const cpp2::ScanEdgeRequest& req) {
auto ret = getSpaceEdgeSchema();
if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) {
return ret;
}
std::vector<cpp2::EdgeProp> returnProps = {*req.return_columns_ref()};
ret = handleEdgeProps(returnProps);
buildEdgeColName(returnProps);
return ret;
}
void ScanEdgeProcessor::buildEdgeColName(const std::vector<cpp2::EdgeProp>& edgeProps) {
for (const auto& edgeProp : edgeProps) {
auto edgeType = edgeProp.get_type();
auto edgeName = edgeContext_.edgeNames_[edgeType];
for (const auto& prop : *edgeProp.props_ref()) {
resultDataSet_.colNames.emplace_back(edgeName + "." + prop);
}
}
}
void ScanEdgeProcessor::onProcessFinished() {
resp_.set_edge_data(std::move(resultDataSet_));
resp_.set_cursors(std::move(cursors_));
}
StoragePlan<Cursor> ScanEdgeProcessor::buildPlan(
RuntimeContext* context,
nebula::DataSet* result,
std::unordered_map<PartitionID, cpp2::ScanCursor>* cursors) {
StoragePlan<Cursor> plan;
std::vector<std::unique_ptr<FetchEdgeNode>> edges;
for (const auto& ec : edgeContext_.propContexts_) {
edges.emplace_back(
std::make_unique<FetchEdgeNode>(context, &edgeContext_, ec.first, &ec.second));
}
auto output = std::make_unique<ScanEdgePropNode>(
context, std::move(edges), enableReadFollower_, limit_, cursors, result);
plan.addNode(std::move(output));
return plan;
}
folly::Future<std::pair<nebula::cpp2::ErrorCode, PartitionID>> ScanEdgeProcessor::runInExecutor(
RuntimeContext* context,
nebula::DataSet* result,
std::unordered_map<PartitionID, cpp2::ScanCursor>* cursors,
PartitionID partId,
Cursor cursor) {
return folly::via(executor_,
[this, context, result, cursors, partId, input = std::move(cursor)]() {
auto plan = buildPlan(context, result, cursors);
auto ret = plan.go(partId, input);
if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) {
return std::make_pair(ret, partId);
}
return std::make_pair(nebula::cpp2::ErrorCode::SUCCEEDED, partId);
});
}
void ScanEdgeProcessor::runInSingleThread(const cpp2::ScanEdgeRequest& req) {
contexts_.emplace_back(RuntimeContext(planContext_.get()));
std::unordered_set<PartitionID> failedParts;
auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_);
for (const auto& partEntry : req.get_parts()) {
auto partId = partEntry.first;
auto cursor = partEntry.second;
auto ret = plan.go(partId, cursor.get_has_next() ? *cursor.get_next_cursor() : "");
if (ret != nebula::cpp2::ErrorCode::SUCCEEDED &&
failedParts.find(partId) == failedParts.end()) {
failedParts.emplace(partId);
handleErrorCode(ret, spaceId_, partId);
}
}
onProcessFinished();
onFinished();
}
void ScanEdgeProcessor::runInMultipleThread(const cpp2::ScanEdgeRequest& req) {
cursorsOfPart_.resize(req.get_parts().size());
for (size_t i = 0; i < req.get_parts().size(); i++) {
nebula::DataSet result = resultDataSet_;
results_.emplace_back(std::move(result));
contexts_.emplace_back(RuntimeContext(planContext_.get()));
}
size_t i = 0;
std::vector<folly::Future<std::pair<nebula::cpp2::ErrorCode, PartitionID>>> futures;
for (const auto& [partId, cursor] : req.get_parts()) {
futures.emplace_back(runInExecutor(&contexts_[i],
&results_[i],
&cursorsOfPart_[i],
partId,
cursor.get_has_next() ? *cursor.get_next_cursor() : ""));
i++;
}
folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable {
CHECK(!t.hasException());
const auto& tries = t.value();
for (size_t j = 0; j < tries.size(); j++) {
CHECK(!tries[j].hasException());
const auto& [code, partId] = tries[j].value();
if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
handleErrorCode(code, spaceId_, partId);
} else {
resultDataSet_.append(std::move(results_[j]));
cursors_.merge(std::move(cursorsOfPart_[j]));
}
}
this->onProcessFinished();
this->onFinished();
});
}
} // namespace storage
} // namespace nebula
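// ---------------------------------------------------------------------------
// Editorial sketch - not part of ScanEdgeProcessor.cpp. The hunk for this
// record wires the filter in by passing an accessor lambda to a shared
// buildFilter(req, ...) helper. A design alternative is a virtual hook that
// each processor overrides, so no accessor has to be passed around. Everything
// below is hypothetical and only illustrates that alternative shape.

#include <string>

class ExampleBaseScanProcessor {
 public:
  virtual ~ExampleBaseScanProcessor() = default;

 protected:
  // Each concrete processor knows how to pull the optional filter string out
  // of its own request type.
  virtual const std::string* getFilterString() const = 0;

  // Shared parsing logic lives in the base class and calls the hook.
  bool buildFilter() {
    const std::string* filter = getFilterString();
    if (filter == nullptr) {
      return true;  // no filter supplied, nothing to parse
    }
    // ... parse *filter into an expression and keep it for row evaluation ...
    return true;
  }
};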
| 1 | 32,124 | How about just override the `buildFilter`? Passing another function here is a little weird. | vesoft-inc-nebula | cpp |
@@ -71,8 +71,9 @@ var (
ignoredPackages = app.Flag("ignored-packages", "Space separated list of specs ignoring rebuilds if their dependencies have been updated. Will still build if all of the spec's RPMs have not been built.").String()
- pkgsToBuild = app.Flag("packages", "Space separated list of top-level packages that should be built. Omit this argument to build all packages.").String()
- pkgsToRebuild = app.Flag("rebuild-packages", "Space separated list of base package names packages that should be rebuilt.").String()
+ pkgsToBuild = app.Flag("packages", "Space separated list of top-level packages that should be built. Omit this argument to build all packages.").String()
+ pkgsToRebuild = app.Flag("rebuild-packages", "Space separated list of base package names packages that should be rebuilt.").String()
+ rpmHydratedTest = app.Flag("rpm-hydrated-test", "Is this just a TestRPM build with all RPMs hydrated?").String()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app) | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package main
import (
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"github.com/juliangruber/go-intersect"
"golang.org/x/sys/unix"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/internal/exe"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/pkggraph"
"microsoft.com/pkggen/internal/pkgjson"
"microsoft.com/pkggen/internal/shell"
"microsoft.com/pkggen/scheduler/buildagents"
"microsoft.com/pkggen/scheduler/schedulerutils"
)
const (
// default worker count to 0 to automatically scale with the number of logical CPUs.
defaultWorkerCount = "0"
defaultBuildAttempts = "1"
)
// schedulerChannels represents the communication channels used by a build agent.
// Unlike BuildChannels, schedulerChannels holds bidirectional channels that
// only the top-level scheduler should have. BuildChannels contains directional channels.
type schedulerChannels struct {
Requests chan *schedulerutils.BuildRequest
Results chan *schedulerutils.BuildResult
Cancel chan struct{}
}
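// Editorial sketch - not part of the original scheduler.go. It illustrates the
// relationship described above: the scheduler owns bidirectional channels and
// hands the workers directional views of the very same channels. The names
// below are hypothetical; a bidirectional chan T converts implicitly to
// <-chan T and chan<- T.
type exampleWorkerChannels struct {
    Requests <-chan *schedulerutils.BuildRequest // workers may only receive requests
    Results  chan<- *schedulerutils.BuildResult  // workers may only send results
}

func exampleDowncast(owner schedulerChannels) exampleWorkerChannels {
    return exampleWorkerChannels{Requests: owner.Requests, Results: owner.Results}
}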
var (
app = kingpin.New("scheduler", "A tool to schedule package builds from a dependency graph.")
inputGraphFile = exe.InputFlag(app, "Path to the DOT graph file to build.")
outputGraphFile = exe.OutputFlag(app, "Path to save the built DOT graph file.")
workDir = app.Flag("work-dir", "The directory to create the build folder").Required().String()
workerTar = app.Flag("worker-tar", "Full path to worker_chroot.tar.gz").Required().ExistingFile()
repoFile = app.Flag("repo-file", "Full path to local.repo").Required().ExistingFile()
rpmDir = app.Flag("rpm-dir", "The directory to use as the local repo and to submit RPM packages to").Required().ExistingDir()
srpmDir = app.Flag("srpm-dir", "The output directory for source RPM packages").Required().String()
cacheDir = app.Flag("cache-dir", "The cache directory containing downloaded dependency RPMS from Mariner Base").Required().ExistingDir()
buildLogsDir = app.Flag("build-logs-dir", "Directory to store package build logs").Required().ExistingDir()
imageConfig = app.Flag("image-config-file", "Optional image config file to extract a package list from.").String()
baseDirPath = app.Flag("base-dir", "Base directory for relative file paths from the config. Defaults to config's directory.").ExistingDir()
distTag = app.Flag("dist-tag", "The distribution tag SRPMs will be built with.").Required().String()
distroReleaseVersion = app.Flag("distro-release-version", "The distro release version that the SRPM will be built with.").Required().String()
distroBuildNumber = app.Flag("distro-build-number", "The distro build number that the SRPM will be built with.").Required().String()
rpmmacrosFile = app.Flag("rpmmacros-file", "Optional file path to an rpmmacros file for rpmbuild to use.").ExistingFile()
buildAttempts = app.Flag("build-attempts", "Sets the number of times to try building a package.").Default(defaultBuildAttempts).Int()
runCheck = app.Flag("run-check", "Run the check during package builds.").Bool()
noCleanup = app.Flag("no-cleanup", "Whether or not to delete the chroot folder after the build is done").Bool()
noCache = app.Flag("no-cache", "Disables using prebuilt cached packages.").Bool()
stopOnFailure = app.Flag("stop-on-failure", "Stop on failed build").Bool()
validBuildAgentFlags = []string{buildagents.TestAgentFlag, buildagents.ChrootAgentFlag}
buildAgent = app.Flag("build-agent", "Type of build agent to build packages with.").PlaceHolder(exe.PlaceHolderize(validBuildAgentFlags)).Required().Enum(validBuildAgentFlags...)
buildAgentProgram = app.Flag("build-agent-program", "Path to the build agent that will be invoked to build packages.").String()
workers = app.Flag("workers", "Number of concurrent build agents to spawn. If set to 0, will automatically set to the logical CPU count.").Default(defaultWorkerCount).Int()
ignoredPackages = app.Flag("ignored-packages", "Space separated list of specs ignoring rebuilds if their dependencies have been updated. Will still build if all of the spec's RPMs have not been built.").String()
pkgsToBuild = app.Flag("packages", "Space separated list of top-level packages that should be built. Omit this argument to build all packages.").String()
pkgsToRebuild = app.Flag("rebuild-packages", "Space separated list of base package names packages that should be rebuilt.").String()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
)
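// Editorial note - not part of the original file. The hunk for this record
// declares "rpm-hydrated-test" as a String flag even though it encodes a
// yes/no choice. The Bool() form already used by the switches above (for
// example run-check and stop-on-failure) would be the idiomatic kingpin
// declaration, roughly (flag name kept, wording hypothetical):
//
//     rpmHydratedTest = app.Flag("rpm-hydrated-test", "Run a TestRPM build with all RPMs already hydrated.").Bool()
//
// and it would then be consumed as a plain boolean: if *rpmHydratedTest { ... }.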
func main() {
app.Version(exe.ToolkitVersion)
kingpin.MustParse(app.Parse(os.Args[1:]))
logger.InitBestEffort(*logFile, *logLevel)
if *workers <= 0 {
*workers = runtime.NumCPU()
logger.Log.Debugf("No worker count supplied, discovered %d logical CPUs.", *workers)
}
if *buildAttempts <= 0 {
logger.Log.Fatalf("Value in --build-attempts must be greater than zero. Found %d", *buildAttempts)
}
ignoredPackages := exe.ParseListArgument(*ignoredPackages)
// Generate the list of packages that need to be built.
// If none are requested then all packages will be built.
packagesNamesToBuild := exe.ParseListArgument(*pkgsToBuild)
packagesNamesToRebuild := exe.ParseListArgument(*pkgsToRebuild)
ignoredAndRebuiltPackages := intersect.Hash(ignoredPackages, packagesNamesToRebuild)
if len(ignoredAndRebuiltPackages) != 0 {
logger.Log.Fatalf("Can't ignore and force a rebuild of a package at the same time. Abusing packages: %v", ignoredAndRebuiltPackages)
}
packageVersToBuild, err := schedulerutils.CalculatePackagesToBuild(packagesNamesToBuild, packagesNamesToRebuild, *imageConfig, *baseDirPath)
if err != nil {
logger.Log.Fatalf("Unable to generate package build list, error: %s", err)
}
// Setup a build agent to handle build requests from the scheduler.
buildAgentConfig := &buildagents.BuildAgentConfig{
Program: *buildAgentProgram,
CacheDir: *cacheDir,
RepoFile: *repoFile,
RpmDir: *rpmDir,
SrpmDir: *srpmDir,
WorkDir: *workDir,
WorkerTar: *workerTar,
DistTag: *distTag,
DistroReleaseVersion: *distroReleaseVersion,
DistroBuildNumber: *distroBuildNumber,
RpmmacrosFile: *rpmmacrosFile,
NoCleanup: *noCleanup,
RunCheck: *runCheck,
LogDir: *buildLogsDir,
LogLevel: *logLevel,
}
agent, err := buildagents.BuildAgentFactory(*buildAgent)
if err != nil {
logger.Log.Fatalf("Unable to select build agent, error: %s", err)
}
err = agent.Initialize(buildAgentConfig)
if err != nil {
logger.Log.Fatalf("Unable to initialize build agent, error: %s", err)
}
// Setup cleanup routines to ensure no builds are left running when scheduler is exiting.
// Ensure no outstanding agents are running on graceful exit
defer cancelOutstandingBuilds(agent)
// On a SIGINT or SIGTERM stop all agents.
signals := make(chan os.Signal, 1)
signal.Notify(signals, unix.SIGINT, unix.SIGTERM)
go cancelBuildsOnSignal(signals, agent)
err = buildGraph(*inputGraphFile, *outputGraphFile, agent, *workers, *buildAttempts, *stopOnFailure, !*noCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages)
if err != nil {
logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s", err)
}
}
// cancelOutstandingBuilds stops any builds that are currently running.
func cancelOutstandingBuilds(agent buildagents.BuildAgent) {
err := agent.Close()
if err != nil {
logger.Log.Errorf("Unable to close build agent, error: %s", err)
}
    // Issue a SIGINT to all child processes to allow them to gracefully exit.
shell.PermanentlyStopAllProcesses(unix.SIGINT)
}
// cancelBuildsOnSignal will stop any builds running on SIGINT/SIGTERM.
func cancelBuildsOnSignal(signals chan os.Signal, agent buildagents.BuildAgent) {
sig := <-signals
logger.Log.Error(sig)
cancelOutstandingBuilds(agent)
os.Exit(1)
}
// buildGraph builds all packages in the dependency graph requested.
// It will save the resulting graph to outputFile.
func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages []string) (err error) {
// graphMutex guards pkgGraph from concurrent reads and writes during build.
var graphMutex sync.RWMutex
isGraphOptimized, pkgGraph, goalNode, err := schedulerutils.InitializeGraph(inputFile, packagesToBuild)
if err != nil {
return
}
// Setup and start the worker pool and scheduler routine.
numberOfNodes := pkgGraph.Nodes().Len()
channels := startWorkerPool(agent, workers, buildAttempts, numberOfNodes, &graphMutex, ignoredPackages)
logger.Log.Infof("Building %d nodes with %d workers", numberOfNodes, workers)
// After this call pkgGraph will be given to multiple routines and accessing it requires acquiring the mutex.
builtGraph, err := buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels)
if builtGraph != nil {
graphMutex.RLock()
defer graphMutex.RUnlock()
saveErr := pkggraph.WriteDOTGraphFile(builtGraph, outputFile)
if saveErr != nil {
logger.Log.Errorf("Failed to save built graph, error: %s", saveErr)
}
}
return
}
// startWorkerPool starts the worker pool and returns the communication channels between the workers and the scheduler.
// channelBufferSize controls how many entries in the channels can be buffered before blocking writes to them.
func startWorkerPool(agent buildagents.BuildAgent, workers, buildAttempts, channelBufferSize int, graphMutex *sync.RWMutex, ignoredPackages []string) (channels *schedulerChannels) {
channels = &schedulerChannels{
Requests: make(chan *schedulerutils.BuildRequest, channelBufferSize),
Results: make(chan *schedulerutils.BuildResult, channelBufferSize),
Cancel: make(chan struct{}),
}
// Downcast the bidirectional scheduler channels into directional channels for the build workers.
directionalChannels := &schedulerutils.BuildChannels{
Requests: channels.Requests,
Results: channels.Results,
Cancel: channels.Cancel,
}
// Start the workers now so they begin working as soon as a new job is queued.
for i := 0; i < workers; i++ {
logger.Log.Debugf("Starting worker #%d", i)
go schedulerutils.BuildNodeWorker(directionalChannels, agent, graphMutex, buildAttempts, ignoredPackages)
}
return
}
// buildAllNodes will build all nodes in a given dependency graph.
// This routine only contains control flow logic for build scheduling.
// It iteratively:
// - Calculates any unblocked nodes.
// - Submits these nodes to the worker pool to be processed.
// - Grabs a single build result from the worker pool.
// - Attempts to satisfy any unresolved dynamic dependencies with new implicit provides from the build result.
// - Attempts to subgraph the graph to only contain the requested packages if possible.
// - Repeat.
func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels) (builtGraph *pkggraph.PkgGraph, err error) {
var (
// stopBuilding tracks if the build has entered a failed state and this routine should stop as soon as possible.
stopBuilding bool
// useCachedImplicit tracks if cached implicit provides can be used to satisfy unresolved dynamic dependencies.
// Local packages are preferred over cached remotes ones to satisfy these unresolved dependencies, however
// the scheduler does not know what packages provide which implicit provides until the packages have been built.
// Therefore the scheduler will attempt to build all possible packages without consuming any cached dynamic dependencies first.
useCachedImplicit bool
)
// Start the build at the leaf nodes.
// The build will bubble up through the graph as it processes nodes.
buildState := schedulerutils.NewGraphBuildState()
nodesToBuild := schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit)
for {
logger.Log.Debugf("Found %d unblocked nodes", len(nodesToBuild))
// Each node that is ready to build must be converted into a build request and submitted to the worker pool.
newRequests := schedulerutils.ConvertNodesToRequests(pkgGraph, graphMutex, nodesToBuild, packagesNamesToRebuild, buildState, canUseCache)
for _, req := range newRequests {
buildState.RecordBuildRequest(req)
channels.Requests <- req
}
nodesToBuild = nil
        // If there are no active builds running, try enabling cached packages for unresolved dynamic dependencies to unblock more nodes.
// Otherwise there is nothing left that can be built.
if len(buildState.ActiveBuilds()) == 0 {
if useCachedImplicit {
err = fmt.Errorf("could not build all packages")
break
} else {
logger.Log.Warn("Enabling cached packages to satisfy unresolved dynamic dependencies.")
useCachedImplicit = true
nodesToBuild = schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit)
continue
}
}
        // Process the next build result
res := <-channels.Results
schedulerutils.PrintBuildResult(res)
buildState.RecordBuildResult(res)
if !stopBuilding {
if res.Err == nil {
// If the graph has already been optimized and is now solvable without any additional information
// then skip processing any new implicit provides.
if !isGraphOptimized {
var (
didOptimize bool
newGraph *pkggraph.PkgGraph
newGoalNode *pkggraph.PkgNode
)
didOptimize, newGraph, newGoalNode, err = updateGraphWithImplicitProvides(res, pkgGraph, graphMutex, useCachedImplicit)
if err != nil {
// Failures to manipulate the graph are fatal.
// There is no guarantee the graph is still a directed acyclic graph and is solvable.
stopBuilding = true
stopBuild(channels, buildState)
} else if didOptimize {
isGraphOptimized = true
// Replace the graph and goal node pointers.
// Any outstanding builds of nodes that are no longer in the graph will gracefully handle this.
// When querying their edges, the graph library will return an empty iterator (graph.Empty).
pkgGraph = newGraph
goalNode = newGoalNode
}
}
nodesToBuild = schedulerutils.FindUnblockedNodesFromResult(res, pkgGraph, graphMutex, buildState)
} else if stopOnFailure {
stopBuilding = true
err = res.Err
stopBuild(channels, buildState)
}
}
// If the goal node is available, mark the build as stopping.
// There may still be outstanding builds if the graph was recently subgraphed
// due to an unresolved implicit provide being satisfied and nodes that are no
// longer in the graph are building.
if buildState.IsNodeAvailable(goalNode) {
logger.Log.Infof("All packages built")
stopBuilding = true
}
activeSRPMs := buildState.ActiveSRPMs()
activeSRPMsCount := len(activeSRPMs)
if stopBuilding {
if activeSRPMsCount == 0 {
break
}
}
if res.Node.Type == pkggraph.TypeBuild {
logger.Log.Infof("%d currently active build(s): %v.", activeSRPMsCount, activeSRPMs)
}
}
builtGraph = pkgGraph
schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState)
return
}
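// Editorial toy - not part of the original scheduler.go. It reduces the control
// flow documented above buildAllNodes to strings instead of graph nodes: submit
// whatever is unblocked, consume one result at a time, unblock dependents, and
// stop once the goal is built. All names are hypothetical.
func exampleBuildLoop() {
    deps := map[string][]string{"goal": {"b"}, "b": {"a"}, "a": {}}
    requests := make(chan string, len(deps))
    results := make(chan string, len(deps))

    // Stand-in worker pool: every request immediately comes back as a result.
    go func() {
        for name := range requests {
            results <- name
        }
    }()

    built := map[string]bool{}
    submitted := map[string]bool{}
    unblocked := func() []string {
        var out []string
        for node, nodeDeps := range deps {
            ready := !built[node] && !submitted[node]
            for _, dep := range nodeDeps {
                ready = ready && built[dep]
            }
            if ready {
                out = append(out, node)
            }
        }
        return out
    }

    for !built["goal"] {
        for _, node := range unblocked() {
            submitted[node] = true
            requests <- node
        }
        res := <-results
        built[res] = true
        fmt.Println("built", res)
    }
    close(requests)
}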
// updateGraphWithImplicitProvides will update the graph with new implicit provides if available.
// It will also attempt to subgraph the graph if it becomes solvable with the new implicit provides.
func updateGraphWithImplicitProvides(res *schedulerutils.BuildResult, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, useCachedImplicit bool) (didOptimize bool, newGraph *pkggraph.PkgGraph, newGoalNode *pkggraph.PkgNode, err error) {
// acquire a writer lock since this routine will collapse nodes
graphMutex.Lock()
defer graphMutex.Unlock()
didInjectAny, err := schedulerutils.InjectMissingImplicitProvides(res, pkgGraph, useCachedImplicit)
if err != nil {
logger.Log.Errorf("Failed to add implicit provides for (%s). Error: %s", res.Node.FriendlyName(), err)
} else if didInjectAny {
// Failure to optimize the graph is non fatal as there may simply be unresolved dynamic dependencies
var subgraphErr error
newGraph, newGoalNode, subgraphErr = schedulerutils.OptimizeGraph(pkgGraph, useCachedImplicit)
if subgraphErr == nil {
logger.Log.Infof("Created solvable subgraph with new implicit provide information")
didOptimize = true
}
}
return
}
// stopBuild will stop all future builds from being scheduled by sending a cancellation signal
// to the worker pool and draining any outstanding build requests.
func stopBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) {
logger.Log.Error("Stopping build")
    // Close the cancel channel to prevent any buffered requests from being built.
// Upon seeing the cancel channel is closed, the build worker will stop instead
// of processing a new request.
close(channels.Cancel)
    // For any workers that are currently parked with no buffered requests, close the
// requests channel to wake up any build workers waiting on a request to be buffered.
// Upon being woken up by a closed requests channel, the build worker will stop.
close(channels.Requests)
// Drain the request buffer to sync the build state with the new number of outstanding builds.
for req := range channels.Requests {
buildState.RemoveBuildRequest(req)
}
}
| 1 | 16,114 | This should just be a `.Bool()` flag I think, we don't encode anything beyond y/n here. See `$(RUN_CHECK)` and `$(STOP_ON_PKG_FAIL)` for examples of how to pass those in. | microsoft-CBL-Mariner | go |
@@ -2042,6 +2042,7 @@ bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Promise(
address.SetLoadAddress(val_ptr_addr, &m_process->GetTarget());
return true;
} break;
+ case swift::MetadataKind::Function:
case swift::MetadataKind::Optional:
case swift::MetadataKind::Struct:
case swift::MetadataKind::Tuple: { | 1 | //===-- SwiftLanguageRuntime.cpp --------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "lldb/Target/SwiftLanguageRuntime.h"
#include <string.h>
#include "llvm/Support/raw_ostream.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "swift/ABI/MetadataValues.h"
#include "swift/ABI/System.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/ASTMangler.h"
#include "swift/AST/Decl.h"
#include "swift/AST/Module.h"
#include "swift/AST/Types.h"
#include "swift/Demangling/Demangle.h"
#include "swift/Demangling/Demangler.h"
#include "swift/Reflection/ReflectionContext.h"
#include "swift/Reflection/TypeRefBuilder.h"
#include "swift/Remote/MemoryReader.h"
#include "swift/Remote/RemoteAddress.h"
#include "swift/RemoteAST/RemoteAST.h"
#include "swift/Runtime/Metadata.h"
#include "lldb/Breakpoint/StoppointCallbackContext.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/Mangled.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/Section.h"
#include "lldb/Core/UniqueCStringMap.h"
#include "lldb/Core/Value.h"
#include "lldb/Core/ValueObjectConstResult.h"
#include "lldb/DataFormatters/StringPrinter.h"
#include "lldb/DataFormatters/TypeSynthetic.h"
#include "lldb/DataFormatters/ValueObjectPrinter.h"
#include "lldb/Host/HostInfo.h"
#include "lldb/Host/OptionParser.h"
#include "lldb/Interpreter/CommandInterpreter.h"
#include "lldb/Interpreter/CommandObject.h"
#include "lldb/Interpreter/CommandObjectMultiword.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Interpreter/OptionValueBoolean.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SwiftASTContext.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Symbol/TypeList.h"
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/ExecutionContext.h"
#include "lldb/Target/ProcessStructReader.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/ThreadPlanRunToAddress.h"
#include "lldb/Target/ThreadPlanStepInRange.h"
#include "lldb/Target/ThreadPlanStepOverRange.h"
#include "lldb/Utility/Status.h"
#include "lldb/Utility/CleanUp.h"
#include "lldb/Utility/DataBuffer.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/StringLexer.h"
// FIXME: we should not need this
#include "Plugins/Language/Swift/SwiftFormatters.h"
using namespace lldb;
using namespace lldb_private;
namespace lldb_private {
swift::Type GetSwiftType(void *opaque_ptr) {
return reinterpret_cast<swift::TypeBase *>(opaque_ptr);
}
swift::CanType GetCanonicalSwiftType(void *opaque_ptr) {
return reinterpret_cast<swift::TypeBase *>(opaque_ptr)->getCanonicalType();
}
swift::CanType GetCanonicalSwiftType(const CompilerType &type) {
return GetCanonicalSwiftType(
reinterpret_cast<void *>(type.GetOpaqueQualType()));
}
swift::Type GetSwiftType(const CompilerType &type) {
return GetSwiftType(reinterpret_cast<void *>(type.GetOpaqueQualType()));
}
} // namespace lldb_private
SwiftLanguageRuntime::~SwiftLanguageRuntime() = default;
static bool HasReflectionInfo(ObjectFile *obj_file) {
auto findSectionInObject = [&](std::string name) {
ConstString section_name(name);
SectionSP section_sp =
obj_file->GetSectionList()->FindSectionByName(section_name);
if (section_sp)
return true;
return false;
};
bool hasReflectionSection = false;
hasReflectionSection |= findSectionInObject("__swift5_fieldmd");
hasReflectionSection |= findSectionInObject("__swift5_assocty");
hasReflectionSection |= findSectionInObject("__swift5_builtin");
hasReflectionSection |= findSectionInObject("__swift5_capture");
hasReflectionSection |= findSectionInObject("__swift5_typeref");
hasReflectionSection |= findSectionInObject("__swift5_reflstr");
return hasReflectionSection;
}
void SwiftLanguageRuntime::SetupReflection() {
reflection_ctx.reset(new NativeReflectionContext(this->GetMemoryReader()));
auto &target = m_process->GetTarget();
auto M = target.GetExecutableModule();
auto *obj_file = M->GetObjectFile();
if (!obj_file)
return;
Address start_address = obj_file->GetHeaderAddress();
auto load_ptr = static_cast<uintptr_t>(start_address.GetLoadAddress(&target));
// Bail out if we can't read the executable instead of crashing.
if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS)
return;
reflection_ctx.reset(new NativeReflectionContext(this->GetMemoryReader()));
reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr));
auto module_list = GetTargetRef().GetImages();
module_list.ForEach([&](const ModuleSP &module_sp) -> bool {
auto *obj_file = module_sp->GetObjectFile();
if (!obj_file)
return false;
Address start_address = obj_file->GetHeaderAddress();
auto load_ptr = static_cast<uintptr_t>(
start_address.GetLoadAddress(&(m_process->GetTarget())));
if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS)
return false;
if (HasReflectionInfo(obj_file))
reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr));
return true;
});
}
SwiftLanguageRuntime::SwiftLanguageRuntime(Process *process)
: LanguageRuntime(process), m_negative_cache_mutex(),
m_SwiftNativeNSErrorISA(), m_memory_reader_sp(), m_promises_map(),
m_bridged_synthetics_map(), m_box_metadata_type() {
SetupSwiftError();
SetupExclusivity();
SetupReflection();
}
static llvm::Optional<lldb::addr_t>
FindSymbolForSwiftObject(Target &target, const ConstString &object,
const SymbolType sym_type) {
llvm::Optional<lldb::addr_t> retval;
SymbolContextList sc_list;
if (target.GetImages().FindSymbolsWithNameAndType(object, sym_type,
sc_list)) {
SymbolContext SwiftObject_Class;
if (sc_list.GetSize() == 1 &&
sc_list.GetContextAtIndex(0, SwiftObject_Class)) {
if (SwiftObject_Class.symbol) {
lldb::addr_t SwiftObject_class_addr =
SwiftObject_Class.symbol->GetAddress().GetLoadAddress(&target);
if (SwiftObject_class_addr &&
SwiftObject_class_addr != LLDB_INVALID_ADDRESS)
retval = SwiftObject_class_addr;
}
}
}
return retval;
}
AppleObjCRuntimeV2 *SwiftLanguageRuntime::GetObjCRuntime() {
if (auto objc_runtime = GetProcess()->GetObjCLanguageRuntime()) {
if (objc_runtime->GetPluginName() ==
AppleObjCRuntimeV2::GetPluginNameStatic())
return (AppleObjCRuntimeV2 *)objc_runtime;
}
return nullptr;
}
void SwiftLanguageRuntime::SetupSwiftError() {
Target &target(m_process->GetTarget());
if (m_SwiftNativeNSErrorISA.hasValue())
return;
ConstString g_SwiftNativeNSError("__SwiftNativeNSError");
m_SwiftNativeNSErrorISA = FindSymbolForSwiftObject(
target, g_SwiftNativeNSError, eSymbolTypeObjCClass);
}
void SwiftLanguageRuntime::SetupExclusivity() {
Target &target(m_process->GetTarget());
ConstString g_disableExclusivityChecking("_swift_disableExclusivityChecking");
m_dynamic_exclusivity_flag_addr = FindSymbolForSwiftObject(
target, g_disableExclusivityChecking, eSymbolTypeData);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
if (log)
log->Printf("SwiftLanguageRuntime: _swift_disableExclusivityChecking = %llu",
m_dynamic_exclusivity_flag_addr ?
*m_dynamic_exclusivity_flag_addr : 0);
}
void SwiftLanguageRuntime::ModulesDidLoad(const ModuleList &module_list) {
module_list.ForEach([&](const ModuleSP &module_sp) -> bool {
auto *obj_file = module_sp->GetObjectFile();
if (!obj_file)
return true;
Address start_address = obj_file->GetHeaderAddress();
auto load_ptr = static_cast<uintptr_t>(
start_address.GetLoadAddress(&(m_process->GetTarget())));
if (load_ptr == 0 || load_ptr == LLDB_INVALID_ADDRESS)
return false;
if (!reflection_ctx)
return false;
if (HasReflectionInfo(obj_file))
reflection_ctx->addImage(swift::remote::RemoteAddress(load_ptr));
return true;
});
}
static bool GetObjectDescription_ResultVariable(Process *process, Stream &str,
ValueObject &object) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS));
StreamString expr_string;
expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(%s)",
object.GetName().GetCString());
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression: %s",
expr_string.GetData());
ValueObjectSP result_sp;
EvaluateExpressionOptions eval_options;
eval_options.SetLanguage(lldb::eLanguageTypeSwift);
eval_options.SetResultIsInternal(true);
eval_options.SetGenerateDebugInfo(true);
auto eval_result = process->GetTarget().EvaluateExpression(
expr_string.GetData(),
process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(),
result_sp, eval_options);
if (log) {
switch (eval_result) {
case eExpressionCompleted:
log->Printf("[GetObjectDescription_ResultVariable] eExpressionCompleted");
break;
case eExpressionSetupError:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionSetupError");
break;
case eExpressionParseError:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionParseError");
break;
case eExpressionDiscarded:
log->Printf("[GetObjectDescription_ResultVariable] eExpressionDiscarded");
break;
case eExpressionInterrupted:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionInterrupted");
break;
case eExpressionHitBreakpoint:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionHitBreakpoint");
break;
case eExpressionTimedOut:
log->Printf("[GetObjectDescription_ResultVariable] eExpressionTimedOut");
break;
case eExpressionResultUnavailable:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionResultUnavailable");
break;
case eExpressionStoppedForDebug:
log->Printf(
"[GetObjectDescription_ResultVariable] eExpressionStoppedForDebug");
break;
}
}
// sanitize the result of the expression before moving forward
if (!result_sp) {
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression generated "
"no result");
return false;
}
if (result_sp->GetError().Fail()) {
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression generated "
"error: %s",
result_sp->GetError().AsCString());
return false;
}
if (false == result_sp->GetCompilerType().IsValid()) {
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression generated "
"invalid type");
return false;
}
lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions
dump_options;
dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken(
nullptr);
if (lldb_private::formatters::swift::String_SummaryProvider(
*result_sp.get(), str, TypeSummaryOptions()
.SetLanguage(lldb::eLanguageTypeSwift)
.SetCapping(eTypeSummaryUncapped),
dump_options)) {
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression completed "
"successfully");
return true;
} else {
if (log)
log->Printf("[GetObjectDescription_ResultVariable] expression generated "
"invalid string data");
return false;
}
}
static bool GetObjectDescription_ObjectReference(Process *process, Stream &str,
ValueObject &object) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS));
StreamString expr_string;
expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(Swift."
"unsafeBitCast(0x%" PRIx64 ", to: AnyObject.self))",
object.GetValueAsUnsigned(0));
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression: %s",
expr_string.GetData());
ValueObjectSP result_sp;
EvaluateExpressionOptions eval_options;
eval_options.SetLanguage(lldb::eLanguageTypeSwift);
eval_options.SetResultIsInternal(true);
eval_options.SetGenerateDebugInfo(true);
auto eval_result = process->GetTarget().EvaluateExpression(
expr_string.GetData(),
process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(),
result_sp, eval_options);
if (log) {
switch (eval_result) {
case eExpressionCompleted:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionCompleted");
break;
case eExpressionSetupError:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionSetupError");
break;
case eExpressionParseError:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionParseError");
break;
case eExpressionDiscarded:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionDiscarded");
break;
case eExpressionInterrupted:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionInterrupted");
break;
case eExpressionHitBreakpoint:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionHitBreakpoint");
break;
case eExpressionTimedOut:
log->Printf("[GetObjectDescription_ObjectReference] eExpressionTimedOut");
break;
case eExpressionResultUnavailable:
log->Printf("[GetObjectDescription_ObjectReference] "
"eExpressionResultUnavailable");
break;
case eExpressionStoppedForDebug:
log->Printf(
"[GetObjectDescription_ObjectReference] eExpressionStoppedForDebug");
break;
}
}
// sanitize the result of the expression before moving forward
if (!result_sp) {
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression generated "
"no result");
return false;
}
if (result_sp->GetError().Fail()) {
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression generated "
"error: %s",
result_sp->GetError().AsCString());
return false;
}
if (false == result_sp->GetCompilerType().IsValid()) {
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression generated "
"invalid type");
return false;
}
lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions
dump_options;
dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken(
nullptr);
if (lldb_private::formatters::swift::String_SummaryProvider(
*result_sp.get(), str, TypeSummaryOptions()
.SetLanguage(lldb::eLanguageTypeSwift)
.SetCapping(eTypeSummaryUncapped),
dump_options)) {
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression completed "
"successfully");
return true;
} else {
if (log)
log->Printf("[GetObjectDescription_ObjectReference] expression generated "
"invalid string data");
return false;
}
}
static bool GetObjectDescription_ObjectCopy(Process *process, Stream &str,
ValueObject &object) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DATAFORMATTERS));
ValueObjectSP static_sp(object.GetStaticValue());
CompilerType static_type(static_sp->GetCompilerType());
if (auto non_reference_type = static_type.GetNonReferenceType())
static_type = non_reference_type;
Status error;
// If we are in a generic context, here the static type of the object
// might end up being generic (i.e. <T>). We want to make sure that
// we correctly map the type into context before asking questions or
// printing, as IRGen requires a fully realized type to work on.
auto frame_sp =
process->GetThreadList().GetSelectedThread()->GetSelectedFrame();
auto *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(static_type.GetTypeSystem());
if (swift_ast_ctx)
static_type =
swift_ast_ctx->MapIntoContext(frame_sp,
static_type.GetOpaqueQualType());
lldb::addr_t copy_location = process->AllocateMemory(
static_type.GetByteStride(), ePermissionsReadable | ePermissionsWritable,
error);
if (copy_location == LLDB_INVALID_ADDRESS) {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] copy_location invalid");
return false;
}
CleanUp cleanup(
[process, copy_location] { process->DeallocateMemory(copy_location); });
DataExtractor data_extractor;
if (0 == static_sp->GetData(data_extractor, error)) {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] data extraction failed");
return false;
}
if (0 ==
process->WriteMemory(copy_location, data_extractor.GetDataStart(),
data_extractor.GetByteSize(), error)) {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] memory copy failed");
return false;
}
StreamString expr_string;
expr_string.Printf("Swift._DebuggerSupport.stringForPrintObject(Swift."
"UnsafePointer<%s>(bitPattern: 0x%" PRIx64 ")!.pointee)",
static_type.GetTypeName().GetCString(), copy_location);
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] expression: %s",
expr_string.GetData());
ValueObjectSP result_sp;
EvaluateExpressionOptions eval_options;
eval_options.SetLanguage(lldb::eLanguageTypeSwift);
eval_options.SetResultIsInternal(true);
eval_options.SetGenerateDebugInfo(true);
auto eval_result = process->GetTarget().EvaluateExpression(
expr_string.GetData(),
process->GetThreadList().GetSelectedThread()->GetSelectedFrame().get(),
result_sp, eval_options);
if (log) {
switch (eval_result) {
case eExpressionCompleted:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionCompleted");
break;
case eExpressionSetupError:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionSetupError");
break;
case eExpressionParseError:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionParseError");
break;
case eExpressionDiscarded:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionDiscarded");
break;
case eExpressionInterrupted:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionInterrupted");
break;
case eExpressionHitBreakpoint:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionHitBreakpoint");
break;
case eExpressionTimedOut:
log->Printf("[GetObjectDescription_ObjectCopy] eExpressionTimedOut");
break;
case eExpressionResultUnavailable:
log->Printf(
"[GetObjectDescription_ObjectCopy] eExpressionResultUnavailable");
break;
case eExpressionStoppedForDebug:
log->Printf(
"[GetObjectDescription_ObjectCopy] eExpressionStoppedForDebug");
break;
}
}
// sanitize the result of the expression before moving forward
if (!result_sp) {
if (log)
log->Printf(
"[GetObjectDescription_ObjectCopy] expression generated no result");
str.Printf("expression produced no result");
return true;
}
if (result_sp->GetError().Fail()) {
if (log)
log->Printf(
"[GetObjectDescription_ObjectCopy] expression generated error: %s",
result_sp->GetError().AsCString());
str.Printf("expression produced error: %s",
result_sp->GetError().AsCString());
return true;
}
if (false == result_sp->GetCompilerType().IsValid()) {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] expression generated "
"invalid type");
str.Printf("expression produced invalid result type");
return true;
}
lldb_private::formatters::StringPrinter::ReadStringAndDumpToStreamOptions
dump_options;
dump_options.SetEscapeNonPrintables(false).SetQuote('\0').SetPrefixToken(
nullptr);
if (lldb_private::formatters::swift::String_SummaryProvider(
*result_sp.get(), str, TypeSummaryOptions()
.SetLanguage(lldb::eLanguageTypeSwift)
.SetCapping(eTypeSummaryUncapped),
dump_options)) {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] expression completed "
"successfully");
} else {
if (log)
log->Printf("[GetObjectDescription_ObjectCopy] expression generated "
"invalid string data");
str.Printf("expression produced unprintable string");
}
return true;
}
static bool IsSwiftResultVariable(const ConstString &name) {
if (name) {
llvm::StringRef name_sr(name.GetStringRef());
if (name_sr.size() > 2 &&
(name_sr.startswith("$R") || name_sr.startswith("$E")) &&
::isdigit(name_sr[2]))
return true;
}
return false;
}
static bool IsSwiftReferenceType(ValueObject &object) {
CompilerType object_type(object.GetCompilerType());
if (llvm::dyn_cast_or_null<SwiftASTContext>(object_type.GetTypeSystem())) {
Flags type_flags(object_type.GetTypeInfo());
if (type_flags.AllSet(eTypeIsClass | eTypeHasValue |
eTypeInstanceIsPointer))
return true;
}
return false;
}
bool SwiftLanguageRuntime::GetObjectDescription(Stream &str,
ValueObject &object) {
if (object.IsUninitializedReference()) {
str.Printf("<uninitialized>");
return true;
}
if (::IsSwiftResultVariable(object.GetName())) {
// if this thing is a Swift expression result variable, it has two
// properties:
// a) its name is something we can refer to in expressions for free
// b) its type may be something we can't actually talk about in expressions
// so, just use the result variable's name in the expression and be done
// with it
StreamString probe_stream;
if (GetObjectDescription_ResultVariable(m_process, probe_stream, object)) {
str.Printf("%s", probe_stream.GetData());
return true;
}
} else if (::IsSwiftReferenceType(object)) {
// if this is a Swift class, it has two properties:
// a) we do not need its type name, AnyObject is just as good
// b) its value is something we can directly use to refer to it
// so, just use the ValueObject's pointer-value and be done with it
StreamString probe_stream;
if (GetObjectDescription_ObjectReference(m_process, probe_stream, object)) {
str.Printf("%s", probe_stream.GetData());
return true;
}
}
// in general, don't try to use the name of the ValueObject as it might end up
// referring to the wrong thing
return GetObjectDescription_ObjectCopy(m_process, str, object);
}
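// Editorial illustration - not part of the original file. Concrete examples of
// the expressions the three strategies above end up evaluating; the variable
// name, type and addresses are made up:
//   result variable "$R0":
//     Swift._DebuggerSupport.stringForPrintObject($R0)
//   class reference whose pointer value is 0x100504030:
//     Swift._DebuggerSupport.stringForPrintObject(Swift.unsafeBitCast(0x100504030, to: AnyObject.self))
//   any other value, copied into scratch memory at 0x100a00000 as type Foo.Point:
//     Swift._DebuggerSupport.stringForPrintObject(Swift.UnsafePointer<Foo.Point>(bitPattern: 0x100a00000)!.pointee)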
bool SwiftLanguageRuntime::GetObjectDescription(
Stream &str, Value &value, ExecutionContextScope *exe_scope) {
// This is only interesting to do with a ValueObject for Swift
return false;
}
bool SwiftLanguageRuntime::IsSwiftMangledName(const char *name) {
return swift::Demangle::isSwiftSymbol(name);
}
std::string SwiftLanguageRuntime::DemangleSymbolAsString (const char *symbol,
bool simplified) {
if (simplified) {
swift::Demangle::DemangleOptions options(swift::Demangle::DemangleOptions::
SimplifiedUIDemangleOptions());
return swift::Demangle::demangleSymbolAsString(
symbol, strlen(symbol), options);
} else
return swift::Demangle::demangleSymbolAsString(symbol, strlen(symbol));
}
std::string SwiftLanguageRuntime::DemangleSymbolAsString (const ConstString &symbol,
bool simplified) {
if (simplified) {
swift::Demangle::DemangleOptions options(swift::Demangle::DemangleOptions::
SimplifiedUIDemangleOptions());
return swift::Demangle::demangleSymbolAsString(
symbol.GetStringRef(), options);
} else
return swift::Demangle::demangleSymbolAsString(symbol.GetStringRef());
}
bool SwiftLanguageRuntime::IsSwiftClassName(const char *name)
{
return swift::Demangle::isClass(name);
}
const std::string SwiftLanguageRuntime::GetCurrentMangledName(const char *mangled_name)
{
#ifndef USE_NEW_MANGLING
return std::string(mangled_name);
#else
//FIXME: Check if we need to cache these lookups...
swift::Demangle::Context demangle_ctx;
swift::Demangle::NodePointer node_ptr = demangle_ctx.demangleSymbolAsNode(mangled_name);
if (!node_ptr)
{
// Sometimes this gets passed the prefix of a name, in which case we
// won't be able to demangle it. In that case return what was passed in.
printf ("Couldn't get mangled name for %s.\n", mangled_name);
return mangled_name;
}
else
return swift::Demangle::mangleNode(node_ptr);
#endif
}
void SwiftLanguageRuntime::MethodName::Clear() {
m_full.Clear();
m_basename = llvm::StringRef();
m_context = llvm::StringRef();
m_arguments = llvm::StringRef();
m_qualifiers = llvm::StringRef();
m_template_args = llvm::StringRef();
m_metatype_ref = llvm::StringRef();
m_return_type = llvm::StringRef();
m_type = eTypeInvalid;
m_parsed = false;
m_parse_error = false;
}
static bool StringHasAllOf(const llvm::StringRef &s, const char *which) {
for (const char *c = which; *c != 0; c++) {
if (s.find(*c) == llvm::StringRef::npos)
return false;
}
return true;
}
static bool StringHasAnyOf(const llvm::StringRef &s,
std::initializer_list<const char *> which,
size_t &where) {
for (const char *item : which) {
size_t where_item = s.find(item);
if (where_item != llvm::StringRef::npos) {
where = where_item;
return true;
}
}
where = llvm::StringRef::npos;
return false;
}
static bool UnpackTerminatedSubstring(const llvm::StringRef &s,
const char start, const char stop,
llvm::StringRef &dest) {
size_t pos_of_start = s.find(start);
if (pos_of_start == llvm::StringRef::npos)
return false;
size_t pos_of_stop = s.rfind(stop);
if (pos_of_stop == llvm::StringRef::npos)
return false;
size_t token_count = 1;
size_t idx = pos_of_start + 1;
while (idx < s.size()) {
if (s[idx] == start)
++token_count;
if (s[idx] == stop) {
if (token_count == 1) {
dest = s.slice(pos_of_start, idx + 1);
return true;
}
}
idx++;
}
return false;
}
static bool UnpackQualifiedName(const llvm::StringRef &s, llvm::StringRef &decl,
llvm::StringRef &basename, bool &was_operator) {
size_t pos_of_dot = s.rfind('.');
if (pos_of_dot == llvm::StringRef::npos)
return false;
decl = s.substr(0, pos_of_dot);
basename = s.substr(pos_of_dot + 1);
size_t idx_of_operator;
was_operator = StringHasAnyOf(basename, {"@infix", "@prefix", "@postfix"},
idx_of_operator);
if (was_operator)
basename = basename.substr(0, idx_of_operator - 1);
return !decl.empty() && !basename.empty();
}
static bool ParseLocalDeclName(const swift::Demangle::NodePointer &node,
StreamString &identifier,
swift::Demangle::Node::Kind &parent_kind,
swift::Demangle::Node::Kind &kind) {
swift::Demangle::Node::iterator end = node->end();
for (swift::Demangle::Node::iterator pos = node->begin(); pos != end; ++pos) {
swift::Demangle::NodePointer child = *pos;
swift::Demangle::Node::Kind child_kind = child->getKind();
switch (child_kind) {
case swift::Demangle::Node::Kind::Number:
break;
default:
if (child->hasText()) {
identifier.PutCString(child->getText());
return true;
}
break;
}
}
return false;
}
static bool ParseFunction(const swift::Demangle::NodePointer &node,
StreamString &identifier,
swift::Demangle::Node::Kind &parent_kind,
swift::Demangle::Node::Kind &kind) {
swift::Demangle::Node::iterator end = node->end();
swift::Demangle::Node::iterator pos = node->begin();
// First child is the function's scope
parent_kind = (*pos)->getKind();
++pos;
  // Second child is either the type (no identifier) or the function's name/identifier.
if (pos != end) {
switch ((*pos)->getKind()) {
case swift::Demangle::Node::Kind::Type:
break;
case swift::Demangle::Node::Kind::LocalDeclName:
if (ParseLocalDeclName(*pos, identifier, parent_kind, kind))
return true;
else
return false;
break;
default:
case swift::Demangle::Node::Kind::InfixOperator:
case swift::Demangle::Node::Kind::PostfixOperator:
case swift::Demangle::Node::Kind::PrefixOperator:
case swift::Demangle::Node::Kind::Identifier:
if ((*pos)->hasText())
identifier.PutCString((*pos)->getText());
return true;
}
}
return false;
}
static bool ParseGlobal(const swift::Demangle::NodePointer &node,
StreamString &identifier,
swift::Demangle::Node::Kind &parent_kind,
swift::Demangle::Node::Kind &kind) {
swift::Demangle::Node::iterator end = node->end();
for (swift::Demangle::Node::iterator pos = node->begin(); pos != end; ++pos) {
swift::Demangle::NodePointer child = *pos;
if (child) {
kind = child->getKind();
switch (child->getKind()) {
case swift::Demangle::Node::Kind::Allocator:
identifier.PutCString("__allocating_init");
ParseFunction(child, identifier, parent_kind, kind);
return true;
case swift::Demangle::Node::Kind::Constructor:
identifier.PutCString("init");
ParseFunction(child, identifier, parent_kind, kind);
return true;
case swift::Demangle::Node::Kind::Deallocator:
identifier.PutCString("__deallocating_deinit");
ParseFunction(child, identifier, parent_kind, kind);
return true;
case swift::Demangle::Node::Kind::Destructor:
identifier.PutCString("deinit");
ParseFunction(child, identifier, parent_kind, kind);
return true;
case swift::Demangle::Node::Kind::Getter:
case swift::Demangle::Node::Kind::Setter:
case swift::Demangle::Node::Kind::Function:
return ParseFunction(child, identifier, parent_kind, kind);
// Ignore these, they decorate a function at the same level, but don't
// contain any text
case swift::Demangle::Node::Kind::ObjCAttribute:
break;
default:
return false;
}
}
}
return false;
}
bool SwiftLanguageRuntime::MethodName::ExtractFunctionBasenameFromMangled(
const ConstString &mangled, ConstString &basename, bool &is_method) {
bool success = false;
swift::Demangle::Node::Kind kind = swift::Demangle::Node::Kind::Global;
swift::Demangle::Node::Kind parent_kind = swift::Demangle::Node::Kind::Global;
if (mangled) {
const char *mangled_cstr = mangled.GetCString();
const size_t mangled_cstr_len = mangled.GetLength();
if (mangled_cstr_len > 3) {
llvm::StringRef mangled_ref(mangled_cstr, mangled_cstr_len);
// Only demangle swift functions
// This is a no-op right now for the new mangling, because you
// have to demangle the whole name to figure this out anyway.
// I'm leaving the test here in case we actually need to do this
// only to functions.
swift::Demangle::Context demangle_ctx;
swift::Demangle::NodePointer node =
demangle_ctx.demangleSymbolAsNode(mangled_ref);
StreamString identifier;
if (node) {
switch (node->getKind()) {
case swift::Demangle::Node::Kind::Global:
success = ParseGlobal(node, identifier, parent_kind, kind);
break;
default:
break;
}
if (!identifier.GetString().empty()) {
basename = ConstString(identifier.GetString());
}
}
}
}
if (success) {
switch (kind) {
case swift::Demangle::Node::Kind::Allocator:
case swift::Demangle::Node::Kind::Constructor:
case swift::Demangle::Node::Kind::Deallocator:
case swift::Demangle::Node::Kind::Destructor:
is_method = true;
break;
case swift::Demangle::Node::Kind::Getter:
case swift::Demangle::Node::Kind::Setter:
// don't handle getters and setters right now...
return false;
case swift::Demangle::Node::Kind::Function:
switch (parent_kind) {
case swift::Demangle::Node::Kind::BoundGenericClass:
case swift::Demangle::Node::Kind::BoundGenericEnum:
case swift::Demangle::Node::Kind::BoundGenericStructure:
case swift::Demangle::Node::Kind::Class:
case swift::Demangle::Node::Kind::Enum:
case swift::Demangle::Node::Kind::Structure:
is_method = true;
break;
default:
break;
}
break;
default:
break;
}
}
return success;
}
void SwiftLanguageRuntime::MethodName::Parse() {
if (!m_parsed && m_full) {
// ConstString mangled;
// m_full.GetMangledCounterpart(mangled);
// printf ("\n parsing = '%s'\n", m_full.GetCString());
// if (mangled)
// printf (" mangled = '%s'\n", mangled.GetCString());
m_parse_error = false;
m_parsed = true;
llvm::StringRef full(m_full.GetCString());
bool was_operator = false;
if (full.find("::") != llvm::StringRef::npos) {
// :: is not an allowed operator in Swift (func ::(...) { fails to
// compile)
// but it's a very legitimate token in C++ - as a defense, reject anything
// with a :: in it as invalid Swift
m_parse_error = true;
return;
}
if (StringHasAllOf(full, ".:()")) {
const size_t open_paren = full.find(" (");
llvm::StringRef funcname = full.substr(0, open_paren);
UnpackQualifiedName(funcname, m_context, m_basename, was_operator);
if (was_operator)
m_type = eTypeOperator;
// check for obvious constructor/destructor cases
else if (m_basename.equals("__deallocating_destructor"))
m_type = eTypeDeallocator;
else if (m_basename.equals("__allocating_constructor"))
m_type = eTypeAllocator;
else if (m_basename.equals("init"))
m_type = eTypeConstructor;
else if (m_basename.equals("destructor"))
m_type = eTypeDestructor;
else
m_type = eTypeUnknownMethod;
const size_t idx_of_colon =
full.find(':', open_paren == llvm::StringRef::npos ? 0 : open_paren);
full = full.substr(idx_of_colon + 2);
if (full.empty())
return;
if (full[0] == '<') {
if (UnpackTerminatedSubstring(full, '<', '>', m_template_args)) {
full = full.substr(m_template_args.size());
} else {
m_parse_error = true;
return;
}
}
if (full.empty())
return;
if (full[0] == '(') {
if (UnpackTerminatedSubstring(full, '(', ')', m_metatype_ref)) {
full = full.substr(m_template_args.size());
if (full[0] == '<') {
if (UnpackTerminatedSubstring(full, '<', '>', m_template_args)) {
full = full.substr(m_template_args.size());
} else {
m_parse_error = true;
return;
}
}
} else {
m_parse_error = true;
return;
}
}
if (full.empty())
return;
if (full[0] == '(') {
if (UnpackTerminatedSubstring(full, '(', ')', m_arguments)) {
full = full.substr(m_template_args.size());
} else {
m_parse_error = true;
return;
}
}
if (full.empty())
return;
size_t idx_of_ret = full.find("->");
if (idx_of_ret == llvm::StringRef::npos) {
full = full.substr(idx_of_ret);
if (full.empty()) {
m_parse_error = true;
return;
}
if (full[0] == ' ')
full = full.substr(1);
m_return_type = full;
}
} else if (full.find('.') != llvm::StringRef::npos) {
// this is probably just a full name (module.type.func)
UnpackQualifiedName(full, m_context, m_basename, was_operator);
if (was_operator)
m_type = eTypeOperator;
else
m_type = eTypeUnknownMethod;
} else {
// this is most probably just a basename
m_basename = full;
m_type = eTypeUnknownMethod;
}
}
}
llvm::StringRef SwiftLanguageRuntime::MethodName::GetBasename() {
if (!m_parsed)
Parse();
return m_basename;
}
const CompilerType &SwiftLanguageRuntime::GetBoxMetadataType() {
if (m_box_metadata_type.IsValid())
return m_box_metadata_type;
static ConstString g_type_name("__lldb_autogen_boxmetadata");
const bool is_packed = false;
if (ClangASTContext *ast_ctx =
GetProcess()->GetTarget().GetScratchClangASTContext()) {
CompilerType voidstar =
ast_ctx->GetBasicType(lldb::eBasicTypeVoid).GetPointerType();
CompilerType uint32 = ClangASTContext::GetIntTypeFromBitSize(
ast_ctx->getASTContext(), 32, false);
m_box_metadata_type = ast_ctx->GetOrCreateStructForIdentifier(
g_type_name, {{"kind", voidstar}, {"offset", uint32}}, is_packed);
}
return m_box_metadata_type;
}
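/// Return the swift::remote::MemoryReader used by RemoteAST and the remote
/// mirrors library, creating it on first use. It reads symbol addresses,
/// raw bytes, and C strings out of the inferior process.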
std::shared_ptr<swift::remote::MemoryReader>
SwiftLanguageRuntime::GetMemoryReader() {
class MemoryReader : public swift::remote::MemoryReader {
public:
MemoryReader(Process *p, size_t max_read_amount = INT32_MAX)
: m_process(p) {
lldbassert(m_process && "MemoryReader requires a valid Process");
m_max_read_amount = max_read_amount;
}
virtual ~MemoryReader() = default;
bool queryDataLayout(DataLayoutQueryType type, void *inBuffer,
void *outBuffer) override {
switch (type) {
case DLQ_GetPointerSize: {
auto result = static_cast<uint8_t *>(outBuffer);
*result = m_process->GetAddressByteSize();
return true;
}
case DLQ_GetSizeSize: {
auto result = static_cast<uint8_t *>(outBuffer);
*result = m_process->GetAddressByteSize(); // FIXME: sizeof(size_t)
return true;
}
}
return false;
}
swift::remote::RemoteAddress
getSymbolAddress(const std::string &name) override {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (name.empty())
return swift::remote::RemoteAddress(nullptr);
if (log)
log->Printf("[MemoryReader] asked to retrieve address of symbol %s",
name.c_str());
ConstString name_cs(name.c_str(), name.size());
SymbolContextList sc_list;
if (m_process->GetTarget().GetImages().FindSymbolsWithNameAndType(
name_cs, lldb::eSymbolTypeAny, sc_list)) {
SymbolContext sym_ctx;
// Remove undefined symbols from the list:
size_t num_sc_matches = sc_list.GetSize();
if (num_sc_matches > 1) {
SymbolContextList tmp_sc_list(sc_list);
sc_list.Clear();
for (size_t idx = 0; idx < num_sc_matches; idx++) {
tmp_sc_list.GetContextAtIndex(idx, sym_ctx);
if (sym_ctx.symbol &&
sym_ctx.symbol->GetType() != lldb::eSymbolTypeUndefined) {
sc_list.Append(sym_ctx);
}
}
}
if (sc_list.GetSize() == 1 && sc_list.GetContextAtIndex(0, sym_ctx)) {
if (sym_ctx.symbol) {
auto load_addr =
sym_ctx.symbol->GetLoadAddress(&m_process->GetTarget());
if (log)
log->Printf("[MemoryReader] symbol resolved to 0x%" PRIx64,
load_addr);
return swift::remote::RemoteAddress(load_addr);
}
}
}
if (log)
log->Printf("[MemoryReader] symbol resolution failed");
return swift::remote::RemoteAddress(nullptr);
}
bool readBytes(swift::remote::RemoteAddress address, uint8_t *dest,
uint64_t size) override {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("[MemoryReader] asked to read %" PRIu64
" bytes at address 0x%" PRIx64,
size, address.getAddressData());
if (size > m_max_read_amount) {
if (log)
log->Printf(
"[MemoryReader] memory read exceeds maximum allowed size");
return false;
}
Target &target(m_process->GetTarget());
Address addr(address.getAddressData());
Status error;
if (size > target.ReadMemory(addr, false, dest, size, error)) {
if (log)
log->Printf(
"[MemoryReader] memory read returned fewer bytes than asked for");
return false;
}
if (error.Fail()) {
if (log)
log->Printf("[MemoryReader] memory read returned error: %s",
error.AsCString());
return false;
}
if (log && log->GetVerbose()) {
StreamString stream;
for (uint64_t i = 0; i < size; i++) {
stream.PutHex8(dest[i]);
stream.PutChar(' ');
}
log->Printf("[MemoryReader] memory read returned data: %s",
stream.GetData());
}
return true;
}
bool readString(swift::remote::RemoteAddress address,
std::string &dest) override {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf(
"[MemoryReader] asked to read string data at address 0x%" PRIx64,
address.getAddressData());
uint32_t read_size = 50 * 1024;
std::vector<char> storage(read_size, 0);
Target &target(m_process->GetTarget());
Address addr(address.getAddressData());
Status error;
target.ReadCStringFromMemory(addr, &storage[0], storage.size(), error);
if (error.Success()) {
dest.assign(&storage[0]);
if (log)
log->Printf("[MemoryReader] memory read returned data: %s",
dest.c_str());
return true;
} else {
if (log)
log->Printf("[MemoryReader] memory read returned error: %s",
error.AsCString());
return false;
}
}
private:
Process *m_process;
size_t m_max_read_amount;
};
if (!m_memory_reader_sp)
m_memory_reader_sp.reset(new MemoryReader(GetProcess()));
return m_memory_reader_sp;
}
SwiftLanguageRuntime::MetadataPromise::MetadataPromise(
ValueObject &for_object, SwiftLanguageRuntime &runtime,
lldb::addr_t location)
: m_for_object_sp(for_object.GetSP()), m_swift_runtime(runtime),
m_metadata_location(location) {}
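/// Resolve the type metadata at the promised location into a CompilerType,
/// caching the result. On failure, an explanation is returned through
/// \p error if it is non-null.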
CompilerType
SwiftLanguageRuntime::MetadataPromise::FulfillTypePromise(Status *error) {
if (error)
error->Clear();
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("[MetadataPromise] asked to fulfill type promise at location "
"0x%" PRIx64,
m_metadata_location);
if (m_compiler_type.hasValue())
return m_compiler_type.getValue();
auto swift_ast_ctx = m_for_object_sp->GetScratchSwiftASTContext();
if (!swift_ast_ctx) {
    if (error)
      error->SetErrorString("couldn't get Swift scratch context");
return CompilerType();
}
auto &remote_ast = m_swift_runtime.GetRemoteASTContext(*swift_ast_ctx);
swift::remoteAST::Result<swift::Type> result =
remote_ast.getTypeForRemoteTypeMetadata(
swift::remote::RemoteAddress(m_metadata_location));
if (result) {
m_compiler_type = {swift_ast_ctx.get(), result.getValue().getPointer()};
if (log)
log->Printf("[MetadataPromise] result is type %s",
m_compiler_type->GetTypeName().AsCString());
return m_compiler_type.getValue();
} else {
const auto &failure = result.getFailure();
if (error)
error->SetErrorStringWithFormat("error in resolving type: %s",
failure.render().c_str());
if (log)
log->Printf("[MetadataPromise] failure: %s", failure.render().c_str());
return (m_compiler_type = CompilerType()).getValue();
}
}
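/// Resolve the metadata kind (class, struct, enum, existential, ...) at the
/// promised location, caching the result.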
llvm::Optional<swift::MetadataKind>
SwiftLanguageRuntime::MetadataPromise::FulfillKindPromise(Status *error) {
if (error)
error->Clear();
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("[MetadataPromise] asked to fulfill kind promise at location "
"0x%" PRIx64,
m_metadata_location);
if (m_metadata_kind.hasValue())
return m_metadata_kind;
auto swift_ast_ctx = m_for_object_sp->GetScratchSwiftASTContext();
if (!swift_ast_ctx) {
    if (error)
      error->SetErrorString("couldn't get Swift scratch context");
return llvm::None;
}
auto &remote_ast = m_swift_runtime.GetRemoteASTContext(*swift_ast_ctx);
swift::remoteAST::Result<swift::MetadataKind> result =
remote_ast.getKindForRemoteTypeMetadata(
swift::remote::RemoteAddress(m_metadata_location));
if (result) {
m_metadata_kind = result.getValue();
if (log)
log->Printf("[MetadataPromise] result is kind %u", result.getValue());
return m_metadata_kind;
} else {
const auto &failure = result.getFailure();
if (error)
error->SetErrorStringWithFormat("error in resolving type: %s",
failure.render().c_str());
if (log)
log->Printf("[MetadataPromise] failure: %s", failure.render().c_str());
return m_metadata_kind;
}
}
bool SwiftLanguageRuntime::MetadataPromise::IsStaticallyDetermined() {
if (llvm::Optional<swift::MetadataKind> kind_promise = FulfillKindPromise()) {
switch (kind_promise.getValue()) {
case swift::MetadataKind::Class:
case swift::MetadataKind::Existential:
case swift::MetadataKind::ObjCClassWrapper:
return false;
default:
return true;
}
}
llvm_unreachable("Unknown metadata kind");
}
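/// Return a (possibly cached) metadata promise for the metadata located at
/// \p addr, keyed by the scratch AST context of \p for_object.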
SwiftLanguageRuntime::MetadataPromiseSP
SwiftLanguageRuntime::GetMetadataPromise(lldb::addr_t addr,
ValueObject &for_object) {
auto swift_ast_ctx = for_object.GetScratchSwiftASTContext();
if (!swift_ast_ctx || swift_ast_ctx->HasFatalErrors())
return nullptr;
if (addr == 0 || addr == LLDB_INVALID_ADDRESS)
return nullptr;
typename decltype(m_promises_map)::key_type key{
swift_ast_ctx->GetASTContext(), addr};
auto iter = m_promises_map.find(key), end = m_promises_map.end();
if (iter != end)
return iter->second;
MetadataPromiseSP promise_sp(
new MetadataPromise(for_object, *this, std::get<1>(key)));
m_promises_map.emplace(key, promise_sp);
return promise_sp;
}
swift::remoteAST::RemoteASTContext &
SwiftLanguageRuntime::GetRemoteASTContext(SwiftASTContext &swift_ast_ctx) {
// If we already have a remote AST context for this AST context,
// return it.
auto known = m_remote_ast_contexts.find(swift_ast_ctx.GetASTContext());
if (known != m_remote_ast_contexts.end())
return *known->second;
// Initialize a new remote AST context.
return *m_remote_ast_contexts
.emplace(swift_ast_ctx.GetASTContext(),
llvm::make_unique<swift::remoteAST::RemoteASTContext>(
*swift_ast_ctx.GetASTContext(), GetMemoryReader()))
.first->second;
}
void SwiftLanguageRuntime::ReleaseAssociatedRemoteASTContext(
swift::ASTContext *ctx) {
m_remote_ast_contexts.erase(ctx);
}
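/// Compute the byte offset of \p member_name within \p instance_type, first
/// asking RemoteAST (with caching), and falling back to the remote mirrors
/// reflection library if that fails.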
llvm::Optional<uint64_t>
SwiftLanguageRuntime::GetMemberVariableOffset(CompilerType instance_type,
ValueObject *instance,
ConstString member_name,
Status *error) {
if (!instance_type.IsValid())
return llvm::None;
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
  // Using the module context for RemoteAST is cheaper but only safe
  // when there is no dynamic type resolution involved.
auto *module_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(instance_type.GetTypeSystem());
if (!module_ctx || module_ctx->HasFatalErrors())
return llvm::None;
llvm::Optional<SwiftASTContextReader> scratch_ctx;
if (instance) {
scratch_ctx = instance->GetScratchSwiftASTContext();
if (!scratch_ctx)
return llvm::None;
}
auto *remote_ast = &GetRemoteASTContext(*module_ctx);
if (log)
log->Printf(
"[GetMemberVariableOffset] asked to resolve offset for member %s",
member_name.AsCString());
// Check whether we've already cached this offset.
auto *swift_type = GetCanonicalSwiftType(instance_type).getPointer();
// Perform the cache lookup.
auto key = std::make_tuple(swift_type, member_name.GetCString());
auto it = m_member_offsets.find(key);
if (it != m_member_offsets.end())
return it->second;
// Dig out metadata describing the type, if it's easy to find.
// FIXME: the Remote AST library should make this easier.
swift::remote::RemoteAddress optmeta(nullptr);
const swift::TypeKind type_kind = swift_type->getKind();
switch (type_kind) {
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
if (log)
log->Printf("[MemberVariableOffsetResolver] type is a class - trying to "
"get metadata for valueobject %s",
(instance ? instance->GetName().AsCString() : "<null>"));
if (instance) {
lldb::addr_t pointer = instance->GetPointerValue();
if (!pointer || pointer == LLDB_INVALID_ADDRESS)
break;
swift::remote::RemoteAddress address(pointer);
if (auto metadata = remote_ast->getHeapMetadataForObject(address))
optmeta = metadata.getValue();
}
if (log)
log->Printf("[MemberVariableOffsetResolver] optmeta = 0x%" PRIx64,
optmeta.getAddressData());
break;
}
default:
// Bind generic parameters if necessary.
if (instance && swift_type->hasTypeParameter())
if (auto *frame = instance->GetExecutionContextRef().GetFrameSP().get())
if (auto bound = DoArchetypeBindingForType(*frame, instance_type)) {
if (log)
log->Printf(
"[MemberVariableOffsetResolver] resolved non-class type = %s",
bound.GetTypeName().AsCString());
swift_type = GetCanonicalSwiftType(bound).getPointer();
auto key = std::make_tuple(swift_type, member_name.GetCString());
auto it = m_member_offsets.find(key);
if (it != m_member_offsets.end())
return it->second;
assert(bound.GetTypeSystem() == scratch_ctx->get());
remote_ast = &GetRemoteASTContext(*scratch_ctx->get());
}
}
// Determine the member offset.
swift::remoteAST::Result<uint64_t> result = remote_ast->getOffsetOfMember(
swift_type, optmeta, member_name.GetStringRef());
if (result) {
if (log)
log->Printf("[MemberVariableOffsetResolver] offset discovered = %" PRIu64,
(uint64_t)result.getValue());
// Cache this result.
auto key = std::make_tuple(swift_type, member_name.GetCString());
m_member_offsets.insert(std::make_pair(key, result.getValue()));
return result.getValue();
}
const auto &failure = result.getFailure();
if (error)
error->SetErrorStringWithFormat("error in resolving type offset: %s",
failure.render().c_str());
if (log)
log->Printf("[MemberVariableOffsetResolver] failure: %s",
failure.render().c_str());
// Try remote mirrors.
if (!reflection_ctx)
return llvm::None;
ConstString mangled_name(instance_type.GetMangledTypeName());
StringRef mangled_no_prefix =
swift::Demangle::dropSwiftManglingPrefix(mangled_name.GetStringRef());
swift::Demangle::Demangler Dem;
auto demangled = Dem.demangleType(mangled_no_prefix);
auto *type_ref = swift::Demangle::decodeMangledType(
reflection_ctx->getBuilder(), demangled);
if (!type_ref)
return llvm::None;
auto type_info =
reflection_ctx->getBuilder().getTypeConverter().getTypeInfo(type_ref);
if (!type_info)
return llvm::None;
auto record_type_info =
llvm::dyn_cast<swift::reflection::RecordTypeInfo>(type_info);
if (record_type_info) {
for (auto &field : record_type_info->getFields()) {
if (ConstString(field.Name) == member_name)
return field.Offset;
}
}
  if (!instance)
    return llvm::None;
  lldb::addr_t pointer = instance->GetPointerValue();
auto class_instance_type_info = reflection_ctx->getInstanceTypeInfo(pointer);
if (class_instance_type_info) {
auto class_type_info = llvm::dyn_cast<swift::reflection::RecordTypeInfo>(
class_instance_type_info);
if (class_type_info) {
for (auto &field : class_type_info->getFields()) {
if (ConstString(field.Name) == member_name)
return field.Offset;
}
}
}
return llvm::None;
}
static size_t BaseClassDepth(ValueObject &in_value) {
ValueObject *ptr = &in_value;
size_t depth = 0;
while (ptr->IsBaseClass()) {
depth++;
ptr = ptr->GetParent();
}
return depth;
}
/// Determine whether the scratch SwiftASTContext has been locked.
static bool IsScratchContextLocked(Target &target) {
if (target.GetSwiftScratchContextLock().try_lock()) {
target.GetSwiftScratchContextLock().unlock();
return false;
}
return true;
}
/// Determine whether the scratch SwiftASTContext has been locked.
static bool IsScratchContextLocked(TargetSP target) {
return target ? IsScratchContextLocked(*target) : true;
}
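/// Resolve the dynamic type of a class instance by reading its heap
/// metadata via RemoteAST. Tagged pointers are delegated to the Objective-C
/// runtime.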
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Class(
ValueObject &in_value, SwiftASTContext &scratch_ctx,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
AddressType address_type;
lldb::addr_t class_metadata_ptr = in_value.GetPointerValue(&address_type);
if (auto objc_runtime = GetObjCRuntime()) {
if (objc_runtime->IsTaggedPointer(class_metadata_ptr)) {
Value::ValueType value_type;
return objc_runtime->GetDynamicTypeAndAddress(
in_value, use_dynamic, class_type_or_name, address, value_type,
/* allow_swift = */ true);
}
}
if (class_metadata_ptr == LLDB_INVALID_ADDRESS || class_metadata_ptr == 0)
return false;
address.SetRawAddress(class_metadata_ptr);
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
auto &remote_ast = GetRemoteASTContext(scratch_ctx);
swift::remote::RemoteAddress instance_address(class_metadata_ptr);
auto metadata_address = remote_ast.getHeapMetadataForObject(instance_address);
if (!metadata_address) {
if (log) {
log->Printf("could not read heap metadata for object at %llu: %s\n",
class_metadata_ptr,
metadata_address.getFailure().render().c_str());
}
return false;
}
auto instance_type =
remote_ast.getTypeForRemoteTypeMetadata(metadata_address.getValue(),
/*skipArtificial=*/true);
if (!instance_type) {
if (log) {
log->Printf("could not get type metadata from address %llu: %s\n",
metadata_address.getValue(),
instance_type.getFailure().render().c_str());
}
return false;
}
// The read lock must have been acquired by the caller.
class_type_or_name.SetCompilerType(
{&scratch_ctx, instance_type.getValue().getPointer()});
return true;
}
SwiftLanguageRuntime::SwiftErrorDescriptor::SwiftErrorDescriptor()
: m_kind(Kind::eNotAnError) {}
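/// Determine whether \p in_value is a valid Swift Error existential and, if
/// so, describe its layout (bridged, bridgeable native, or pure native) in
/// \p out_error_descriptor.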
bool SwiftLanguageRuntime::IsValidErrorValue(
ValueObject &in_value, SwiftErrorDescriptor *out_error_descriptor) {
// see GetDynamicTypeAndAddress_ErrorType for details
CompilerType var_type = in_value.GetStaticValue()->GetCompilerType();
SwiftASTContext::ProtocolInfo protocol_info;
if (!SwiftASTContext::GetProtocolTypeInfo(var_type, protocol_info))
return false;
if (!protocol_info.m_is_errortype)
return false;
unsigned index = SwiftASTContext::ProtocolInfo::error_instance_index;
ValueObjectSP instance_type_sp(
in_value.GetStaticValue()->GetChildAtIndex(index, true));
if (!instance_type_sp)
return false;
lldb::addr_t metadata_location = instance_type_sp->GetValueAsUnsigned(0);
if (metadata_location == 0 || metadata_location == LLDB_INVALID_ADDRESS)
return false;
SetupSwiftError();
if (m_SwiftNativeNSErrorISA.hasValue()) {
if (auto objc_runtime = GetObjCRuntime()) {
if (auto descriptor =
objc_runtime->GetClassDescriptor(*instance_type_sp)) {
if (descriptor->GetISA() != m_SwiftNativeNSErrorISA.getValue()) {
// not a __SwiftNativeNSError - but statically typed as ErrorType
// return true here
if (out_error_descriptor) {
*out_error_descriptor = SwiftErrorDescriptor();
out_error_descriptor->m_kind = SwiftErrorDescriptor::Kind::eBridged;
out_error_descriptor->m_bridged.instance_ptr_value =
instance_type_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
}
return true;
}
}
}
}
if (GetObjCRuntime()) {
// this is a swift native error but it can be bridged to ObjC
// so it needs to be layout compatible
size_t ptr_size = m_process->GetAddressByteSize();
size_t metadata_offset =
ptr_size + 4 + (ptr_size == 8 ? 4 : 0); // CFRuntimeBase
metadata_offset += ptr_size + ptr_size + ptr_size; // CFIndex + 2*CFRef
metadata_location += metadata_offset;
Status error;
lldb::addr_t metadata_ptr_value =
m_process->ReadPointerFromMemory(metadata_location, error);
if (metadata_ptr_value == 0 || metadata_ptr_value == LLDB_INVALID_ADDRESS ||
error.Fail())
return false;
if (out_error_descriptor) {
*out_error_descriptor = SwiftErrorDescriptor();
out_error_descriptor->m_kind =
SwiftErrorDescriptor::Kind::eSwiftBridgeableNative;
out_error_descriptor->m_bridgeable_native.metadata_location =
metadata_location;
out_error_descriptor->m_bridgeable_native.metadata_ptr_value =
metadata_ptr_value;
}
} else {
// this is a swift native error and it has no way to be bridged to ObjC
// so it adopts a more compact layout
Status error;
size_t ptr_size = m_process->GetAddressByteSize();
size_t metadata_offset = 2 * ptr_size;
metadata_location += metadata_offset;
lldb::addr_t metadata_ptr_value =
m_process->ReadPointerFromMemory(metadata_location, error);
if (metadata_ptr_value == 0 || metadata_ptr_value == LLDB_INVALID_ADDRESS ||
error.Fail())
return false;
lldb::addr_t witness_table_location = metadata_location + ptr_size;
lldb::addr_t payload_location = witness_table_location + ptr_size;
if (out_error_descriptor) {
*out_error_descriptor = SwiftErrorDescriptor();
out_error_descriptor->m_kind =
SwiftErrorDescriptor::Kind::eSwiftPureNative;
out_error_descriptor->m_pure_native.metadata_location =
metadata_ptr_value;
out_error_descriptor->m_pure_native.payload_ptr = payload_location;
}
}
return true;
}
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_ErrorType(
ValueObject &in_value, lldb::DynamicValueType use_dynamic,
TypeAndOrName &class_type_or_name, Address &address) {
// layout of error type
// pointer to -------> SwiftError {
// --------------
// CFRuntimeBase
// CFIndex
// CFStringRef
// CFDictionaryRef
// --------------
// Metadata
// WitnessTable
// hashable Metadata
// hashable WitnessTable
// --------------
// tail allocated actual object data *
// }
// * for a struct, it's the inline data
// * for a class, it's the inline pointer-to-the-data (aka, the swift class
// instance)
SwiftErrorDescriptor error_descriptor;
if (!IsValidErrorValue(in_value, &error_descriptor))
return false;
Status error;
CompilerType var_type(in_value.GetStaticValue()->GetCompilerType());
size_t ptr_size = m_process->GetAddressByteSize();
SwiftASTContext *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(var_type.GetTypeSystem());
if (!swift_ast_ctx)
return false;
switch (error_descriptor.m_kind) {
case SwiftErrorDescriptor::Kind::eNotAnError:
return false;
case SwiftErrorDescriptor::Kind::eSwiftBridgeableNative: {
MetadataPromiseSP promise_sp(GetMetadataPromise(
error_descriptor.m_bridgeable_native.metadata_ptr_value, in_value));
if (!promise_sp)
return false;
error_descriptor.m_bridgeable_native.metadata_location += 4 * ptr_size;
if (!promise_sp->IsStaticallyDetermined()) {
// figure out the actual dynamic type via the metadata at the "isa"
// pointer
error_descriptor.m_bridgeable_native.metadata_location =
m_process->ReadPointerFromMemory(
error_descriptor.m_bridgeable_native.metadata_location, error);
if (error_descriptor.m_bridgeable_native.metadata_location == 0 ||
error_descriptor.m_bridgeable_native.metadata_location ==
LLDB_INVALID_ADDRESS ||
error.Fail())
return false;
error_descriptor.m_bridgeable_native.metadata_ptr_value =
m_process->ReadPointerFromMemory(
error_descriptor.m_bridgeable_native.metadata_location, error);
if (error_descriptor.m_bridgeable_native.metadata_ptr_value == 0 ||
error_descriptor.m_bridgeable_native.metadata_ptr_value ==
LLDB_INVALID_ADDRESS ||
error.Fail())
return false;
promise_sp = GetMetadataPromise(
error_descriptor.m_bridgeable_native.metadata_ptr_value, in_value);
if (!promise_sp || !promise_sp->FulfillTypePromise()) {
// this could still be a random ObjC object
if (auto objc_runtime = GetObjCRuntime()) {
DataExtractor extractor(
&error_descriptor.m_bridgeable_native.metadata_location,
sizeof(error_descriptor.m_bridgeable_native.metadata_location),
GetProcess()->GetByteOrder(), GetProcess()->GetAddressByteSize());
ExecutionContext exe_ctx(GetProcess());
auto scratch_ast =
GetProcess()->GetTarget().GetScratchClangASTContext();
if (scratch_ast) {
auto valobj_sp = ValueObject::CreateValueObjectFromData(
in_value.GetName().AsCString(), extractor, exe_ctx,
scratch_ast->GetBasicType(eBasicTypeObjCID));
if (valobj_sp) {
Value::ValueType value_type;
if (objc_runtime->GetDynamicTypeAndAddress(
*valobj_sp, use_dynamic, class_type_or_name, address,
value_type)) {
address.SetLoadAddress(
error_descriptor.m_bridgeable_native.metadata_location,
&GetProcess()->GetTarget());
if (!class_type_or_name.GetCompilerType().IsPointerType()) {
                    // The language runtimes do not return pointer-to-types
                    // when doing dynamic type resolution. What usually
                    // happens is that the static type has pointer-like
                    // traits that ValueObjectDynamic then preserves in the
                    // dynamic value. Since the static type here is a Swift
                    // protocol object, the dynamic type won't know to
                    // pointerize. But we truly need an ObjCObjectPointer
                    // here or else type printing WILL be confused. Hence,
                    // make the pointer type ourselves if we didn't get one
                    // already.
class_type_or_name.SetCompilerType(
class_type_or_name.GetCompilerType().GetPointerType());
}
return true;
}
}
}
}
return false;
}
}
if (!promise_sp)
return false;
address.SetLoadAddress(
error_descriptor.m_bridgeable_native.metadata_location,
&m_process->GetTarget());
CompilerType metadata_type(promise_sp->FulfillTypePromise());
if (metadata_type.IsValid() && error.Success()) {
class_type_or_name.SetCompilerType(metadata_type);
return true;
}
} break;
case SwiftErrorDescriptor::Kind::eBridged: {
if (error_descriptor.m_bridged.instance_ptr_value != 0 &&
error_descriptor.m_bridged.instance_ptr_value != LLDB_INVALID_ADDRESS) {
Status error_type_lookup_error;
if (CompilerType error_type =
swift_ast_ctx->GetNSErrorType(error_type_lookup_error)) {
class_type_or_name.SetCompilerType(error_type);
address.SetRawAddress(error_descriptor.m_bridged.instance_ptr_value);
return true;
}
}
} break;
case SwiftErrorDescriptor::Kind::eSwiftPureNative: {
Status error;
if (MetadataPromiseSP promise_sp = GetMetadataPromise(
error_descriptor.m_pure_native.metadata_location, in_value)) {
if (promise_sp->IsStaticallyDetermined()) {
if (CompilerType compiler_type = promise_sp->FulfillTypePromise()) {
class_type_or_name.SetCompilerType(compiler_type);
address.SetRawAddress(error_descriptor.m_pure_native.payload_ptr);
return true;
}
} else {
error_descriptor.m_pure_native.metadata_location =
m_process->ReadPointerFromMemory(
error_descriptor.m_pure_native.payload_ptr, error);
if (error_descriptor.m_pure_native.metadata_location == 0 ||
error_descriptor.m_pure_native.metadata_location ==
LLDB_INVALID_ADDRESS ||
error.Fail())
return false;
error_descriptor.m_pure_native.payload_ptr =
error_descriptor.m_pure_native.metadata_location;
error_descriptor.m_pure_native.metadata_location =
m_process->ReadPointerFromMemory(
error_descriptor.m_pure_native.payload_ptr, error);
if (MetadataPromiseSP promise_sp = GetMetadataPromise(
error_descriptor.m_pure_native.metadata_location, in_value)) {
if (CompilerType compiler_type = promise_sp->FulfillTypePromise()) {
class_type_or_name.SetCompilerType(compiler_type);
address.SetRawAddress(error_descriptor.m_pure_native.payload_ptr);
return true;
}
}
}
}
} break;
}
return false;
}
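/// Resolve the dynamic type stored in a protocol (existential) value,
/// dispatching to the Error, class, or value-type handling as appropriate.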
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Protocol(
ValueObject &in_value, SwiftASTContext &scratch_ctx,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
CompilerType var_type(in_value.GetCompilerType());
SwiftASTContext::ProtocolInfo protocol_info;
if (!SwiftASTContext::GetProtocolTypeInfo(var_type, protocol_info))
return false;
if (protocol_info.m_is_errortype)
return GetDynamicTypeAndAddress_ErrorType(in_value, use_dynamic,
class_type_or_name, address);
MetadataPromiseSP promise_sp;
ValueObjectSP instance_type_sp(
in_value.GetStaticValue()->GetChildAtIndex(
protocol_info.GetInstanceTypeIndex(), true));
if (!instance_type_sp)
return false;
ValueObjectSP payload0_sp(
in_value.GetStaticValue()->GetChildAtIndex(0, true));
if (!payload0_sp)
return false;
// @objc protocols are automatically class-only, and there is no
// static/dynamic to deal with
bool is_class = protocol_info.m_is_objc || protocol_info.m_is_class_only ||
protocol_info.m_is_anyobject;
if (!is_class) {
promise_sp =
GetMetadataPromise(instance_type_sp->GetValueAsUnsigned(0), in_value);
if (!promise_sp)
return false;
if (promise_sp->FulfillKindPromise().hasValue() &&
promise_sp->FulfillKindPromise().getValue() ==
swift::MetadataKind::Class)
is_class = true;
}
if (is_class) {
if (GetDynamicTypeAndAddress_Class(*payload0_sp, scratch_ctx, use_dynamic,
class_type_or_name, address))
return true;
    // Only for @objc protocols, try to fall back to the ObjC runtime as a
    // source of type information. This is not exactly a great solution, and
    // we need to be careful with how we use the results of this computation,
    // but assuming some care, at least data formatters will work.
if (!protocol_info.m_is_objc)
return false;
auto objc_runtime = GetObjCRuntime();
if (!objc_runtime)
return false;
auto descriptor_sp = objc_runtime->GetClassDescriptor(*payload0_sp);
if (!descriptor_sp)
return false;
std::vector<clang::NamedDecl *> decls;
DeclVendor *vendor = objc_runtime->GetDeclVendor();
if (!vendor)
return false;
vendor->FindDecls(descriptor_sp->GetClassName(), true, 1, decls);
if (decls.size() == 0)
return false;
CompilerType type = ClangASTContext::GetTypeForDecl(decls[0]);
if (!type.IsValid())
return false;
lldb::addr_t class_metadata_ptr = payload0_sp->GetAddressOf();
if (class_metadata_ptr && class_metadata_ptr != LLDB_INVALID_ADDRESS)
address.SetRawAddress(class_metadata_ptr);
class_type_or_name.SetCompilerType(type.GetPointerType());
return class_type_or_name.GetCompilerType().IsValid();
}
if (promise_sp->FulfillKindPromise().hasValue() &&
(promise_sp->FulfillKindPromise().getValue() ==
swift::MetadataKind::Struct ||
promise_sp->FulfillKindPromise().getValue() ==
swift::MetadataKind::Enum ||
promise_sp->FulfillKindPromise().getValue() ==
swift::MetadataKind::Tuple)) {
Status error;
class_type_or_name.SetCompilerType(promise_sp->FulfillTypePromise());
if (error.Fail())
return false;
// Project the payload.
switch (SwiftASTContext::GetAllocationStrategy(
class_type_or_name.GetCompilerType())) {
case SwiftASTContext::TypeAllocationStrategy::eInline:
address.SetRawAddress(in_value.GetValue().GetScalar().ULongLong());
return true;
case SwiftASTContext::TypeAllocationStrategy::ePointer:
address.SetRawAddress(payload0_sp->GetValueAsUnsigned(0) +
(sizeof(lldb::addr_t) * 2));
return true;
default:
// TODO we don't know how to deal with the dynamic case quite yet
return false;
}
}
return false;
}
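/// Resolve the dynamic type and address of \p in_value from an already
/// obtained metadata promise, handling class, value-type, and existential
/// metadata kinds.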
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Promise(
ValueObject &in_value, MetadataPromiseSP promise_sp,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
if (!promise_sp)
return false;
CompilerType var_type(in_value.GetCompilerType());
Status error;
if (!promise_sp->FulfillKindPromise())
return false;
switch (promise_sp->FulfillKindPromise().getValue()) {
case swift::MetadataKind::Class:
case swift::MetadataKind::ObjCClassWrapper: {
CompilerType dyn_type(promise_sp->FulfillTypePromise());
if (!dyn_type.IsValid())
return false;
class_type_or_name.SetCompilerType(dyn_type);
lldb::addr_t val_ptr_addr = in_value.GetAddressOf();
val_ptr_addr = GetProcess()->ReadPointerFromMemory(val_ptr_addr, error);
address.SetLoadAddress(val_ptr_addr, &m_process->GetTarget());
return true;
} break;
case swift::MetadataKind::Optional:
case swift::MetadataKind::Struct:
case swift::MetadataKind::Tuple: {
CompilerType dyn_type(promise_sp->FulfillTypePromise());
if (!dyn_type.IsValid())
return false;
class_type_or_name.SetCompilerType(dyn_type);
lldb::addr_t val_ptr_addr = in_value.GetAddressOf();
address.SetLoadAddress(val_ptr_addr, &m_process->GetTarget());
return true;
} break;
case swift::MetadataKind::Enum: {
CompilerType dyn_type(promise_sp->FulfillTypePromise());
if (!dyn_type.IsValid())
return false;
class_type_or_name.SetCompilerType(dyn_type);
lldb::addr_t val_ptr_addr = in_value.GetAddressOf();
{
auto swift_type = GetSwiftType(dyn_type);
if (swift_type->getOptionalObjectType())
val_ptr_addr = GetProcess()->ReadPointerFromMemory(val_ptr_addr, error);
}
address.SetLoadAddress(val_ptr_addr, &m_process->GetTarget());
return true;
} break;
case swift::MetadataKind::Existential: {
CompilerType protocol_type(promise_sp->FulfillTypePromise());
SwiftASTContext *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(protocol_type.GetTypeSystem());
lldb::addr_t existential_address = in_value.GetAddressOf();
if (!existential_address || existential_address == LLDB_INVALID_ADDRESS)
return false;
auto &target = m_process->GetTarget();
assert(IsScratchContextLocked(target) &&
"Swift scratch context not locked ahead");
auto scratch_ctx = in_value.GetSwiftASTContext();
auto &remote_ast = GetRemoteASTContext(*scratch_ctx);
swift::remote::RemoteAddress remote_existential(existential_address);
auto result = remote_ast.getDynamicTypeAndAddressForExistential(
remote_existential, GetSwiftType(protocol_type));
if (!result.isSuccess())
return false;
auto type_and_address = result.getValue();
CompilerType dynamic_type(type_and_address.first);
class_type_or_name.SetCompilerType(dynamic_type);
address.SetLoadAddress(type_and_address.second.getAddressData(),
&m_process->GetTarget());
return true;
} break;
default:
break;
}
return false;
}
SwiftLanguageRuntime::MetadataPromiseSP
SwiftLanguageRuntime::GetPromiseForTypeNameAndFrame(const char *type_name,
StackFrame *frame) {
if (!frame || !type_name || !type_name[0])
return nullptr;
StreamString type_metadata_ptr_var_name;
type_metadata_ptr_var_name.Printf("$%s", type_name);
VariableList *var_list = frame->GetVariableList(false);
if (!var_list)
return nullptr;
VariableSP var_sp(var_list->FindVariable(
ConstString(type_metadata_ptr_var_name.GetData())));
if (!var_sp)
return nullptr;
ValueObjectSP metadata_ptr_var_sp(
frame->GetValueObjectForFrameVariable(var_sp, lldb::eNoDynamicValues));
  if (!metadata_ptr_var_sp ||
      !metadata_ptr_var_sp->UpdateValueIfNeeded())
    return nullptr;
lldb::addr_t metadata_location(metadata_ptr_var_sp->GetValueAsUnsigned(0));
if (metadata_location == 0 || metadata_location == LLDB_INVALID_ADDRESS)
return nullptr;
return GetMetadataPromise(metadata_location, *metadata_ptr_var_sp);
}
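/// Import \p base_type into the scratch context and substitute every
/// unbound generic type parameter with the concrete type the runtime
/// reports for \p stack_frame.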
CompilerType
SwiftLanguageRuntime::DoArchetypeBindingForType(StackFrame &stack_frame,
CompilerType base_type) {
auto sc = stack_frame.GetSymbolContext(lldb::eSymbolContextEverything);
Status error;
// A failing Clang import in a module context permanently damages
// that module context. Binding archetypes can trigger an import of
// another module, so switch to a scratch context where such an
// operation is safe.
auto &target = m_process->GetTarget();
assert(IsScratchContextLocked(target) &&
"Swift scratch context not locked ahead of archetype binding");
auto scratch_ctx = target.GetScratchSwiftASTContext(error, stack_frame);
if (!scratch_ctx)
return base_type;
base_type = scratch_ctx->ImportType(base_type, error);
if (base_type.GetTypeInfo() & lldb::eTypeIsSwift) {
swift::Type target_swift_type(GetSwiftType(base_type));
target_swift_type = target_swift_type.transform(
[this, &stack_frame,
&scratch_ctx](swift::Type candidate_type) -> swift::Type {
swift::TypeBase *type = candidate_type.getPointer();
StreamString type_name;
if (!GetAbstractTypeName(type_name, type))
return candidate_type;
CompilerType concrete_type = this->GetConcreteType(
&stack_frame, ConstString(type_name.GetString()));
Status import_error;
CompilerType target_concrete_type =
scratch_ctx->ImportType(concrete_type, import_error);
if (target_concrete_type.IsValid())
return swift::Type(GetSwiftType(target_concrete_type));
return candidate_type;
});
return {target_swift_type.getPointer()};
}
return base_type;
}
bool SwiftLanguageRuntime::GetAbstractTypeName(StreamString &name,
swift::Type swift_type) {
StreamString assoc;
auto *dependent_member =
llvm::dyn_cast<swift::DependentMemberType>(swift_type.getPointer());
swift::TypeBase *base = swift_type.getPointer();
while (dependent_member) {
base = dependent_member->getBase().getPointer();
assoc.Printf(".%s", dependent_member->getName());
dependent_member = llvm::dyn_cast<swift::DependentMemberType>(base);
}
auto *generic_type_param = llvm::dyn_cast<swift::GenericTypeParamType>(base);
if (!generic_type_param)
return false;
name.Printf(u8"\u03C4_%d_%d%s", generic_type_param->getDepth(),
generic_type_param->getIndex(), assoc.GetString());
return true;
}
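/// Resolve the dynamic type of a value whose static type is a generic type
/// parameter, by looking up the matching "$τ_<depth>_<index>" metadata
/// pointer variable in the current frame.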
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_GenericTypeParam(
ValueObject &in_value, SwiftASTContext &scratch_ctx,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
StreamString assoc;
StackFrame *frame(in_value.GetFrameSP().get());
StreamString type_name;
auto swift_type = GetSwiftType(in_value.GetCompilerType());
if (!GetAbstractTypeName(type_name, swift_type))
return false;
auto promise_sp = GetPromiseForTypeNameAndFrame(type_name.GetData(), frame);
if (!promise_sp)
return false;
if (!GetDynamicTypeAndAddress_Promise(in_value, promise_sp, use_dynamic,
class_type_or_name, address))
return false;
if (promise_sp->IsStaticallyDetermined())
return true;
// Ask RemoteAST about the dynamic type, as it might be different from the
// static one of the class.
Status error;
lldb::addr_t addr_of_meta = address.GetLoadAddress(&m_process->GetTarget());
addr_of_meta = m_process->ReadPointerFromMemory(addr_of_meta, error);
if (addr_of_meta == LLDB_INVALID_ADDRESS || addr_of_meta == 0 ||
error.Fail())
return true;
auto &remote_ast = GetRemoteASTContext(scratch_ctx);
swift::remote::RemoteAddress metadata_address(addr_of_meta);
auto instance_type =
remote_ast.getTypeForRemoteTypeMetadata(metadata_address,
/*skipArtificial*/ true);
// If we got this far, we know we already have a valid dynamic type
// in our hand. If RemoteAST gives us a different answer, update the
// type, otherwise return what we have.
if (instance_type)
class_type_or_name.SetCompilerType(
{&scratch_ctx, instance_type.getValue().getPointer()});
return true;
}
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Tuple(
ValueObject &in_value, SwiftASTContext &scratch_ctx,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
Status error;
std::vector<CompilerType> dyn_types;
for (size_t idx = 0; idx < in_value.GetNumChildren(); idx++) {
ValueObjectSP child_sp(in_value.GetChildAtIndex(idx, true));
    TypeAndOrName type_and_or_name;
    // Use a distinct name to avoid shadowing the out parameter `address`.
    Address child_address;
    Value::ValueType value_type;
    CompilerType child_type;
    if (!GetDynamicTypeAndAddress(*child_sp.get(), use_dynamic,
                                  type_and_or_name, child_address, value_type))
child_type = child_sp->GetCompilerType();
else
child_type = type_and_or_name.GetCompilerType();
dyn_types.push_back(scratch_ctx.ImportType(child_type, error));
}
CompilerType dyn_tuple_type = scratch_ctx.CreateTupleType(dyn_types);
class_type_or_name.SetCompilerType(dyn_tuple_type);
lldb::addr_t tuple_address = in_value.GetAddressOf(true, nullptr);
if (error.Fail() || !tuple_address || tuple_address == LLDB_INVALID_ADDRESS)
return false;
address.SetLoadAddress(tuple_address, in_value.GetTargetSP().get());
return true;
}
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Struct(
ValueObject &in_value, CompilerType &bound_type,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
class_type_or_name.SetCompilerType(bound_type);
lldb::addr_t struct_address = in_value.GetAddressOf(true, nullptr);
if (!struct_address || struct_address == LLDB_INVALID_ADDRESS)
if (!SwiftASTContext::IsPossibleZeroSizeType(bound_type))
return false;
address.SetLoadAddress(struct_address, in_value.GetTargetSP().get());
return true;
}
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_Enum(
ValueObject &in_value, CompilerType &bound_type,
lldb::DynamicValueType use_dynamic, TypeAndOrName &class_type_or_name,
Address &address) {
class_type_or_name.SetCompilerType(bound_type);
lldb::addr_t enum_address = in_value.GetAddressOf(true, nullptr);
if (!enum_address || LLDB_INVALID_ADDRESS == enum_address)
if (!SwiftASTContext::IsPossibleZeroSizeType(bound_type))
return false;
address.SetLoadAddress(enum_address, in_value.GetTargetSP().get());
return true;
}
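/// Resolve the dynamic type of the payload of an indirect enum case, which
/// the runtime stores in a heap-allocated box.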
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress_IndirectEnumCase(
ValueObject &in_value, lldb::DynamicValueType use_dynamic,
TypeAndOrName &class_type_or_name, Address &address) {
static ConstString g_offset("offset");
DataExtractor data;
Status error;
if (!(in_value.GetParent() && in_value.GetParent()->GetData(data, error) &&
error.Success()))
return false;
bool has_payload;
bool is_indirect;
CompilerType payload_type;
if (!SwiftASTContext::GetSelectedEnumCase(
in_value.GetParent()->GetCompilerType(), data, nullptr, &has_payload,
&payload_type, &is_indirect))
return false;
if (has_payload && is_indirect && payload_type)
class_type_or_name.SetCompilerType(payload_type);
lldb::addr_t box_addr = in_value.GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
if (box_addr == LLDB_INVALID_ADDRESS)
return false;
box_addr = MaskMaybeBridgedPointer(box_addr);
lldb::addr_t box_location = m_process->ReadPointerFromMemory(box_addr, error);
if (box_location == LLDB_INVALID_ADDRESS)
return false;
box_location = MaskMaybeBridgedPointer(box_location);
ProcessStructReader reader(m_process, box_location, GetBoxMetadataType());
uint32_t offset = reader.GetField<uint32_t>(g_offset);
lldb::addr_t box_value = box_addr + offset;
// try to read one byte at the box value
m_process->ReadUnsignedIntegerFromMemory(box_value, 1, 0, error);
if (error.Fail()) // and if that fails, then we're off in no man's land
return false;
Flags type_info(payload_type.GetTypeInfo());
if (type_info.AllSet(eTypeIsSwift | eTypeIsClass)) {
lldb::addr_t old_box_value = box_value;
box_value = m_process->ReadPointerFromMemory(box_value, error);
if (box_value == LLDB_INVALID_ADDRESS)
return false;
DataExtractor data(&box_value, m_process->GetAddressByteSize(),
m_process->GetByteOrder(),
m_process->GetAddressByteSize());
ValueObjectSP valobj_sp(ValueObject::CreateValueObjectFromData(
"_", data, *m_process, payload_type));
if (!valobj_sp)
return false;
Value::ValueType value_type;
if (!GetDynamicTypeAndAddress(*valobj_sp, use_dynamic, class_type_or_name,
address, value_type))
return false;
address.SetRawAddress(old_box_value);
return true;
} else if (type_info.AllSet(eTypeIsSwift | eTypeIsProtocol)) {
SwiftASTContext::ProtocolInfo protocol_info;
if (!SwiftASTContext::GetProtocolTypeInfo(payload_type, protocol_info))
return false;
auto ptr_size = m_process->GetAddressByteSize();
std::vector<uint8_t> buffer(ptr_size * protocol_info.m_num_storage_words,
0);
for (uint32_t idx = 0; idx < protocol_info.m_num_storage_words; idx++) {
lldb::addr_t word = m_process->ReadUnsignedIntegerFromMemory(
box_value + idx * ptr_size, ptr_size, 0, error);
if (error.Fail())
return false;
memcpy(&buffer[idx * ptr_size], &word, ptr_size);
}
DataExtractor data(&buffer[0], buffer.size(), m_process->GetByteOrder(),
m_process->GetAddressByteSize());
ValueObjectSP valobj_sp(ValueObject::CreateValueObjectFromData(
"_", data, *m_process, payload_type));
if (!valobj_sp)
return false;
Value::ValueType value_type;
if (!GetDynamicTypeAndAddress(*valobj_sp, use_dynamic, class_type_or_name,
address, value_type))
return false;
address.SetRawAddress(box_value);
return true;
} else {
// This is most likely a statically known type.
address.SetLoadAddress(box_value, &m_process->GetTarget());
return true;
}
}
// Dynamic type resolution tends to want to generate scalar data - but there
// are caveats. Per the original comment here:
// "Our address is the location of the dynamic type stored in memory. It isn't
// a load address, because we aren't pointing to the LOCATION that stores the
// pointer to us, we're pointing to us..."
// See inlined comments for exceptions to this general rule.
Value::ValueType SwiftLanguageRuntime::GetValueType(
Value::ValueType static_value_type, const CompilerType &static_type,
const CompilerType &dynamic_type, bool is_indirect_enum_case) {
Flags static_type_flags(static_type.GetTypeInfo());
Flags dynamic_type_flags(dynamic_type.GetTypeInfo());
if (dynamic_type_flags.AllSet(eTypeIsSwift)) {
// for a protocol object where does the dynamic data live if the target
// object is a struct? (for a class, it's easy)
if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsProtocol) &&
dynamic_type_flags.AnySet(eTypeIsStructUnion | eTypeIsEnumeration)) {
SwiftASTContext *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(static_type.GetTypeSystem());
if (swift_ast_ctx && swift_ast_ctx->IsErrorType(static_type)) {
// ErrorType values are always a pointer
return Value::eValueTypeLoadAddress;
}
switch (SwiftASTContext::GetAllocationStrategy(dynamic_type)) {
case SwiftASTContext::TypeAllocationStrategy::eDynamic:
case SwiftASTContext::TypeAllocationStrategy::eUnknown:
break;
case SwiftASTContext::TypeAllocationStrategy::eInline: // inline data;
// same as the
// static data
return static_value_type;
case SwiftASTContext::TypeAllocationStrategy::ePointer: // pointed-to; in
// the target
return Value::eValueTypeLoadAddress;
}
}
if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsGenericTypeParam)) {
      // If we are handling a non-pointer Swift type obtained from an
      // archetype, then the runtime vends the location of the object, not
      // the object per se (since the object is not a pointer itself, this
      // is way easier to achieve). Hence, it's a load address, not a scalar
      // containing a pointer as for ObjC classes.
if (dynamic_type_flags.AllClear(eTypeIsPointer | eTypeIsReference |
eTypeInstanceIsPointer))
return Value::eValueTypeLoadAddress;
}
if (static_type_flags.AllSet(eTypeIsSwift | eTypeIsPointer) &&
static_type_flags.AllClear(eTypeIsGenericTypeParam)) {
// FIXME: This branch is not covered by any testcases in the test suite.
if (is_indirect_enum_case || static_type_flags.AllClear(eTypeIsBuiltIn))
return Value::eValueTypeLoadAddress;
}
}
// Enabling this makes the inout_variables test hang.
// return Value::eValueTypeScalar;
if (static_type_flags.AllSet(eTypeIsSwift) &&
dynamic_type_flags.AllSet(eTypeIsSwift) &&
dynamic_type_flags.AllClear(eTypeIsPointer | eTypeInstanceIsPointer))
return static_value_type;
else
return Value::eValueTypeScalar;
}
static bool IsIndirectEnumCase(ValueObject &valobj) {
return (valobj.GetLanguageFlags() &
SwiftASTContext::LanguageFlags::eIsIndirectEnumCase) ==
SwiftASTContext::LanguageFlags::eIsIndirectEnumCase;
}
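/// Main entry point for Swift dynamic type resolution: dispatches on the
/// kind of the static type of \p in_value and fills in the dynamic type,
/// its address, and the resulting value type.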
bool SwiftLanguageRuntime::GetDynamicTypeAndAddress(
ValueObject &in_value, lldb::DynamicValueType use_dynamic,
TypeAndOrName &class_type_or_name, Address &address,
Value::ValueType &value_type) {
class_type_or_name.Clear();
if (use_dynamic == lldb::eNoDynamicValues || !CouldHaveDynamicValue(in_value))
return false;
// Dynamic type resolution in RemoteAST might pull in other Swift modules, so
// use the scratch context where such operations are legal and safe.
assert(IsScratchContextLocked(in_value.GetTargetSP()) &&
"Swift scratch context not locked ahead of dynamic type resolution");
auto scratch_ctx = in_value.GetScratchSwiftASTContext();
if (!scratch_ctx)
return false;
auto retry_once = [&]() {
// Retry exactly once using the per-module fallback scratch context.
auto &target = m_process->GetTarget();
if (!target.UseScratchTypesystemPerModule()) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (log)
log->Printf("Dynamic type resolution detected fatal errors in "
"shared Swift state. Falling back to per-module "
"scratch context.\n");
target.SetUseScratchTypesystemPerModule(true);
return GetDynamicTypeAndAddress(in_value, use_dynamic, class_type_or_name,
address, value_type);
}
return false;
};
if (scratch_ctx->HasFatalErrors())
return retry_once();
// Import the type into the scratch context. Any form of dynamic
// type resolution may trigger a cross-module import.
CompilerType val_type(in_value.GetCompilerType());
Flags type_info(val_type.GetTypeInfo());
if (!type_info.AnySet(eTypeIsSwift))
return false;
bool success = false;
bool is_indirect_enum_case = IsIndirectEnumCase(in_value);
// Type kinds with metadata don't need archetype binding.
if (is_indirect_enum_case)
// ..._IndirectEnumCase() recurses, no need to bind archetypes.
success = GetDynamicTypeAndAddress_IndirectEnumCase(
in_value, use_dynamic, class_type_or_name, address);
else if (type_info.AnySet(eTypeIsClass) ||
type_info.AllSet(eTypeIsBuiltIn | eTypeIsPointer | eTypeHasValue))
success = GetDynamicTypeAndAddress_Class(
in_value, *scratch_ctx, use_dynamic, class_type_or_name, address);
else if (type_info.AnySet(eTypeIsProtocol))
success = GetDynamicTypeAndAddress_Protocol(
in_value, *scratch_ctx, use_dynamic, class_type_or_name, address);
else if (type_info.AnySet(eTypeIsGenericTypeParam))
// ..._GenericTypeParam performs the archetype binding *and* sets address.
success = GetDynamicTypeAndAddress_GenericTypeParam(
in_value, *scratch_ctx, use_dynamic, class_type_or_name, address);
else if (type_info.AnySet(eTypeIsTuple))
// GetDynamicTypeAndAddress_Tuple recursively iterates over all children.
success = GetDynamicTypeAndAddress_Tuple(
in_value, *scratch_ctx, use_dynamic, class_type_or_name, address);
else {
// Perform archetype binding in the scratch context.
auto *frame = in_value.GetExecutionContextRef().GetFrameSP().get();
if (!frame)
return false;
CompilerType bound_type = DoArchetypeBindingForType(*frame, val_type);
if (!bound_type)
return false;
else if (type_info.AnySet(eTypeIsEnumeration))
success = GetDynamicTypeAndAddress_Enum(in_value, bound_type, use_dynamic,
class_type_or_name, address);
else if (type_info.AnySet(eTypeIsStructUnion))
success = GetDynamicTypeAndAddress_Struct(
in_value, bound_type, use_dynamic, class_type_or_name, address);
}
if (success)
value_type = GetValueType(
in_value.GetValue().GetValueType(), in_value.GetCompilerType(),
class_type_or_name.GetCompilerType(), is_indirect_enum_case);
else if (scratch_ctx->HasFatalErrors())
return retry_once();
return success;
}
TypeAndOrName
SwiftLanguageRuntime::FixUpDynamicType(const TypeAndOrName &type_and_or_name,
ValueObject &static_value) {
TypeAndOrName ret(type_and_or_name);
bool should_be_made_into_ref = false;
bool should_be_made_into_ptr = false;
Flags type_flags(static_value.GetCompilerType().GetTypeInfo());
Flags type_andor_name_flags(type_and_or_name.GetCompilerType().GetTypeInfo());
  // If the static type is a pointer or reference, so should be the dynamic
  // type. Caveat: if the static type is a Swift class instance, the dynamic
  // type could either be a Swift type (no need to change anything), or an
  // ObjC type, in which case it needs to be made into a pointer.
if (type_flags.AnySet(eTypeIsPointer))
should_be_made_into_ptr =
(type_flags.AllClear(eTypeIsGenericTypeParam | eTypeIsBuiltIn) &&
!IsIndirectEnumCase(static_value));
else if (type_flags.AnySet(eTypeInstanceIsPointer))
should_be_made_into_ptr = !type_andor_name_flags.AllSet(eTypeIsSwift);
else if (type_flags.AnySet(eTypeIsReference))
should_be_made_into_ref = true;
else if (type_flags.AllSet(eTypeIsSwift | eTypeIsProtocol))
should_be_made_into_ptr =
type_and_or_name.GetCompilerType().IsRuntimeGeneratedType() &&
!type_and_or_name.GetCompilerType().IsPointerType();
if (type_and_or_name.HasType()) {
    // The type will always be the type of the dynamic object. If our
    // parent's type was a pointer, then our type should be a pointer to the
    // type of the dynamic object. If a reference, then the original type
    // should be okay...
CompilerType orig_type = type_and_or_name.GetCompilerType();
CompilerType corrected_type = orig_type;
if (should_be_made_into_ptr)
corrected_type = orig_type.GetPointerType();
else if (should_be_made_into_ref)
corrected_type = orig_type.GetLValueReferenceType();
ret.SetCompilerType(corrected_type);
}
return ret;
}
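/// Adjust a reference value so that \p addr points at the actual object:
/// strips the Swift spare bits for the current architecture and, on Darwin,
/// dereferences unowned references to Objective-C objects.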
bool SwiftLanguageRuntime::FixupReference(lldb::addr_t &addr,
CompilerType type) {
swift::CanType swift_can_type = GetCanonicalSwiftType(type);
switch (swift_can_type->getKind()) {
case swift::TypeKind::UnownedStorage: {
Target &target = m_process->GetTarget();
llvm::Triple triple = target.GetArchitecture().GetTriple();
// On Darwin the Swift runtime stores unowned references to
// Objective-C objects as a pointer to a struct that has the
// actual object pointer at offset zero. The least significant bit
// of the reference pointer indicates whether the reference refers
// to an Objective-C or Swift object.
//
// This is a property of the Swift runtime(!). In the future it
// may be necessary to check for the version of the Swift runtime
// (or indirectly by looking at the version of the remote
// operating system) to determine how to interpret references.
if (triple.isOSDarwin()) {
// Check whether this is a reference to an Objective-C object.
if ((addr & 1) == 0) {
// This is a Swift object, no further processing necessary.
return true;
}
Status error;
if (!m_process)
return false;
// Clear the discriminator bit to get at the pointer to the struct.
addr &= ~1ULL;
size_t ptr_size = m_process->GetAddressByteSize();
// Read the pointer to the Objective-C object.
target.ReadMemory(addr & ~1ULL, false, &addr, ptr_size, error);
}
}
default:
// Adjust the pointer to strip away the spare bits.
Target &target = m_process->GetTarget();
llvm::Triple triple = target.GetArchitecture().GetTriple();
switch (triple.getArch()) {
case llvm::Triple::ArchType::aarch64:
addr &= ~SWIFT_ABI_ARM64_SWIFT_SPARE_BITS_MASK;
break;
case llvm::Triple::ArchType::arm:
addr &= ~SWIFT_ABI_ARM_SWIFT_SPARE_BITS_MASK;
break;
case llvm::Triple::ArchType::x86:
addr &= ~SWIFT_ABI_I386_SWIFT_SPARE_BITS_MASK;
break;
case llvm::Triple::ArchType::x86_64:
addr &= ~SWIFT_ABI_X86_64_SWIFT_SPARE_BITS_MASK;
break;
default:
llvm_unreachable("unsupported arch");
break;
}
break;
}
return true;
}
bool SwiftLanguageRuntime::IsRuntimeSupportValue(ValueObject &valobj) {
llvm::StringRef g_dollar_tau(u8"$\u03C4_");
auto valobj_name = valobj.GetName().GetStringRef();
if (valobj_name.startswith(g_dollar_tau))
return true;
auto valobj_type_name = valobj.GetTypeName().GetStringRef();
if (valobj_name.startswith("globalinit_") &&
valobj_type_name == "Builtin.Word")
return true;
if (valobj_name == "_argc" || valobj_name == "_unsafeArgv" ||
valobj_name == "$error" || valobj_name == "$tmpClosure")
return true;
return false;
}
bool SwiftLanguageRuntime::CouldHaveDynamicValue(ValueObject &in_value) {
// if (in_value.IsDynamic())
// return false;
if (IsIndirectEnumCase(in_value))
return true;
CompilerType var_type(in_value.GetCompilerType());
Flags var_type_flags(var_type.GetTypeInfo());
if (var_type_flags.AllSet(eTypeIsSwift | eTypeInstanceIsPointer)) {
    // Swift class instances are actually pointers, but base class instances
    // are inlined at offset 0 in the class data. If we just let base classes
    // be dynamic, it would cause an infinite recursion, so we would usually
    // disable it. But if the base class is a generic type we still need to
    // bind it, and that is a good job for dynamic types to perform.
if (in_value.IsBaseClass()) {
CompilerType base_type(in_value.GetCompilerType());
if (SwiftASTContext::IsFullyRealized(base_type))
return false;
}
return true;
}
return var_type.IsPossibleDynamicType(nullptr, false, false, true);
}
CompilerType
SwiftLanguageRuntime::GetConcreteType(ExecutionContextScope *exe_scope,
ConstString abstract_type_name) {
if (!exe_scope)
return CompilerType();
StackFrame *frame(exe_scope->CalculateStackFrame().get());
if (!frame)
return CompilerType();
MetadataPromiseSP promise_sp(
GetPromiseForTypeNameAndFrame(abstract_type_name.GetCString(), frame));
if (!promise_sp)
return CompilerType();
return promise_sp->FulfillTypePromise();
}
namespace {
enum class ThunkKind
{
Unknown = 0,
AllocatingInit,
PartialApply,
ObjCAttribute,
Reabstraction,
ProtocolConformance,
};
enum class ThunkAction
{
Unknown = 0,
GetThunkTarget,
StepIntoConformance,
StepThrough
};
}
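/// Classify a mangled thunk symbol by demangling it and inspecting its
/// top-level node kind.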
static ThunkKind
GetThunkKind(llvm::StringRef symbol_name)
{
  swift::Demangle::Context demangle_ctx;
  if (!demangle_ctx.isThunkSymbol(symbol_name))
    return ThunkKind::Unknown;
  swift::Demangle::NodePointer nodes =
      demangle_ctx.demangleSymbolAsNode(symbol_name);
  if (!nodes || nodes->getNumChildren() == 0)
    return ThunkKind::Unknown;
  if (nodes->getKind() != swift::Demangle::Node::Kind::Global)
    return ThunkKind::Unknown;
  swift::Demangle::NodePointer node_ptr = nodes->getFirstChild();
  swift::Demangle::Node::Kind kind = node_ptr->getKind();
switch (kind)
{
case swift::Demangle::Node::Kind::ObjCAttribute:
return ThunkKind::ObjCAttribute;
break;
case swift::Demangle::Node::Kind::ProtocolWitness:
if (node_ptr->getNumChildren() == 0)
return ThunkKind::Unknown;
if (node_ptr->getFirstChild()->getKind()
== swift::Demangle::Node::Kind::ProtocolConformance)
return ThunkKind::ProtocolConformance;
break;
case swift::Demangle::Node::Kind::ReabstractionThunkHelper:
return ThunkKind::Reabstraction;
case swift::Demangle::Node::Kind::PartialApplyForwarder:
return ThunkKind::PartialApply;
case swift::Demangle::Node::Kind::Allocator:
if (node_ptr->getNumChildren() == 0)
return ThunkKind::Unknown;
if (node_ptr->getFirstChild()->getKind()
== swift::Demangle::Node::Kind::Class)
return ThunkKind::AllocatingInit;
break;
default:
break;
}
return ThunkKind::Unknown;
}
static const char *GetThunkKindName (ThunkKind kind)
{
  switch (kind)
  {
    case ThunkKind::Unknown:
      return "Unknown";
    case ThunkKind::AllocatingInit:
      return "AllocatingInit";
    case ThunkKind::PartialApply:
      return "PartialApply";
    case ThunkKind::ObjCAttribute:
      return "ObjCAttribute";
    case ThunkKind::Reabstraction:
      return "Reabstraction";
    case ThunkKind::ProtocolConformance:
      return "ProtocolConformance";
  }
  llvm_unreachable("Fully covered switch above!");
}
static ThunkAction
GetThunkAction (ThunkKind kind)
{
switch (kind)
{
case ThunkKind::Unknown:
return ThunkAction::Unknown;
case ThunkKind::AllocatingInit:
return ThunkAction::StepThrough;
case ThunkKind::PartialApply:
return ThunkAction::GetThunkTarget;
case ThunkKind::ObjCAttribute:
return ThunkAction::GetThunkTarget;
case ThunkKind::Reabstraction:
return ThunkAction::StepThrough;
case ThunkKind::ProtocolConformance:
return ThunkAction::StepIntoConformance;
}
  llvm_unreachable("Fully covered switch above!");
}
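/// Find the symbol context of the function a partial apply forwarder calls,
/// preferring candidates from the same compile unit or module as \p curr_sc.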
bool SwiftLanguageRuntime::GetTargetOfPartialApply(SymbolContext &curr_sc,
ConstString &apply_name,
SymbolContext &sc) {
if (!curr_sc.module_sp)
return false;
SymbolContextList sc_list;
swift::Demangle::Context demangle_ctx;
// Make sure this is a partial apply:
std::string apply_target = demangle_ctx.getThunkTarget(apply_name.GetStringRef());
if (!apply_target.empty()) {
size_t num_symbols = curr_sc.module_sp->FindFunctions(
ConstString(apply_target), NULL, eFunctionNameTypeFull, true, false, false, sc_list);
if (num_symbols == 0)
return false;
CompileUnit *curr_cu = curr_sc.comp_unit;
size_t num_found = 0;
for (size_t i = 0; i < num_symbols; i++) {
SymbolContext tmp_sc;
if (sc_list.GetContextAtIndex(i, tmp_sc)) {
if (tmp_sc.comp_unit && curr_cu && tmp_sc.comp_unit == curr_cu) {
sc = tmp_sc;
num_found++;
} else if (curr_sc.module_sp == tmp_sc.module_sp) {
sc = tmp_sc;
num_found++;
}
}
}
if (num_found == 1)
return true;
else {
sc.Clear(false);
return false;
}
} else {
return false;
}
}
bool SwiftLanguageRuntime::IsSymbolARuntimeThunk(const Symbol &symbol) {
llvm::StringRef symbol_name = symbol.GetMangled().GetMangledName().GetStringRef();
if (symbol_name.empty())
return false;
swift::Demangle::Context demangle_ctx;
return demangle_ctx.isThunkSymbol(symbol_name);
}
lldb::ThreadPlanSP
SwiftLanguageRuntime::GetStepThroughTrampolinePlan(Thread &thread,
bool stop_others) {
// Here are the trampolines we have at present.
// 1) The thunks from protocol invocations to the call in the actual object
// implementing the protocol.
// 2) Thunks for going from Swift ObjC classes to their actual method
// invocations
// 3) Thunks that retain captured objects in closure invocations.
ThreadPlanSP new_thread_plan_sp;
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
StackFrameSP stack_sp = thread.GetStackFrameAtIndex(0);
if (!stack_sp)
return new_thread_plan_sp;
SymbolContext sc = stack_sp->GetSymbolContext(eSymbolContextEverything);
Symbol *symbol = sc.symbol;
// Note, I don't really need to consult IsSymbolARuntimeThunk here, but it
// is fast to do and
// keeps this list and the one in IsSymbolARuntimeThunk in sync.
if (!symbol || !IsSymbolARuntimeThunk(*symbol))
return new_thread_plan_sp;
// Only do this if you are at the beginning of the thunk function:
lldb::addr_t cur_addr = thread.GetRegisterContext()->GetPC();
lldb::addr_t symbol_addr = symbol->GetAddress().GetLoadAddress(
&thread.GetProcess()->GetTarget());
if (symbol_addr != cur_addr)
return new_thread_plan_sp;
Address target_address;
ConstString symbol_mangled_name = symbol->GetMangled().GetMangledName();
const char *symbol_name = symbol_mangled_name.AsCString();
ThunkKind thunk_kind = GetThunkKind(symbol_mangled_name.GetStringRef());
ThunkAction thunk_action = GetThunkAction(thunk_kind);
switch (thunk_action)
{
case ThunkAction::Unknown:
return new_thread_plan_sp;
case ThunkAction::GetThunkTarget:
{
swift::Demangle::Context demangle_ctx;
std::string thunk_target = demangle_ctx.getThunkTarget(symbol_name);
if (thunk_target.empty())
{
if (log)
log->Printf("Stepped to thunk \"%s\" (kind: %s) but could not "
"find the thunk target. ",
symbol_name,
GetThunkKindName(thunk_kind));
return new_thread_plan_sp;
}
if (log)
log->Printf("Stepped to thunk \"%s\" (kind: %s) stepping to target: \"%s\".",
symbol_name, GetThunkKindName(thunk_kind), thunk_target.c_str());
ModuleList modules = thread.GetProcess()->GetTarget().GetImages();
SymbolContextList sc_list;
modules.FindFunctionSymbols(ConstString(thunk_target),
eFunctionNameTypeFull, sc_list);
if (sc_list.GetSize() == 1) {
SymbolContext sc;
sc_list.GetContextAtIndex(0, sc);
if (sc.symbol)
target_address = sc.symbol->GetAddress();
}
}
break;
case ThunkAction::StepIntoConformance:
{
// The TTW symbols encode the protocol conformance requirements and it
// is possible to go to
// the AST and get it to replay the logic that it used to determine
// what to dispatch to.
// But that ties us too closely to the logic of the compiler, and
// these thunks are quite
// simple, they just do a little retaining, and then call the correct
// function.
// So for simplicity's sake, I'm just going to get the base name of
// the function
// this protocol thunk is preparing to call, then step into through
// the thunk, stopping if I end up
// in a frame with that function name.
swift::Demangle::Context demangle_ctx;
swift::Demangle::NodePointer demangled_nodes =
demangle_ctx.demangleSymbolAsNode(symbol_mangled_name.GetStringRef());
// Now find the ProtocolWitness node in the demangled result.
swift::Demangle::NodePointer witness_node = demangled_nodes;
bool found_witness_node = false;
while (witness_node) {
if (witness_node->getKind() ==
swift::Demangle::Node::Kind::ProtocolWitness) {
found_witness_node = true;
break;
}
witness_node = witness_node->getFirstChild();
}
if (!found_witness_node) {
if (log)
log->Printf("Stepped into witness thunk \"%s\" but could not "
"find the ProtocolWitness node in the demangled "
"nodes.",
symbol_name);
return new_thread_plan_sp;
}
size_t num_children = witness_node->getNumChildren();
if (num_children < 2) {
if (log)
log->Printf("Stepped into witness thunk \"%s\" but the "
"ProtocolWitness node doesn't have enough nodes.",
symbol_name);
return new_thread_plan_sp;
}
swift::Demangle::NodePointer function_node =
witness_node->getChild(1);
if (function_node == nullptr ||
function_node->getKind() !=
swift::Demangle::Node::Kind::Function) {
if (log)
log->Printf("Stepped into witness thunk \"%s\" but could not "
"find the function in the ProtocolWitness node.",
symbol_name);
return new_thread_plan_sp;
}
// Okay, now find the name of this function.
num_children = function_node->getNumChildren();
swift::Demangle::NodePointer name_node(nullptr);
for (size_t i = 0; i < num_children; i++) {
if (function_node->getChild(i)->getKind() ==
swift::Demangle::Node::Kind::Identifier) {
name_node = function_node->getChild(i);
break;
}
}
if (!name_node) {
if (log)
log->Printf("Stepped into witness thunk \"%s\" but could not "
"find the Function name in the function node.",
symbol_name);
return new_thread_plan_sp;
}
std::string function_name(name_node->getText());
if (function_name.empty()) {
if (log)
log->Printf("Stepped into witness thunk \"%s\" but the Function "
"name was empty.",
symbol_name);
return new_thread_plan_sp;
}
// We have to get the address range of the thunk symbol, and make a
// "step through range stepping in"
AddressRange sym_addr_range(sc.symbol->GetAddress(),
sc.symbol->GetByteSize());
new_thread_plan_sp.reset(new ThreadPlanStepInRange(
thread, sym_addr_range, sc, function_name.c_str(),
eOnlyDuringStepping, eLazyBoolNo, eLazyBoolNo));
return new_thread_plan_sp;
}
break;
case ThunkAction::StepThrough:
{
if (log)
log->Printf("Stepping through thunk: %s kind: %s",
symbol_name, GetThunkKindName(thunk_kind));
AddressRange sym_addr_range(sc.symbol->GetAddress(),
sc.symbol->GetByteSize());
new_thread_plan_sp.reset(new ThreadPlanStepInRange(
thread, sym_addr_range, sc, nullptr, eOnlyDuringStepping,
eLazyBoolNo, eLazyBoolNo));
return new_thread_plan_sp;
}
break;
}
if (target_address.IsValid()) {
new_thread_plan_sp.reset(
new ThreadPlanRunToAddress(thread, target_address, stop_others));
}
return new_thread_plan_sp;
}
void SwiftLanguageRuntime::FindFunctionPointersInCall(
StackFrame &frame, std::vector<Address> &addresses, bool debug_only,
bool resolve_thunks) {
// Extract the mangled name from the stack frame, and realize the function
// type in the Target's SwiftASTContext.
// Then walk the arguments looking for function pointers. If we find one in
// the FIRST argument, we can fetch
// the pointer value and return that.
// FIXME: when we can ask swift/llvm for the location of function arguments,
// then we can do this for all the
// function pointer arguments we find.
SymbolContext sc = frame.GetSymbolContext(eSymbolContextSymbol);
if (sc.symbol) {
Mangled mangled_name = sc.symbol->GetMangled();
if (mangled_name.GuessLanguage() == lldb::eLanguageTypeSwift) {
Status error;
Target &target = frame.GetThread()->GetProcess()->GetTarget();
ExecutionContext exe_ctx(frame);
auto swift_ast = target.GetScratchSwiftASTContext(error, frame);
if (swift_ast) {
CompilerType function_type = swift_ast->GetTypeFromMangledTypename(
mangled_name.GetMangledName().AsCString(), error);
if (error.Success()) {
if (function_type.IsFunctionType()) {
// FIXME: For now we only check the first argument since we don't
// know how to find the values
// of arguments further in the argument list.
// int num_arguments = function_type.GetFunctionArgumentCount();
// for (int i = 0; i < num_arguments; i++)
for (int i = 0; i < 1; i++) {
CompilerType argument_type =
function_type.GetFunctionArgumentTypeAtIndex(i);
if (argument_type.IsFunctionPointerType()) {
// We found a function pointer argument. Try to track down its
// value. This is a hack
// for now, we really should ask swift/llvm how to find the
// argument(s) given the
// Swift decl for this function, and then look those up in the
// frame.
ABISP abi_sp(frame.GetThread()->GetProcess()->GetABI());
ValueList argument_values;
Value input_value;
CompilerType clang_void_ptr_type =
target.GetScratchClangASTContext()
->GetBasicType(eBasicTypeVoid)
.GetPointerType();
input_value.SetValueType(Value::eValueTypeScalar);
input_value.SetCompilerType(clang_void_ptr_type);
argument_values.PushValue(input_value);
bool success = abi_sp->GetArgumentValues(
*(frame.GetThread().get()), argument_values);
if (success) {
// Now get a pointer value from the zeroth argument.
Status error;
DataExtractor data;
ExecutionContext exe_ctx;
frame.CalculateExecutionContext(exe_ctx);
error = argument_values.GetValueAtIndex(0)->GetValueAsData(
&exe_ctx, data, 0, NULL);
lldb::offset_t offset = 0;
lldb::addr_t fn_ptr_addr = data.GetPointer(&offset);
Address fn_ptr_address;
fn_ptr_address.SetLoadAddress(fn_ptr_addr, &target);
// Now check to see if this has debug info:
bool add_it = true;
if (resolve_thunks) {
SymbolContext sc;
fn_ptr_address.CalculateSymbolContext(
&sc, eSymbolContextEverything);
if (sc.comp_unit && sc.symbol) {
ConstString symbol_name =
sc.symbol->GetMangled().GetMangledName();
if (symbol_name) {
SymbolContext target_context;
if (GetTargetOfPartialApply(sc, symbol_name,
target_context)) {
if (target_context.symbol)
fn_ptr_address =
target_context.symbol->GetAddress();
else if (target_context.function)
fn_ptr_address =
target_context.function->GetAddressRange()
.GetBaseAddress();
}
}
}
}
if (debug_only) {
LineEntry line_entry;
fn_ptr_address.CalculateSymbolContextLineEntry(line_entry);
if (!line_entry.IsValid())
add_it = false;
}
if (add_it)
addresses.push_back(fn_ptr_address);
}
}
}
}
}
}
}
}
}
//------------------------------------------------------------------
// Exception breakpoint Precondition class for Swift:
//------------------------------------------------------------------
void SwiftLanguageRuntime::SwiftExceptionPrecondition::AddTypeName(
const char *class_name) {
m_type_names.insert(class_name);
}
void SwiftLanguageRuntime::SwiftExceptionPrecondition::AddEnumSpec(
const char *enum_name, const char *element_name) {
std::unordered_map<std::string, std::vector<std::string>>::value_type
new_value(enum_name, std::vector<std::string>());
auto result = m_enum_spec.emplace(new_value);
result.first->second.push_back(element_name);
}
SwiftLanguageRuntime::SwiftExceptionPrecondition::SwiftExceptionPrecondition() {
}
ValueObjectSP
SwiftLanguageRuntime::CalculateErrorValueObjectFromValue(
Value &value, ConstString name, bool persistent)
{
ValueObjectSP error_valobj_sp;
Status error;
SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>(
m_process->GetTarget().GetScratchTypeSystemForLanguage(
&error, eLanguageTypeSwift));
if (!ast_context || error.Fail())
return error_valobj_sp;
CompilerType swift_error_proto_type = ast_context->GetErrorType();
value.SetCompilerType(swift_error_proto_type);
error_valobj_sp = ValueObjectConstResult::Create(
m_process, value, name);
if (error_valobj_sp && error_valobj_sp->GetError().Success()) {
error_valobj_sp = error_valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, true);
if (!IsValidErrorValue(*(error_valobj_sp.get()))) {
error_valobj_sp.reset();
}
}
if (persistent && error_valobj_sp) {
ExecutionContext ctx =
error_valobj_sp->GetExecutionContextRef().Lock(false);
auto *exe_scope = ctx.GetBestExecutionContextScope();
if (!exe_scope)
return error_valobj_sp;
Target &target = m_process->GetTarget();
auto *persistent_state =
target.GetSwiftPersistentExpressionState(*exe_scope);
const bool is_error = true;
auto prefix = persistent_state->GetPersistentVariablePrefix(is_error);
ConstString persistent_variable_name(
persistent_state->GetNextPersistentVariableName(target, prefix));
lldb::ValueObjectSP const_valobj_sp;
// Check in case our value is already a constant value
if (error_valobj_sp->GetIsConstant()) {
const_valobj_sp = error_valobj_sp;
const_valobj_sp->SetName(persistent_variable_name);
} else
const_valobj_sp =
error_valobj_sp->CreateConstantValue(persistent_variable_name);
lldb::ValueObjectSP live_valobj_sp = error_valobj_sp;
error_valobj_sp = const_valobj_sp;
ExpressionVariableSP clang_expr_variable_sp(
persistent_state->CreatePersistentVariable(error_valobj_sp));
clang_expr_variable_sp->m_live_sp = live_valobj_sp;
clang_expr_variable_sp->m_flags |=
ClangExpressionVariable::EVIsProgramReference;
error_valobj_sp = clang_expr_variable_sp->GetValueObject();
}
return error_valobj_sp;
}
ValueObjectSP
SwiftLanguageRuntime::CalculateErrorValue(StackFrameSP frame_sp,
ConstString variable_name) {
ProcessSP process_sp(frame_sp->GetThread()->GetProcess());
ABISP abi_sp(process_sp->GetABI());
ValueList argument_values;
Value input_value;
Status error;
Target *target = frame_sp->CalculateTarget().get();
ValueObjectSP error_valobj_sp;
ClangASTContext *clang_ast_context = target->GetScratchClangASTContext();
CompilerType clang_void_ptr_type =
clang_ast_context->GetBasicType(eBasicTypeVoid).GetPointerType();
input_value.SetValueType(Value::eValueTypeScalar);
input_value.SetCompilerType(clang_void_ptr_type);
argument_values.PushValue(input_value);
auto *runtime = process_sp->GetSwiftLanguageRuntime();
if (!runtime)
return error_valobj_sp;
llvm::Optional<Value> arg0 =
runtime->GetErrorReturnLocationAfterReturn(frame_sp);
if (!arg0)
return error_valobj_sp;
ExecutionContext exe_ctx;
frame_sp->CalculateExecutionContext(exe_ctx);
auto *exe_scope = exe_ctx.GetBestExecutionContextScope();
if (!exe_scope)
return error_valobj_sp;
auto ast_context = target->GetScratchSwiftASTContext(error, *frame_sp);
if (!ast_context || error.Fail())
return error_valobj_sp;
CompilerType swift_error_proto_type = ast_context->GetErrorType();
if (!swift_error_proto_type.IsValid())
return error_valobj_sp;
arg0->SetCompilerType(swift_error_proto_type);
error_valobj_sp =
ValueObjectConstResult::Create(exe_scope, *arg0, variable_name);
if (error_valobj_sp->GetError().Fail())
return error_valobj_sp;
error_valobj_sp = error_valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, true);
return error_valobj_sp;
}
void SwiftLanguageRuntime::RegisterGlobalError(Target &target, ConstString name,
lldb::addr_t addr) {
Status ast_context_error;
SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>(
target.GetScratchTypeSystemForLanguage(&ast_context_error,
eLanguageTypeSwift));
if (ast_context_error.Success() && ast_context &&
!ast_context->HasFatalErrors()) {
SwiftPersistentExpressionState *persistent_state =
llvm::cast<SwiftPersistentExpressionState>(
target.GetPersistentExpressionStateForLanguage(
lldb::eLanguageTypeSwift));
std::string module_name = "$__lldb_module_for_";
module_name.append(&name.GetCString()[1]);
Status module_creation_error;
swift::ModuleDecl *module_decl = ast_context->CreateModule(
ConstString(module_name), module_creation_error);
if (module_creation_error.Success() && module_decl) {
const bool is_static = false;
const auto specifier = swift::VarDecl::Specifier::Let;
const bool is_capture_list = false;
swift::VarDecl *var_decl = new (*ast_context->GetASTContext())
swift::VarDecl(is_static, specifier, is_capture_list, swift::SourceLoc(),
ast_context->GetIdentifier(name.GetCString()),
module_decl);
var_decl->setType(GetSwiftType(ast_context->GetErrorType()));
var_decl->setInterfaceType(var_decl->getType());
var_decl->setDebuggerVar(true);
persistent_state->RegisterSwiftPersistentDecl(var_decl);
ConstString mangled_name;
{
swift::Mangle::ASTMangler mangler(true);
mangled_name = ConstString(mangler.mangleGlobalVariableFull(var_decl));
}
lldb::addr_t symbol_addr;
{
ProcessSP process_sp(target.GetProcessSP());
Status alloc_error;
symbol_addr = process_sp->AllocateMemory(
process_sp->GetAddressByteSize(),
lldb::ePermissionsWritable | lldb::ePermissionsReadable,
alloc_error);
if (alloc_error.Success() && symbol_addr != LLDB_INVALID_ADDRESS) {
Status write_error;
process_sp->WritePointerToMemory(symbol_addr, addr, write_error);
if (write_error.Success()) {
persistent_state->RegisterSymbol(mangled_name, symbol_addr);
}
}
}
}
}
}
bool SwiftLanguageRuntime::SwiftExceptionPrecondition::EvaluatePrecondition(
StoppointCallbackContext &context) {
if (!m_type_names.empty()) {
StackFrameSP frame_sp = context.exe_ctx_ref.GetFrameSP();
if (!frame_sp)
return true;
ValueObjectSP error_valobj_sp =
CalculateErrorValue(frame_sp, ConstString("__swift_error_var"));
if (!error_valobj_sp || error_valobj_sp->GetError().Fail())
return true;
// This shouldn't fail, since at worst it will return me the object I just
// successfully got.
std::string full_error_name(
error_valobj_sp->GetCompilerType().GetTypeName().AsCString());
size_t last_dot_pos = full_error_name.rfind('.');
std::string type_name_base;
if (last_dot_pos == std::string::npos)
type_name_base = full_error_name;
else {
if (last_dot_pos + 1 <= full_error_name.size())
type_name_base =
full_error_name.substr(last_dot_pos + 1, full_error_name.size());
}
// The type name will be the module and then the type. If the match name
// has a dot, we require a complete
// match against the type, if the type name has no dot, we match it against
// the base.
for (std::string name : m_type_names) {
if (name.rfind('.') != std::string::npos) {
if (name == full_error_name)
return true;
} else {
if (name == type_name_base)
return true;
}
}
return false;
}
return true;
}
void SwiftLanguageRuntime::SwiftExceptionPrecondition::GetDescription(
Stream &stream, lldb::DescriptionLevel level) {
if (level == eDescriptionLevelFull || level == eDescriptionLevelVerbose) {
if (m_type_names.size() > 0) {
stream.Printf("\nType Filters:");
for (std::string name : m_type_names) {
stream.Printf(" %s", name.c_str());
}
stream.Printf("\n");
}
}
}
Status SwiftLanguageRuntime::SwiftExceptionPrecondition::ConfigurePrecondition(
Args &args) {
Status error;
std::vector<std::string> object_typenames;
args.GetOptionValuesAsStrings("exception-typename", object_typenames);
for (auto type_name : object_typenames)
AddTypeName(type_name.c_str());
return error;
}
void SwiftLanguageRuntime::AddToLibraryNegativeCache(const char *library_name) {
std::lock_guard<std::mutex> locker(m_negative_cache_mutex);
m_library_negative_cache.insert(library_name);
}
bool SwiftLanguageRuntime::IsInLibraryNegativeCache(const char *library_name) {
std::lock_guard<std::mutex> locker(m_negative_cache_mutex);
return m_library_negative_cache.count(library_name) == 1;
}
lldb::addr_t
SwiftLanguageRuntime::MaskMaybeBridgedPointer(lldb::addr_t addr,
lldb::addr_t *masked_bits) {
if (!m_process)
return addr;
const ArchSpec &arch_spec(m_process->GetTarget().GetArchitecture());
ArchSpec::Core core_kind = arch_spec.GetCore();
bool is_arm = false;
bool is_intel = false;
bool is_32 = false;
bool is_64 = false;
if (core_kind == ArchSpec::Core::eCore_arm_arm64) {
is_arm = is_64 = true;
} else if (core_kind >= ArchSpec::Core::kCore_arm_first &&
core_kind <= ArchSpec::Core::kCore_arm_last) {
is_arm = true;
} else if (core_kind >= ArchSpec::Core::kCore_x86_64_first &&
core_kind <= ArchSpec::Core::kCore_x86_64_last) {
is_intel = true;
} else if (core_kind >= ArchSpec::Core::kCore_x86_32_first &&
core_kind <= ArchSpec::Core::kCore_x86_32_last) {
is_intel = true;
} else {
// this is a really random CPU core to be running on - just get out fast
return addr;
}
switch (arch_spec.GetAddressByteSize()) {
case 4:
is_32 = true;
break;
case 8:
is_64 = true;
break;
default:
// this is a really random pointer size to be running on - just get out fast
return addr;
}
lldb::addr_t mask = 0;
if (is_arm && is_64)
mask = SWIFT_ABI_ARM64_SWIFT_SPARE_BITS_MASK;
if (is_arm && is_32)
mask = SWIFT_ABI_ARM_SWIFT_SPARE_BITS_MASK;
if (is_intel && is_64)
mask = SWIFT_ABI_X86_64_SWIFT_SPARE_BITS_MASK;
if (is_intel && is_32)
mask = SWIFT_ABI_I386_SWIFT_SPARE_BITS_MASK;
if (masked_bits)
*masked_bits = addr & mask;
return addr & ~mask;
}
lldb::addr_t
SwiftLanguageRuntime::MaybeMaskNonTrivialReferencePointer(
lldb::addr_t addr,
SwiftASTContext::NonTriviallyManagedReferenceStrategy strategy) {
if (addr == 0)
return addr;
AppleObjCRuntime *objc_runtime = GetObjCRuntime();
if (objc_runtime) {
// tagged pointers don't perform any masking
if (objc_runtime->IsTaggedPointer(addr))
return addr;
}
if (!m_process)
return addr;
const ArchSpec &arch_spec(m_process->GetTarget().GetArchitecture());
ArchSpec::Core core_kind = arch_spec.GetCore();
bool is_arm = false;
bool is_intel = false;
bool is_32 = false;
bool is_64 = false;
if (core_kind == ArchSpec::Core::eCore_arm_arm64) {
is_arm = is_64 = true;
} else if (core_kind >= ArchSpec::Core::kCore_arm_first &&
core_kind <= ArchSpec::Core::kCore_arm_last) {
is_arm = true;
} else if (core_kind >= ArchSpec::Core::kCore_x86_64_first &&
core_kind <= ArchSpec::Core::kCore_x86_64_last) {
is_intel = true;
} else if (core_kind >= ArchSpec::Core::kCore_x86_32_first &&
core_kind <= ArchSpec::Core::kCore_x86_32_last) {
is_intel = true;
} else {
// this is a really random CPU core to be running on - just get out fast
return addr;
}
switch (arch_spec.GetAddressByteSize()) {
case 4:
is_32 = true;
break;
case 8:
is_64 = true;
break;
default:
// this is a really random pointer size to be running on - just get out fast
return addr;
}
lldb::addr_t mask = 0;
if (strategy == SwiftASTContext::NonTriviallyManagedReferenceStrategy::eWeak) {
bool is_indirect = true;
// On non-objc platforms, the weak reference pointer always pointed to a
// runtime structure.
// For ObjC platforms, the masked value determines whether it is indirect.
uint32_t value = 0;
if (objc_runtime)
{
if (is_intel) {
if (is_64) {
mask = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_MASK;
value = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_VALUE;
} else {
mask = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_MASK;
value = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_VALUE;
}
} else if (is_arm) {
if (is_64) {
mask = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_MASK;
value = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_VALUE;
} else {
mask = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_MASK;
value = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_VALUE;
}
}
} else {
// This name is a little confusing. The "DEFAULT" marking in System.h
// is supposed to mean: the value for non-ObjC platforms. So
// DEFAULT_OBJC here actually means "non-ObjC".
mask = SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_MASK;
value = SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_VALUE;
}
is_indirect = ((addr & mask) == value);
if (!is_indirect)
return addr;
// The masked value of address is a pointer to the runtime structure.
// The first field of the structure is the actual pointer.
Process *process = GetProcess();
Status error;
lldb::addr_t masked_addr = addr & ~mask;
lldb::addr_t isa_addr = process->ReadPointerFromMemory(masked_addr, error);
if (error.Fail())
{
// FIXME: do some logging here.
return addr;
}
return isa_addr;
} else {
if (is_arm && is_64)
mask = SWIFT_ABI_ARM64_OBJC_NUM_RESERVED_LOW_BITS;
else if (is_intel && is_64)
mask = SWIFT_ABI_X86_64_OBJC_NUM_RESERVED_LOW_BITS;
else
mask = SWIFT_ABI_DEFAULT_OBJC_NUM_RESERVED_LOW_BITS;
mask = (1 << mask) | (1 << (mask + 1));
return addr & ~mask;
}
return addr;
}
ConstString SwiftLanguageRuntime::GetErrorBackstopName() {
return ConstString("swift_errorInMain");
}
ConstString SwiftLanguageRuntime::GetStandardLibraryBaseName() {
static ConstString g_swiftCore("swiftCore");
return g_swiftCore;
}
ConstString SwiftLanguageRuntime::GetStandardLibraryName() {
PlatformSP platform_sp(m_process->GetTarget().GetPlatform());
if (platform_sp)
return platform_sp->GetFullNameForDylib(GetStandardLibraryBaseName());
return GetStandardLibraryBaseName();
}
class ProjectionSyntheticChildren : public SyntheticChildren {
public:
struct FieldProjection {
ConstString name;
CompilerType type;
int32_t byte_offset;
FieldProjection(CompilerType parent_type, ExecutionContext *exe_ctx,
size_t idx) {
const bool transparent_pointers = false;
const bool omit_empty_base_classes = true;
const bool ignore_array_bounds = false;
bool child_is_base_class = false;
bool child_is_deref_of_parent = false;
std::string child_name;
uint32_t child_byte_size;
uint32_t child_bitfield_bit_size;
uint32_t child_bitfield_bit_offset;
uint64_t language_flags;
type = parent_type.GetChildCompilerTypeAtIndex(
exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
ignore_array_bounds, child_name, child_byte_size, byte_offset,
child_bitfield_bit_size, child_bitfield_bit_offset,
child_is_base_class, child_is_deref_of_parent, nullptr,
language_flags);
if (child_is_base_class)
type.Clear(); // invalidate - base classes are dealt with outside of the
// projection
else
name.SetCStringWithLength(child_name.c_str(), child_name.size());
}
bool IsValid() { return !name.IsEmpty() && type.IsValid(); }
explicit operator bool() { return IsValid(); }
};
struct TypeProjection {
std::vector<FieldProjection> field_projections;
ConstString type_name;
};
typedef std::unique_ptr<TypeProjection> TypeProjectionUP;
bool IsScripted() { return false; }
std::string GetDescription() { return "projection synthetic children"; }
ProjectionSyntheticChildren(const Flags &flags, TypeProjectionUP &&projection)
: SyntheticChildren(flags), m_projection(std::move(projection)) {}
protected:
TypeProjectionUP m_projection;
class ProjectionFrontEndProvider : public SyntheticChildrenFrontEnd {
public:
ProjectionFrontEndProvider(ValueObject &backend,
TypeProjectionUP &projection)
: SyntheticChildrenFrontEnd(backend), m_num_bases(0),
m_projection(projection.get()) {
lldbassert(m_projection && "need a valid projection");
CompilerType type(backend.GetCompilerType());
m_num_bases = type.GetNumDirectBaseClasses();
}
size_t CalculateNumChildren() override {
return m_projection->field_projections.size() + m_num_bases;
}
lldb::ValueObjectSP GetChildAtIndex(size_t idx) override {
if (idx < m_num_bases) {
if (ValueObjectSP base_object_sp =
m_backend.GetChildAtIndex(idx, true)) {
CompilerType base_type(base_object_sp->GetCompilerType());
ConstString base_type_name(base_type.GetTypeName());
if (base_type_name.IsEmpty() ||
!SwiftLanguageRuntime::IsSwiftClassName(base_type_name.GetCString()))
return base_object_sp;
base_object_sp = m_backend.GetSyntheticBase(
0, base_type, true,
Mangled(base_type_name, true)
.GetDemangledName(lldb::eLanguageTypeSwift));
return base_object_sp;
} else
return nullptr;
}
idx -= m_num_bases;
if (idx < m_projection->field_projections.size()) {
auto &projection(m_projection->field_projections.at(idx));
return m_backend.GetSyntheticChildAtOffset(
projection.byte_offset, projection.type, true, projection.name);
}
return nullptr;
}
size_t GetIndexOfChildWithName(const ConstString &name) override {
for (size_t idx = 0; idx < m_projection->field_projections.size();
idx++) {
if (m_projection->field_projections.at(idx).name == name)
return idx;
}
return UINT32_MAX;
}
bool Update() override { return false; }
bool MightHaveChildren() override { return true; }
ConstString GetSyntheticTypeName() override {
return m_projection->type_name;
}
private:
size_t m_num_bases;
TypeProjectionUP::element_type *m_projection;
};
public:
SyntheticChildrenFrontEnd::AutoPointer GetFrontEnd(ValueObject &backend) {
return SyntheticChildrenFrontEnd::AutoPointer(
new ProjectionFrontEndProvider(backend, m_projection));
}
};
lldb::SyntheticChildrenSP
SwiftLanguageRuntime::GetBridgedSyntheticChildProvider(ValueObject &valobj) {
const char *type_name(valobj.GetCompilerType().GetTypeName().AsCString());
if (type_name && *type_name) {
auto iter = m_bridged_synthetics_map.find(type_name),
end = m_bridged_synthetics_map.end();
if (iter != end)
return iter->second;
}
ProjectionSyntheticChildren::TypeProjectionUP type_projection(
new ProjectionSyntheticChildren::TypeProjectionUP::element_type());
if (auto swift_ast_ctx = valobj.GetScratchSwiftASTContext()) {
Status error;
CompilerType swift_type =
swift_ast_ctx->GetTypeFromMangledTypename(type_name, error);
if (swift_type.IsValid()) {
ExecutionContext exe_ctx(GetProcess());
bool any_projected = false;
for (size_t idx = 0, e = swift_type.GetNumChildren(true, &exe_ctx);
idx < e; idx++) {
// if a projection fails, keep going - we have offsets here, so it
// should be OK to skip some members
if (auto projection = ProjectionSyntheticChildren::FieldProjection(
swift_type, &exe_ctx, idx)) {
any_projected = true;
type_projection->field_projections.push_back(projection);
}
}
if (any_projected) {
type_projection->type_name = swift_type.GetDisplayTypeName();
SyntheticChildrenSP synth_sp =
SyntheticChildrenSP(new ProjectionSyntheticChildren(
SyntheticChildren::Flags(), std::move(type_projection)));
return (m_bridged_synthetics_map[type_name] = synth_sp);
}
}
}
return nullptr;
}
void SwiftLanguageRuntime::WillStartExecutingUserExpression() {
std::lock_guard<std::mutex> lock(m_active_user_expr_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
if (m_active_user_expr_count == 0 &&
m_dynamic_exclusivity_flag_addr) {
// We're executing the first user expression. Toggle the flag.
Status error;
TypeSystem *type_system =
m_process->GetTarget().GetScratchTypeSystemForLanguage(
&error,
eLanguageTypeC_plus_plus);
if (error.Fail()) {
if (log)
log->Printf("SwiftLanguageRuntime: Unable to get pointer to type "
"system: %s", error.AsCString());
return;
}
ConstString BoolName("bool");
size_t bool_size =
type_system->GetBuiltinTypeByName(BoolName).GetByteSize(nullptr);
Scalar original_value;
m_process->ReadScalarIntegerFromMemory(*m_dynamic_exclusivity_flag_addr,
bool_size, false, original_value,
error);
m_original_dynamic_exclusivity_flag_state = original_value.UInt() != 0;
if (error.Fail()) {
if (log)
log->Printf("SwiftLanguageRuntime: Unable to read "
"disableExclusivityChecking flag state: %s",
error.AsCString());
} else {
Scalar new_value(1U);
m_process->WriteScalarToMemory(*m_dynamic_exclusivity_flag_addr,
new_value, bool_size, error);
if (error.Fail()) {
if (log)
log->Printf("SwiftLanguageRuntime: Unable to set "
"disableExclusivityChecking flag state: %s",
error.AsCString());
} else {
if (log)
log->Printf("SwiftLanguageRuntime: Changed "
"disableExclusivityChecking flag state from %u to 1",
m_original_dynamic_exclusivity_flag_state);
}
}
}
++m_active_user_expr_count;
if (log)
log->Printf("SwiftLanguageRuntime: starting user expression. "
"Number active: %u", m_active_user_expr_count);
}
void SwiftLanguageRuntime::DidFinishExecutingUserExpression() {
std::lock_guard<std::mutex> lock(m_active_user_expr_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_EXPRESSIONS));
--m_active_user_expr_count;
if (log)
log->Printf("SwiftLanguageRuntime: finished user expression. "
"Number active: %u", m_active_user_expr_count);
if (m_active_user_expr_count == 0 &&
m_dynamic_exclusivity_flag_addr) {
Status error;
TypeSystem *type_system =
m_process->GetTarget().GetScratchTypeSystemForLanguage(
&error,
eLanguageTypeC_plus_plus);
if (error.Fail()) {
if (log)
log->Printf("SwiftLanguageRuntime: Unable to get pointer to type "
"system: %s", error.AsCString());
return;
}
ConstString BoolName("bool");
size_t bool_size =
type_system->GetBuiltinTypeByName(BoolName).GetByteSize(nullptr);
Scalar original_value(m_original_dynamic_exclusivity_flag_state ? 1U : 0U);
m_process->WriteScalarToMemory(*m_dynamic_exclusivity_flag_addr,
original_value, bool_size, error);
if (error.Fail()) {
if (log)
log->Printf("SwiftLanguageRuntime: Unable to reset "
"disableExclusivityChecking flag state: %s",
error.AsCString());
} else {
if (log)
log->Printf("SwiftLanguageRuntime: Changed "
"disableExclusivityChecking flag state back to %u",
m_original_dynamic_exclusivity_flag_state);
}
}
}
llvm::Optional<Value> SwiftLanguageRuntime::GetErrorReturnLocationAfterReturn(
lldb::StackFrameSP frame_sp)
{
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
llvm::Optional<Value> error_val;
llvm::StringRef error_reg_name;
ArchSpec arch_spec(GetTargetRef().GetArchitecture());
switch (arch_spec.GetMachine()) {
case llvm::Triple::ArchType::arm:
error_reg_name = "r6";
break;
case llvm::Triple::ArchType::aarch64:
error_reg_name = "x21";
break;
case llvm::Triple::ArchType::x86_64:
error_reg_name = "r12";
break;
default:
break;
}
if (error_reg_name.empty())
return error_val;
RegisterContextSP reg_ctx = frame_sp->GetRegisterContext();
const RegisterInfo *reg_info = reg_ctx->GetRegisterInfoByName(error_reg_name);
lldbassert(reg_info && "didn't get the right register name for swift error register");
if (!reg_info)
return error_val;
RegisterValue reg_value;
if (!reg_ctx->ReadRegister(reg_info, reg_value))
{
// Do some logging here.
return error_val;
}
lldb::addr_t error_addr = reg_value.GetAsUInt64();
if (error_addr == 0)
return error_val;
Value val;
if (reg_value.GetScalarValue(val.GetScalar())) {
val.SetValueType(Value::eValueTypeScalar);
val.SetContext(Value::eContextTypeRegisterInfo,
const_cast<RegisterInfo *>(reg_info));
error_val = val;
}
return error_val;
}
llvm::Optional<Value> SwiftLanguageRuntime::GetErrorReturnLocationBeforeReturn(
lldb::StackFrameSP frame_sp, bool &need_to_check_after_return) {
Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
llvm::Optional<Value> error_val;
if (!frame_sp)
{
need_to_check_after_return = false;
return error_val;
}
// For Architectures where the error isn't returned in a register,
// there's a magic variable that points to the value. Check that first:
ConstString error_location_name("$error");
VariableListSP variables_sp = frame_sp->GetInScopeVariableList(false);
VariableSP error_loc_var_sp = variables_sp->FindVariable(
error_location_name, eValueTypeVariableArgument);
if (error_loc_var_sp) {
need_to_check_after_return = false;
ValueObjectSP error_loc_val_sp = frame_sp->GetValueObjectForFrameVariable(
error_loc_var_sp, eNoDynamicValues);
if (error_loc_val_sp && error_loc_val_sp->GetError().Success())
error_val = error_loc_val_sp->GetValue();
// if (log)
// log->Printf("Found return address: 0x%" PRIu64 " from error variable.", return_addr);
return error_val;
}
// Otherwise, see if we know which register it lives in from the calling convention.
// This should probably go in the ABI plugin not here, but the Swift ABI can change with
// swiftlang versions and that would make it awkward in the ABI.
Function *func = frame_sp->GetSymbolContext(eSymbolContextFunction).function;
if (!func)
{
need_to_check_after_return = false;
return error_val;
}
need_to_check_after_return = func->CanThrow();
return error_val;
}
//------------------------------------------------------------------
// Static Functions
//------------------------------------------------------------------
LanguageRuntime *
SwiftLanguageRuntime::CreateInstance(Process *process,
lldb::LanguageType language) {
if (language == eLanguageTypeSwift)
return new SwiftLanguageRuntime(process);
else
return NULL;
}
lldb::BreakpointResolverSP
SwiftLanguageRuntime::CreateExceptionResolver(Breakpoint *bkpt, bool catch_bp,
bool throw_bp) {
BreakpointResolverSP resolver_sp;
if (throw_bp)
resolver_sp.reset(new BreakpointResolverName(
bkpt, "swift_willThrow", eFunctionNameTypeBase, eLanguageTypeUnknown,
Breakpoint::Exact, 0, eLazyBoolNo));
// FIXME: We don't do catch breakpoints for ObjC yet.
// Should there be some way for the runtime to specify what it can do in this
// regard?
return resolver_sp;
}
static const char *
SwiftDemangleNodeKindToCString(const swift::Demangle::Node::Kind node_kind) {
#define NODE(e) \
case swift::Demangle::Node::Kind::e: \
return #e;
switch (node_kind) {
#include "swift/Demangling/DemangleNodes.def"
}
return "swift::Demangle::Node::Kind::???";
#undef NODE
}
static OptionDefinition g_swift_demangle_options[] = {
// clang-format off
{LLDB_OPT_SET_1, false, "expand", 'e', OptionParser::eNoArgument, nullptr, {}, 0, eArgTypeNone, "Whether LLDB should print the demangled tree"},
// clang-format on
};
class CommandObjectSwift_Demangle : public CommandObjectParsed {
public:
CommandObjectSwift_Demangle(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "demangle",
"Demangle a Swift mangled name",
"language swift demangle"),
m_options() {}
~CommandObjectSwift_Demangle() {}
virtual Options *GetOptions() { return &m_options; }
class CommandOptions : public Options {
public:
CommandOptions() : Options(), m_expand(false, false) {
OptionParsingStarting(nullptr);
}
virtual ~CommandOptions() {}
Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
ExecutionContext *execution_context) override {
Status error;
const int short_option = m_getopt_table[option_idx].val;
switch (short_option) {
case 'e':
m_expand.SetCurrentValue(true);
break;
default:
error.SetErrorStringWithFormat("invalid short option character '%c'",
short_option);
break;
}
return error;
}
void OptionParsingStarting(ExecutionContext *execution_context) override {
m_expand.Clear();
}
llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
return llvm::makeArrayRef(g_swift_demangle_options);
}
// Options table: Required for subclasses of Options.
OptionValueBoolean m_expand;
};
protected:
void PrintNode(swift::Demangle::NodePointer node_ptr, Stream &stream,
int depth = 0) {
if (!node_ptr)
return;
std::string indent(2 * depth, ' ');
stream.Printf("%s", indent.c_str());
stream.Printf("kind=%s",
SwiftDemangleNodeKindToCString(node_ptr->getKind()));
if (node_ptr->hasText()) {
std::string Text = node_ptr->getText();
stream.Printf(", text=\"%s\"", Text.c_str());
}
if (node_ptr->hasIndex())
stream.Printf(", index=%" PRIu64, node_ptr->getIndex());
stream.Printf("\n");
for (auto &&child : *node_ptr) {
PrintNode(child, stream, depth + 1);
}
}
bool DoExecute(Args &command, CommandReturnObject &result) {
for (size_t i = 0; i < command.GetArgumentCount(); i++) {
const char *arg = command.GetArgumentAtIndex(i);
if (arg && *arg) {
swift::Demangle::Context demangle_ctx;
auto node_ptr = demangle_ctx.demangleSymbolAsNode(llvm::StringRef(arg));
if (node_ptr) {
if (m_options.m_expand) {
PrintNode(node_ptr, result.GetOutputStream());
}
result.GetOutputStream().Printf(
"%s ---> %s\n", arg,
swift::Demangle::nodeToString(node_ptr).c_str());
}
}
}
result.SetStatus(lldb::eReturnStatusSuccessFinishResult);
return true;
}
CommandOptions m_options;
};
class CommandObjectSwift_RefCount : public CommandObjectRaw {
public:
CommandObjectSwift_RefCount(CommandInterpreter &interpreter)
: CommandObjectRaw(interpreter, "refcount",
"Inspect the reference count data for a Swift object",
"language swift refcount",
eCommandProcessMustBePaused | eCommandRequiresFrame) {}
~CommandObjectSwift_RefCount() {}
virtual Options *GetOptions() { return nullptr; }
private:
enum class ReferenceCountType {
eReferenceStrong,
eReferenceUnowned,
eReferenceWeak,
};
llvm::Optional<uint32_t> getReferenceCount(StringRef ObjName,
ReferenceCountType Type,
ExecutionContext &exe_ctx,
StackFrameSP &Frame) {
std::string Kind;
switch (Type) {
case ReferenceCountType::eReferenceStrong:
Kind = "";
break;
case ReferenceCountType::eReferenceUnowned:
Kind = "Unowned";
break;
case ReferenceCountType::eReferenceWeak:
Kind = "Weak";
break;
default:
llvm_unreachable("Unhandled refcount type in switch!");
}
EvaluateExpressionOptions eval_options;
eval_options.SetLanguage(lldb::eLanguageTypeSwift);
eval_options.SetResultIsInternal(true);
ValueObjectSP result_valobj_sp;
std::string Expr =
(llvm::Twine("Swift._get") + Kind + llvm::Twine("RetainCount(") +
ObjName + llvm::Twine(")"))
.str();
bool evalStatus = exe_ctx.GetTargetSP()->EvaluateExpression(
Expr, Frame.get(), result_valobj_sp, eval_options);
if (evalStatus != eExpressionCompleted)
return llvm::None;
bool success = false;
uint32_t count = result_valobj_sp->GetSyntheticValue()->GetValueAsUnsigned(
UINT32_MAX, &success);
if (!success)
return llvm::None;
return count;
}
protected:
bool DoExecute(llvm::StringRef command, CommandReturnObject &result) {
StackFrameSP frame_sp(m_exe_ctx.GetFrameSP());
EvaluateExpressionOptions options;
options.SetLanguage(lldb::eLanguageTypeSwift);
options.SetResultIsInternal(true);
ValueObjectSP result_valobj_sp;
// We want to evaluate first the object we're trying to get the
    // refcount of, in order to, e.g., see whether it's available.
// So, given `language swift refcount patatino`, we try to
// evaluate `expr patatino` and fail early in case there is
// an error.
bool evalStatus = m_exe_ctx.GetTargetSP()->EvaluateExpression(
command, frame_sp.get(), result_valobj_sp, options);
if (evalStatus != eExpressionCompleted) {
result.SetStatus(lldb::eReturnStatusFailed);
if (result_valobj_sp && result_valobj_sp->GetError().Fail())
result.AppendError(result_valobj_sp->GetError().AsCString());
return false;
}
// At this point, we're sure we're grabbing in our hands a valid
// object and we can ask questions about it. `refcounts` are only
// defined on class objects, so we throw an error in case we're
// trying to look at something else.
result_valobj_sp = result_valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, true);
CompilerType result_type(result_valobj_sp->GetCompilerType());
if (!(result_type.GetTypeInfo() & lldb::eTypeInstanceIsPointer)) {
result.AppendError("refcount only available for class types");
result.SetStatus(lldb::eReturnStatusFailed);
return false;
}
    // Ask swift debugger support in the compiler about the object's
// reference counts, and return them to the user.
llvm::Optional<uint32_t> strong = getReferenceCount(
command, ReferenceCountType::eReferenceStrong, m_exe_ctx, frame_sp);
llvm::Optional<uint32_t> unowned = getReferenceCount(
command, ReferenceCountType::eReferenceUnowned, m_exe_ctx, frame_sp);
llvm::Optional<uint32_t> weak = getReferenceCount(
command, ReferenceCountType::eReferenceWeak, m_exe_ctx, frame_sp);
std::string unavailable = "<unavailable>";
result.AppendMessageWithFormat(
"refcount data: (strong = %s, unowned = %s, weak = %s)\n",
strong ? std::to_string(*strong).c_str() : unavailable.c_str(),
unowned ? std::to_string(*unowned).c_str() : unavailable.c_str(),
weak ? std::to_string(*weak).c_str() : unavailable.c_str());
result.SetStatus(lldb::eReturnStatusSuccessFinishResult);
return true;
}
};
class CommandObjectMultiwordSwift : public CommandObjectMultiword {
public:
CommandObjectMultiwordSwift(CommandInterpreter &interpreter)
: CommandObjectMultiword(
interpreter, "swift",
"A set of commands for operating on the Swift Language Runtime.",
"swift <subcommand> [<subcommand-options>]") {
LoadSubCommand("demangle", CommandObjectSP(new CommandObjectSwift_Demangle(
interpreter)));
LoadSubCommand("refcount", CommandObjectSP(new CommandObjectSwift_RefCount(
interpreter)));
}
virtual ~CommandObjectMultiwordSwift() {}
};
void SwiftLanguageRuntime::Initialize() {
PluginManager::RegisterPlugin(
GetPluginNameStatic(), "Language runtime for the Swift language",
CreateInstance,
[](CommandInterpreter &interpreter) -> lldb::CommandObjectSP {
return CommandObjectSP(new CommandObjectMultiwordSwift(interpreter));
});
}
void SwiftLanguageRuntime::Terminate() {
PluginManager::UnregisterPlugin(CreateInstance);
}
lldb_private::ConstString SwiftLanguageRuntime::GetPluginNameStatic() {
static ConstString g_name("swift");
return g_name;
}
//------------------------------------------------------------------
// PluginInterface protocol
//------------------------------------------------------------------
lldb_private::ConstString SwiftLanguageRuntime::GetPluginName() {
return GetPluginNameStatic();
}
uint32_t SwiftLanguageRuntime::GetPluginVersion() { return 1; }
| 1 | 17,941 | Is this needed? | apple-swift-lldb | cpp |
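To illustrate the `language swift refcount` subcommand defined above: the command syntax and the output format come directly from CommandObjectSwift_RefCount, while the object name and the printed counts in this transcript are invented for the sketch.

(lldb) language swift refcount patatino
refcount data: (strong = 2, unowned = 1, weak = 1)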
@@ -348,7 +348,7 @@ export default Ember.Controller.extend(BillingCategories, EKMixin,
confirmDeleteValue(value) {
let i18n = this.get('i18n');
let title = i18n.t('admin.lookup.titles.deleteLookupValue');
- let message = i18n.t('admin.lookup.messages.deleteLookupValue', { value });
+ let message = i18n.t('messages.delete_singular', { name: value.concat(' value') });
this.displayConfirm(title, message, 'deleteValue', Ember.Object.create({
valueToDelete: value
})); | 1 | import Ember from 'ember';
import BillingCategories from 'hospitalrun/mixins/billing-categories';
import csvParse from 'npm:csv-parse';
import ModalHelper from 'hospitalrun/mixins/modal-helper';
import InventoryTypeList from 'hospitalrun/mixins/inventory-type-list';
import UnitTypes from 'hospitalrun/mixins/unit-types';
import VisitTypes from 'hospitalrun/mixins/visit-types';
import { EKMixin, keyDown } from 'ember-keyboard';
const {
computed, get, inject
} = Ember;
export default Ember.Controller.extend(BillingCategories, EKMixin,
InventoryTypeList, ModalHelper, UnitTypes, VisitTypes, {
fileSystem: inject.service('filesystem'),
lookupLists: inject.service(),
canEditValues: computed('model.lookupType', function() {
let lookupType = this.get('model.lookupType');
return (lookupType !== 'imaging_pricing_types' && lookupType !== 'lab_pricing_types');
}),
lookupTypes: computed(function() {
return [{
name: this.get('i18n').t('admin.lookup.anesthesiaTypes'),
value: 'anesthesia_types',
model: {
procedure: 'anesthesiaType'
}
}, {
name: this.get('i18n').t('admin.lookup.anesthesiologists'),
value: 'anesthesiologists',
model: {
procedure: 'anesthesiologist'
}
}, {
defaultValues: 'defaultBillingCategories',
name: this.get('i18n').t('admin.lookup.billingCategories'),
value: 'billing_categories',
models: {
'billing-line-item': 'category'
}
}, {
name: this.get('i18n').t('admin.lookup.clinicList'),
value: 'clinic_list',
models: { // Models that use this lookup -- use this later to update models on lookup changes
patient: 'clinic'
}
}, {
name: this.get('i18n').t('admin.lookup.countryList'),
value: 'country_list',
models: {
patient: 'country'
}
}, {
name: this.get('i18n').t('admin.lookup.diagnosisList'),
value: 'diagnosis_list',
models: {
diagnosis: 'diagnosis'
}
}, {
name: this.get('i18n').t('admin.lookup.cptCodeList'),
value: 'cpt_code_list',
models: {
procedure: 'cptCode'
}
}, {
name: this.get('i18n').t('admin.lookup.expenseAccountList'),
value: 'expense_account_list',
models: {
'inv-request': 'expenseAccount',
pricing: 'expenseAccount'
}
}, {
name: this.get('i18n').t('admin.lookup.aisleLocationList'),
value: 'aisle_location_list',
models: {
inventory: 'aisleLocation',
'inv-location': 'aisleLocation',
'inv-purchase': 'aisleLocation',
'inv-request': [
'deliveryAisle',
'locationsAffected' // Special use case that we need to handle
]
}
}, {
name: this.get('i18n').t('admin.lookup.warehouseList'),
value: 'warehouse_list',
models: {
inventory: 'location',
'inv-location': 'location',
'inv-purchase': 'location',
'inv-request': [
'deliveryLocation',
'locationsAffected' // Special use case that we need to handle
]
}
}, {
name: this.get('i18n').t('admin.lookup.incidentDepartments'),
value: 'incident_departments',
models: {
incident: 'department'
}
}, {
defaultValues: 'defaultInventoryTypes',
name: this.get('i18n').t('admin.lookup.inventoryTypes'),
value: 'inventory_types',
models: {
inventory: 'inventoryType'
}
}, {
name: this.get('i18n').t('admin.lookup.imagingPricingTypes'),
value: 'imaging_pricing_types',
models: {
pricing: 'pricingType'
}
}, {
name: this.get('i18n').t('admin.lookup.labPricingTypes'),
value: 'lab_pricing_types',
models: {
pricing: 'pricingType'
}
}, {
name: this.get('i18n').t('admin.lookup.patientStatusList'),
value: 'patient_status_list',
models: {
patient: 'status'
}
}, {
name: this.get('i18n').t('admin.lookup.physicianList'),
value: 'physician_list',
models: {
appointment: 'provider',
visit: 'examiner',
procedure: [
'assistant',
'physician'
]
}
}, {
name: this.get('i18n').t('admin.lookup.procedureList'),
value: 'procedure_list',
models: {
procedure: 'description'
}
}, {
name: this.get('i18n').t('admin.lookup.procedureLocations'),
value: 'procedure_locations',
models: {
procedure: 'location'
}
}, {
name: this.get('i18n').t('admin.lookup.procedurePricingTypes'),
value: 'procedure_pricing_types',
models: {
pricing: 'pricingType'
}
}, {
name: this.get('i18n').t('admin.lookup.radiologists'),
value: 'radiologists',
model: {
imaging: 'radiologist'
}
}, {
name: this.get('i18n').t('labels.sex'),
value: 'sex',
model: {
patient: 'sex'
}
}, {
defaultValues: 'defaultUnitList',
name: this.get('i18n').t('admin.lookup.unitTypes'),
value: 'unit_types',
models: {
inventory: 'distributionUnit',
'inv-purchase': 'distributionUnit'
}
}, {
name: this.get('i18n').t('admin.lookup.vendorList'),
value: 'vendor_list',
models: {
'inv-purchase': 'vendor'
}
}, {
name: this.get('i18n').t('admin.lookup.visitLocationList'),
value: 'visit_location_list',
models: {
appointment: 'location',
visit: 'location'
}
}, {
defaultValues: 'defaultVisitTypes',
name: this.get('i18n').t('admin.lookup.visitTypes'),
value: 'visit_types',
models: {
visit: 'visitType'
}
}, {
name: this.get('i18n').t('admin.lookup.wardPricingTypes'),
value: 'ward_pricing_types',
models: {
pricing: 'pricingType'
}
}];
}),
importFile: computed.alias('lookupTypeList.importFile'),
lookupTitle: computed('model.lookupType', function() {
let lookupType = this.get('model.lookupType');
let lookupTypes = this.get('lookupTypes');
let lookupDesc;
if (!Ember.isEmpty(lookupType)) {
lookupDesc = lookupTypes.findBy('value', lookupType);
if (!Ember.isEmpty(lookupDesc)) {
return lookupDesc.name;
}
}
}),
lookupTypeList: computed('model.lookupType', function() {
let lookupType = this.get('model.lookupType');
let lookupItem;
if (!Ember.isEmpty(lookupType)) {
lookupItem = this.get('model').findBy('id', lookupType);
if (Ember.isEmpty(lookupItem) || !lookupItem.get('isLoaded')) {
let defaultValues = [];
let lookupTypes = this.get('lookupTypes');
let lookupDesc = lookupTypes.findBy('value', lookupType);
let store = this.get('store');
if (!Ember.isEmpty(lookupDesc) && !Ember.isEmpty(lookupDesc.defaultValues)) {
defaultValues = this.get(lookupDesc.defaultValues);
}
lookupItem = store.push(store.normalize('lookup', {
id: lookupType,
value: defaultValues
}));
}
if (!Ember.isEmpty(lookupItem) && Ember.isEmpty(lookupItem.get('userCanAdd'))) {
lookupItem.set('userCanAdd', true);
}
return lookupItem;
}
}),
lookupTypeValues: computed('model.lookupType', 'lookupTypeList.value.[]', function() {
let lookupType = this.get('model.lookupType');
let values = this.get('lookupTypeList.value');
if (!Ember.isEmpty(values)) {
values.sort(this._sortValues);
values = values.map((value) => {
return {
canModify: this._canModifyValue(value, lookupType),
value
};
});
}
return Ember.ArrayProxy.create({ content: Ember.A(values) });
}),
showOrganizeByType: computed('model.lookupType', function() {
let lookupType = this.get('model.lookupType');
return (!Ember.isEmpty(lookupType) && lookupType.indexOf('pricing_types') > 0);
}),
_canModifyValue(value, lookupType) {
switch (lookupType) {
case 'inventory_types': {
if (value === 'Medication') {
return false;
}
break;
}
case 'lab_pricing_types': {
if (value === 'Lab Procedure') {
return false;
}
break;
}
case 'imaging_pricing_types': {
if (value === 'Imaging Procedure') {
return false;
}
break;
}
case 'visit_types': {
if (value === 'Admission') {
return false;
} else if (value === 'Imaging') {
return false;
} else if (value === 'Lab') {
return false;
} else if (value === 'Pharmacy') {
return false;
}
}
}
return true;
},
_importLookupList(file) {
let fileSystem = get(this, 'fileSystem');
let lookupTypeList = get(this, 'lookupTypeList');
let lookupValues = get(lookupTypeList, 'value');
fileSystem.fileToString(file).then((values) => {
csvParse(values, { trim: true }, (err, data) =>{
data.forEach((row) => {
let [newValue] = row;
if (!lookupValues.includes(newValue)) {
lookupValues.addObject(newValue);
}
});
lookupValues.sort();
let i18n = get(this, 'i18n');
let message = i18n.t('admin.lookup.alertImportListSaveMessage');
let title = i18n.t('admin.lookup.alertImportListSaveTitle');
lookupTypeList.save().then(() => {
let lookupLists = get(this, 'lookupLists');
lookupLists.resetLookupList(get(lookupTypeList, 'id'));
this.displayAlert(title, message);
this.set('importFile');
this.set('model.importFileName');
});
});
});
},
_sortValues(a, b) {
return Ember.compare(a.toLowerCase(), b.toLowerCase());
},
activateKeyboard: Ember.on('init', function() {
this.set('keyboardActivated', true);
}),
updateListKeyboard: Ember.on(keyDown('ctrl+KeyS'), keyDown('cmd+KeyS'), function(event) {
this.send('updateList');
event.preventDefault();
}),
actions: {
addValue() {
this.send('openModal', 'admin.lookup.edit', Ember.Object.create({
isNew: true
}));
},
confirmDeleteValue(value) {
let i18n = this.get('i18n');
let title = i18n.t('admin.lookup.titles.deleteLookupValue');
let message = i18n.t('admin.lookup.messages.deleteLookupValue', { value });
this.displayConfirm(title, message, 'deleteValue', Ember.Object.create({
valueToDelete: value
}));
},
deleteValue(value) {
let lookupTypeList = this.get('lookupTypeList');
let lookupTypeValues = lookupTypeList.get('value');
let valueToDelete = value.get('valueToDelete');
lookupTypeValues.removeObject(valueToDelete.toString());
lookupTypeList.save();
},
editValue(value) {
if (!Ember.isEmpty(value)) {
this.send('openModal', 'admin.lookup.edit', Ember.Object.create({
isNew: false,
originalValue: value.toString(),
value: value.toString()
}));
}
},
importList() {
let fileToImport = this.get('importFile');
if (!fileToImport || !fileToImport.type) {
this.displayAlert(
this.get('i18n').t('admin.lookup.alertImportListTitle'),
this.get('i18n').t('admin.lookup.alertImportListMessage')
);
} else {
this._importLookupList(fileToImport);
}
},
updateList() {
let lookupTypeList = this.get('lookupTypeList');
lookupTypeList.save().then(() => {
let lookupLists = get(this, 'lookupLists');
lookupLists.resetLookupList(get(lookupTypeList, 'id'));
this.displayAlert(
this.get('i18n').t('admin.lookup.alertImportListUpdateTitle'),
this.get('i18n').t('admin.lookup.alertImportListUpdateMessage')
);
});
},
updateValue(valueObject) {
let updateList = false;
let lookupTypeList = this.get('lookupTypeList');
let values = lookupTypeList.get('value');
let value = valueObject.get('value');
if (valueObject.get('isNew')) {
updateList = true;
} else {
let originalValue = valueObject.get('originalValue');
if (value !== originalValue) {
values.removeObject(originalValue);
updateList = true;
// TODO UPDATE ALL EXISTING DATA LOOKUPS (NODEJS JOB)
}
}
if (updateList) {
values.addObject(value);
values = values.sort(this._sortValues);
lookupTypeList.set('value', values);
this.send('updateList');
}
}
}
});
| 1 | 13,475 | This code is passing a non localized string when it should be passing in a localized string or it should use the name of the item being deleted. | HospitalRun-hospitalrun-frontend | js |
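On the review note for this entry about the hard-coded ' value' suffix in confirmDeleteValue: a minimal sketch of the simpler option the reviewer mentions, passing only the name of the item being deleted to the existing messages.delete_singular key. Localizing a separate suffix would need its own translation key, which this file does not define, so it is not shown.

    confirmDeleteValue(value) {
      let i18n = this.get('i18n');
      let title = i18n.t('admin.lookup.titles.deleteLookupValue');
      // Avoid concatenating untranslated English onto the value; the deleted
      // item's own name already identifies it in the confirmation message.
      let message = i18n.t('messages.delete_singular', { name: value });
      this.displayConfirm(title, message, 'deleteValue', Ember.Object.create({
        valueToDelete: value
      }));
    },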
@@ -18,6 +18,8 @@ package container
import (
"context"
+ "github.com/chaos-mesh/chaos-mesh/pkg/selector/generic"
+
"go.uber.org/fx"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client" | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package container
import (
"context"
"go.uber.org/fx"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/config"
"github.com/chaos-mesh/chaos-mesh/pkg/selector/pod"
)
type SelectImpl struct {
c client.Client
r client.Reader
pod.Option
}
type Container struct {
v1.Pod
ContainerName string
}
func (c *Container) Id() string {
return c.Pod.Namespace + "/" + c.Pod.Name + "/" + c.ContainerName
}
func (impl *SelectImpl) Select(ctx context.Context, cs *v1alpha1.ContainerSelector) ([]*Container, error) {
pods, err := pod.SelectAndFilterPods(ctx, impl.c, impl.r, &cs.PodSelector, impl.ClusterScoped, impl.TargetNamespace, impl.EnableFilterNamespace)
if err != nil {
return nil, err
}
containerNameMap := make(map[string]struct{})
for _, name := range cs.ContainerNames {
containerNameMap[name] = struct{}{}
}
var result []*Container
for _, pod := range pods {
if len(cs.ContainerNames) == 0 {
result = append(result, &Container{
Pod: pod,
ContainerName: pod.Spec.Containers[0].Name,
})
continue
}
for _, container := range pod.Spec.Containers {
if _, ok := containerNameMap[container.Name]; ok {
result = append(result, &Container{
Pod: pod,
ContainerName: container.Name,
})
}
}
}
return result, nil
}
type Params struct {
fx.In
Client client.Client
Reader client.Reader `name:"no-cache"`
}
func New(params Params) *SelectImpl {
return &SelectImpl{
params.Client,
params.Reader,
pod.Option{
ClusterScoped: config.ControllerCfg.ClusterScoped,
TargetNamespace: config.ControllerCfg.TargetNamespace,
EnableFilterNamespace: config.ControllerCfg.EnableFilterNamespace,
},
}
}
| 1 | 25,398 | how about moving it under L26 | chaos-mesh-chaos-mesh | go |
@@ -0,0 +1,17 @@
+/**
+ * Memoize a function.
+ * @method memoize
+ * @memberof axe.utils
+ * @param {Function} fn Function to memoize
+ * @return {Function}
+ */
+axe._memoizedFns = [];
+axe.utils.memoize = function(fn) {
+ // keep track of each function that is memoized so it can be cleared at
+ // the end of a run. each memoized function has its own cache, so there is
+ // no method to clear all memoized caches. instead, we have to clear each
+ // individual memoized function ourselves.
+ const memoized = axe.imports.memoize(fn);
+ axe._memoizedFns.push(memoized);
+ return memoized;
+}; | 1 | 1 | 15,097 | I think this needs to be tested | dequelabs-axe-core | js |
|
@@ -2,6 +2,9 @@ class Trail < ApplicationRecord
extend FriendlyId
include PgSearch
+
+ DEFAULT_IMAGE_URL = "https://images.thoughtbot.com/upcase/trail-title-cards/default.jpg"
+
multisearchable against: [:name, :description], if: :published?
validates :name, :description, presence: true | 1 | class Trail < ApplicationRecord
extend FriendlyId
include PgSearch
multisearchable against: [:name, :description], if: :published?
validates :name, :description, presence: true
has_many :classifications, as: :classifiable
has_many :repositories, dependent: :destroy
has_many :statuses, as: :completeable, dependent: :destroy
has_many :topics, through: :classifications
has_many :users, through: :statuses
has_many \
:steps,
-> { order "steps.position ASC" },
dependent: :destroy,
inverse_of: :trail
has_many :exercises,
through: :steps,
source: :completeable,
source_type: "Exercise"
has_many :videos, through: :steps, source: :completeable, source_type: "Video"
friendly_id :name, use: [:slugged, :finders]
def self.accessible_without_subscription?
false
end
def self.published
where(published: true)
end
def self.completed_for(user)
TrailWithProgressQuery.new(all, user: user).select(&:complete?)
end
def to_s
name
end
  # Override the setter so the given order of ids is preserved in each step's position
def step_ids=(new_step_ids)
super
new_step_ids = new_step_ids.reject(&:blank?).map(&:to_i)
new_step_ids.each_with_index do |step_id, index|
steps.where(id: step_id).update_all(position: index + 1)
end
end
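  # For example, `trail.step_ids = ["3", "", "1"]` keeps steps 3 and 1 and
  # writes positions 1 and 2 for them respectively.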
def completeables
steps.map(&:completeable)
end
def update_state_for(user)
TrailWithProgress.new(
self,
user: user,
status_finder: StatusFinder.new(user: user),
).update_status
end
def self.most_recent_published
order(created_at: :desc).published
end
def teachers
Teacher.joins(:video).merge(videos).to_a.uniq(&:user_id)
end
def topic_name
topic.name
end
def first_completeable
first_step.completeable
end
def sample_video
videos.where(accessible_without_subscription: true).first.wrapped
end
def time_to_complete
videos.sum(:length_in_minutes) + exercise_time
end
private
def first_step
if steps.loaded?
steps.sort_by(&:position).first
else
steps.first
end
end
def exercise_time
exercises.count * Exercise::AVERAGE_COMPLETION_TIME_IN_MINUTES
end
end
| 1 | 18,682 | Style/MutableConstant: Freeze mutable objects assigned to constants. | thoughtbot-upcase | rb |
@@ -155,16 +155,15 @@ public final class DeflateWithPresetDictCompressionMode extends CompressionMode
private static class DeflateWithPresetDictCompressor extends Compressor {
- final byte[] dictBytes;
- final int blockLength;
+ private final int dictLength, blockLength;
final Deflater compressor;
byte[] compressed;
boolean closed;
DeflateWithPresetDictCompressor(int level, int dictLength, int blockLength) {
- compressor = new Deflater(level, true);
+ compressor = BugfixDeflater_JDK8252739.createDeflaterInstance(level, true, dictLength);
compressed = new byte[64];
- this.dictBytes = new byte[dictLength];
+ this.dictLength = dictLength;
this.blockLength = blockLength;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.codecs.lucene87;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;
import org.apache.lucene.codecs.compressing.CompressionMode;
import org.apache.lucene.codecs.compressing.Compressor;
import org.apache.lucene.codecs.compressing.Decompressor;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
/**
* A compression mode that trades speed for compression ratio. Although
* compression and decompression might be slow, this compression mode should
* provide a good compression ratio. This mode might be interesting if/when
* your index size is much bigger than your OS cache.
* @lucene.internal
*/
public final class DeflateWithPresetDictCompressionMode extends CompressionMode {
private final int dictLength, subBlockLength;
/** Sole constructor. */
public DeflateWithPresetDictCompressionMode(int dictLength, int subBlockLength) {
this.dictLength = dictLength;
this.subBlockLength = subBlockLength;
}
@Override
public Compressor newCompressor() {
// notes:
// 3 is the highest level that doesn't have lazy match evaluation
// 6 is the default, higher than that is just a waste of cpu
return new DeflateWithPresetDictCompressor(6, dictLength, subBlockLength);
}
@Override
public Decompressor newDecompressor() {
return new DeflateWithPresetDictDecompressor();
}
@Override
public String toString() {
return "BEST_COMPRESSION";
}
private static final class DeflateWithPresetDictDecompressor extends Decompressor {
byte[] compressed;
DeflateWithPresetDictDecompressor() {
compressed = new byte[0];
}
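    // Reads one length-prefixed block from `in` and inflates it, appending the
    // uncompressed bytes to `bytes`; callers are expected to have set the
    // preset dictionary on `decompressor` for the non-dictionary blocks.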
private void doDecompress(DataInput in, Inflater decompressor, BytesRef bytes) throws IOException {
final int compressedLength = in.readVInt();
if (compressedLength == 0) {
return;
}
// pad with extra "dummy byte": see javadocs for using Inflater(true)
      // we do it for compliance, but it has been unnecessary in zlib for years.
final int paddedLength = compressedLength + 1;
compressed = ArrayUtil.grow(compressed, paddedLength);
in.readBytes(compressed, 0, compressedLength);
compressed[compressedLength] = 0; // explicitly set dummy byte to 0
// extra "dummy byte"
decompressor.setInput(compressed, 0, paddedLength);
try {
bytes.length += decompressor.inflate(bytes.bytes, bytes.length, bytes.bytes.length - bytes.length);
} catch (DataFormatException e) {
throw new IOException(e);
}
if (decompressor.finished() == false) {
throw new CorruptIndexException("Invalid decoder state: needsInput=" + decompressor.needsInput()
+ ", needsDict=" + decompressor.needsDictionary(), in);
}
}
@Override
public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException {
assert offset + length <= originalLength;
if (length == 0) {
bytes.length = 0;
return;
}
final int dictLength = in.readVInt();
final int blockLength = in.readVInt();
bytes.bytes = ArrayUtil.grow(bytes.bytes, dictLength);
bytes.offset = bytes.length = 0;
final Inflater decompressor = new Inflater(true);
try {
// Read the dictionary
doDecompress(in, decompressor, bytes);
if (dictLength != bytes.length) {
throw new CorruptIndexException("Unexpected dict length", in);
}
int offsetInBlock = dictLength;
int offsetInBytesRef = offset;
// Skip unneeded blocks
while (offsetInBlock + blockLength < offset) {
final int compressedLength = in.readVInt();
in.skipBytes(compressedLength);
offsetInBlock += blockLength;
offsetInBytesRef -= blockLength;
}
// Read blocks that intersect with the interval we need
while (offsetInBlock < offset + length) {
bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength);
decompressor.reset();
decompressor.setDictionary(bytes.bytes, 0, dictLength);
doDecompress(in, decompressor, bytes);
offsetInBlock += blockLength;
}
bytes.offset = offsetInBytesRef;
bytes.length = length;
assert bytes.isValid();
} finally {
decompressor.end();
}
}
@Override
public Decompressor clone() {
return new DeflateWithPresetDictDecompressor();
}
}
private static class DeflateWithPresetDictCompressor extends Compressor {
final byte[] dictBytes;
final int blockLength;
final Deflater compressor;
byte[] compressed;
boolean closed;
DeflateWithPresetDictCompressor(int level, int dictLength, int blockLength) {
compressor = new Deflater(level, true);
compressed = new byte[64];
this.dictBytes = new byte[dictLength];
this.blockLength = blockLength;
}
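    // Deflates bytes[off, off+len) with the already-configured compressor and
    // writes the compressed length (as a VInt) followed by the compressed data.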
private void doCompress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
if (len == 0) {
out.writeVInt(0);
return;
}
compressor.setInput(bytes, off, len);
compressor.finish();
if (compressor.needsInput()) {
throw new IllegalStateException();
}
int totalCount = 0;
for (;;) {
final int count = compressor.deflate(compressed, totalCount, compressed.length - totalCount);
totalCount += count;
assert totalCount <= compressed.length;
if (compressor.finished()) {
break;
} else {
compressed = ArrayUtil.grow(compressed);
}
}
out.writeVInt(totalCount);
out.writeBytes(compressed, totalCount);
}
@Override
public void compress(byte[] bytes, int off, int len, DataOutput out) throws IOException {
final int dictLength = Math.min(dictBytes.length, len);
System.arraycopy(bytes, off, dictBytes, 0, dictLength);
out.writeVInt(dictLength);
out.writeVInt(blockLength);
final int end = off + len;
// Compress the dictionary first
compressor.reset();
doCompress(bytes, off, dictLength, out);
// And then sub blocks
for (int start = off + dictLength; start < end; start += blockLength) {
compressor.reset();
// NOTE: offset MUST be 0 when setting the dictionary in order to work around JDK-8252739
compressor.setDictionary(dictBytes, 0, dictLength);
doCompress(bytes, start, Math.min(blockLength, off + len - start), out);
}
}
@Override
public void close() throws IOException {
if (closed == false) {
compressor.end();
closed = true;
}
}
}
}
| 1 | 36,735 | Just a thought, really. If it's a bug that can be probed for (and it can be - see Adrian's repro) then it could as well be a static initialization of a supplier of Deflater instances; if we probe for a buggy JVM, we return the wrapper. If we don't we return the Deflater. This way on non-affected JVMs nothing happens and if we do use the wrapper, we know the JVM is broken. | apache-lucene-solr | java |
@@ -95,7 +95,7 @@ public class FSTTester<T> {
return br;
}
- static String getRandomString(Random random) {
+ public static String getRandomString(Random random) {
final String term;
if (random.nextBoolean()) {
term = TestUtil.randomRealisticUnicodeString(random); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util.fst;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.UnicodeUtil;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/** Helper class to test FSTs. */
public class FSTTester<T> {
final Random random;
final List<InputOutput<T>> pairs;
final int inputMode;
final Outputs<T> outputs;
final Directory dir;
final boolean doReverseLookup;
long nodeCount;
long arcCount;
public FSTTester(Random random, Directory dir, int inputMode, List<InputOutput<T>> pairs, Outputs<T> outputs, boolean doReverseLookup) {
this.random = random;
this.dir = dir;
this.inputMode = inputMode;
this.pairs = pairs;
this.outputs = outputs;
this.doReverseLookup = doReverseLookup;
}
static String inputToString(int inputMode, IntsRef term) {
return inputToString(inputMode, term, true);
}
static String inputToString(int inputMode, IntsRef term, boolean isValidUnicode) {
if (!isValidUnicode) {
return term.toString();
} else if (inputMode == 0) {
// utf8
return toBytesRef(term).utf8ToString() + " " + term;
} else {
// utf32
return UnicodeUtil.newString(term.ints, term.offset, term.length) + " " + term;
}
}
private static BytesRef toBytesRef(IntsRef ir) {
BytesRef br = new BytesRef(ir.length);
for(int i=0;i<ir.length;i++) {
int x = ir.ints[ir.offset+i];
assert x >= 0 && x <= 255;
br.bytes[i] = (byte) x;
}
br.length = ir.length;
return br;
}
static String getRandomString(Random random) {
final String term;
if (random.nextBoolean()) {
term = TestUtil.randomRealisticUnicodeString(random);
} else {
// we want to mix in limited-alphabet symbols so
// we get more sharing of the nodes given how few
// terms we are testing...
term = simpleRandomString(random);
}
return term;
}
static String simpleRandomString(Random r) {
final int end = r.nextInt(10);
if (end == 0) {
// allow 0 length
return "";
}
final char[] buffer = new char[end];
for (int i = 0; i < end; i++) {
buffer[i] = (char) TestUtil.nextInt(r, 97, 102);
}
return new String(buffer, 0, end);
}
static IntsRef toIntsRef(String s, int inputMode) {
return toIntsRef(s, inputMode, new IntsRefBuilder());
}
static IntsRef toIntsRef(String s, int inputMode, IntsRefBuilder ir) {
if (inputMode == 0) {
// utf8
return toIntsRef(new BytesRef(s), ir);
} else {
// utf32
return toIntsRefUTF32(s, ir);
}
}
static IntsRef toIntsRefUTF32(String s, IntsRefBuilder ir) {
final int charLength = s.length();
int charIdx = 0;
int intIdx = 0;
ir.clear();
while(charIdx < charLength) {
ir.grow(intIdx+1);
final int utf32 = s.codePointAt(charIdx);
ir.append(utf32);
charIdx += Character.charCount(utf32);
intIdx++;
}
return ir.get();
}
static IntsRef toIntsRef(BytesRef br, IntsRefBuilder ir) {
ir.grow(br.length);
ir.clear();
for(int i=0;i<br.length;i++) {
ir.append(br.bytes[br.offset+i]&0xFF);
}
return ir.get();
}
/** Holds one input/output pair. */
public static class InputOutput<T> implements Comparable<InputOutput<T>> {
public final IntsRef input;
public final T output;
public InputOutput(IntsRef input, T output) {
this.input = input;
this.output = output;
}
@Override
public int compareTo(InputOutput<T> other) {
if (other instanceof InputOutput) {
return input.compareTo((other).input);
} else {
throw new IllegalArgumentException();
}
}
}
public void doTest(boolean testPruning) throws IOException {
// no pruning
doTest(0, 0, true);
if (testPruning) {
// simple pruning
doTest(TestUtil.nextInt(random, 1, 1 + pairs.size()), 0, true);
// leafy pruning
doTest(0, TestUtil.nextInt(random, 1, 1 + pairs.size()), true);
}
}
// runs the term, returning the output, or null if term
// isn't accepted. if prefixLength is non-null it must be
// length 1 int array; prefixLength[0] is set to the length
// of the term prefix that matches
private T run(FST<T> fst, IntsRef term, int[] prefixLength) throws IOException {
assert prefixLength == null || prefixLength.length == 1;
final FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
final T NO_OUTPUT = fst.outputs.getNoOutput();
T output = NO_OUTPUT;
final FST.BytesReader fstReader = fst.getBytesReader();
for(int i=0;i<=term.length;i++) {
final int label;
if (i == term.length) {
label = FST.END_LABEL;
} else {
label = term.ints[term.offset+i];
}
// System.out.println(" loop i=" + i + " label=" + label + " output=" + fst.outputs.outputToString(output) + " curArc: target=" + arc.target + " isFinal?=" + arc.isFinal());
if (fst.findTargetArc(label, arc, arc, fstReader) == null) {
// System.out.println(" not found");
if (prefixLength != null) {
prefixLength[0] = i;
return output;
} else {
return null;
}
}
output = fst.outputs.add(output, arc.output());
}
if (prefixLength != null) {
prefixLength[0] = term.length;
}
return output;
}
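  // Walks the FST from the start arc, picking a random outgoing arc at each
  // node until END_LABEL is hit; fills `in` with the accepted input and
  // returns the accumulated output for it.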
private T randomAcceptedWord(FST<T> fst, IntsRefBuilder in) throws IOException {
FST.Arc<T> arc = fst.getFirstArc(new FST.Arc<T>());
final List<FST.Arc<T>> arcs = new ArrayList<>();
in.clear();
final T NO_OUTPUT = fst.outputs.getNoOutput();
T output = NO_OUTPUT;
final FST.BytesReader fstReader = fst.getBytesReader();
while(true) {
// read all arcs:
fst.readFirstTargetArc(arc, arc, fstReader);
arcs.add(new FST.Arc<T>().copyFrom(arc));
while(!arc.isLast()) {
fst.readNextArc(arc, fstReader);
arcs.add(new FST.Arc<T>().copyFrom(arc));
}
// pick one
arc = arcs.get(random.nextInt(arcs.size()));
arcs.clear();
// accumulate output
output = fst.outputs.add(output, arc.output());
// append label
if (arc.label() == FST.END_LABEL) {
break;
}
in.append(arc.label());
}
return output;
}
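  // Builds an FST over `pairs` with the given pruning thresholds, optionally
  // round-trips it through the Directory, and verifies it against the
  // brute-force expectation (unpruned or pruned, depending on the thresholds).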
FST<T> doTest(int prune1, int prune2, boolean allowRandomSuffixSharing) throws IOException {
if (LuceneTestCase.VERBOSE) {
System.out.println("\nTEST: prune1=" + prune1 + " prune2=" + prune2);
}
final FSTCompiler<T> fstCompiler = new FSTCompiler.Builder<>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, outputs)
.minSuffixCount1(prune1)
.minSuffixCount2(prune2)
.shouldShareSuffix(prune1==0 && prune2==0)
.shouldShareNonSingletonNodes(allowRandomSuffixSharing ? random.nextBoolean() : true)
.shareMaxTailLength(allowRandomSuffixSharing ? TestUtil.nextInt(random, 1, 10) : Integer.MAX_VALUE)
.build();
for(InputOutput<T> pair : pairs) {
if (pair.output instanceof List) {
@SuppressWarnings("unchecked") List<Long> longValues = (List<Long>) pair.output;
@SuppressWarnings("unchecked") final FSTCompiler<Object> fstCompilerObject = (FSTCompiler<Object>) fstCompiler;
for(Long value : longValues) {
fstCompilerObject.add(pair.input, value);
}
} else {
fstCompiler.add(pair.input, pair.output);
}
}
FST<T> fst = fstCompiler.compile();
if (random.nextBoolean() && fst != null) {
IOContext context = LuceneTestCase.newIOContext(random);
IndexOutput out = dir.createOutput("fst.bin", context);
fst.save(out, out);
out.close();
IndexInput in = dir.openInput("fst.bin", context);
try {
fst = new FST<T>(in, in, outputs);
} finally {
in.close();
dir.deleteFile("fst.bin");
}
}
if (LuceneTestCase.VERBOSE && pairs.size() <= 20 && fst != null) {
System.out.println("Printing FST as dot file to stdout:");
final Writer w = new OutputStreamWriter(System.out, Charset.defaultCharset());
Util.toDot(fst, w, false, false);
w.flush();
System.out.println("END dot file");
}
if (LuceneTestCase.VERBOSE) {
if (fst == null) {
System.out.println(" fst has 0 nodes (fully pruned)");
} else {
System.out.println(" fst has " + fstCompiler.getNodeCount() + " nodes and " + fstCompiler.getArcCount() + " arcs");
}
}
if (prune1 == 0 && prune2 == 0) {
verifyUnPruned(inputMode, fst);
} else {
verifyPruned(inputMode, fst, prune1, prune2);
}
nodeCount = fstCompiler.getNodeCount();
arcCount = fstCompiler.getArcCount();
return fst;
}
protected boolean outputsEqual(T a, T b) {
return a.equals(b);
}
// FST is complete
@SuppressWarnings("deprecation")
private void verifyUnPruned(int inputMode, FST<T> fst) throws IOException {
final FST<Long> fstLong;
final Set<Long> validOutputs;
long minLong = Long.MAX_VALUE;
long maxLong = Long.MIN_VALUE;
if (doReverseLookup) {
@SuppressWarnings("unchecked") FST<Long> fstLong0 = (FST<Long>) fst;
fstLong = fstLong0;
validOutputs = new HashSet<>();
for(InputOutput<T> pair: pairs) {
Long output = (Long) pair.output;
maxLong = Math.max(maxLong, output);
minLong = Math.min(minLong, output);
validOutputs.add(output);
}
} else {
fstLong = null;
validOutputs = null;
}
if (pairs.size() == 0) {
assertNull(fst);
return;
}
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: now verify " + pairs.size() + " terms");
for(InputOutput<T> pair : pairs) {
assertNotNull(pair);
assertNotNull(pair.input);
assertNotNull(pair.output);
System.out.println(" " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output));
}
}
assertNotNull(fst);
// visit valid pairs in order -- make sure all words
// are accepted, and FSTEnum's next() steps through
// them correctly
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: check valid terms/next()");
}
{
IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<>(fst);
for(InputOutput<T> pair : pairs) {
IntsRef term = pair.input;
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: check term=" + inputToString(inputMode, term) + " output=" + fst.outputs.outputToString(pair.output));
}
T output = run(fst, term, null);
assertNotNull("term " + inputToString(inputMode, term) + " is not accepted", output);
assertTrue(outputsEqual(pair.output, output));
// verify enum's next
IntsRefFSTEnum.InputOutput<T> t = fstEnum.next();
assertNotNull(t);
assertEquals("expected input=" + inputToString(inputMode, term) + " but fstEnum returned " + inputToString(inputMode, t.input), term, t.input);
assertTrue(outputsEqual(pair.output, t.output));
}
assertNull(fstEnum.next());
}
final Map<IntsRef,T> termsMap = new HashMap<>();
for(InputOutput<T> pair : pairs) {
termsMap.put(pair.input, pair.output);
}
if (doReverseLookup && maxLong > minLong) {
// Do random lookups so we test null (output doesn't
// exist) case:
assertNull(Util.getByOutput(fstLong, minLong-7));
assertNull(Util.getByOutput(fstLong, maxLong+7));
final int num = LuceneTestCase.atLeast(random, 100);
for(int iter=0;iter<num;iter++) {
Long v = TestUtil.nextLong(random, minLong, maxLong);
IntsRef input = Util.getByOutput(fstLong, v);
assertTrue(validOutputs.contains(v) || input == null);
}
}
// find random matching word and make sure it's valid
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: verify random accepted terms");
}
final IntsRefBuilder scratch = new IntsRefBuilder();
int num = LuceneTestCase.atLeast(random, 500);
for(int iter=0;iter<num;iter++) {
T output = randomAcceptedWord(fst, scratch);
assertTrue("accepted word " + inputToString(inputMode, scratch.get()) + " is not valid", termsMap.containsKey(scratch.get()));
assertTrue(outputsEqual(termsMap.get(scratch.get()), output));
if (doReverseLookup) {
//System.out.println("lookup output=" + output + " outs=" + fst.outputs);
IntsRef input = Util.getByOutput(fstLong, (Long) output);
assertNotNull(input);
//System.out.println(" got " + Util.toBytesRef(input, new BytesRef()).utf8ToString());
assertEquals(scratch.get(), input);
}
}
// test IntsRefFSTEnum.seek:
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: verify seek");
}
IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<>(fst);
num = LuceneTestCase.atLeast(random, 100);
for(int iter=0;iter<num;iter++) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" iter=" + iter);
}
if (random.nextBoolean()) {
// seek to term that doesn't exist:
while(true) {
final IntsRef term = toIntsRef(getRandomString(random), inputMode);
int pos = Collections.binarySearch(pairs, new InputOutput<T>(term, null));
if (pos < 0) {
pos = -(pos+1);
// ok doesn't exist
//System.out.println(" seek " + inputToString(inputMode, term));
final IntsRefFSTEnum.InputOutput<T> seekResult;
if (random.nextInt(3) == 0) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do non-exist seekExact term=" + inputToString(inputMode, term));
}
seekResult = fstEnum.seekExact(term);
pos = -1;
} else if (random.nextBoolean()) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do non-exist seekFloor term=" + inputToString(inputMode, term));
}
seekResult = fstEnum.seekFloor(term);
pos--;
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do non-exist seekCeil term=" + inputToString(inputMode, term));
}
seekResult = fstEnum.seekCeil(term);
}
if (pos != -1 && pos < pairs.size()) {
//System.out.println(" got " + inputToString(inputMode,seekResult.input) + " output=" + fst.outputs.outputToString(seekResult.output));
assertNotNull("got null but expected term=" + inputToString(inputMode, pairs.get(pos).input), seekResult);
if (LuceneTestCase.VERBOSE) {
System.out.println(" got " + inputToString(inputMode, seekResult.input));
}
assertEquals("expected " + inputToString(inputMode, pairs.get(pos).input) + " but got " + inputToString(inputMode, seekResult.input), pairs.get(pos).input, seekResult.input);
assertTrue(outputsEqual(pairs.get(pos).output, seekResult.output));
} else {
// seeked before start or beyond end
//System.out.println("seek=" + seekTerm);
assertNull("expected null but got " + (seekResult==null ? "null" : inputToString(inputMode, seekResult.input)), seekResult);
if (LuceneTestCase.VERBOSE) {
System.out.println(" got null");
}
}
break;
}
}
} else {
// seek to term that does exist:
InputOutput<T> pair = pairs.get(random.nextInt(pairs.size()));
final IntsRefFSTEnum.InputOutput<T> seekResult;
if (random.nextInt(3) == 2) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do exists seekExact term=" + inputToString(inputMode, pair.input));
}
seekResult = fstEnum.seekExact(pair.input);
} else if (random.nextBoolean()) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do exists seekFloor " + inputToString(inputMode, pair.input));
}
seekResult = fstEnum.seekFloor(pair.input);
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do exists seekCeil " + inputToString(inputMode, pair.input));
}
seekResult = fstEnum.seekCeil(pair.input);
}
assertNotNull(seekResult);
assertEquals("got " + inputToString(inputMode, seekResult.input) + " but expected " + inputToString(inputMode, pair.input), pair.input, seekResult.input);
assertTrue(outputsEqual(pair.output, seekResult.output));
}
}
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: mixed next/seek");
}
// test mixed next/seek
num = LuceneTestCase.atLeast(random, 100);
for(int iter=0;iter<num;iter++) {
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: iter " + iter);
}
// reset:
fstEnum = new IntsRefFSTEnum<>(fst);
int upto = -1;
while(true) {
boolean isDone = false;
if (upto == pairs.size()-1 || random.nextBoolean()) {
// next
upto++;
if (LuceneTestCase.VERBOSE) {
System.out.println(" do next");
}
isDone = fstEnum.next() == null;
} else if (upto != -1 && upto < 0.75 * pairs.size() && random.nextBoolean()) {
int attempt = 0;
for(;attempt<10;attempt++) {
IntsRef term = toIntsRef(getRandomString(random), inputMode);
if (!termsMap.containsKey(term) && term.compareTo(pairs.get(upto).input) > 0) {
int pos = Collections.binarySearch(pairs, new InputOutput<T>(term, null));
assert pos < 0;
upto = -(pos+1);
if (random.nextBoolean()) {
upto--;
assertTrue(upto != -1);
if (LuceneTestCase.VERBOSE) {
System.out.println(" do non-exist seekFloor(" + inputToString(inputMode, term) + ")");
}
isDone = fstEnum.seekFloor(term) == null;
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do non-exist seekCeil(" + inputToString(inputMode, term) + ")");
}
isDone = fstEnum.seekCeil(term) == null;
}
break;
}
}
if (attempt == 10) {
continue;
}
} else {
final int inc = random.nextInt(pairs.size() - upto - 1);
upto += inc;
if (upto == -1) {
upto = 0;
}
if (random.nextBoolean()) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do seekCeil(" + inputToString(inputMode, pairs.get(upto).input) + ")");
}
isDone = fstEnum.seekCeil(pairs.get(upto).input) == null;
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println(" do seekFloor(" + inputToString(inputMode, pairs.get(upto).input) + ")");
}
isDone = fstEnum.seekFloor(pairs.get(upto).input) == null;
}
}
if (LuceneTestCase.VERBOSE) {
if (!isDone) {
System.out.println(" got " + inputToString(inputMode, fstEnum.current().input));
} else {
System.out.println(" got null");
}
}
if (upto == pairs.size()) {
assertTrue(isDone);
break;
} else {
assertFalse(isDone);
assertEquals(pairs.get(upto).input, fstEnum.current().input);
assertTrue(outputsEqual(pairs.get(upto).output, fstEnum.current().output));
/*
if (upto < pairs.size()-1) {
int tryCount = 0;
while(tryCount < 10) {
final IntsRef t = toIntsRef(getRandomString(), inputMode);
if (pairs.get(upto).input.compareTo(t) < 0) {
final boolean expected = t.compareTo(pairs.get(upto+1).input) < 0;
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: call beforeNext(" + inputToString(inputMode, t) + "); current=" + inputToString(inputMode, pairs.get(upto).input) + " next=" + inputToString(inputMode, pairs.get(upto+1).input) + " expected=" + expected);
}
assertEquals(expected, fstEnum.beforeNext(t));
break;
}
tryCount++;
}
}
*/
}
}
}
}
private static class CountMinOutput<T> {
int count;
T output;
T finalOutput;
boolean isLeaf = true;
boolean isFinal;
}
// FST is pruned
private void verifyPruned(int inputMode, FST<T> fst, int prune1, int prune2) throws IOException {
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: now verify pruned " + pairs.size() + " terms; outputs=" + outputs);
for(InputOutput<T> pair : pairs) {
System.out.println(" " + inputToString(inputMode, pair.input) + ": " + outputs.outputToString(pair.output));
}
}
// To validate the FST, we brute-force compute all prefixes
// in the terms, matched to their "common" outputs, prune that
// set according to the prune thresholds, then assert the FST
// matches that same set.
// NOTE: Crazy RAM intensive!!
//System.out.println("TEST: tally prefixes");
// build all prefixes
final Map<IntsRef,CountMinOutput<T>> prefixes = new HashMap<>();
final IntsRefBuilder scratch = new IntsRefBuilder();
for(InputOutput<T> pair: pairs) {
scratch.copyInts(pair.input);
for(int idx=0;idx<=pair.input.length;idx++) {
scratch.setLength(idx);
CountMinOutput<T> cmo = prefixes.get(scratch.get());
if (cmo == null) {
cmo = new CountMinOutput<>();
cmo.count = 1;
cmo.output = pair.output;
prefixes.put(scratch.toIntsRef(), cmo);
} else {
cmo.count++;
T output1 = cmo.output;
if (output1.equals(outputs.getNoOutput())) {
output1 = outputs.getNoOutput();
}
T output2 = pair.output;
if (output2.equals(outputs.getNoOutput())) {
output2 = outputs.getNoOutput();
}
cmo.output = outputs.common(output1, output2);
}
if (idx == pair.input.length) {
cmo.isFinal = true;
cmo.finalOutput = cmo.output;
}
}
}
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: now prune");
}
// prune 'em
final Iterator<Map.Entry<IntsRef,CountMinOutput<T>>> it = prefixes.entrySet().iterator();
while(it.hasNext()) {
Map.Entry<IntsRef,CountMinOutput<T>> ent = it.next();
final IntsRef prefix = ent.getKey();
final CountMinOutput<T> cmo = ent.getValue();
if (LuceneTestCase.VERBOSE) {
System.out.println(" term prefix=" + inputToString(inputMode, prefix, false) + " count=" + cmo.count + " isLeaf=" + cmo.isLeaf + " output=" + outputs.outputToString(cmo.output) + " isFinal=" + cmo.isFinal);
}
final boolean keep;
if (prune1 > 0) {
keep = cmo.count >= prune1;
} else {
assert prune2 > 0;
if (prune2 > 1 && cmo.count >= prune2) {
keep = true;
} else if (prefix.length > 0) {
// consult our parent
scratch.setLength(prefix.length-1);
System.arraycopy(prefix.ints, prefix.offset, scratch.ints(), 0, scratch.length());
final CountMinOutput<T> cmo2 = prefixes.get(scratch.get());
//System.out.println(" parent count = " + (cmo2 == null ? -1 : cmo2.count));
keep = cmo2 != null && ((prune2 > 1 && cmo2.count >= prune2) || (prune2 == 1 && (cmo2.count >= 2 || prefix.length <= 1)));
} else if (cmo.count >= prune2) {
keep = true;
} else {
keep = false;
}
}
if (!keep) {
it.remove();
//System.out.println(" remove");
} else {
// clear isLeaf for all ancestors
//System.out.println(" keep");
scratch.copyInts(prefix);
scratch.setLength(scratch.length() - 1);
while(scratch.length() >= 0) {
final CountMinOutput<T> cmo2 = prefixes.get(scratch.get());
if (cmo2 != null) {
//System.out.println(" clear isLeaf " + inputToString(inputMode, scratch));
cmo2.isLeaf = false;
}
scratch.setLength(scratch.length() - 1);
}
}
}
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: after prune");
for(Map.Entry<IntsRef,CountMinOutput<T>> ent : prefixes.entrySet()) {
System.out.println(" " + inputToString(inputMode, ent.getKey(), false) + ": isLeaf=" + ent.getValue().isLeaf + " isFinal=" + ent.getValue().isFinal);
if (ent.getValue().isFinal) {
System.out.println(" finalOutput=" + outputs.outputToString(ent.getValue().finalOutput));
}
}
}
if (prefixes.size() <= 1) {
assertNull(fst);
return;
}
assertNotNull(fst);
// make sure FST only enums valid prefixes
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: check pruned enum");
}
IntsRefFSTEnum<T> fstEnum = new IntsRefFSTEnum<>(fst);
IntsRefFSTEnum.InputOutput<T> current;
while((current = fstEnum.next()) != null) {
if (LuceneTestCase.VERBOSE) {
System.out.println(" fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output));
}
final CountMinOutput<T> cmo = prefixes.get(current.input);
assertNotNull(cmo);
assertTrue(cmo.isLeaf || cmo.isFinal);
//if (cmo.isFinal && !cmo.isLeaf) {
if (cmo.isFinal) {
assertEquals(cmo.finalOutput, current.output);
} else {
assertEquals(cmo.output, current.output);
}
}
// make sure all non-pruned prefixes are present in the FST
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: verify all prefixes");
}
final int[] stopNode = new int[1];
for(Map.Entry<IntsRef,CountMinOutput<T>> ent : prefixes.entrySet()) {
if (ent.getKey().length > 0) {
final CountMinOutput<T> cmo = ent.getValue();
final T output = run(fst, ent.getKey(), stopNode);
if (LuceneTestCase.VERBOSE) {
System.out.println("TEST: verify prefix=" + inputToString(inputMode, ent.getKey(), false) + " output=" + outputs.outputToString(cmo.output));
}
// if (cmo.isFinal && !cmo.isLeaf) {
if (cmo.isFinal) {
assertEquals(cmo.finalOutput, output);
} else {
assertEquals(cmo.output, output);
}
assertEquals(ent.getKey().length, stopNode[0]);
}
}
}
}
| 1 | 38,333 | Looks like this should be publicly accessible for tests in any modules? | apache-lucene-solr | java |
@@ -19,7 +19,7 @@ if (is_array($env = @include dirname(__DIR__).'/.env.local.php')) {
throw new RuntimeException('Please run "composer require symfony/dotenv" to load the ".env" files configuring the application.');
} else {
$path = dirname(__DIR__).'/.env';
- $dotenv = new Dotenv(false);
+ $dotenv = new Dotenv(true);
// load all the .env files
if (method_exists($dotenv, 'loadEnv')) { | 1 | <?php
use Symfony\Component\Dotenv\Dotenv;
setlocale(LC_CTYPE, 'en_US.utf8');
setlocale(LC_NUMERIC, 'en_US.utf8');
// Configure PHP
error_reporting(E_ALL);
ini_set('display_errors', '0');
// Load cached env vars if the .env.local.php file exists
// Run "composer dump-env prod" to create it (requires symfony/flex >=1.2)
if (is_array($env = @include dirname(__DIR__).'/.env.local.php')) {
foreach ($env as $k => $v) {
$_ENV[$k] = $_ENV[$k] ?? (isset($_SERVER[$k]) && 0 !== strpos($k, 'HTTP_') ? $_SERVER[$k] : $v);
}
} elseif (!class_exists(Dotenv::class)) {
throw new RuntimeException('Please run "composer require symfony/dotenv" to load the ".env" files configuring the application.');
} else {
$path = dirname(__DIR__).'/.env';
$dotenv = new Dotenv(false);
// load all the .env files
if (method_exists($dotenv, 'loadEnv')) {
$dotenv->loadEnv($path);
} else {
// fallback code in case your Dotenv component is not 4.2 or higher (when loadEnv() was added)
if (file_exists($path) || !file_exists($p = "$path.dist")) {
$dotenv->load($path);
} else {
$dotenv->load($p);
}
if (null === $env = $_SERVER['APP_ENV'] ?? $_ENV['APP_ENV'] ?? null) {
$dotenv->populate(array('APP_ENV' => $env = 'dev'));
}
if ('test' !== $env && file_exists($p = "$path.local")) {
$dotenv->load($p);
$env = $_SERVER['APP_ENV'] ?? $_ENV['APP_ENV'] ?? $env;
}
if (file_exists($p = "$path.$env")) {
$dotenv->load($p);
}
if (file_exists($p = "$path.$env.local")) {
$dotenv->load($p);
}
}
}
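// Below, the loaded variables are mirrored into $_SERVER and APP_ENV/APP_DEBUG
// are normalized so the rest of the application sees consistent string values.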
$_SERVER += $_ENV;
$_SERVER['APP_ENV'] = $_ENV['APP_ENV'] = ($_SERVER['APP_ENV'] ?? $_ENV['APP_ENV'] ?? null) ?: 'dev';
$_SERVER['APP_DEBUG'] = $_SERVER['APP_DEBUG'] ?? $_ENV['APP_DEBUG'] ?? 'prod' !== $_SERVER['APP_ENV'];
$_SERVER['APP_DEBUG'] = $_ENV['APP_DEBUG'] = (int) $_SERVER['APP_DEBUG'] || filter_var($_SERVER['APP_DEBUG'], FILTER_VALIDATE_BOOLEAN) ? '1' : '0';
define('ELFINDER_IMAGEMAGICK_PS', false);
| 1 | 23,205 | Allow putenv is necessary to be able to get environment variables with `getenv`? | shopsys-shopsys | php |
@@ -0,0 +1,17 @@
+#include <iostream>
+#include <conio.h>
+using namespace std;
+
+int main()
+{
+ int year;
+ cout<<"Enter year to check\n";
+ cin>>year;
+ if((year%4==0 && year%100!=0) || year%400==0){
+ cout<<"leap year.\n";
+ }
+ else{
+ cout<<" Not leap year.\n";
+ }
+ return 0;
+} | 1 | 1 | 5,031 | use function to check it. - don't do everything in main | shoaibrayeen-Programmers-Community | c |
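// Illustration only (not part of the patch above): following the reviewer's
// suggestion, the check could live in a small helper instead of main(); the
// name `isLeapYear` is made up for this sketch.
//
//   bool isLeapYear(int year)
//   {
//       return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
//   }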
|
@@ -50,7 +50,7 @@ class BaseRulesEngine(object):
self.full_rules_path = rules_file_path.strip()
self.snapshot_timestamp = snapshot_timestamp
- def build_rule_book(self):
+ def build_rule_book(self, global_configs):
"""Build RuleBook from the rules definition file."""
raise NotImplementedError('Implement in a child class.')
| 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for policy scanner rules engines.
Loads YAML rules either from local file system or Cloud Storage bucket.
"""
import abc
from google.cloud.security.common.util import file_loader
from google.cloud.security.common.util import log_util
from google.cloud.security.scanner.audit import errors as audit_errors
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc
# pylint: disable=missing-param-doc,missing-raises-doc
LOGGER = log_util.get_logger(__name__)
class BaseRulesEngine(object):
"""The base class for the rules engine."""
def __init__(self,
rules_file_path=None,
snapshot_timestamp=None):
"""Initialize.
Args:
rules_file_path: The path of the rules file, either local or GCS.
snapshot_timestamp: The snapshot to associate any data lookups.
"""
if not rules_file_path:
raise audit_errors.InvalidRuleDefinitionError(
'File path: {}'.format(rules_file_path))
self.full_rules_path = rules_file_path.strip()
self.snapshot_timestamp = snapshot_timestamp
def build_rule_book(self):
"""Build RuleBook from the rules definition file."""
raise NotImplementedError('Implement in a child class.')
def find_policy_violations(self, resource, policy, force_rebuild=False):
"""Determine whether IAM policy violates rules."""
raise NotImplementedError('Implement in a child class.')
def _load_rule_definitions(self):
"""Load the rule definitions file from GCS or local filesystem.
Returns:
The parsed dict from the rule definitions file.
"""
return file_loader.read_and_parse_file(self.full_rules_path)
class BaseRuleBook(object):
"""Base class for RuleBooks.
The RuleBook class encapsulates the logic for how the RulesEngine will
lookup rules and find policy discrepancies. The actual structure of
the RuleBook depends on how rules should be applied. For example,
Organization resource rules would be applied in a hierarchical manner.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add_rule(self, rule_def, rule_index):
"""Add rule to rule book."""
raise NotImplementedError('Implement add_rule() in subclass')
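# A rough sketch of how a concrete engine/book pair builds on these base
# classes (class and method names below are illustrative, not actual Forseti
# scanners):
#
#   class FooRulesEngine(BaseRulesEngine):
#       def build_rule_book(self):
#           self.rule_book = FooRuleBook(self._load_rule_definitions())
#
#       def find_policy_violations(self, resource, policy, force_rebuild=False):
#           if force_rebuild:
#               self.build_rule_book()
#           return self.rule_book.find_violations(resource, policy)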
| 1 | 26,649 | Sorry I'm confused. Why not kwarg this like the others? | forseti-security-forseti-security | py |
@@ -7171,15 +7171,15 @@ const instr_info_t vex_W_extensions[][2] = {
* registers are identical. We don't bother trying to detect that.
*/
{OP_vpgatherdd,0x66389018,"vpgatherdd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
- {OP_vpgatherdq,0x66389058,"vpgatherdq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
+ {OP_vpgatherdq,0x66389058,"vpgatherdq",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 67 */
- {OP_vpgatherqd,0x66389118,"vpgatherqd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
+ {OP_vpgatherqd,0x66389118,"vpgatherqd",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpgatherqq,0x66389158,"vpgatherqq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 68 */
{OP_vgatherdps,0x66389218,"vgatherdps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
- {OP_vgatherdpd,0x66389258,"vgatherdpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
+ {OP_vgatherdpd,0x66389258,"vgatherdpd",Vvd,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 69 */
- {OP_vgatherqps,0x66389318,"vgatherqps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
+ {OP_vgatherqps,0x66389318,"vgatherqps",Vvs,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vgatherqpd,0x66389358,"vgatherqpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 70 */
{OP_vpmaskmovd,0x66388c18,"vpmaskmovd",Vx,xx,Hx,Mx,xx, mrm|vex|reqp|predcx,x,tvexw[71][0]}, | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2001-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/* decode_table.c -- tables for decoding x86 instructions
*/
#include "../globals.h" /* need this to include decode.h (uint, etc.) */
#include "arch.h" /* need this to include decode.h (byte, etc. */
#include "instr.h" /* for REG_ constants */
#include "decode.h"
#include "decode_private.h"
/****************************************************************************
* All code below based on tables in the ``Intel Architecture Software
* Developer's Manual,'' Volume 2: Instruction Set Reference, 2001.
* Updated with information from later Intel manuals and AMD manuals.
*
* I added many new types not present in the Intel tables: see decode.h
*
* I don't list %eflags as a source or dest operand, but the particular
* flags written are encoded.
*
* XXX: some day it may be worth adding flags indicating which instrs
* are valid on which models of which processors (probably best to just add
* which cpuid flag must be set for the instr to be supported): for
* now though we do not rely on being able to predict which instrs are
* invalid.
*/
// We skip auto-formatting for the entire file to keep our aligned op_instr
// entries and our single-line table entries:
/* clang-format off */
/****************************************************************************
* Operand pointers into tables
* When there are multiple encodings of an opcode, this points to the first
* entry in a linked list.
* This array corresponds with the enum in opcode.h
* IF YOU CHANGE ONE YOU MUST CHANGE THE OTHER
*/
const instr_info_t * const op_instr[] =
{
/* OP_INVALID */ NULL,
/* OP_UNDECODED */ NULL,
/* OP_CONTD */ NULL,
/* OP_LABEL */ NULL,
/* OP_add */ &first_byte[0x05],
/* OP_or */ &first_byte[0x0d],
/* OP_adc */ &first_byte[0x15],
/* OP_sbb */ &first_byte[0x1d],
/* OP_and */ &first_byte[0x25],
/* OP_daa */ &first_byte[0x27],
/* OP_sub */ &first_byte[0x2d],
/* OP_das */ &first_byte[0x2f],
/* OP_xor */ &first_byte[0x35],
/* OP_aaa */ &first_byte[0x37],
/* OP_cmp */ &first_byte[0x3d],
/* OP_aas */ &first_byte[0x3f],
/* OP_inc */ &x64_extensions[0][0],
/* OP_dec */ &x64_extensions[8][0],
/* OP_push */ &first_byte[0x50],
/* OP_push_imm*/ &first_byte[0x68],
/* OP_pop */ &first_byte[0x58],
/* OP_pusha */ &first_byte[0x60],
/* OP_popa */ &first_byte[0x61],
/* OP_bound */ &evex_prefix_extensions[0][0],
/* OP_arpl */ &x64_extensions[16][0],
/* OP_imul */ &base_extensions[10][5],
/* OP_jo_short */ &first_byte[0x70],
/* OP_jno_short */ &first_byte[0x71],
/* OP_jb_short */ &first_byte[0x72],
/* OP_jnb_short */ &first_byte[0x73],
/* OP_jz_short */ &first_byte[0x74],
/* OP_jnz_short */ &first_byte[0x75],
/* OP_jbe_short */ &first_byte[0x76],
/* OP_jnbe_short */ &first_byte[0x77],
/* OP_js_short */ &first_byte[0x78],
/* OP_jns_short */ &first_byte[0x79],
/* OP_jp_short */ &first_byte[0x7a],
/* OP_jnp_short */ &first_byte[0x7b],
/* OP_jl_short */ &first_byte[0x7c],
/* OP_jnl_short */ &first_byte[0x7d],
/* OP_jle_short */ &first_byte[0x7e],
/* OP_jnle_short */ &first_byte[0x7f],
/* OP_call */ &first_byte[0xe8],
/* OP_call_ind */ &base_extensions[12][2],
/* OP_call_far */ &first_byte[0x9a],
/* OP_call_far_ind */ &base_extensions[12][3],
/* OP_jmp */ &first_byte[0xe9],
/* OP_jmp_short */ &first_byte[0xeb],
/* OP_jmp_ind */ &base_extensions[12][4],
/* OP_jmp_far */ &first_byte[0xea],
/* OP_jmp_far_ind */ &base_extensions[12][5],
/* OP_loopne */ &first_byte[0xe0],
/* OP_loope */ &first_byte[0xe1],
/* OP_loop */ &first_byte[0xe2],
/* OP_jecxz */ &first_byte[0xe3],
/* point ld & st at eAX & al instrs, they save 1 byte (no modrm),
* hopefully time taken considering them doesn't offset that */
/* OP_mov_ld */ &first_byte[0xa1],
/* OP_mov_st */ &first_byte[0xa3],
/* PR 250397: store of immed is mov_st not mov_imm, even though can be immed->reg,
* which we address by sharing part of the mov_st template chain */
/* OP_mov_imm */ &first_byte[0xb8],
/* OP_mov_seg */ &first_byte[0x8e],
/* OP_mov_priv */ &second_byte[0x20],
/* OP_test */ &first_byte[0xa9],
/* OP_lea */ &first_byte[0x8d],
/* OP_xchg */ &first_byte[0x91],
/* OP_cwde */ &first_byte[0x98],
/* OP_cdq */ &first_byte[0x99],
/* OP_fwait */ &first_byte[0x9b],
/* OP_pushf */ &first_byte[0x9c],
/* OP_popf */ &first_byte[0x9d],
/* OP_sahf */ &first_byte[0x9e],
/* OP_lahf */ &first_byte[0x9f],
/* OP_ret */ &first_byte[0xc2],
/* OP_ret_far */ &first_byte[0xca],
/* OP_les */ &vex_prefix_extensions[0][0],
/* OP_lds */ &vex_prefix_extensions[1][0],
/* OP_enter */ &first_byte[0xc8],
/* OP_leave */ &first_byte[0xc9],
/* OP_int3 */ &first_byte[0xcc],
/* OP_int */ &first_byte[0xcd],
/* OP_into */ &first_byte[0xce],
/* OP_iret */ &first_byte[0xcf],
/* OP_aam */ &first_byte[0xd4],
/* OP_aad */ &first_byte[0xd5],
/* OP_xlat */ &first_byte[0xd7],
/* OP_in */ &first_byte[0xe5],
/* OP_out */ &first_byte[0xe7],
/* OP_hlt */ &first_byte[0xf4],
/* OP_cmc */ &first_byte[0xf5],
/* OP_clc */ &first_byte[0xf8],
/* OP_stc */ &first_byte[0xf9],
/* OP_cli */ &first_byte[0xfa],
/* OP_sti */ &first_byte[0xfb],
/* OP_cld */ &first_byte[0xfc],
/* OP_std */ &first_byte[0xfd],
/* OP_lar */ &second_byte[0x02],
/* OP_lsl */ &second_byte[0x03],
/* OP_syscall */ &second_byte[0x05],
/* OP_clts */ &second_byte[0x06],
/* OP_sysret */ &second_byte[0x07],
/* OP_invd */ &second_byte[0x08],
/* OP_wbinvd */ &second_byte[0x09],
/* OP_ud2a */ &second_byte[0x0b],
/* OP_nop_modrm */ &second_byte[0x1f],
/* OP_movntps */ &prefix_extensions[11][0],
/* OP_movntpd */ &prefix_extensions[11][2],
/* OP_wrmsr */ &second_byte[0x30],
/* OP_rdtsc */ &second_byte[0x31],
/* OP_rdmsr */ &second_byte[0x32],
/* OP_rdpmc */ &second_byte[0x33],
/* OP_sysenter */ &second_byte[0x34],
/* OP_sysexit */ &second_byte[0x35],
/* OP_cmovo */ &second_byte[0x40],
/* OP_cmovno */ &e_vex_extensions[83][0],
/* OP_cmovb */ &e_vex_extensions[84][0],
/* OP_cmovnb */ &second_byte[0x43],
/* OP_cmovz */ &e_vex_extensions[86][0],
/* OP_cmovnz */ &e_vex_extensions[87][0],
/* OP_cmovbe */ &e_vex_extensions[88][0],
/* OP_cmovnbe */ &e_vex_extensions[89][0],
/* OP_cmovs */ &second_byte[0x48],
/* OP_cmovns */ &second_byte[0x49],
/* OP_cmovp */ &e_vex_extensions[90][0],
/* OP_cmovnp */ &e_vex_extensions[85][0],
/* OP_cmovl */ &second_byte[0x4c],
/* OP_cmovnl */ &second_byte[0x4d],
/* OP_cmovle */ &second_byte[0x4e],
/* OP_cmovnle */ &second_byte[0x4f],
/* OP_punpcklbw */ &prefix_extensions[32][0],
/* OP_punpcklwd */ &prefix_extensions[33][0],
/* OP_punpckldq */ &prefix_extensions[34][0],
/* OP_packsswb */ &prefix_extensions[35][0],
/* OP_pcmpgtb */ &prefix_extensions[36][0],
/* OP_pcmpgtw */ &prefix_extensions[37][0],
/* OP_pcmpgtd */ &prefix_extensions[38][0],
/* OP_packuswb */ &prefix_extensions[39][0],
/* OP_punpckhbw */ &prefix_extensions[40][0],
/* OP_punpckhwd */ &prefix_extensions[41][0],
/* OP_punpckhdq */ &prefix_extensions[42][0],
/* OP_packssdw */ &prefix_extensions[43][0],
/* OP_punpcklqdq */ &prefix_extensions[44][2],
/* OP_punpckhqdq */ &prefix_extensions[45][2],
/* OP_movd */ &prefix_extensions[46][0],
/* OP_movq */ &prefix_extensions[112][0],
/* OP_movdqu */ &prefix_extensions[112][1],
/* OP_movdqa */ &prefix_extensions[112][2],
/* OP_pshufw */ &prefix_extensions[47][0],
/* OP_pshufd */ &prefix_extensions[47][2],
/* OP_pshufhw */ &prefix_extensions[47][1],
/* OP_pshuflw */ &prefix_extensions[47][3],
/* OP_pcmpeqb */ &prefix_extensions[48][0],
/* OP_pcmpeqw */ &prefix_extensions[49][0],
/* OP_pcmpeqd */ &prefix_extensions[50][0],
/* OP_emms */ &vex_L_extensions[0][0],
/* OP_jo */ &second_byte[0x80],
/* OP_jno */ &second_byte[0x81],
/* OP_jb */ &second_byte[0x82],
/* OP_jnb */ &second_byte[0x83],
/* OP_jz */ &second_byte[0x84],
/* OP_jnz */ &second_byte[0x85],
/* OP_jbe */ &second_byte[0x86],
/* OP_jnbe */ &second_byte[0x87],
/* OP_js */ &second_byte[0x88],
/* OP_jns */ &second_byte[0x89],
/* OP_jp */ &second_byte[0x8a],
/* OP_jnp */ &second_byte[0x8b],
/* OP_jl */ &second_byte[0x8c],
/* OP_jnl */ &second_byte[0x8d],
/* OP_jle */ &second_byte[0x8e],
/* OP_jnle */ &second_byte[0x8f],
/* OP_seto */ &e_vex_extensions[79][0],
/* OP_setno */ &e_vex_extensions[80][0],
/* OP_setb */ &e_vex_extensions[81][0],
/* OP_setnb */ &e_vex_extensions[82][0],
/* OP_setz */ &second_byte[0x94],
/* OP_setnz */ &second_byte[0x95],
/* OP_setbe */ &second_byte[0x96],
/* OP_setnbe */ &second_byte[0x97],
/* OP_sets */ &e_vex_extensions[91][0],
/* OP_setns */ &e_vex_extensions[92][0],
/* OP_setp */ &second_byte[0x9a],
/* OP_setnp */ &second_byte[0x9b],
/* OP_setl */ &second_byte[0x9c],
/* OP_setnl */ &second_byte[0x9d],
/* OP_setle */ &second_byte[0x9e],
/* OP_setnle */ &second_byte[0x9f],
/* OP_cpuid */ &second_byte[0xa2],
/* OP_bt */ &second_byte[0xa3],
/* OP_shld */ &second_byte[0xa4],
/* OP_rsm */ &second_byte[0xaa],
/* OP_bts */ &second_byte[0xab],
/* OP_shrd */ &second_byte[0xac],
/* OP_cmpxchg */ &second_byte[0xb1],
/* OP_lss */ &second_byte[0xb2],
/* OP_btr */ &second_byte[0xb3],
/* OP_lfs */ &second_byte[0xb4],
/* OP_lgs */ &second_byte[0xb5],
/* OP_movzx */ &second_byte[0xb7],
/* OP_ud2b */ &second_byte[0xb9],
/* OP_btc */ &second_byte[0xbb],
/* OP_bsf */ &prefix_extensions[140][0],
/* OP_bsr */ &prefix_extensions[136][0],
/* OP_movsx */ &second_byte[0xbf],
/* OP_xadd */ &second_byte[0xc1],
/* OP_movnti */ &second_byte[0xc3],
/* OP_pinsrw */ &prefix_extensions[53][0],
/* OP_pextrw */ &prefix_extensions[54][0],
/* OP_bswap */ &second_byte[0xc8],
/* OP_psrlw */ &prefix_extensions[56][0],
/* OP_psrld */ &prefix_extensions[57][0],
/* OP_psrlq */ &prefix_extensions[58][0],
/* OP_paddq */ &prefix_extensions[59][0],
/* OP_pmullw */ &prefix_extensions[60][0],
/* OP_pmovmskb */ &prefix_extensions[62][0],
/* OP_psubusb */ &prefix_extensions[63][0],
/* OP_psubusw */ &prefix_extensions[64][0],
/* OP_pminub */ &prefix_extensions[65][0],
/* OP_pand */ &prefix_extensions[66][0],
/* OP_paddusb */ &prefix_extensions[67][0],
/* OP_paddusw */ &prefix_extensions[68][0],
/* OP_pmaxub */ &prefix_extensions[69][0],
/* OP_pandn */ &prefix_extensions[70][0],
/* OP_pavgb */ &prefix_extensions[71][0],
/* OP_psraw */ &prefix_extensions[72][0],
/* OP_psrad */ &prefix_extensions[73][0],
/* OP_pavgw */ &prefix_extensions[74][0],
/* OP_pmulhuw */ &prefix_extensions[75][0],
/* OP_pmulhw */ &prefix_extensions[76][0],
/* OP_movntq */ &prefix_extensions[78][0],
/* OP_movntdq */ &prefix_extensions[78][2],
/* OP_psubsb */ &prefix_extensions[79][0],
/* OP_psubsw */ &prefix_extensions[80][0],
/* OP_pminsw */ &prefix_extensions[81][0],
/* OP_por */ &prefix_extensions[82][0],
/* OP_paddsb */ &prefix_extensions[83][0],
/* OP_paddsw */ &prefix_extensions[84][0],
/* OP_pmaxsw */ &prefix_extensions[85][0],
/* OP_pxor */ &prefix_extensions[86][0],
/* OP_psllw */ &prefix_extensions[87][0],
/* OP_pslld */ &prefix_extensions[88][0],
/* OP_psllq */ &prefix_extensions[89][0],
/* OP_pmuludq */ &prefix_extensions[90][0],
/* OP_pmaddwd */ &prefix_extensions[91][0],
/* OP_psadbw */ &prefix_extensions[92][0],
/* OP_maskmovq */ &prefix_extensions[93][0],
/* OP_maskmovdqu */ &prefix_extensions[93][2],
/* OP_psubb */ &prefix_extensions[94][0],
/* OP_psubw */ &prefix_extensions[95][0],
/* OP_psubd */ &prefix_extensions[96][0],
/* OP_psubq */ &prefix_extensions[97][0],
/* OP_paddb */ &prefix_extensions[98][0],
/* OP_paddw */ &prefix_extensions[99][0],
/* OP_paddd */ &prefix_extensions[100][0],
/* OP_psrldq */ &prefix_extensions[101][2],
/* OP_pslldq */ &prefix_extensions[102][2],
/* OP_rol */ &base_extensions[ 4][0],
/* OP_ror */ &base_extensions[ 4][1],
/* OP_rcl */ &base_extensions[ 4][2],
/* OP_rcr */ &base_extensions[ 4][3],
/* OP_shl */ &base_extensions[ 4][4],
/* OP_shr */ &base_extensions[ 4][5],
/* OP_sar */ &base_extensions[ 4][7],
/* OP_not */ &base_extensions[10][2],
/* OP_neg */ &base_extensions[10][3],
/* OP_mul */ &base_extensions[10][4],
/* OP_div */ &base_extensions[10][6],
/* OP_idiv */ &base_extensions[10][7],
/* OP_sldt */ &base_extensions[13][0],
/* OP_str */ &base_extensions[13][1],
/* OP_lldt */ &base_extensions[13][2],
/* OP_ltr */ &base_extensions[13][3],
/* OP_verr */ &base_extensions[13][4],
/* OP_verw */ &base_extensions[13][5],
/* OP_sgdt */ &mod_extensions[0][0],
/* OP_sidt */ &mod_extensions[1][0],
/* OP_lgdt */ &mod_extensions[5][0],
/* OP_lidt */ &mod_extensions[4][0],
/* OP_smsw */ &base_extensions[14][4],
/* OP_lmsw */ &base_extensions[14][6],
/* OP_invlpg */ &mod_extensions[2][0],
/* OP_cmpxchg8b */ &base_extensions[16][1],
/* OP_fxsave32 */ &rex_w_extensions[0][0],
/* OP_fxrstor32 */ &rex_w_extensions[1][0],
/* OP_ldmxcsr */ &e_vex_extensions[61][0],
/* OP_stmxcsr */ &e_vex_extensions[62][0],
/* OP_lfence */ &mod_extensions[6][1],
/* OP_mfence */ &mod_extensions[7][1],
/* OP_clflush */ &mod_extensions[3][0],
/* OP_sfence */ &mod_extensions[3][1],
/* OP_prefetchnta */ &base_extensions[23][0],
/* OP_prefetcht0 */ &base_extensions[23][1],
/* OP_prefetcht1 */ &base_extensions[23][2],
/* OP_prefetcht2 */ &base_extensions[23][3],
/* OP_prefetch */ &base_extensions[24][0],
/* OP_prefetchw */ &base_extensions[24][1],
/* OP_movups */ &prefix_extensions[ 0][0],
/* OP_movss */ &mod_extensions[18][0],
/* OP_movupd */ &prefix_extensions[ 0][2],
/* OP_movsd */ &mod_extensions[19][0],
/* OP_movlps */ &prefix_extensions[ 2][0],
/* OP_movlpd */ &prefix_extensions[ 2][2],
/* OP_unpcklps */ &prefix_extensions[ 4][0],
/* OP_unpcklpd */ &prefix_extensions[ 4][2],
/* OP_unpckhps */ &prefix_extensions[ 5][0],
/* OP_unpckhpd */ &prefix_extensions[ 5][2],
/* OP_movhps */ &prefix_extensions[ 6][0],
/* OP_movhpd */ &prefix_extensions[ 6][2],
/* OP_movaps */ &prefix_extensions[ 8][0],
/* OP_movapd */ &prefix_extensions[ 8][2],
/* OP_cvtpi2ps */ &prefix_extensions[10][0],
/* OP_cvtsi2ss */ &prefix_extensions[10][1],
/* OP_cvtpi2pd */ &prefix_extensions[10][2],
/* OP_cvtsi2sd */ &prefix_extensions[10][3],
/* OP_cvttps2pi */ &prefix_extensions[12][0],
/* OP_cvttss2si */ &prefix_extensions[12][1],
/* OP_cvttpd2pi */ &prefix_extensions[12][2],
/* OP_cvttsd2si */ &prefix_extensions[12][3],
/* OP_cvtps2pi */ &prefix_extensions[13][0],
/* OP_cvtss2si */ &prefix_extensions[13][1],
/* OP_cvtpd2pi */ &prefix_extensions[13][2],
/* OP_cvtsd2si */ &prefix_extensions[13][3],
/* OP_ucomiss */ &prefix_extensions[14][0],
/* OP_ucomisd */ &prefix_extensions[14][2],
/* OP_comiss */ &prefix_extensions[15][0],
/* OP_comisd */ &prefix_extensions[15][2],
/* OP_movmskps */ &prefix_extensions[16][0],
/* OP_movmskpd */ &prefix_extensions[16][2],
/* OP_sqrtps */ &prefix_extensions[17][0],
/* OP_sqrtss */ &prefix_extensions[17][1],
/* OP_sqrtpd */ &prefix_extensions[17][2],
/* OP_sqrtsd */ &prefix_extensions[17][3],
/* OP_rsqrtps */ &prefix_extensions[18][0],
/* OP_rsqrtss */ &prefix_extensions[18][1],
/* OP_rcpps */ &prefix_extensions[19][0],
/* OP_rcpss */ &prefix_extensions[19][1],
/* OP_andps */ &prefix_extensions[20][0],
/* OP_andpd */ &prefix_extensions[20][2],
/* OP_andnps */ &prefix_extensions[21][0],
/* OP_andnpd */ &prefix_extensions[21][2],
/* OP_orps */ &prefix_extensions[22][0],
/* OP_orpd */ &prefix_extensions[22][2],
/* OP_xorps */ &prefix_extensions[23][0],
/* OP_xorpd */ &prefix_extensions[23][2],
/* OP_addps */ &prefix_extensions[24][0],
/* OP_addss */ &prefix_extensions[24][1],
/* OP_addpd */ &prefix_extensions[24][2],
/* OP_addsd */ &prefix_extensions[24][3],
/* OP_mulps */ &prefix_extensions[25][0],
/* OP_mulss */ &prefix_extensions[25][1],
/* OP_mulpd */ &prefix_extensions[25][2],
/* OP_mulsd */ &prefix_extensions[25][3],
/* OP_cvtps2pd */ &prefix_extensions[26][0],
/* OP_cvtss2sd */ &prefix_extensions[26][1],
/* OP_cvtpd2ps */ &prefix_extensions[26][2],
/* OP_cvtsd2ss */ &prefix_extensions[26][3],
/* OP_cvtdq2ps */ &prefix_extensions[27][0],
/* OP_cvttps2dq */ &prefix_extensions[27][1],
/* OP_cvtps2dq */ &prefix_extensions[27][2],
/* OP_subps */ &prefix_extensions[28][0],
/* OP_subss */ &prefix_extensions[28][1],
/* OP_subpd */ &prefix_extensions[28][2],
/* OP_subsd */ &prefix_extensions[28][3],
/* OP_minps */ &prefix_extensions[29][0],
/* OP_minss */ &prefix_extensions[29][1],
/* OP_minpd */ &prefix_extensions[29][2],
/* OP_minsd */ &prefix_extensions[29][3],
/* OP_divps */ &prefix_extensions[30][0],
/* OP_divss */ &prefix_extensions[30][1],
/* OP_divpd */ &prefix_extensions[30][2],
/* OP_divsd */ &prefix_extensions[30][3],
/* OP_maxps */ &prefix_extensions[31][0],
/* OP_maxss */ &prefix_extensions[31][1],
/* OP_maxpd */ &prefix_extensions[31][2],
/* OP_maxsd */ &prefix_extensions[31][3],
/* OP_cmpps */ &prefix_extensions[52][0],
/* OP_cmpss */ &prefix_extensions[52][1],
/* OP_cmppd */ &prefix_extensions[52][2],
/* OP_cmpsd */ &prefix_extensions[52][3],
/* OP_shufps */ &prefix_extensions[55][0],
/* OP_shufpd */ &prefix_extensions[55][2],
/* OP_cvtdq2pd */ &prefix_extensions[77][1],
/* OP_cvttpd2dq */ &prefix_extensions[77][2],
/* OP_cvtpd2dq */ &prefix_extensions[77][3],
/* OP_nop */ &rex_b_extensions[0][0],
/* OP_pause */ &prefix_extensions[103][1],
/* OP_ins */ &rep_extensions[1][0],
/* OP_rep_ins */ &rep_extensions[1][2],
/* OP_outs */ &rep_extensions[3][0],
/* OP_rep_outs */ &rep_extensions[3][2],
/* OP_movs */ &rep_extensions[5][0],
/* OP_rep_movs */ &rep_extensions[5][2],
/* OP_stos */ &rep_extensions[7][0],
/* OP_rep_stos */ &rep_extensions[7][2],
/* OP_lods */ &rep_extensions[9][0],
/* OP_rep_lods */ &rep_extensions[9][2],
/* OP_cmps */ &repne_extensions[1][0],
/* OP_rep_cmps */ &repne_extensions[1][2],
/* OP_repne_cmps */ &repne_extensions[1][4],
/* OP_scas */ &repne_extensions[3][0],
/* OP_rep_scas */ &repne_extensions[3][2],
/* OP_repne_scas */ &repne_extensions[3][4],
/* OP_fadd */ &float_low_modrm[0x00],
/* OP_fmul */ &float_low_modrm[0x01],
/* OP_fcom */ &float_low_modrm[0x02],
/* OP_fcomp */ &float_low_modrm[0x03],
/* OP_fsub */ &float_low_modrm[0x04],
/* OP_fsubr */ &float_low_modrm[0x05],
/* OP_fdiv */ &float_low_modrm[0x06],
/* OP_fdivr */ &float_low_modrm[0x07],
/* OP_fld */ &float_low_modrm[0x08],
/* OP_fst */ &float_low_modrm[0x0a],
/* OP_fstp */ &float_low_modrm[0x0b],
/* OP_fldenv */ &float_low_modrm[0x0c],
/* OP_fldcw */ &float_low_modrm[0x0d],
/* OP_fnstenv */ &float_low_modrm[0x0e],
/* OP_fnstcw */ &float_low_modrm[0x0f],
/* OP_fiadd */ &float_low_modrm[0x10],
/* OP_fimul */ &float_low_modrm[0x11],
/* OP_ficom */ &float_low_modrm[0x12],
/* OP_ficomp */ &float_low_modrm[0x13],
/* OP_fisub */ &float_low_modrm[0x14],
/* OP_fisubr */ &float_low_modrm[0x15],
/* OP_fidiv */ &float_low_modrm[0x16],
/* OP_fidivr */ &float_low_modrm[0x17],
/* OP_fild */ &float_low_modrm[0x18],
/* OP_fist */ &float_low_modrm[0x1a],
/* OP_fistp */ &float_low_modrm[0x1b],
/* OP_frstor */ &float_low_modrm[0x2c],
/* OP_fnsave */ &float_low_modrm[0x2e],
/* OP_fnstsw */ &float_low_modrm[0x2f],
/* OP_fbld */ &float_low_modrm[0x3c],
/* OP_fbstp */ &float_low_modrm[0x3e],
/* OP_fxch */ &float_high_modrm[1][0x08],
/* OP_fnop */ &float_high_modrm[1][0x10],
/* OP_fchs */ &float_high_modrm[1][0x20],
/* OP_fabs */ &float_high_modrm[1][0x21],
/* OP_ftst */ &float_high_modrm[1][0x24],
/* OP_fxam */ &float_high_modrm[1][0x25],
/* OP_fld1 */ &float_high_modrm[1][0x28],
/* OP_fldl2t */ &float_high_modrm[1][0x29],
/* OP_fldl2e */ &float_high_modrm[1][0x2a],
/* OP_fldpi */ &float_high_modrm[1][0x2b],
/* OP_fldlg2 */ &float_high_modrm[1][0x2c],
/* OP_fldln2 */ &float_high_modrm[1][0x2d],
/* OP_fldz */ &float_high_modrm[1][0x2e],
/* OP_f2xm1 */ &float_high_modrm[1][0x30],
/* OP_fyl2x */ &float_high_modrm[1][0x31],
/* OP_fptan */ &float_high_modrm[1][0x32],
/* OP_fpatan */ &float_high_modrm[1][0x33],
/* OP_fxtract */ &float_high_modrm[1][0x34],
/* OP_fprem1 */ &float_high_modrm[1][0x35],
/* OP_fdecstp */ &float_high_modrm[1][0x36],
/* OP_fincstp */ &float_high_modrm[1][0x37],
/* OP_fprem */ &float_high_modrm[1][0x38],
/* OP_fyl2xp1 */ &float_high_modrm[1][0x39],
/* OP_fsqrt */ &float_high_modrm[1][0x3a],
/* OP_fsincos */ &float_high_modrm[1][0x3b],
/* OP_frndint */ &float_high_modrm[1][0x3c],
/* OP_fscale */ &float_high_modrm[1][0x3d],
/* OP_fsin */ &float_high_modrm[1][0x3e],
/* OP_fcos */ &float_high_modrm[1][0x3f],
/* OP_fcmovb */ &float_high_modrm[2][0x00],
/* OP_fcmove */ &float_high_modrm[2][0x08],
/* OP_fcmovbe */ &float_high_modrm[2][0x10],
/* OP_fcmovu */ &float_high_modrm[2][0x18],
/* OP_fucompp */ &float_high_modrm[2][0x29],
/* OP_fcmovnb */ &float_high_modrm[3][0x00],
/* OP_fcmovne */ &float_high_modrm[3][0x08],
/* OP_fcmovnbe */ &float_high_modrm[3][0x10],
/* OP_fcmovnu */ &float_high_modrm[3][0x18],
/* OP_fnclex */ &float_high_modrm[3][0x22],
/* OP_fninit */ &float_high_modrm[3][0x23],
/* OP_fucomi */ &float_high_modrm[3][0x28],
/* OP_fcomi */ &float_high_modrm[3][0x30],
/* OP_ffree */ &float_high_modrm[5][0x00],
/* OP_fucom */ &float_high_modrm[5][0x20],
/* OP_fucomp */ &float_high_modrm[5][0x28],
/* OP_faddp */ &float_high_modrm[6][0x00],
/* OP_fmulp */ &float_high_modrm[6][0x08],
/* OP_fcompp */ &float_high_modrm[6][0x19],
/* OP_fsubrp */ &float_high_modrm[6][0x20],
/* OP_fsubp */ &float_high_modrm[6][0x28],
/* OP_fdivrp */ &float_high_modrm[6][0x30],
/* OP_fdivp */ &float_high_modrm[6][0x38],
/* OP_fucomip */ &float_high_modrm[7][0x28],
/* OP_fcomip */ &float_high_modrm[7][0x30],
/* SSE3 instructions */
/* OP_fisttp */ &float_low_modrm[0x29],
/* OP_haddpd */ &prefix_extensions[114][2],
/* OP_haddps */ &prefix_extensions[114][3],
/* OP_hsubpd */ &prefix_extensions[115][2],
/* OP_hsubps */ &prefix_extensions[115][3],
/* OP_addsubpd */ &prefix_extensions[116][2],
/* OP_addsubps */ &prefix_extensions[116][3],
/* OP_lddqu */ &prefix_extensions[117][3],
/* OP_monitor */ &rm_extensions[1][0],
/* OP_mwait */ &rm_extensions[1][1],
/* OP_movsldup */ &prefix_extensions[ 2][1],
/* OP_movshdup */ &prefix_extensions[ 6][1],
/* OP_movddup */ &prefix_extensions[ 2][3],
/* 3D-Now! instructions */
/* OP_femms */ &second_byte[0x0e],
/* OP_unknown_3dnow */ &suffix_extensions[0],
/* OP_pavgusb */ &suffix_extensions[1],
/* OP_pfadd */ &suffix_extensions[2],
/* OP_pfacc */ &suffix_extensions[3],
/* OP_pfcmpge */ &suffix_extensions[4],
/* OP_pfcmpgt */ &suffix_extensions[5],
/* OP_pfcmpeq */ &suffix_extensions[6],
/* OP_pfmin */ &suffix_extensions[7],
/* OP_pfmax */ &suffix_extensions[8],
/* OP_pfmul */ &suffix_extensions[9],
/* OP_pfrcp */ &suffix_extensions[10],
/* OP_pfrcpit1 */ &suffix_extensions[11],
/* OP_pfrcpit2 */ &suffix_extensions[12],
/* OP_pfrsqrt */ &suffix_extensions[13],
/* OP_pfrsqit1 */ &suffix_extensions[14],
/* OP_pmulhrw */ &suffix_extensions[15],
/* OP_pfsub */ &suffix_extensions[16],
/* OP_pfsubr */ &suffix_extensions[17],
/* OP_pi2fd */ &suffix_extensions[18],
/* OP_pf2id */ &suffix_extensions[19],
/* OP_pi2fw */ &suffix_extensions[20],
/* OP_pf2iw */ &suffix_extensions[21],
/* OP_pfnacc */ &suffix_extensions[22],
/* OP_pfpnacc */ &suffix_extensions[23],
/* OP_pswapd */ &suffix_extensions[24],
/* SSSE3 */
/* OP_pshufb */ &prefix_extensions[118][0],
/* OP_phaddw */ &prefix_extensions[119][0],
/* OP_phaddd */ &prefix_extensions[120][0],
/* OP_phaddsw */ &prefix_extensions[121][0],
/* OP_pmaddubsw */ &prefix_extensions[122][0],
/* OP_phsubw */ &prefix_extensions[123][0],
/* OP_phsubd */ &prefix_extensions[124][0],
/* OP_phsubsw */ &prefix_extensions[125][0],
/* OP_psignb */ &prefix_extensions[126][0],
/* OP_psignw */ &prefix_extensions[127][0],
/* OP_psignd */ &prefix_extensions[128][0],
/* OP_pmulhrsw */ &prefix_extensions[129][0],
/* OP_pabsb */ &prefix_extensions[130][0],
/* OP_pabsw */ &prefix_extensions[131][0],
/* OP_pabsd */ &prefix_extensions[132][0],
/* OP_palignr */ &prefix_extensions[133][0],
    /* SSE4 (incl AMD (SSE4A) and Intel-specific (SSE4.1, SSE4.2) extensions) */
/* OP_popcnt */ &second_byte[0xb8],
/* OP_movntss */ &prefix_extensions[11][1],
/* OP_movntsd */ &prefix_extensions[11][3],
/* OP_extrq */ &prefix_extensions[134][2],
/* OP_insertq */ &prefix_extensions[134][3],
/* OP_lzcnt */ &prefix_extensions[136][1],
/* OP_pblendvb */ &e_vex_extensions[132][0],
/* OP_blendvps */ &e_vex_extensions[130][0],
/* OP_blendvpd */ &e_vex_extensions[129][0],
/* OP_ptest */ &e_vex_extensions[3][0],
/* OP_pmovsxbw */ &e_vex_extensions[4][0],
/* OP_pmovsxbd */ &e_vex_extensions[5][0],
/* OP_pmovsxbq */ &e_vex_extensions[6][0],
/* OP_pmovsxwd */ &e_vex_extensions[7][0],
/* OP_pmovsxwq */ &e_vex_extensions[8][0],
/* OP_pmovsxdq */ &e_vex_extensions[9][0],
/* OP_pmuldq */ &e_vex_extensions[10][0],
/* OP_pcmpeqq */ &e_vex_extensions[11][0],
/* OP_movntdqa */ &e_vex_extensions[12][0],
/* OP_packusdw */ &e_vex_extensions[13][0],
/* OP_pmovzxbw */ &e_vex_extensions[14][0],
/* OP_pmovzxbd */ &e_vex_extensions[15][0],
/* OP_pmovzxbq */ &e_vex_extensions[16][0],
/* OP_pmovzxwd */ &e_vex_extensions[17][0],
/* OP_pmovzxwq */ &e_vex_extensions[18][0],
/* OP_pmovzxdq */ &e_vex_extensions[19][0],
/* OP_pcmpgtq */ &e_vex_extensions[20][0],
/* OP_pminsb */ &e_vex_extensions[21][0],
/* OP_pminsd */ &e_vex_extensions[22][0],
/* OP_pminuw */ &e_vex_extensions[23][0],
/* OP_pminud */ &e_vex_extensions[24][0],
/* OP_pmaxsb */ &e_vex_extensions[25][0],
/* OP_pmaxsd */ &e_vex_extensions[26][0],
/* OP_pmaxuw */ &e_vex_extensions[27][0],
/* OP_pmaxud */ &e_vex_extensions[28][0],
/* OP_pmulld */ &e_vex_extensions[29][0],
/* OP_phminposuw */ &e_vex_extensions[30][0],
/* OP_crc32 */ &prefix_extensions[139][3],
/* OP_pextrb */ &e_vex_extensions[36][0],
/* OP_pextrd */ &e_vex_extensions[38][0],
/* OP_extractps */ &e_vex_extensions[39][0],
/* OP_roundps */ &e_vex_extensions[40][0],
/* OP_roundpd */ &e_vex_extensions[41][0],
/* OP_roundss */ &e_vex_extensions[42][0],
/* OP_roundsd */ &e_vex_extensions[43][0],
/* OP_blendps */ &e_vex_extensions[44][0],
/* OP_blendpd */ &e_vex_extensions[45][0],
/* OP_pblendw */ &e_vex_extensions[46][0],
/* OP_pinsrb */ &e_vex_extensions[47][0],
/* OP_insertps */ &e_vex_extensions[48][0],
/* OP_pinsrd */ &e_vex_extensions[49][0],
/* OP_dpps */ &e_vex_extensions[50][0],
/* OP_dppd */ &e_vex_extensions[51][0],
/* OP_mpsadbw */ &e_vex_extensions[52][0],
/* OP_pcmpestrm */ &e_vex_extensions[53][0],
/* OP_pcmpestri */ &e_vex_extensions[54][0],
/* OP_pcmpistrm */ &e_vex_extensions[55][0],
/* OP_pcmpistri */ &e_vex_extensions[56][0],
/* x64 */
/* OP_movsxd */ &x64_extensions[16][1],
/* OP_swapgs */ &rm_extensions[2][0],
/* VMX */
/* OP_vmcall */ &rm_extensions[0][1],
/* OP_vmlaunch */ &rm_extensions[0][2],
/* OP_vmresume */ &rm_extensions[0][3],
/* OP_vmxoff */ &rm_extensions[0][4],
/* OP_vmptrst */ &mod_extensions[13][0],
/* OP_vmptrld */ &prefix_extensions[137][0],
/* OP_vmxon */ &prefix_extensions[137][1],
/* OP_vmclear */ &prefix_extensions[137][2],
/* OP_vmread */ &prefix_extensions[134][0],
/* OP_vmwrite */ &prefix_extensions[135][0],
/* undocumented */
/* OP_int1 */ &first_byte[0xf1],
/* OP_salc */ &first_byte[0xd6],
/* OP_ffreep */ &float_high_modrm[7][0x00],
/* AMD SVM */
/* OP_vmrun */ &rm_extensions[3][0],
/* OP_vmmcall */ &rm_extensions[3][1],
/* OP_vmload */ &rm_extensions[3][2],
/* OP_vmsave */ &rm_extensions[3][3],
/* OP_stgi */ &rm_extensions[3][4],
/* OP_clgi */ &rm_extensions[3][5],
/* OP_skinit */ &rm_extensions[3][6],
/* OP_invlpga */ &rm_extensions[3][7],
/* AMD though not part of SVM */
/* OP_rdtscp */ &rm_extensions[2][1],
/* Intel VMX additions */
/* OP_invept */ &third_byte_38[49],
/* OP_invvpid */ &third_byte_38[50],
/* added in Intel Westmere */
/* OP_pclmulqdq */ &e_vex_extensions[57][0],
/* OP_aesimc */ &e_vex_extensions[31][0],
/* OP_aesenc */ &e_vex_extensions[32][0],
/* OP_aesenclast */ &e_vex_extensions[33][0],
/* OP_aesdec */ &e_vex_extensions[34][0],
/* OP_aesdeclast */ &e_vex_extensions[35][0],
/* OP_aeskeygenassist*/ &e_vex_extensions[58][0],
/* added in Intel Atom */
/* OP_movbe */ &prefix_extensions[138][0],
/* added in Intel Sandy Bridge */
/* OP_xgetbv */ &rm_extensions[4][0],
/* OP_xsetbv */ &rm_extensions[4][1],
/* OP_xsave32 */ &rex_w_extensions[2][0],
/* OP_xrstor32 */ &rex_w_extensions[3][0],
/* OP_xsaveopt32 */ &rex_w_extensions[4][0],
/* AVX */
/* OP_vmovss */ &mod_extensions[ 8][0],
/* OP_vmovsd */ &mod_extensions[ 9][0],
/* OP_vmovups */ &prefix_extensions[ 0][4],
/* OP_vmovupd */ &prefix_extensions[ 0][6],
/* OP_vmovlps */ &prefix_extensions[ 2][4],
/* OP_vmovsldup */ &prefix_extensions[ 2][5],
/* OP_vmovlpd */ &prefix_extensions[ 2][6],
/* OP_vmovddup */ &prefix_extensions[ 2][7],
/* OP_vunpcklps */ &prefix_extensions[ 4][4],
/* OP_vunpcklpd */ &prefix_extensions[ 4][6],
/* OP_vunpckhps */ &prefix_extensions[ 5][4],
/* OP_vunpckhpd */ &prefix_extensions[ 5][6],
/* OP_vmovhps */ &prefix_extensions[ 6][4],
/* OP_vmovshdup */ &prefix_extensions[ 6][5],
/* OP_vmovhpd */ &prefix_extensions[ 6][6],
/* OP_vmovaps */ &prefix_extensions[ 8][4],
/* OP_vmovapd */ &prefix_extensions[ 8][6],
/* OP_vcvtsi2ss */ &prefix_extensions[10][5],
/* OP_vcvtsi2sd */ &prefix_extensions[10][7],
/* OP_vmovntps */ &prefix_extensions[11][4],
/* OP_vmovntpd */ &prefix_extensions[11][6],
/* OP_vcvttss2si */ &prefix_extensions[12][5],
/* OP_vcvttsd2si */ &prefix_extensions[12][7],
/* OP_vcvtss2si */ &prefix_extensions[13][5],
/* OP_vcvtsd2si */ &prefix_extensions[13][7],
/* OP_vucomiss */ &prefix_extensions[14][4],
/* OP_vucomisd */ &prefix_extensions[14][6],
/* OP_vcomiss */ &prefix_extensions[15][4],
/* OP_vcomisd */ &prefix_extensions[15][6],
/* OP_vmovmskps */ &prefix_extensions[16][4],
/* OP_vmovmskpd */ &prefix_extensions[16][6],
/* OP_vsqrtps */ &prefix_extensions[17][4],
/* OP_vsqrtss */ &prefix_extensions[17][5],
/* OP_vsqrtpd */ &prefix_extensions[17][6],
/* OP_vsqrtsd */ &prefix_extensions[17][7],
/* OP_vrsqrtps */ &prefix_extensions[18][4],
/* OP_vrsqrtss */ &prefix_extensions[18][5],
/* OP_vrcpps */ &prefix_extensions[19][4],
/* OP_vrcpss */ &prefix_extensions[19][5],
/* OP_vandps */ &prefix_extensions[20][4],
/* OP_vandpd */ &prefix_extensions[20][6],
/* OP_vandnps */ &prefix_extensions[21][4],
/* OP_vandnpd */ &prefix_extensions[21][6],
/* OP_vorps */ &prefix_extensions[22][4],
/* OP_vorpd */ &prefix_extensions[22][6],
/* OP_vxorps */ &prefix_extensions[23][4],
/* OP_vxorpd */ &prefix_extensions[23][6],
/* OP_vaddps */ &prefix_extensions[24][4],
/* OP_vaddss */ &prefix_extensions[24][5],
/* OP_vaddpd */ &prefix_extensions[24][6],
/* OP_vaddsd */ &prefix_extensions[24][7],
/* OP_vmulps */ &prefix_extensions[25][4],
/* OP_vmulss */ &prefix_extensions[25][5],
/* OP_vmulpd */ &prefix_extensions[25][6],
/* OP_vmulsd */ &prefix_extensions[25][7],
/* OP_vcvtps2pd */ &prefix_extensions[26][4],
/* OP_vcvtss2sd */ &prefix_extensions[26][5],
/* OP_vcvtpd2ps */ &prefix_extensions[26][6],
/* OP_vcvtsd2ss */ &prefix_extensions[26][7],
/* OP_vcvtdq2ps */ &prefix_extensions[27][4],
/* OP_vcvttps2dq */ &prefix_extensions[27][5],
/* OP_vcvtps2dq */ &prefix_extensions[27][6],
/* OP_vsubps */ &prefix_extensions[28][4],
/* OP_vsubss */ &prefix_extensions[28][5],
/* OP_vsubpd */ &prefix_extensions[28][6],
/* OP_vsubsd */ &prefix_extensions[28][7],
/* OP_vminps */ &prefix_extensions[29][4],
/* OP_vminss */ &prefix_extensions[29][5],
/* OP_vminpd */ &prefix_extensions[29][6],
/* OP_vminsd */ &prefix_extensions[29][7],
/* OP_vdivps */ &prefix_extensions[30][4],
/* OP_vdivss */ &prefix_extensions[30][5],
/* OP_vdivpd */ &prefix_extensions[30][6],
/* OP_vdivsd */ &prefix_extensions[30][7],
/* OP_vmaxps */ &prefix_extensions[31][4],
/* OP_vmaxss */ &prefix_extensions[31][5],
/* OP_vmaxpd */ &prefix_extensions[31][6],
/* OP_vmaxsd */ &prefix_extensions[31][7],
/* OP_vpunpcklbw */ &prefix_extensions[32][6],
/* OP_vpunpcklwd */ &prefix_extensions[33][6],
/* OP_vpunpckldq */ &prefix_extensions[34][6],
/* OP_vpacksswb */ &prefix_extensions[35][6],
/* OP_vpcmpgtb */ &prefix_extensions[36][6],
/* OP_vpcmpgtw */ &prefix_extensions[37][6],
/* OP_vpcmpgtd */ &prefix_extensions[38][6],
/* OP_vpackuswb */ &prefix_extensions[39][6],
/* OP_vpunpckhbw */ &prefix_extensions[40][6],
/* OP_vpunpckhwd */ &prefix_extensions[41][6],
/* OP_vpunpckhdq */ &prefix_extensions[42][6],
/* OP_vpackssdw */ &prefix_extensions[43][6],
/* OP_vpunpcklqdq */ &prefix_extensions[44][6],
/* OP_vpunpckhqdq */ &prefix_extensions[45][6],
/* OP_vmovd */ &vex_W_extensions[108][0],
/* OP_vpshufhw */ &prefix_extensions[47][5],
/* OP_vpshufd */ &prefix_extensions[47][6],
/* OP_vpshuflw */ &prefix_extensions[47][7],
/* OP_vpcmpeqb */ &prefix_extensions[48][6],
/* OP_vpcmpeqw */ &prefix_extensions[49][6],
/* OP_vpcmpeqd */ &prefix_extensions[50][6],
/* OP_vmovq */ &prefix_extensions[51][5],
/* OP_vcmpps */ &prefix_extensions[52][4],
/* OP_vcmpss */ &prefix_extensions[52][5],
/* OP_vcmppd */ &prefix_extensions[52][6],
/* OP_vcmpsd */ &prefix_extensions[52][7],
/* OP_vpinsrw */ &prefix_extensions[53][6],
/* OP_vpextrw */ &prefix_extensions[54][6],
/* OP_vshufps */ &prefix_extensions[55][4],
/* OP_vshufpd */ &prefix_extensions[55][6],
/* OP_vpsrlw */ &prefix_extensions[56][6],
/* OP_vpsrld */ &prefix_extensions[57][6],
/* OP_vpsrlq */ &prefix_extensions[58][6],
/* OP_vpaddq */ &prefix_extensions[59][6],
/* OP_vpmullw */ &prefix_extensions[60][6],
/* OP_vpmovmskb */ &prefix_extensions[62][6],
/* OP_vpsubusb */ &prefix_extensions[63][6],
/* OP_vpsubusw */ &prefix_extensions[64][6],
/* OP_vpminub */ &prefix_extensions[65][6],
/* OP_vpand */ &prefix_extensions[66][6],
/* OP_vpaddusb */ &prefix_extensions[67][6],
/* OP_vpaddusw */ &prefix_extensions[68][6],
/* OP_vpmaxub */ &prefix_extensions[69][6],
/* OP_vpandn */ &prefix_extensions[70][6],
/* OP_vpavgb */ &prefix_extensions[71][6],
/* OP_vpsraw */ &prefix_extensions[72][6],
/* OP_vpsrad */ &prefix_extensions[73][6],
/* OP_vpavgw */ &prefix_extensions[74][6],
/* OP_vpmulhuw */ &prefix_extensions[75][6],
/* OP_vpmulhw */ &prefix_extensions[76][6],
/* OP_vcvtdq2pd */ &prefix_extensions[77][5],
/* OP_vcvttpd2dq */ &prefix_extensions[77][6],
/* OP_vcvtpd2dq */ &prefix_extensions[77][7],
/* OP_vmovntdq */ &prefix_extensions[78][6],
/* OP_vpsubsb */ &prefix_extensions[79][6],
/* OP_vpsubsw */ &prefix_extensions[80][6],
/* OP_vpminsw */ &prefix_extensions[81][6],
/* OP_vpor */ &prefix_extensions[82][6],
/* OP_vpaddsb */ &prefix_extensions[83][6],
/* OP_vpaddsw */ &prefix_extensions[84][6],
/* OP_vpmaxsw */ &prefix_extensions[85][6],
/* OP_vpxor */ &prefix_extensions[86][6],
/* OP_vpsllw */ &prefix_extensions[87][6],
/* OP_vpslld */ &prefix_extensions[88][6],
/* OP_vpsllq */ &prefix_extensions[89][6],
/* OP_vpmuludq */ &prefix_extensions[90][6],
/* OP_vpmaddwd */ &prefix_extensions[91][6],
/* OP_vpsadbw */ &prefix_extensions[92][6],
/* OP_vmaskmovdqu */ &prefix_extensions[93][6],
/* OP_vpsubb */ &prefix_extensions[94][6],
/* OP_vpsubw */ &prefix_extensions[95][6],
/* OP_vpsubd */ &prefix_extensions[96][6],
/* OP_vpsubq */ &prefix_extensions[97][6],
/* OP_vpaddb */ &prefix_extensions[98][6],
/* OP_vpaddw */ &prefix_extensions[99][6],
/* OP_vpaddd */ &prefix_extensions[100][6],
/* OP_vpsrldq */ &prefix_extensions[101][6],
/* OP_vpslldq */ &prefix_extensions[102][6],
/* OP_vmovdqu */ &prefix_extensions[112][5],
/* OP_vmovdqa */ &prefix_extensions[112][6],
/* OP_vhaddpd */ &prefix_extensions[114][6],
/* OP_vhaddps */ &prefix_extensions[114][7],
/* OP_vhsubpd */ &prefix_extensions[115][6],
/* OP_vhsubps */ &prefix_extensions[115][7],
/* OP_vaddsubpd */ &prefix_extensions[116][6],
/* OP_vaddsubps */ &prefix_extensions[116][7],
/* OP_vlddqu */ &prefix_extensions[117][7],
/* OP_vpshufb */ &prefix_extensions[118][6],
/* OP_vphaddw */ &prefix_extensions[119][6],
/* OP_vphaddd */ &prefix_extensions[120][6],
/* OP_vphaddsw */ &prefix_extensions[121][6],
/* OP_vpmaddubsw */ &prefix_extensions[122][6],
/* OP_vphsubw */ &prefix_extensions[123][6],
/* OP_vphsubd */ &prefix_extensions[124][6],
/* OP_vphsubsw */ &prefix_extensions[125][6],
/* OP_vpsignb */ &prefix_extensions[126][6],
/* OP_vpsignw */ &prefix_extensions[127][6],
/* OP_vpsignd */ &prefix_extensions[128][6],
/* OP_vpmulhrsw */ &prefix_extensions[129][6],
/* OP_vpabsb */ &prefix_extensions[130][6],
/* OP_vpabsw */ &prefix_extensions[131][6],
/* OP_vpabsd */ &prefix_extensions[132][6],
/* OP_vpalignr */ &prefix_extensions[133][6],
/* OP_vpblendvb */ &e_vex_extensions[ 2][1],
/* OP_vblendvps */ &e_vex_extensions[ 0][1],
/* OP_vblendvpd */ &e_vex_extensions[ 1][1],
/* OP_vptest */ &e_vex_extensions[ 3][1],
/* OP_vpmovsxbw */ &e_vex_extensions[ 4][1],
/* OP_vpmovsxbd */ &e_vex_extensions[ 5][1],
/* OP_vpmovsxbq */ &e_vex_extensions[ 6][1],
/* OP_vpmovsxwd */ &e_vex_extensions[ 7][1],
/* OP_vpmovsxwq */ &e_vex_extensions[ 8][1],
/* OP_vpmovsxdq */ &e_vex_extensions[ 9][1],
/* OP_vpmuldq */ &e_vex_extensions[10][1],
/* OP_vpcmpeqq */ &e_vex_extensions[11][1],
/* OP_vmovntdqa */ &e_vex_extensions[12][1],
/* OP_vpackusdw */ &e_vex_extensions[13][1],
/* OP_vpmovzxbw */ &e_vex_extensions[14][1],
/* OP_vpmovzxbd */ &e_vex_extensions[15][1],
/* OP_vpmovzxbq */ &e_vex_extensions[16][1],
/* OP_vpmovzxwd */ &e_vex_extensions[17][1],
/* OP_vpmovzxwq */ &e_vex_extensions[18][1],
/* OP_vpmovzxdq */ &e_vex_extensions[19][1],
/* OP_vpcmpgtq */ &e_vex_extensions[20][1],
/* OP_vpminsb */ &e_vex_extensions[21][1],
/* OP_vpminsd */ &e_vex_extensions[22][1],
/* OP_vpminuw */ &e_vex_extensions[23][1],
/* OP_vpminud */ &e_vex_extensions[24][1],
/* OP_vpmaxsb */ &e_vex_extensions[25][1],
/* OP_vpmaxsd */ &e_vex_extensions[26][1],
/* OP_vpmaxuw */ &e_vex_extensions[27][1],
/* OP_vpmaxud */ &e_vex_extensions[28][1],
/* OP_vpmulld */ &e_vex_extensions[29][1],
/* OP_vphminposuw */ &e_vex_extensions[30][1],
/* OP_vaesimc */ &e_vex_extensions[31][1],
/* OP_vaesenc */ &e_vex_extensions[32][1],
/* OP_vaesenclast */ &e_vex_extensions[33][1],
/* OP_vaesdec */ &e_vex_extensions[34][1],
/* OP_vaesdeclast */ &e_vex_extensions[35][1],
/* OP_vpextrb */ &e_vex_extensions[36][1],
/* OP_vpextrd */ &e_vex_extensions[38][1],
/* OP_vextractps */ &e_vex_extensions[39][1],
/* OP_vroundps */ &e_vex_extensions[40][1],
/* OP_vroundpd */ &e_vex_extensions[41][1],
/* OP_vroundss */ &e_vex_extensions[42][1],
/* OP_vroundsd */ &e_vex_extensions[43][1],
/* OP_vblendps */ &e_vex_extensions[44][1],
/* OP_vblendpd */ &e_vex_extensions[45][1],
/* OP_vpblendw */ &e_vex_extensions[46][1],
/* OP_vpinsrb */ &e_vex_extensions[47][1],
/* OP_vinsertps */ &e_vex_extensions[48][1],
/* OP_vpinsrd */ &e_vex_extensions[49][1],
/* OP_vdpps */ &e_vex_extensions[50][1],
/* OP_vdppd */ &e_vex_extensions[51][1],
/* OP_vmpsadbw */ &e_vex_extensions[52][1],
/* OP_vpcmpestrm */ &e_vex_extensions[53][1],
/* OP_vpcmpestri */ &e_vex_extensions[54][1],
/* OP_vpcmpistrm */ &e_vex_extensions[55][1],
/* OP_vpcmpistri */ &e_vex_extensions[56][1],
/* OP_vpclmulqdq */ &e_vex_extensions[57][1],
/* OP_vaeskeygenassist*/ &e_vex_extensions[58][1],
/* OP_vtestps */ &e_vex_extensions[59][1],
/* OP_vtestpd */ &e_vex_extensions[60][1],
/* OP_vzeroupper */ &vex_L_extensions[0][1],
/* OP_vzeroall */ &vex_L_extensions[0][2],
/* OP_vldmxcsr */ &e_vex_extensions[61][1],
/* OP_vstmxcsr */ &e_vex_extensions[62][1],
/* OP_vbroadcastss */ &e_vex_extensions[64][1],
/* OP_vbroadcastsd */ &e_vex_extensions[65][1],
/* OP_vbroadcastf128*/ &e_vex_extensions[66][1],
/* OP_vmaskmovps */ &e_vex_extensions[67][1],
/* OP_vmaskmovpd */ &e_vex_extensions[68][1],
/* OP_vpermilps */ &e_vex_extensions[71][1],
/* OP_vpermilpd */ &e_vex_extensions[72][1],
/* OP_vperm2f128 */ &e_vex_extensions[73][1],
/* OP_vinsertf128 */ &e_vex_extensions[74][1],
/* OP_vextractf128 */ &e_vex_extensions[75][1],
    /* added in Intel Ivy Bridge, covered by the F16C cpuid flag */
/* OP_vcvtph2ps */ &e_vex_extensions[63][1],
/* OP_vcvtps2ph */ &e_vex_extensions[76][1],
/* FMA */
/* OP_vfmadd132ps */ &vex_W_extensions[ 0][0],
/* OP_vfmadd132pd */ &vex_W_extensions[ 0][1],
/* OP_vfmadd213ps */ &vex_W_extensions[ 1][0],
/* OP_vfmadd213pd */ &vex_W_extensions[ 1][1],
/* OP_vfmadd231ps */ &vex_W_extensions[ 2][0],
/* OP_vfmadd231pd */ &vex_W_extensions[ 2][1],
/* OP_vfmadd132ss */ &vex_W_extensions[ 3][0],
/* OP_vfmadd132sd */ &vex_W_extensions[ 3][1],
/* OP_vfmadd213ss */ &vex_W_extensions[ 4][0],
/* OP_vfmadd213sd */ &vex_W_extensions[ 4][1],
/* OP_vfmadd231ss */ &vex_W_extensions[ 5][0],
/* OP_vfmadd231sd */ &vex_W_extensions[ 5][1],
/* OP_vfmaddsub132ps*/ &vex_W_extensions[ 6][0],
/* OP_vfmaddsub132pd*/ &vex_W_extensions[ 6][1],
/* OP_vfmaddsub213ps*/ &vex_W_extensions[ 7][0],
/* OP_vfmaddsub213pd*/ &vex_W_extensions[ 7][1],
/* OP_vfmaddsub231ps*/ &vex_W_extensions[ 8][0],
/* OP_vfmaddsub231pd*/ &vex_W_extensions[ 8][1],
/* OP_vfmsubadd132ps*/ &vex_W_extensions[ 9][0],
/* OP_vfmsubadd132pd*/ &vex_W_extensions[ 9][1],
/* OP_vfmsubadd213ps*/ &vex_W_extensions[10][0],
/* OP_vfmsubadd213pd*/ &vex_W_extensions[10][1],
/* OP_vfmsubadd231ps*/ &vex_W_extensions[11][0],
/* OP_vfmsubadd231pd*/ &vex_W_extensions[11][1],
/* OP_vfmsub132ps */ &vex_W_extensions[12][0],
/* OP_vfmsub132pd */ &vex_W_extensions[12][1],
/* OP_vfmsub213ps */ &vex_W_extensions[13][0],
/* OP_vfmsub213pd */ &vex_W_extensions[13][1],
/* OP_vfmsub231ps */ &vex_W_extensions[14][0],
/* OP_vfmsub231pd */ &vex_W_extensions[14][1],
/* OP_vfmsub132ss */ &vex_W_extensions[15][0],
/* OP_vfmsub132sd */ &vex_W_extensions[15][1],
/* OP_vfmsub213ss */ &vex_W_extensions[16][0],
/* OP_vfmsub213sd */ &vex_W_extensions[16][1],
/* OP_vfmsub231ss */ &vex_W_extensions[17][0],
/* OP_vfmsub231sd */ &vex_W_extensions[17][1],
/* OP_vfnmadd132ps */ &vex_W_extensions[18][0],
/* OP_vfnmadd132pd */ &vex_W_extensions[18][1],
/* OP_vfnmadd213ps */ &vex_W_extensions[19][0],
/* OP_vfnmadd213pd */ &vex_W_extensions[19][1],
/* OP_vfnmadd231ps */ &vex_W_extensions[20][0],
/* OP_vfnmadd231pd */ &vex_W_extensions[20][1],
/* OP_vfnmadd132ss */ &vex_W_extensions[21][0],
/* OP_vfnmadd132sd */ &vex_W_extensions[21][1],
/* OP_vfnmadd213ss */ &vex_W_extensions[22][0],
/* OP_vfnmadd213sd */ &vex_W_extensions[22][1],
/* OP_vfnmadd231ss */ &vex_W_extensions[23][0],
/* OP_vfnmadd231sd */ &vex_W_extensions[23][1],
/* OP_vfnmsub132ps */ &vex_W_extensions[24][0],
/* OP_vfnmsub132pd */ &vex_W_extensions[24][1],
/* OP_vfnmsub213ps */ &vex_W_extensions[25][0],
/* OP_vfnmsub213pd */ &vex_W_extensions[25][1],
/* OP_vfnmsub231ps */ &vex_W_extensions[26][0],
/* OP_vfnmsub231pd */ &vex_W_extensions[26][1],
/* OP_vfnmsub132ss */ &vex_W_extensions[27][0],
/* OP_vfnmsub132sd */ &vex_W_extensions[27][1],
/* OP_vfnmsub213ss */ &vex_W_extensions[28][0],
/* OP_vfnmsub213sd */ &vex_W_extensions[28][1],
/* OP_vfnmsub231ss */ &vex_W_extensions[29][0],
/* OP_vfnmsub231sd */ &vex_W_extensions[29][1],
/* SSE2 that were omitted before */
/* OP_movq2dq */ &prefix_extensions[61][1],
/* OP_movdq2q */ &prefix_extensions[61][3],
/* OP_fxsave64 */ &rex_w_extensions[0][1],
/* OP_fxrstor64 */ &rex_w_extensions[1][1],
/* OP_xsave64 */ &rex_w_extensions[2][1],
/* OP_xrstor64 */ &rex_w_extensions[3][1],
/* OP_xsaveopt64 */ &rex_w_extensions[4][1],
/* added in Intel Ivy Bridge: RDRAND and FSGSBASE cpuid flags */
/* OP_rdrand */ &mod_extensions[12][1],
/* OP_rdfsbase */ &mod_extensions[14][1],
/* OP_rdgsbase */ &mod_extensions[15][1],
/* OP_wrfsbase */ &mod_extensions[16][1],
/* OP_wrgsbase */ &mod_extensions[17][1],
    /* added ahead of hardware availability since the encoding details were already published */
/* OP_rdseed */ &mod_extensions[13][1],
/* AMD FMA4 */
/* OP_vfmaddsubps */ &vex_W_extensions[30][0],
/* OP_vfmaddsubpd */ &vex_W_extensions[31][0],
/* OP_vfmsubaddps */ &vex_W_extensions[32][0],
/* OP_vfmsubaddpd */ &vex_W_extensions[33][0],
/* OP_vfmaddps */ &vex_W_extensions[34][0],
/* OP_vfmaddpd */ &vex_W_extensions[35][0],
/* OP_vfmaddss */ &vex_W_extensions[36][0],
/* OP_vfmaddsd */ &vex_W_extensions[37][0],
/* OP_vfmsubps */ &vex_W_extensions[38][0],
/* OP_vfmsubpd */ &vex_W_extensions[39][0],
/* OP_vfmsubss */ &vex_W_extensions[40][0],
/* OP_vfmsubsd */ &vex_W_extensions[41][0],
/* OP_vfnmaddps */ &vex_W_extensions[42][0],
/* OP_vfnmaddpd */ &vex_W_extensions[43][0],
/* OP_vfnmaddss */ &vex_W_extensions[44][0],
/* OP_vfnmaddsd */ &vex_W_extensions[45][0],
/* OP_vfnmsubps */ &vex_W_extensions[46][0],
/* OP_vfnmsubpd */ &vex_W_extensions[47][0],
/* OP_vfnmsubss */ &vex_W_extensions[48][0],
/* OP_vfnmsubsd */ &vex_W_extensions[49][0],
/* AMD XOP */
/* OP_vfrczps */ &xop_extensions[27],
/* OP_vfrczpd */ &xop_extensions[28],
/* OP_vfrczss */ &xop_extensions[29],
/* OP_vfrczsd */ &xop_extensions[30],
/* OP_vpcmov */ &vex_W_extensions[50][0],
/* OP_vpcomb */ &xop_extensions[19],
/* OP_vpcomw */ &xop_extensions[20],
/* OP_vpcomd */ &xop_extensions[21],
/* OP_vpcomq */ &xop_extensions[22],
/* OP_vpcomub */ &xop_extensions[23],
/* OP_vpcomuw */ &xop_extensions[24],
/* OP_vpcomud */ &xop_extensions[25],
/* OP_vpcomuq */ &xop_extensions[26],
/* OP_vpermil2pd */ &vex_W_extensions[65][0],
/* OP_vpermil2ps */ &vex_W_extensions[64][0],
/* OP_vphaddbw */ &xop_extensions[43],
/* OP_vphaddbd */ &xop_extensions[44],
/* OP_vphaddbq */ &xop_extensions[45],
/* OP_vphaddwd */ &xop_extensions[46],
/* OP_vphaddwq */ &xop_extensions[47],
/* OP_vphadddq */ &xop_extensions[48],
/* OP_vphaddubw */ &xop_extensions[49],
/* OP_vphaddubd */ &xop_extensions[50],
/* OP_vphaddubq */ &xop_extensions[51],
/* OP_vphadduwd */ &xop_extensions[52],
/* OP_vphadduwq */ &xop_extensions[53],
/* OP_vphaddudq */ &xop_extensions[54],
/* OP_vphsubbw */ &xop_extensions[55],
/* OP_vphsubwd */ &xop_extensions[56],
/* OP_vphsubdq */ &xop_extensions[57],
/* OP_vpmacssww */ &xop_extensions[ 1],
/* OP_vpmacsswd */ &xop_extensions[ 2],
/* OP_vpmacssdql */ &xop_extensions[ 3],
/* OP_vpmacssdd */ &xop_extensions[ 4],
/* OP_vpmacssdqh */ &xop_extensions[ 5],
/* OP_vpmacsww */ &xop_extensions[ 6],
/* OP_vpmacswd */ &xop_extensions[ 7],
/* OP_vpmacsdql */ &xop_extensions[ 8],
/* OP_vpmacsdd */ &xop_extensions[ 9],
/* OP_vpmacsdqh */ &xop_extensions[10],
/* OP_vpmadcsswd */ &xop_extensions[13],
/* OP_vpmadcswd */ &xop_extensions[14],
/* OP_vpperm */ &vex_W_extensions[51][0],
/* OP_vprotb */ &xop_extensions[15],
/* OP_vprotw */ &xop_extensions[16],
/* OP_vprotd */ &xop_extensions[17],
/* OP_vprotq */ &xop_extensions[18],
/* OP_vpshlb */ &vex_W_extensions[56][0],
/* OP_vpshlw */ &vex_W_extensions[57][0],
/* OP_vpshld */ &vex_W_extensions[58][0],
/* OP_vpshlq */ &vex_W_extensions[59][0],
/* OP_vpshab */ &vex_W_extensions[60][0],
/* OP_vpshaw */ &vex_W_extensions[61][0],
/* OP_vpshad */ &vex_W_extensions[62][0],
/* OP_vpshaq */ &vex_W_extensions[63][0],
/* AMD TBM */
/* OP_bextr */ &prefix_extensions[141][4],
/* OP_blcfill */ &base_extensions[27][1],
/* OP_blci */ &base_extensions[28][6],
/* OP_blcic */ &base_extensions[27][5],
/* OP_blcmsk */ &base_extensions[28][1],
/* OP_blcs */ &base_extensions[27][3],
/* OP_blsfill */ &base_extensions[27][2],
/* OP_blsic */ &base_extensions[27][6],
/* OP_t1mskc */ &base_extensions[27][7],
/* OP_tzmsk */ &base_extensions[27][4],
/* AMD LWP */
/* OP_llwpcb */ &base_extensions[29][0],
/* OP_slwpcb */ &base_extensions[29][1],
/* OP_lwpins */ &base_extensions[30][0],
/* OP_lwpval */ &base_extensions[30][1],
/* Intel BMI1 */
/* (includes non-immed form of OP_bextr) */
/* OP_andn */ &third_byte_38[100],
/* OP_blsr */ &base_extensions[31][1],
/* OP_blsmsk */ &base_extensions[31][2],
/* OP_blsi */ &base_extensions[31][3],
/* OP_tzcnt */ &prefix_extensions[140][1],
/* Intel BMI2 */
/* OP_bzhi */ &prefix_extensions[142][4],
/* OP_pext */ &prefix_extensions[142][5],
/* OP_pdep */ &prefix_extensions[142][7],
/* OP_sarx */ &prefix_extensions[141][5],
/* OP_shlx */ &prefix_extensions[141][6],
/* OP_shrx */ &prefix_extensions[141][7],
/* OP_rorx */ &third_byte_3a[56],
/* OP_mulx */ &prefix_extensions[143][7],
/* Intel Safer Mode Extensions */
/* OP_getsec */ &second_byte[0x37],
/* Misc Intel additions */
/* OP_vmfunc */ &rm_extensions[4][4],
/* OP_invpcid */ &third_byte_38[103],
/* Intel TSX */
/* OP_xabort */ &base_extensions[17][7],
/* OP_xbegin */ &base_extensions[18][7],
/* OP_xend */ &rm_extensions[4][5],
/* OP_xtest */ &rm_extensions[4][6],
/* AVX2 */
/* OP_vpgatherdd */ &vex_W_extensions[66][0],
/* OP_vpgatherdq */ &vex_W_extensions[66][1],
/* OP_vpgatherqd */ &vex_W_extensions[67][0],
/* OP_vpgatherqq */ &vex_W_extensions[67][1],
/* OP_vgatherdps */ &vex_W_extensions[68][0],
/* OP_vgatherdpd */ &vex_W_extensions[68][1],
/* OP_vgatherqps */ &vex_W_extensions[69][0],
/* OP_vgatherqpd */ &vex_W_extensions[69][1],
/* OP_vbroadcasti128 */ &e_vex_extensions[139][1],
/* OP_vinserti128 */ &e_vex_extensions[128][1],
/* OP_vextracti128 */ &e_vex_extensions[127][1],
/* OP_vpmaskmovd */ &vex_W_extensions[70][0],
/* OP_vpmaskmovq */ &vex_W_extensions[70][1],
/* OP_vperm2i128 */ &third_byte_3a[62],
/* OP_vpermd */ &e_vex_extensions[123][1],
/* OP_vpermps */ &e_vex_extensions[124][1],
/* OP_vpermq */ &e_vex_extensions[125][1],
/* OP_vpermpd */ &e_vex_extensions[126][1],
/* OP_vpblendd */ &third_byte_3a[61],
/* OP_vpsllvd */ &vex_W_extensions[73][0],
/* OP_vpsllvq */ &vex_W_extensions[73][1],
/* OP_vpsravd */ &e_vex_extensions[131][1],
/* OP_vpsrlvd */ &vex_W_extensions[72][0],
/* OP_vpsrlvq */ &vex_W_extensions[72][1],
/* OP_vpbroadcastb */ &e_vex_extensions[135][1],
/* OP_vpbroadcastw */ &e_vex_extensions[136][1],
/* OP_vpbroadcastd */ &e_vex_extensions[137][1],
/* OP_vpbroadcastq */ &e_vex_extensions[138][1],
/* added in Intel Skylake */
/* OP_xsavec32 */ &rex_w_extensions[5][0],
/* OP_xsavec64 */ &rex_w_extensions[5][1],
/* Intel ADX */
/* OP_adox */ &prefix_extensions[143][1],
/* OP_adcx */ &prefix_extensions[143][2],
/* AVX-512 VEX encoded (scalar opmask instructions) */
/* OP_kmovw */ &vex_W_extensions[74][0],
/* OP_kmovb */ &vex_W_extensions[75][0],
/* OP_kmovq */ &vex_W_extensions[74][1],
/* OP_kmovd */ &vex_W_extensions[75][1],
/* OP_kandw */ &vex_W_extensions[82][0],
/* OP_kandb */ &vex_W_extensions[83][0],
/* OP_kandq */ &vex_W_extensions[82][1],
/* OP_kandd */ &vex_W_extensions[83][1],
/* OP_kandnw */ &vex_W_extensions[84][0],
/* OP_kandnb */ &vex_W_extensions[85][0],
/* OP_kandnq */ &vex_W_extensions[84][1],
/* OP_kandnd */ &vex_W_extensions[85][1],
/* OP_kunpckbw */ &vex_W_extensions[87][0],
/* OP_kunpckwd */ &vex_W_extensions[86][0],
/* OP_kunpckdq */ &vex_W_extensions[86][1],
/* OP_knotw */ &vex_W_extensions[88][0],
/* OP_knotb */ &vex_W_extensions[89][0],
/* OP_knotq */ &vex_W_extensions[88][1],
/* OP_knotd */ &vex_W_extensions[89][1],
/* OP_korw */ &vex_W_extensions[90][0],
/* OP_korb */ &vex_W_extensions[91][0],
/* OP_korq */ &vex_W_extensions[90][1],
/* OP_kord */ &vex_W_extensions[91][1],
/* OP_kxnorw */ &vex_W_extensions[92][0],
/* OP_kxnorb */ &vex_W_extensions[93][0],
/* OP_kxnorq */ &vex_W_extensions[92][1],
/* OP_kxnord */ &vex_W_extensions[93][1],
/* OP_kxorw */ &vex_W_extensions[94][0],
/* OP_kxorb */ &vex_W_extensions[95][0],
/* OP_kxorq */ &vex_W_extensions[94][1],
/* OP_kxord */ &vex_W_extensions[95][1],
/* OP_kaddw */ &vex_W_extensions[96][0],
/* OP_kaddb */ &vex_W_extensions[97][0],
/* OP_kaddq */ &vex_W_extensions[96][1],
/* OP_kaddd */ &vex_W_extensions[97][1],
/* OP_kortestw */ &vex_W_extensions[98][0],
/* OP_kortestb */ &vex_W_extensions[99][0],
/* OP_kortestq */ &vex_W_extensions[98][1],
/* OP_kortestd */ &vex_W_extensions[99][1],
/* OP_kshiftlw */ &vex_W_extensions[100][1],
/* OP_kshiftlb */ &vex_W_extensions[100][0],
/* OP_kshiftlq */ &vex_W_extensions[101][1],
/* OP_kshiftld */ &vex_W_extensions[101][0],
/* OP_kshiftrw */ &vex_W_extensions[102][1],
/* OP_kshiftrb */ &vex_W_extensions[102][0],
/* OP_kshiftrq */ &vex_W_extensions[103][1],
/* OP_kshiftrd */ &vex_W_extensions[103][0],
/* OP_ktestw */ &vex_W_extensions[104][0],
/* OP_ktestb */ &vex_W_extensions[105][0],
/* OP_ktestq */ &vex_W_extensions[104][1],
/* OP_ktestd */ &vex_W_extensions[105][1],
/* AVX-512 EVEX encoded */
/* OP_valignd */ &evex_W_extensions[154][0],
/* OP_valignq */ &evex_W_extensions[154][1],
/* OP_vblendmpd */ &evex_W_extensions[155][1],
/* OP_vblendmps */ &evex_W_extensions[155][0],
/* OP_vbroadcastf32x2 */ &evex_W_extensions[147][0],
/* OP_vbroadcastf32x4 */ &evex_W_extensions[148][0],
/* OP_vbroadcastf32x8 */ &evex_W_extensions[149][0],
/* OP_vbroadcastf64x2 */ &evex_W_extensions[148][1],
/* OP_vbroadcastf64x4 */ &evex_W_extensions[149][1],
/* OP_vbroadcasti32x2 */ &evex_W_extensions[151][0],
/* OP_vbroadcasti32x4 */ &evex_W_extensions[152][0],
/* OP_vbroadcasti32x8 */ &evex_W_extensions[153][0],
/* OP_vbroadcasti64x2 */ &evex_W_extensions[152][1],
/* OP_vbroadcasti64x4 */ &evex_W_extensions[153][1],
/* OP_vcompresspd */ &evex_W_extensions[156][1],
/* OP_vcompressps */ &evex_W_extensions[156][0],
/* OP_vcvtpd2qq */ &evex_W_extensions[46][1],
/* OP_vcvtpd2udq */ &evex_W_extensions[47][1],
/* OP_vcvtpd2uqq */ &evex_W_extensions[48][1],
/* OP_vcvtps2qq */ &evex_W_extensions[46][0],
/* OP_vcvtps2udq */ &evex_W_extensions[47][0],
/* OP_vcvtps2uqq */ &evex_W_extensions[48][0],
/* OP_vcvtqq2pd */ &evex_W_extensions[57][1],
/* OP_vcvtqq2ps */ &evex_W_extensions[56][1],
/* OP_vcvtsd2usi */ &evex_W_extensions[53][0],
/* OP_vcvtss2usi */ &evex_W_extensions[52][0],
/* OP_vcvttpd2qq */ &evex_W_extensions[50][1],
/* OP_vcvttpd2udq */ &evex_W_extensions[49][1],
/* OP_vcvttpd2uqq */ &evex_W_extensions[51][1],
/* OP_vcvttps2qq */ &evex_W_extensions[50][0],
/* OP_vcvttps2udq */ &evex_W_extensions[49][0],
/* OP_vcvttps2uqq */ &evex_W_extensions[51][0],
/* OP_vcvttsd2usi */ &evex_W_extensions[55][0],
/* OP_vcvttss2usi */ &evex_W_extensions[54][0],
/* OP_vcvtudq2pd */ &evex_W_extensions[61][0],
/* OP_vcvtudq2ps */ &evex_W_extensions[60][0],
/* OP_vcvtuqq2pd */ &evex_W_extensions[61][1],
/* OP_vcvtuqq2ps */ &evex_W_extensions[60][1],
/* OP_vcvtusi2sd */ &evex_W_extensions[59][0],
/* OP_vcvtusi2ss */ &evex_W_extensions[58][0],
/* OP_vdbpsadbw */ &e_vex_extensions[52][2],
/* OP_vexp2pd */ &evex_W_extensions[184][1],
/* OP_vexp2ps */ &evex_W_extensions[184][0],
/* OP_vexpandpd */ &evex_W_extensions[157][1],
/* OP_vexpandps */ &evex_W_extensions[157][0],
/* OP_vextractf32x4 */ &evex_W_extensions[100][0],
/* OP_vextractf32x8 */ &evex_W_extensions[101][0],
/* OP_vextractf64x2 */ &evex_W_extensions[100][1],
/* OP_vextractf64x4 */ &evex_W_extensions[101][1],
/* OP_vextracti32x4 */ &evex_W_extensions[102][0],
/* OP_vextracti32x8 */ &evex_W_extensions[103][0],
/* OP_vextracti64x2 */ &evex_W_extensions[102][1],
/* OP_vextracti64x4 */ &evex_W_extensions[103][1],
/* OP_vfixupimmpd */ &evex_W_extensions[158][1],
/* OP_vfixupimmps */ &evex_W_extensions[158][0],
/* OP_vfixupimmsd */ &evex_W_extensions[159][1],
/* OP_vfixupimmss */ &evex_W_extensions[159][0],
/* OP_vfpclasspd */ &evex_W_extensions[182][1],
/* OP_vfpclassps */ &evex_W_extensions[182][0],
/* OP_vfpclasssd */ &evex_W_extensions[183][1],
/* OP_vfpclassss */ &evex_W_extensions[183][0],
/* OP_vgetexppd */ &evex_W_extensions[160][1],
/* OP_vgetexpps */ &evex_W_extensions[160][0],
/* OP_vgetexpsd */ &evex_W_extensions[161][1],
/* OP_vgetexpss */ &evex_W_extensions[161][0],
/* OP_vgetmantpd */ &evex_W_extensions[162][1],
/* OP_vgetmantps */ &evex_W_extensions[162][0],
/* OP_vgetmantsd */ &evex_W_extensions[163][1],
/* OP_vgetmantss */ &evex_W_extensions[163][0],
/* OP_vinsertf32x4 */ &evex_W_extensions[104][0],
/* OP_vinsertf32x8 */ &evex_W_extensions[105][0],
/* OP_vinsertf64x2 */ &evex_W_extensions[104][1],
/* OP_vinsertf64x4 */ &evex_W_extensions[105][1],
/* OP_vinserti32x4 */ &evex_W_extensions[106][0],
/* OP_vinserti32x8 */ &evex_W_extensions[107][0],
/* OP_vinserti64x2 */ &evex_W_extensions[106][1],
/* OP_vinserti64x4 */ &evex_W_extensions[107][1],
/* OP_vmovdqa32 */ &evex_W_extensions[8][0],
/* OP_vmovdqa64 */ &evex_W_extensions[8][1],
/* OP_vmovdqu16 */ &evex_W_extensions[10][1],
/* OP_vmovdqu32 */ &evex_W_extensions[11][0],
/* OP_vmovdqu64 */ &evex_W_extensions[11][1],
/* OP_vmovdqu8 */ &evex_W_extensions[10][0],
/* OP_vpabsq */ &evex_W_extensions[146][1],
/* OP_vpandd */ &evex_W_extensions[41][0],
/* OP_vpandnd */ &evex_W_extensions[42][0],
/* OP_vpandnq */ &evex_W_extensions[42][1],
/* OP_vpandq */ &evex_W_extensions[41][1],
/* OP_vpblendmb */ &evex_W_extensions[164][0],
/* OP_vpblendmd */ &evex_W_extensions[165][0],
/* OP_vpblendmq */ &evex_W_extensions[165][1],
/* OP_vpblendmw */ &evex_W_extensions[164][1],
/* OP_vpbroadcastmb2q */ &prefix_extensions[184][9],
/* OP_vpbroadcastmw2d */ &prefix_extensions[185][9],
/* OP_vpcmpb */ &evex_W_extensions[109][0],
/* OP_vpcmpd */ &evex_W_extensions[111][0],
/* OP_vpcmpq */ &evex_W_extensions[111][1],
/* OP_vpcmpub */ &evex_W_extensions[108][0],
/* OP_vpcmpud */ &evex_W_extensions[110][0],
/* OP_vpcmpuq */ &evex_W_extensions[110][1],
/* OP_vpcmpuw */ &evex_W_extensions[108][1],
/* OP_vpcmpw */ &evex_W_extensions[109][1],
/* OP_vpcompressd */ &evex_W_extensions[166][0],
/* OP_vpcompressq */ &evex_W_extensions[166][1],
/* OP_vpconflictd */ &evex_W_extensions[185][0],
/* OP_vpconflictq */ &evex_W_extensions[185][1],
/* OP_vpermi2b */ &evex_W_extensions[96][0],
/* OP_vpermi2d */ &evex_W_extensions[95][0],
/* OP_vpermi2pd */ &evex_W_extensions[94][1],
/* OP_vpermi2ps */ &evex_W_extensions[94][0],
/* OP_vpermi2q */ &evex_W_extensions[95][1],
/* OP_vpermi2w */ &evex_W_extensions[96][1],
/* OP_vpermt2b */ &evex_W_extensions[97][0],
/* OP_vpermt2d */ &evex_W_extensions[98][0],
/* OP_vpermt2pd */ &evex_W_extensions[99][1],
/* OP_vpermt2ps */ &evex_W_extensions[99][0],
/* OP_vpermt2q */ &evex_W_extensions[98][1],
/* OP_vpermt2w */ &evex_W_extensions[97][1],
/* OP_vpermw */ &third_byte_38[120],
/* OP_vpexpandd */ &evex_W_extensions[167][0],
/* OP_vpexpandq */ &evex_W_extensions[167][1],
/* OP_vpextrq */ &evex_W_extensions[144][1],
/* OP_vpinsrq */ &evex_W_extensions[143][1],
/* OP_vplzcntd */ &evex_W_extensions[186][0],
/* OP_vplzcntq */ &evex_W_extensions[186][1],
/* OP_vpmadd52huq */ &third_byte_38[158],
/* OP_vpmadd52luq */ &third_byte_38[157],
/* OP_vpmaxsq */ &evex_W_extensions[113][1],
/* OP_vpmaxuq */ &evex_W_extensions[115][1],
/* OP_vpminsq */ &evex_W_extensions[112][1],
/* OP_vpminuq */ &evex_W_extensions[114][1],
/* OP_vpmovb2m */ &evex_W_extensions[139][0],
/* OP_vpmovd2m */ &evex_W_extensions[140][0],
/* OP_vpmovdb */ &prefix_extensions[169][9],
/* OP_vpmovdw */ &prefix_extensions[172][9],
/* OP_vpmovm2b */ &evex_W_extensions[137][0],
/* OP_vpmovm2d */ &evex_W_extensions[138][0],
/* OP_vpmovm2q */ &evex_W_extensions[138][1],
/* OP_vpmovm2w */ &evex_W_extensions[137][1],
/* OP_vpmovq2m */ &evex_W_extensions[140][1],
/* OP_vpmovqb */ &prefix_extensions[160][9],
/* OP_vpmovqd */ &prefix_extensions[166][9],
/* OP_vpmovqw */ &prefix_extensions[163][9],
/* OP_vpmovsdb */ &prefix_extensions[170][9],
/* OP_vpmovsdw */ &prefix_extensions[173][9],
/* OP_vpmovsqb */ &prefix_extensions[161][9],
/* OP_vpmovsqd */ &prefix_extensions[167][9],
/* OP_vpmovsqw */ &prefix_extensions[164][9],
/* OP_vpmovswb */ &prefix_extensions[176][9],
/* OP_vpmovusdb */ &prefix_extensions[171][9],
/* OP_vpmovusdw */ &prefix_extensions[174][9],
/* OP_vpmovusqb */ &prefix_extensions[162][9],
/* OP_vpmovusqd */ &prefix_extensions[168][9],
/* OP_vpmovusqw */ &prefix_extensions[165][9],
/* OP_vpmovuswb */ &prefix_extensions[177][9],
/* OP_vpmovw2m */ &evex_W_extensions[139][1],
/* OP_vpmovwb */ &prefix_extensions[175][9],
/* OP_vpmullq */ &evex_W_extensions[45][1],
/* OP_vpord */ &evex_W_extensions[43][0],
/* OP_vporq */ &evex_W_extensions[43][1],
/* OP_vprold */ &evex_W_extensions[117][0],
/* OP_vprolq */ &evex_W_extensions[117][1],
/* OP_vprolvd */ &evex_W_extensions[116][0],
/* OP_vprolvq */ &evex_W_extensions[116][1],
/* OP_vprord */ &evex_W_extensions[119][0],
/* OP_vprorq */ &evex_W_extensions[119][1],
/* OP_vprorvd */ &evex_W_extensions[118][0],
/* OP_vprorvq */ &evex_W_extensions[118][1],
/* OP_vpsllvw */ &evex_W_extensions[129][1],
/* OP_vpsraq */ &evex_W_extensions[120][1],
/* OP_vpsravq */ &evex_W_extensions[127][1],
/* OP_vpsravw */ &evex_W_extensions[126][1],
/* OP_vpsrlvw */ &prefix_extensions[177][10],
/* OP_vpternlogd */ &evex_W_extensions[187][0],
/* OP_vpternlogq */ &evex_W_extensions[187][1],
/* OP_vptestmb */ &evex_W_extensions[168][0],
/* OP_vptestmd */ &evex_W_extensions[169][0],
/* OP_vptestmq */ &evex_W_extensions[169][1],
/* OP_vptestmw */ &evex_W_extensions[168][1],
/* OP_vptestnmb */ &evex_W_extensions[170][0],
/* OP_vptestnmd */ &evex_W_extensions[171][0],
/* OP_vptestnmq */ &evex_W_extensions[171][1],
/* OP_vptestnmw */ &evex_W_extensions[170][1],
/* OP_vpxord */ &evex_W_extensions[44][0],
/* OP_vpxorq */ &evex_W_extensions[44][1],
/* OP_vrangepd */ &evex_W_extensions[172][1],
/* OP_vrangeps */ &evex_W_extensions[172][0],
/* OP_vrangesd */ &evex_W_extensions[173][1],
/* OP_vrangess */ &evex_W_extensions[173][0],
/* OP_vrcp14pd */ &evex_W_extensions[131][1],
/* OP_vrcp14ps */ &evex_W_extensions[131][0],
/* OP_vrcp14sd */ &evex_W_extensions[132][1],
/* OP_vrcp14ss */ &evex_W_extensions[132][0],
/* OP_vrcp28pd */ &evex_W_extensions[133][1],
/* OP_vrcp28ps */ &evex_W_extensions[133][0],
/* OP_vrcp28sd */ &evex_W_extensions[134][1],
/* OP_vrcp28ss */ &evex_W_extensions[134][0],
/* OP_vreducepd */ &evex_W_extensions[174][1],
/* OP_vreduceps */ &evex_W_extensions[174][0],
/* OP_vreducesd */ &evex_W_extensions[175][1],
/* OP_vreducess */ &evex_W_extensions[175][0],
/* OP_vrndscalepd */ &e_vex_extensions[41][2],
/* OP_vrndscaleps */ &e_vex_extensions[40][2],
/* OP_vrndscalesd */ &e_vex_extensions[43][2],
/* OP_vrndscaless */ &e_vex_extensions[42][2],
/* OP_vrsqrt14pd */ &evex_W_extensions[176][1],
/* OP_vrsqrt14ps */ &evex_W_extensions[176][0],
/* OP_vrsqrt14sd */ &evex_W_extensions[177][1],
/* OP_vrsqrt14ss */ &evex_W_extensions[177][0],
/* OP_vrsqrt28pd */ &evex_W_extensions[178][1],
/* OP_vrsqrt28ps */ &evex_W_extensions[178][0],
/* OP_vrsqrt28sd */ &evex_W_extensions[179][1],
/* OP_vrsqrt28ss */ &evex_W_extensions[179][0],
/* OP_vscalefpd */ &evex_W_extensions[180][1],
/* OP_vscalefps */ &evex_W_extensions[180][0],
/* OP_vscalefsd */ &evex_W_extensions[181][1],
/* OP_vscalefss */ &evex_W_extensions[181][0],
/* OP_vshuff32x4 */ &evex_W_extensions[141][0],
/* OP_vshuff64x2 */ &evex_W_extensions[141][1],
/* OP_vshufi32x4 */ &evex_W_extensions[142][0],
/* OP_vshufi64x2 */ &evex_W_extensions[142][1],
};
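/* Illustrative sketch only (assumption: the array closed above is indexed directly
 * by the OP_ opcode enum; the array name used here is hypothetical).  A lookup of
 * the canonical entry for an opcode then reduces to a plain array index:
 *
 *     const instr_info_t *info = op_instr_table[opcode];
 *     if (info != NULL) {
 *         ... consult the selected second_byte/prefix_extensions/etc. cell ...
 *     }
 *
 * Each pointer above simply picks one cell of those sub-tables as the
 * representative decode-table entry for its opcode.
 */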
/****************************************************************************
* Macros to make tables legible
*/
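/* Illustrative sketch of how to read these macros (the exact table-entry layout is
 * defined elsewhere in this file): each macro expands to an operand TYPE_ constant
 * followed by its OPSZ_ size constant, so an operand column written as, e.g.,
 *
 *     Eb, Gb
 *
 * expands to
 *
 *     TYPE_E, OPSZ_1, TYPE_G, OPSZ_1
 *
 * i.e. a byte-sized modrm r/m operand paired with a byte-sized modrm reg operand.
 */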
/* Jb is defined in dynamo.h, undefine it for this file */
#undef Jb
#define xx TYPE_NONE, OPSZ_NA
/* from Intel tables, using our corresponding OPSZ constants */
#define Ap TYPE_A, OPSZ_6_irex10_short4 /* NOTE - not legal for 64-bit instructions */
#define By TYPE_B, OPSZ_4_rex8
#define Cr TYPE_C, OPSZ_4x8
#define Dr TYPE_D, OPSZ_4x8
#define Eb TYPE_E, OPSZ_1
#define Ew TYPE_E, OPSZ_2
#define Ev TYPE_E, OPSZ_4_rex8_short2
#define Esv TYPE_E, OPSZ_4x8_short2 /* "stack v", or "d64" in Intel tables */
#define Ed TYPE_E, OPSZ_4
#define Ep TYPE_E, OPSZ_6_irex10_short4
#define Ed_q TYPE_E, OPSZ_4_rex8
#define Ey TYPE_E, OPSZ_4_rex8
#define Rd_Mb TYPE_E, OPSZ_1_reg4
#define Rd_Mw TYPE_E, OPSZ_2_reg4
#define Gb TYPE_G, OPSZ_1
#define Gw TYPE_G, OPSZ_2
#define Gv TYPE_G, OPSZ_4_rex8_short2
#define Gz TYPE_G, OPSZ_4_short2
#define Gd TYPE_G, OPSZ_4
#define Gd_q TYPE_G, OPSZ_4_rex8
#define Gr TYPE_G, OPSZ_4x8
#define Gy TYPE_G, OPSZ_4_rex8
#define Ib TYPE_I, OPSZ_1
#define Iw TYPE_I, OPSZ_2
#define Id TYPE_I, OPSZ_4
#define Iv TYPE_I, OPSZ_4_rex8_short2
#define Iz TYPE_I, OPSZ_4_short2
#define Jb TYPE_J, OPSZ_1
#define Jz TYPE_J, OPSZ_4_short2xi4
#define Ma TYPE_M, OPSZ_8_short4
#define Mp TYPE_M, OPSZ_6_irex10_short4
#define Ms TYPE_M, OPSZ_6x10
#define Ob TYPE_O, OPSZ_1
#define Ov TYPE_O, OPSZ_4_rex8_short2
#define Pd TYPE_P, OPSZ_4
#define Pq TYPE_P, OPSZ_8
#define Pw_q TYPE_P, OPSZ_2_of_8
#define Pd_q TYPE_P, OPSZ_4_of_8
#define Ppi TYPE_P, OPSZ_8
#define Nw_q TYPE_P_MODRM, OPSZ_2_of_8
#define Nq TYPE_P_MODRM, OPSZ_8
#define Qd TYPE_Q, OPSZ_4
#define Qq TYPE_Q, OPSZ_8
#define Qpi TYPE_Q, OPSZ_8
#define Rr TYPE_R, OPSZ_4x8
#define Rv TYPE_R, OPSZ_4_rex8_short2
#define Ry TYPE_R, OPSZ_4_rex8
#define Sw TYPE_S, OPSZ_2
#define Vq TYPE_V, OPSZ_8
#define Vdq TYPE_V, OPSZ_16
#define Vb_dq TYPE_V, OPSZ_1_of_16
#define Vw_dq TYPE_V, OPSZ_2_of_16
#define Vd_dq TYPE_V, OPSZ_4_of_16
#define Vd_q_dq TYPE_V, OPSZ_4_rex8_of_16
#define Vq_dq TYPE_V, OPSZ_8_of_16
#define Vps TYPE_V, OPSZ_16
#define Vpd TYPE_V, OPSZ_16
#define Vss TYPE_V, OPSZ_4_of_16
#define Vsd TYPE_V, OPSZ_8_of_16
#define Ups TYPE_V_MODRM, OPSZ_16
#define Upd TYPE_V_MODRM, OPSZ_16
#define Udq TYPE_V_MODRM, OPSZ_16
#define Uw_dq TYPE_V_MODRM, OPSZ_2_of_16
#define Uq_dq TYPE_V_MODRM, OPSZ_8_of_16
#define Wq TYPE_W, OPSZ_8
#define Wdq TYPE_W, OPSZ_16
#define Wb_dq TYPE_W, OPSZ_1_of_16
#define Ww_dq TYPE_W, OPSZ_2_of_16
#define Wd_dq TYPE_W, OPSZ_4_of_16
#define Wq_dq TYPE_W, OPSZ_8_of_16
#define Wps TYPE_W, OPSZ_16
#define Wpd TYPE_W, OPSZ_16
#define Wss TYPE_W, OPSZ_4_of_16
#define Wsd TYPE_W, OPSZ_8_of_16
#define Udq_Md TYPE_W, OPSZ_4_reg16
#define Xb TYPE_X, OPSZ_1
#define Xv TYPE_X, OPSZ_4_rex8_short2
#define Xz TYPE_X, OPSZ_4_short2
#define Yb TYPE_Y, OPSZ_1
#define Yv TYPE_Y, OPSZ_4_rex8_short2
#define Yz TYPE_Y, OPSZ_4_short2
/* AVX additions */
#define Vvs TYPE_V, OPSZ_16_vex32
#define Vvd TYPE_V, OPSZ_16_vex32
#define Vx TYPE_V, OPSZ_16_vex32
#define Vqq TYPE_V, OPSZ_32
#define Vdq_qq TYPE_V, OPSZ_16_of_32
#define Wvs TYPE_W, OPSZ_16_vex32
#define Wvd TYPE_W, OPSZ_16_vex32
#define Wx TYPE_W, OPSZ_16_vex32
#define Uvs TYPE_V_MODRM, OPSZ_16_vex32
#define Uvd TYPE_V_MODRM, OPSZ_16_vex32
#define Uss TYPE_V_MODRM, OPSZ_4_of_16
#define Usd TYPE_V_MODRM, OPSZ_8_of_16
#define Ux TYPE_V_MODRM, OPSZ_16_vex32
#define Udq TYPE_V_MODRM, OPSZ_16
#define Hvs TYPE_H, OPSZ_16_vex32
#define Hvd TYPE_H, OPSZ_16_vex32
#define Hss TYPE_H, OPSZ_4_of_16
#define Hsd TYPE_H, OPSZ_8_of_16
#define Hq_dq TYPE_H, OPSZ_8_of_16
#define Hdq TYPE_H, OPSZ_16
#define H12_dq TYPE_H, OPSZ_12_of_16
#define H12_8_dq TYPE_H, OPSZ_12_rex8_of_16
#define H14_dq TYPE_H, OPSZ_14_of_16
#define H15_dq TYPE_H, OPSZ_15_of_16
#define Hqq TYPE_H, OPSZ_32
#define Hx TYPE_H, OPSZ_16_vex32
#define Hh_x TYPE_H, OPSZ_half_16_vex32
#define Wvq_dq TYPE_W, OPSZ_8_of_16_vex32
#define Wh_x TYPE_W, OPSZ_half_16_vex32
#define Wi_x TYPE_W, OPSZ_quarter_16_vex32
#define Wj_x TYPE_W, OPSZ_eighth_16_vex32
#define Wqq TYPE_W, OPSZ_32
#define Mvs TYPE_M, OPSZ_16_vex32
#define Mvd TYPE_M, OPSZ_16_vex32
#define Mx TYPE_M, OPSZ_16_vex32
#define Ldq TYPE_L, OPSZ_16 /* immed is 1 byte but reg is xmm */
#define Lx TYPE_L, OPSZ_16_vex32 /* immed is 1 byte but reg is xmm/ymm */
#define Lvs TYPE_L, OPSZ_16_vex32 /* immed is 1 byte but reg is xmm/ymm */
#define Lvd TYPE_L, OPSZ_16_vex32 /* immed is 1 byte but reg is xmm/ymm */
#define Lss TYPE_L, OPSZ_4_of_16 /* immed is 1 byte but reg is xmm/ymm */
#define Lsd TYPE_L, OPSZ_8_of_16 /* immed is 1 byte but reg is xmm/ymm */
/* AVX-512 additions */
#define KP1b TYPE_K_REG, OPSZ_1b
#define KPb TYPE_K_REG, OPSZ_1
#define KPw TYPE_K_REG, OPSZ_2
#define KPd TYPE_K_REG, OPSZ_4
#define KPq TYPE_K_REG, OPSZ_8
#define KRb TYPE_K_MODRM_R, OPSZ_1
#define KRw TYPE_K_MODRM_R, OPSZ_2
#define KRd TYPE_K_MODRM_R, OPSZ_4
#define KRq TYPE_K_MODRM_R, OPSZ_8
#define KQb TYPE_K_MODRM, OPSZ_1
#define KQw TYPE_K_MODRM, OPSZ_2
#define KQd TYPE_K_MODRM, OPSZ_4
#define KQq TYPE_K_MODRM, OPSZ_8
#define KVb TYPE_K_VEX, OPSZ_1
#define KVw TYPE_K_VEX, OPSZ_2
#define KVd TYPE_K_VEX, OPSZ_4
#define KVq TYPE_K_VEX, OPSZ_8
#define KE1b TYPE_K_EVEX, OPSZ_1b
#define KE2b TYPE_K_EVEX, OPSZ_2b
#define KE4b TYPE_K_EVEX, OPSZ_4b
#define KEb TYPE_K_EVEX, OPSZ_1
#define KEw TYPE_K_EVEX, OPSZ_2
#define KEd TYPE_K_EVEX, OPSZ_4
#define KEq TYPE_K_EVEX, OPSZ_8
#define Eq TYPE_E, OPSZ_8
#define Ves TYPE_V, OPSZ_16_vex32_evex64
#define Ved TYPE_V, OPSZ_16_vex32_evex64
#define Vf TYPE_V, OPSZ_vex32_evex64
#define Vfs TYPE_V, OPSZ_vex32_evex64
#define Vfd TYPE_V, OPSZ_vex32_evex64
#define Vdq_f TYPE_V, OPSZ_16_of_32_evex64
#define Vqq_oq TYPE_V, OPSZ_32_of_64
#define Voq TYPE_V, OPSZ_64
#define Wes TYPE_W, OPSZ_16_vex32_evex64
#define Wed TYPE_W, OPSZ_16_vex32_evex64
#define We TYPE_W, OPSZ_16_vex32_evex64
#define Wf TYPE_W, OPSZ_vex32_evex64
#define Wfs TYPE_W, OPSZ_vex32_evex64
#define Wfd TYPE_W, OPSZ_vex32_evex64
#define Wd_f TYPE_W, OPSZ_4_of_32_evex64
#define Wq_f TYPE_W, OPSZ_8_of_32_evex64
#define Ve TYPE_V, OPSZ_16_vex32_evex64
#define We TYPE_W, OPSZ_16_vex32_evex64
#define Wh_e TYPE_W, OPSZ_half_16_vex32_evex64
#define Wi_e TYPE_W, OPSZ_quarter_16_vex32_evex64
#define Wj_e TYPE_W, OPSZ_eighth_16_vex32_evex64
#define Woq TYPE_W, OPSZ_64
#define Hes TYPE_H, OPSZ_16_vex32_evex64
#define Hed TYPE_H, OPSZ_16_vex32_evex64
#define He TYPE_H, OPSZ_16_vex32_evex64
#define Hh_e TYPE_H, OPSZ_half_16_vex32_evex64
#define Hf TYPE_H, OPSZ_vex32_evex64
#define Hfs TYPE_H, OPSZ_vex32_evex64
#define Hfd TYPE_H, OPSZ_vex32_evex64
#define Hdq_f TYPE_H, OPSZ_16_of_32_evex64
#define Mes TYPE_M, OPSZ_16_vex32_evex64
#define Med TYPE_M, OPSZ_16_vex32_evex64
#define Me TYPE_M, OPSZ_16_vex32_evex64
#define Ue TYPE_V_MODRM, OPSZ_16_vex32_evex64
/* my own codes
* size m = 32 or 16 bit depending on addr size attribute
 * B=ds:eDI, Z=xlat's mem, F=float in mem, i_==indirect
*/
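/* Example of the i_ (indirect) convention, as a sketch of intent rather than a
 * definitive reading: i_xSP names the implicit stack memory operand addressed
 * through xSP, which is how push/pop-style entries can refer to the memory they
 * touch without an explicit modrm memory operand.
 */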
#define Mb TYPE_M, OPSZ_1
#define Md TYPE_M, OPSZ_4
#define Md_q TYPE_M, OPSZ_4_rex8
#define Mw TYPE_M, OPSZ_2
#define Mm TYPE_M, OPSZ_lea
#define Moq TYPE_M, OPSZ_512
#define Mxsave TYPE_M, OPSZ_xsave
#define Mps TYPE_M, OPSZ_16
#define Mpd TYPE_M, OPSZ_16
#define Mss TYPE_M, OPSZ_4
#define Msd TYPE_M, OPSZ_8
#define Mq TYPE_M, OPSZ_8
#define Mdq TYPE_M, OPSZ_16
#define Mqq TYPE_M, OPSZ_32
#define Mq_dq TYPE_M, OPSZ_8_rex16
#define Mv TYPE_M, OPSZ_4_rex8_short2
#define MVd TYPE_VSIB, OPSZ_4
#define MVq TYPE_VSIB, OPSZ_8
#define Zb TYPE_XLAT, OPSZ_1
#define Bq TYPE_MASKMOVQ, OPSZ_8
#define Bdq TYPE_MASKMOVQ, OPSZ_16
#define Fw TYPE_FLOATMEM, OPSZ_2
#define Fd TYPE_FLOATMEM, OPSZ_4
#define Fq TYPE_FLOATMEM, OPSZ_8
#define Fx TYPE_FLOATMEM, OPSZ_10
#define Fy TYPE_FLOATMEM, OPSZ_28_short14 /* _14_ if data16 */
#define Fz TYPE_FLOATMEM, OPSZ_108_short94 /* _94_ if data16 */
#define i_dx TYPE_INDIR_REG, REG_DX
#define i_Ev TYPE_INDIR_E, OPSZ_4_rex8_short2
#define i_Exi TYPE_INDIR_E, OPSZ_4x8_short2xi8
#define i_Ep TYPE_INDIR_E, OPSZ_6_irex10_short4
#define i_xSP TYPE_INDIR_VAR_XREG, REG_ESP
#define i_iSP TYPE_INDIR_VAR_XIREG, REG_ESP
#define i_xBP TYPE_INDIR_VAR_XREG, REG_EBP
/* negative offset from (%xsp) for pushes */
#define i_iSPo1 TYPE_INDIR_VAR_XIREG_OFFS_1, REG_ESP
#define i_vSPo2 TYPE_INDIR_VAR_REG_OFFS_2, REG_ESP
#define i_xSPo1 TYPE_INDIR_VAR_XREG_OFFS_1, REG_ESP
#define i_xSPo8 TYPE_INDIR_VAR_XREG_OFFS_8, REG_ESP
#define i_xSPs8 TYPE_INDIR_VAR_XREG_SIZEx8, REG_ESP
#define i_vSPs2 TYPE_INDIR_VAR_REG_SIZEx2, REG_ESP
#define i_vSPs3 TYPE_INDIR_VAR_REG_SIZEx3x5, REG_ESP
/* pop but unusual size */
#define i_xSPoN TYPE_INDIR_VAR_XREG_OFFS_N, REG_ESP
#define c1 TYPE_1, OPSZ_0
/* we pick the right constant based on the opcode */
#define cF TYPE_FLOATCONST, OPSZ_0
/* registers that are base 32 but vary down or up */
#define eAX TYPE_VAR_REG, REG_EAX
#define eCX TYPE_VAR_REG, REG_ECX
#define eDX TYPE_VAR_REG, REG_EDX
#define eBX TYPE_VAR_REG, REG_EBX
#define eSP TYPE_VAR_REG, REG_ESP
#define eBP TYPE_VAR_REG, REG_EBP
#define eSI TYPE_VAR_REG, REG_ESI
#define eDI TYPE_VAR_REG, REG_EDI
/* registers that are base 32 and can vary down but not up */
#define zAX TYPE_VARZ_REG, REG_EAX
#define zCX TYPE_VARZ_REG, REG_ECX
#define zDX TYPE_VARZ_REG, REG_EDX
#define zBX TYPE_VARZ_REG, REG_EBX
#define zSP TYPE_VARZ_REG, REG_ESP
#define zBP TYPE_VARZ_REG, REG_EBP
#define zSI TYPE_VARZ_REG, REG_ESI
#define zDI TYPE_VARZ_REG, REG_EDI
/* registers whose base matches the mode, and can vary down but not up.
* we use the 32-bit versions but expand in resolve_var_reg()
*/
#define xAX TYPE_VAR_XREG, REG_EAX
#define xCX TYPE_VAR_XREG, REG_ECX
#define xDX TYPE_VAR_XREG, REG_EDX
#define xBX TYPE_VAR_XREG, REG_EBX
#define xSP TYPE_VAR_XREG, REG_ESP
#define xBP TYPE_VAR_XREG, REG_EBP
#define xSI TYPE_VAR_XREG, REG_ESI
#define xDI TYPE_VAR_XREG, REG_EDI
/* jecxz and loop* vary by addr16 */
#define axCX TYPE_VAR_ADDR_XREG, REG_ECX
/* string ops also use addr16 */
#define axSI TYPE_VAR_ADDR_XREG, REG_ESI
#define axDI TYPE_VAR_ADDR_XREG, REG_EDI
#define axAX TYPE_VAR_ADDR_XREG, REG_EAX
/* 8-bit implicit registers (not from modrm) that can be extended via rex.r */
#define al_x TYPE_REG_EX, REG_AL
#define cl_x TYPE_REG_EX, REG_CL
#define dl_x TYPE_REG_EX, REG_DL
#define bl_x TYPE_REG_EX, REG_BL
#define ah_x TYPE_REG_EX, REG_AH
#define ch_x TYPE_REG_EX, REG_CH
#define dh_x TYPE_REG_EX, REG_DH
#define bh_x TYPE_REG_EX, REG_BH
/* 4_rex8_short2 implicit registers (not from modrm) that can be extended via rex.r */
#define eAX_x TYPE_VAR_REG_EX, REG_EAX
#define eCX_x TYPE_VAR_REG_EX, REG_ECX
#define eDX_x TYPE_VAR_REG_EX, REG_EDX
#define eBX_x TYPE_VAR_REG_EX, REG_EBX
#define eSP_x TYPE_VAR_REG_EX, REG_ESP
#define eBP_x TYPE_VAR_REG_EX, REG_EBP
#define eSI_x TYPE_VAR_REG_EX, REG_ESI
#define eDI_x TYPE_VAR_REG_EX, REG_EDI
/* 4x8_short2 implicit registers (not from modrm) that can be extended via rex.r */
#define xAX_x TYPE_VAR_XREG_EX, REG_EAX
#define xCX_x TYPE_VAR_XREG_EX, REG_ECX
#define xDX_x TYPE_VAR_XREG_EX, REG_EDX
#define xBX_x TYPE_VAR_XREG_EX, REG_EBX
#define xSP_x TYPE_VAR_XREG_EX, REG_ESP
#define xBP_x TYPE_VAR_XREG_EX, REG_EBP
#define xSI_x TYPE_VAR_XREG_EX, REG_ESI
#define xDI_x TYPE_VAR_XREG_EX, REG_EDI
/* 4_rex8 implicit registers (not from modrm) that can be extended via rex.r */
#define uAX_x TYPE_VAR_REGX_EX, REG_EAX
#define uCX_x TYPE_VAR_REGX_EX, REG_ECX
#define uDX_x TYPE_VAR_REGX_EX, REG_EDX
#define uBX_x TYPE_VAR_REGX_EX, REG_EBX
#define uSP_x TYPE_VAR_REGX_EX, REG_ESP
#define uBP_x TYPE_VAR_REGX_EX, REG_EBP
#define uSI_x TYPE_VAR_REGX_EX, REG_ESI
#define uDI_x TYPE_VAR_REGX_EX, REG_EDI
/* 4_rex8 implicit registers (not from modrm) */
#define uDX TYPE_VAR_REGX, REG_EDX
#define ax TYPE_REG, REG_AX
#define cx TYPE_REG, REG_CX
#define dx TYPE_REG, REG_DX
#define bx TYPE_REG, REG_BX
#define sp TYPE_REG, REG_SP
#define bp TYPE_REG, REG_BP
#define si TYPE_REG, REG_SI
#define di TYPE_REG, REG_DI
#define al TYPE_REG, REG_AL
#define cl TYPE_REG, REG_CL
#define dl TYPE_REG, REG_DL
#define bl TYPE_REG, REG_BL
#define ah TYPE_REG, REG_AH
#define ch TYPE_REG, REG_CH
#define dh TYPE_REG, REG_DH
#define bh TYPE_REG, REG_BH
#define eax TYPE_REG, REG_EAX
#define ecx TYPE_REG, REG_ECX
#define edx TYPE_REG, REG_EDX
#define ebx TYPE_REG, REG_EBX
#define esp TYPE_REG, REG_ESP
#define ebp TYPE_REG, REG_EBP
#define esi TYPE_REG, REG_ESI
#define edi TYPE_REG, REG_EDI
#define xsp TYPE_XREG, REG_ESP
#define xbp TYPE_XREG, REG_EBP
#define xcx TYPE_XREG, REG_ECX
#define cs TYPE_REG, SEG_CS
#define ss TYPE_REG, SEG_SS
#define ds TYPE_REG, SEG_DS
#define es TYPE_REG, SEG_ES
#define fs TYPE_REG, SEG_FS
#define gs TYPE_REG, SEG_GS
#define st0 TYPE_REG, REG_ST0
#define st1 TYPE_REG, REG_ST1
#define st2 TYPE_REG, REG_ST2
#define st3 TYPE_REG, REG_ST3
#define st4 TYPE_REG, REG_ST4
#define st5 TYPE_REG, REG_ST5
#define st6 TYPE_REG, REG_ST6
#define st7 TYPE_REG, REG_ST7
#define xmm0 TYPE_REG, REG_XMM0
/* flags */
#define no 0
#define mrm HAS_MODRM
#define xop (HAS_EXTRA_OPERANDS|EXTRAS_IN_CODE_FIELD)
#define mrm_xop (HAS_MODRM|HAS_EXTRA_OPERANDS|EXTRAS_IN_CODE_FIELD)
#define xop_next (HAS_EXTRA_OPERANDS)
#define i64 X64_INVALID
#define o64 X86_INVALID
#define reqp REQUIRES_PREFIX
#define vex REQUIRES_VEX
#define rex REQUIRES_REX
#define reqL0 REQUIRES_VEX_L_0
#define reqL1 REQUIRES_VEX_L_1
#define predcc HAS_PRED_CC
#define predcx HAS_PRED_COMPLEX
#define evex REQUIRES_EVEX
#define reqLL0 REQUIRES_EVEX_LL_0
#define reqLL1 REQUIRES_EVEX_LL_1
/* eflags */
#define x 0
#define fRC EFLAGS_READ_CF
#define fRP EFLAGS_READ_PF
#define fRA EFLAGS_READ_AF
#define fRZ EFLAGS_READ_ZF
#define fRS EFLAGS_READ_SF
#define fRT EFLAGS_READ_TF
#define fRI EFLAGS_READ_IF
#define fRD EFLAGS_READ_DF
#define fRO EFLAGS_READ_OF
#define fRN EFLAGS_READ_NT
#define fRR EFLAGS_READ_RF
#define fRX EFLAGS_READ_ALL
#define fR6 EFLAGS_READ_6
#define fWC EFLAGS_WRITE_CF
#define fWP EFLAGS_WRITE_PF
#define fWA EFLAGS_WRITE_AF
#define fWZ EFLAGS_WRITE_ZF
#define fWS EFLAGS_WRITE_SF
#define fWT EFLAGS_WRITE_TF
#define fWI EFLAGS_WRITE_IF
#define fWD EFLAGS_WRITE_DF
#define fWO EFLAGS_WRITE_OF
#define fWN EFLAGS_WRITE_NT
#define fWR EFLAGS_WRITE_RF
#define fWX EFLAGS_WRITE_ALL
#define fW6 EFLAGS_WRITE_6
/* flags affected by OP_int*
* FIXME: should we add AC and VM flags?
*/
#define fINT (fRX|fWT|fWN|fWI|fWR)
/* for constructing linked lists of table entries */
#define NA 0
#define END_LIST 0
#define tfb (ptr_int_t)&first_byte
#define tsb (ptr_int_t)&second_byte
#define tex (ptr_int_t)&base_extensions
#define t38 (ptr_int_t)&third_byte_38
#define t3a (ptr_int_t)&third_byte_3a
#define tpe (ptr_int_t)&prefix_extensions
#define tvex (ptr_int_t)&e_vex_extensions
#define modx (ptr_int_t)&mod_extensions
#define tre (ptr_int_t)&rep_extensions
#define tne (ptr_int_t)&repne_extensions
#define tfl (ptr_int_t)&float_low_modrm
#define tfh (ptr_int_t)&float_high_modrm
#define exop (ptr_int_t)&extra_operands
#define t64e (ptr_int_t)&x64_extensions
#define trexb (ptr_int_t)&rex_b_extensions
#define trexw (ptr_int_t)&rex_w_extensions
#define tvexw (ptr_int_t)&vex_W_extensions
#define txop (ptr_int_t)&xop_extensions
#define tevexw (ptr_int_t)&evex_W_extensions
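/* Each t* shorthand above is a pointer to one of the decode tables; it is
 * placed in an entry's final "code" field to chain together alternative
 * templates for the same opcode, with END_LIST terminating the chain.
 * For example, in first_byte below the 0x01 "add" template chains to
 * tfb[0x00] (the 0x00 "add" template), which in turn chains into the
 * group-1 immediate forms via tex[1][0].
 */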
/****************************************************************************
* One-byte opcodes
* This is from Tables A-2 & A-3
*/
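/* One template per first opcode byte, indexed 0x00-0xff.  Templates of type
 * EXTENSION, PREFIX_EXT, X64_EXT, etc. dispatch into the later tables, with
 * the final field selecting the row; the 0x0f ESCAPE entry leads to the
 * two-byte table.
 */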
const instr_info_t first_byte[] = {
/* {op/type, op encoding, name, dst1, dst2, src1, src2, src3, modrm?, eflags, code} */
/* 00 */
{OP_add, 0x000000, "add", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][0]},
{OP_add, 0x010000, "add", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x00]},
{OP_add, 0x020000, "add", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x01]},
{OP_add, 0x030000, "add", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x02]},
{OP_add, 0x040000, "add", al, xx, Ib, al, xx, no, fW6, tfb[0x03]},
{OP_add, 0x050000, "add", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x04]},
{OP_push, 0x060000, "push", xsp, i_xSPo1, es, xsp, xx, i64, x, tfb[0x0e]},
{OP_pop, 0x070000, "pop", es, xsp, xsp, i_xSP, xx, i64, x, tsb[0xa1]},
/* 08 */
{OP_or, 0x080000, "or", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][1]},
{OP_or, 0x090000, "or", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x08]},
{OP_or, 0x0a0000, "or", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x09]},
{OP_or, 0x0b0000, "or", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x0a]},
{OP_or, 0x0c0000, "or", al, xx, Ib, al, xx, no, fW6, tfb[0x0b]},
{OP_or, 0x0d0000, "or", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x0c]},
{OP_push,0x0e0000, "push", xsp, i_xSPo1, cs, xsp, xx, i64, x, tfb[0x16]},
{ESCAPE, 0x0f0000, "(escape)", xx, xx, xx, xx, xx, no, x, NA},
/* 10 */
{OP_adc, 0x100000, "adc", Eb, xx, Gb, Eb, xx, mrm, (fW6|fRC), tex[1][2]},
{OP_adc, 0x110000, "adc", Ev, xx, Gv, Ev, xx, mrm, (fW6|fRC), tfb[0x10]},
{OP_adc, 0x120000, "adc", Gb, xx, Eb, Gb, xx, mrm, (fW6|fRC), tfb[0x11]},
{OP_adc, 0x130000, "adc", Gv, xx, Ev, Gv, xx, mrm, (fW6|fRC), tfb[0x12]},
{OP_adc, 0x140000, "adc", al, xx, Ib, al, xx, no, (fW6|fRC), tfb[0x13]},
{OP_adc, 0x150000, "adc", eAX, xx, Iz, eAX, xx, no, (fW6|fRC), tfb[0x14]},
{OP_push, 0x160000, "push", xsp, i_xSPo1, ss, xsp, xx, i64, x, tfb[0x1e]},
{OP_pop, 0x170000, "pop", ss, xsp, xsp, i_xSP, xx, i64, x, tfb[0x1f]},
/* 18 */
{OP_sbb, 0x180000, "sbb", Eb, xx, Gb, Eb, xx, mrm, (fW6|fRC), tex[1][3]},
{OP_sbb, 0x190000, "sbb", Ev, xx, Gv, Ev, xx, mrm, (fW6|fRC), tfb[0x18]},
{OP_sbb, 0x1a0000, "sbb", Gb, xx, Eb, Gb, xx, mrm, (fW6|fRC), tfb[0x19]},
{OP_sbb, 0x1b0000, "sbb", Gv, xx, Ev, Gv, xx, mrm, (fW6|fRC), tfb[0x1a]},
{OP_sbb, 0x1c0000, "sbb", al, xx, Ib, al, xx, no, (fW6|fRC), tfb[0x1b]},
{OP_sbb, 0x1d0000, "sbb", eAX, xx, Iz, eAX, xx, no, (fW6|fRC), tfb[0x1c]},
{OP_push, 0x1e0000, "push", xsp, i_xSPo1, ds, xsp, xx, i64, x, tsb[0xa0]},
{OP_pop, 0x1f0000, "pop", ds, xsp, xsp, i_xSP, xx, i64, x, tfb[0x07]},
/* 20 */
{OP_and, 0x200000, "and", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][4]},
{OP_and, 0x210000, "and", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x20]},
{OP_and, 0x220000, "and", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x21]},
{OP_and, 0x230000, "and", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x22]},
{OP_and, 0x240000, "and", al, xx, Ib, al, xx, no, fW6, tfb[0x23]},
{OP_and, 0x250000, "and", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x24]},
{PREFIX, 0x260000, "es", xx, xx, xx, xx, xx, no, x, SEG_ES},
{OP_daa, 0x270000, "daa", al, xx, al, xx, xx, i64, (fW6|fRC|fRA), END_LIST},
/* 28 */
{OP_sub, 0x280000, "sub", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][5]},
{OP_sub, 0x290000, "sub", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x28]},
{OP_sub, 0x2a0000, "sub", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x29]},
{OP_sub, 0x2b0000, "sub", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x2a]},
{OP_sub, 0x2c0000, "sub", al, xx, Ib, al, xx, no, fW6, tfb[0x2b]},
{OP_sub, 0x2d0000, "sub", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x2c]},
{PREFIX, 0x2e0000, "cs", xx, xx, xx, xx, xx, no, x, SEG_CS},
{OP_das, 0x2f0000, "das", al, xx, al, xx, xx, i64, (fW6|fRC|fRA), END_LIST},
/* 30 */
{OP_xor, 0x300000, "xor", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][6]},
{OP_xor, 0x310000, "xor", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x30]},
{OP_xor, 0x320000, "xor", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x31]},
{OP_xor, 0x330000, "xor", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x32]},
{OP_xor, 0x340000, "xor", al, xx, Ib, al, xx, no, fW6, tfb[0x33]},
{OP_xor, 0x350000, "xor", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x34]},
{PREFIX, 0x360000, "ss", xx, xx, xx, xx, xx, no, x, SEG_SS},
{OP_aaa, 0x370000, "aaa", ax, xx, ax, xx, xx, i64, (fW6|fRA), END_LIST},
/* 38 */
{OP_cmp, 0x380000, "cmp", xx, xx, Eb, Gb, xx, mrm, fW6, tex[1][7]},
{OP_cmp, 0x390000, "cmp", xx, xx, Ev, Gv, xx, mrm, fW6, tfb[0x38]},
{OP_cmp, 0x3a0000, "cmp", xx, xx, Gb, Eb, xx, mrm, fW6, tfb[0x39]},
{OP_cmp, 0x3b0000, "cmp", xx, xx, Gv, Ev, xx, mrm, fW6, tfb[0x3a]},
{OP_cmp, 0x3c0000, "cmp", xx, xx, al, Ib, xx, no, fW6, tfb[0x3b]},
{OP_cmp, 0x3d0000, "cmp", xx, xx, eAX, Iz, xx, no, fW6, tfb[0x3c]},
{PREFIX, 0x3e0000, "ds", xx, xx, xx, xx, xx, no, x, SEG_DS},
{OP_aas, 0x3f0000, "aas", ax, xx, ax, xx, xx, i64, (fW6|fRA), END_LIST},
/* 40 */
{X64_EXT, 0x400000, "(x64_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{X64_EXT, 0x410000, "(x64_ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{X64_EXT, 0x420000, "(x64_ext 2)", xx, xx, xx, xx, xx, no, x, 2},
{X64_EXT, 0x430000, "(x64_ext 3)", xx, xx, xx, xx, xx, no, x, 3},
{X64_EXT, 0x440000, "(x64_ext 4)", xx, xx, xx, xx, xx, no, x, 4},
{X64_EXT, 0x450000, "(x64_ext 5)", xx, xx, xx, xx, xx, no, x, 5},
{X64_EXT, 0x460000, "(x64_ext 6)", xx, xx, xx, xx, xx, no, x, 6},
{X64_EXT, 0x470000, "(x64_ext 7)", xx, xx, xx, xx, xx, no, x, 7},
/* 48 */
{X64_EXT, 0x480000, "(x64_ext 8)", xx, xx, xx, xx, xx, no, x, 8},
{X64_EXT, 0x490000, "(x64_ext 9)", xx, xx, xx, xx, xx, no, x, 9},
{X64_EXT, 0x4a0000, "(x64_ext 10)", xx, xx, xx, xx, xx, no, x, 10},
{X64_EXT, 0x4b0000, "(x64_ext 11)", xx, xx, xx, xx, xx, no, x, 11},
{X64_EXT, 0x4c0000, "(x64_ext 12)", xx, xx, xx, xx, xx, no, x, 12},
{X64_EXT, 0x4d0000, "(x64_ext 13)", xx, xx, xx, xx, xx, no, x, 13},
{X64_EXT, 0x4e0000, "(x64_ext 14)", xx, xx, xx, xx, xx, no, x, 14},
{X64_EXT, 0x4f0000, "(x64_ext 15)", xx, xx, xx, xx, xx, no, x, 15},
/* 50 */
{OP_push, 0x500000, "push", xsp, i_xSPo1, xAX_x, xsp, xx, no, x, tfb[0x51]},
{OP_push, 0x510000, "push", xsp, i_xSPo1, xCX_x, xsp, xx, no, x, tfb[0x52]},
{OP_push, 0x520000, "push", xsp, i_xSPo1, xDX_x, xsp, xx, no, x, tfb[0x53]},
{OP_push, 0x530000, "push", xsp, i_xSPo1, xBX_x, xsp, xx, no, x, tfb[0x54]},
{OP_push, 0x540000, "push", xsp, i_xSPo1, xSP_x, xsp, xx, no, x, tfb[0x55]},
{OP_push, 0x550000, "push", xsp, i_xSPo1, xBP_x, xsp, xx, no, x, tfb[0x56]},
{OP_push, 0x560000, "push", xsp, i_xSPo1, xSI_x, xsp, xx, no, x, tfb[0x57]},
{OP_push, 0x570000, "push", xsp, i_xSPo1, xDI_x, xsp, xx, no, x, tex[12][6]},
/* 58 */
{OP_pop, 0x580000, "pop", xAX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x59]},
{OP_pop, 0x590000, "pop", xCX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5a]},
{OP_pop, 0x5a0000, "pop", xDX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5b]},
{OP_pop, 0x5b0000, "pop", xBX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5c]},
{OP_pop, 0x5c0000, "pop", xSP_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5d]},
{OP_pop, 0x5d0000, "pop", xBP_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5e]},
{OP_pop, 0x5e0000, "pop", xSI_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5f]},
{OP_pop, 0x5f0000, "pop", xDI_x, xsp, xsp, i_xSP, xx, no, x, tex[26][0]},
/* 60 */
{OP_pusha, 0x600000, "pusha", xsp, i_xSPo8, xsp, eAX, eBX, xop|i64, x, exop[0x00]},
{OP_popa, 0x610000, "popa", xsp, eAX, xsp, i_xSPs8, xx, xop|i64, x, exop[0x02]},
{EVEX_PREFIX_EXT, 0x620000, "(evex_prefix_ext)", xx, xx, xx, xx, xx, no, x, END_LIST},
{X64_EXT, 0x630000, "(x64_ext 16)", xx, xx, xx, xx, xx, no, x, 16},
{PREFIX, 0x640000, "fs", xx, xx, xx, xx, xx, no, x, SEG_FS},
{PREFIX, 0x650000, "gs", xx, xx, xx, xx, xx, no, x, SEG_GS},
{PREFIX, 0x660000, "data size", xx, xx, xx, xx, xx, no, x, PREFIX_DATA},
{PREFIX, 0x670000, "addr size", xx, xx, xx, xx, xx, no, x, PREFIX_ADDR},
/* 68 */
{OP_push_imm, 0x680000, "push", xsp, i_xSPo1, Iz, xsp, xx, no, x, tfb[0x6a]},
{OP_imul, 0x690000, "imul", Gv, xx, Ev, Iz, xx, mrm, fW6, tfb[0x6b]},
{OP_push_imm, 0x6a0000, "push", xsp, i_xSPo1, Ib, xsp, xx, no, x, END_LIST},/* sign-extend to push 2/4/8 bytes */
{OP_imul, 0x6b0000, "imul", Gv, xx, Ev, Ib, xx, mrm, fW6, END_LIST},
{REP_EXT, 0x6c0000, "((rep) ins)", Yb, xx, i_dx, xx, xx, no, fRD, 0},
{REP_EXT, 0x6d0000, "((rep) ins)", Yz, xx, i_dx, xx, xx, no, fRD, 1},
{REP_EXT, 0x6e0000, "((rep) outs)", i_dx, xx, Xb, xx, xx, no, fRD, 2},
{REP_EXT, 0x6f0000, "((rep) outs)", i_dx, xx, Xz, xx, xx, no, fRD, 3},
/* 70 */
{OP_jo_short, 0x700000, "jo", xx, xx, Jb, xx, xx, no, fRO, END_LIST},
{OP_jno_short, 0x710000, "jno", xx, xx, Jb, xx, xx, no, fRO, END_LIST},
{OP_jb_short, 0x720000, "jb", xx, xx, Jb, xx, xx, no, fRC, END_LIST},
{OP_jnb_short, 0x730000, "jnb", xx, xx, Jb, xx, xx, no, fRC, END_LIST},
{OP_jz_short, 0x740000, "jz", xx, xx, Jb, xx, xx, no, fRZ, END_LIST},
{OP_jnz_short, 0x750000, "jnz", xx, xx, Jb, xx, xx, no, fRZ, END_LIST},
{OP_jbe_short, 0x760000, "jbe", xx, xx, Jb, xx, xx, no, (fRC|fRZ), END_LIST},
{OP_jnbe_short,0x770000, "jnbe",xx, xx, Jb, xx, xx, no, (fRC|fRZ), END_LIST},
/* 78 */
{OP_js_short, 0x780000, "js", xx, xx, Jb, xx, xx, no, fRS, END_LIST},
{OP_jns_short, 0x790000, "jns", xx, xx, Jb, xx, xx, no, fRS, END_LIST},
{OP_jp_short, 0x7a0000, "jp", xx, xx, Jb, xx, xx, no, fRP, END_LIST},
{OP_jnp_short, 0x7b0000, "jnp", xx, xx, Jb, xx, xx, no, fRP, END_LIST},
{OP_jl_short, 0x7c0000, "jl", xx, xx, Jb, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jnl_short, 0x7d0000, "jnl", xx, xx, Jb, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jle_short, 0x7e0000, "jle", xx, xx, Jb, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
{OP_jnle_short,0x7f0000, "jnle",xx, xx, Jb, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
/* 80 */
{EXTENSION, 0x800000, "(group 1a)", Eb, xx, Ib, xx, xx, mrm, x, 0},
{EXTENSION, 0x810000, "(group 1b)", Ev, xx, Iz, xx, xx, mrm, x, 1},
{EXTENSION, 0x820000, "(group 1c*)", Ev, xx, Ib, xx, xx, mrm|i64, x, 25}, /* PR 235092: gnu tools (gdb, objdump) think this is a bad opcode but windbg and the hardware disagree */
{EXTENSION, 0x830000, "(group 1c)", Ev, xx, Ib, xx, xx, mrm, x, 2},
{OP_test, 0x840000, "test", xx, xx, Eb, Gb, xx, mrm, fW6, tex[10][0]},
{OP_test, 0x850000, "test", xx, xx, Ev, Gv, xx, mrm, fW6, tfb[0x84]},
{OP_xchg, 0x860000, "xchg", Eb, Gb, Eb, Gb, xx, mrm, x, END_LIST},
{OP_xchg, 0x870000, "xchg", Ev, Gv, Ev, Gv, xx, mrm, x, tfb[0x86]},
/* 88 */
{OP_mov_st, 0x880000, "mov", Eb, xx, Gb, xx, xx, mrm, x, tex[18][0]},
{OP_mov_st, 0x890000, "mov", Ev, xx, Gv, xx, xx, mrm, x, tfb[0x88]},
{OP_mov_ld, 0x8a0000, "mov", Gb, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_mov_ld, 0x8b0000, "mov", Gv, xx, Ev, xx, xx, mrm, x, tfb[0x8a]},
{OP_mov_seg, 0x8c0000, "mov", Ev, xx, Sw, xx, xx, mrm, x, END_LIST},
{OP_lea, 0x8d0000, "lea", Gv, xx, Mm, xx, xx, mrm, x, END_LIST}, /* Intel has just M */
{OP_mov_seg, 0x8e0000, "mov", Sw, xx, Ev, xx, xx, mrm, x, tfb[0x8c]},
{XOP_PREFIX_EXT, 0x8f0000, "(xop_prefix_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
/* 90 */
{PREFIX_EXT, 0x900000, "(prefix ext 103)", xx, xx, xx, xx, xx, no, x, 103},
{OP_xchg, 0x910000, "xchg", eCX_x, eAX, eCX_x, eAX, xx, no, x, tfb[0x92]},
{OP_xchg, 0x920000, "xchg", eDX_x, eAX, eDX_x, eAX, xx, no, x, tfb[0x93]},
{OP_xchg, 0x930000, "xchg", eBX_x, eAX, eBX_x, eAX, xx, no, x, tfb[0x94]},
{OP_xchg, 0x940000, "xchg", eSP_x, eAX, eSP_x, eAX, xx, no, x, tfb[0x95]},
{OP_xchg, 0x950000, "xchg", eBP_x, eAX, eBP_x, eAX, xx, no, x, tfb[0x96]},
{OP_xchg, 0x960000, "xchg", eSI_x, eAX, eSI_x, eAX, xx, no, x, tfb[0x97]},
{OP_xchg, 0x970000, "xchg", eDI_x, eAX, eDI_x, eAX, xx, no, x, tfb[0x87]},
/* 98 */
{OP_cwde, 0x980000, "cwde", eAX, xx, ax, xx, xx, no, x, END_LIST},/*16-bit=="cbw", src is al not ax; FIXME: newer gdb calls it "cwtl"?!?*/
/* PR 354096: does not write to ax/eax/rax: sign-extends into dx/edx/rdx */
{OP_cdq, 0x990000, "cdq", eDX, xx, eAX, xx, xx, no, x, END_LIST},/*16-bit=="cwd";64-bit=="cqo"*/
{OP_call_far, 0x9a0000, "lcall", xsp, i_vSPo2, Ap, xsp, xx, i64, x, END_LIST},
{OP_fwait, 0x9b0000, "fwait", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pushf, 0x9c0000, "pushf", xsp, i_xSPo1, xsp, xx, xx, no, fRX, END_LIST},
{OP_popf, 0x9d0000, "popf", xsp, xx, xsp, i_xSP, xx, no, fWX, END_LIST},
{OP_sahf, 0x9e0000, "sahf", xx, xx, ah, xx, xx, no, (fW6&(~fWO)), END_LIST},
{OP_lahf, 0x9f0000, "lahf", ah, xx, xx, xx, xx, no, (fR6&(~fRO)), END_LIST},
/* a0 */
{OP_mov_ld, 0xa00000, "mov", al, xx, Ob, xx, xx, no, x, tfb[0x8b]},
{OP_mov_ld, 0xa10000, "mov", eAX, xx, Ov, xx, xx, no, x, tfb[0xa0]},
{OP_mov_st, 0xa20000, "mov", Ob, xx, al, xx, xx, no, x, tfb[0x89]},
{OP_mov_st, 0xa30000, "mov", Ov, xx, eAX, xx, xx, no, x, tfb[0xa2]},
{REP_EXT, 0xa40000, "((rep) movs)", Yb, xx, Xb, xx, xx, no, fRD, 4},
{REP_EXT, 0xa50000, "((rep) movs)", Yv, xx, Xv, xx, xx, no, fRD, 5},
{REPNE_EXT, 0xa60000, "((rep/ne) cmps)", Xb, xx, Yb, xx, xx, no, (fW6|fRD|fRZ), 0},
{REPNE_EXT, 0xa70000, "((rep/ne) cmps)", Xv, xx, Yv, xx, xx, no, (fW6|fRD|fRZ), 1},
/* a8 */
{OP_test, 0xa80000, "test", xx, xx, al, Ib, xx, no, fW6, tfb[0x85]},
{OP_test, 0xa90000, "test", xx, xx, eAX, Iz, xx, no, fW6, tfb[0xa8]},
{REP_EXT, 0xaa0000, "((rep) stos)", Yb, xx, al, xx, xx, no, fRD, 6},
{REP_EXT, 0xab0000, "((rep) stos)", Yv, xx, eAX, xx, xx, no, fRD, 7},
{REP_EXT, 0xac0000, "((rep) lods)", al, xx, Xb, xx, xx, no, fRD, 8},
{REP_EXT, 0xad0000, "((rep) lods)", eAX, xx, Xv, xx, xx, no, fRD, 9},
{REPNE_EXT, 0xae0000, "((rep/ne) scas)", al, xx, Yb, xx, xx, no, (fW6|fRD|fRZ), 2},
{REPNE_EXT, 0xaf0000, "((rep/ne) scas)", eAX, xx, Yv, xx, xx, no, (fW6|fRD|fRZ), 3},
/* b0 */
{OP_mov_imm, 0xb00000, "mov", al_x, xx, Ib, xx, xx, no, x, tfb[0xb1]},
{OP_mov_imm, 0xb10000, "mov", cl_x, xx, Ib, xx, xx, no, x, tfb[0xb2]},
{OP_mov_imm, 0xb20000, "mov", dl_x, xx, Ib, xx, xx, no, x, tfb[0xb3]},
{OP_mov_imm, 0xb30000, "mov", bl_x, xx, Ib, xx, xx, no, x, tfb[0xb4]},
{OP_mov_imm, 0xb40000, "mov", ah_x, xx, Ib, xx, xx, no, x, tfb[0xb5]},
{OP_mov_imm, 0xb50000, "mov", ch_x, xx, Ib, xx, xx, no, x, tfb[0xb6]},
{OP_mov_imm, 0xb60000, "mov", dh_x, xx, Ib, xx, xx, no, x, tfb[0xb7]},
/* PR 250397: we point at the tail end of the mov_st templates */
{OP_mov_imm, 0xb70000, "mov", bh_x, xx, Ib, xx, xx, no, x, tex[18][0]},
/* b8 */
{OP_mov_imm, 0xb80000, "mov", eAX_x, xx, Iv, xx, xx, no, x, tfb[0xb9]},
{OP_mov_imm, 0xb90000, "mov", eCX_x, xx, Iv, xx, xx, no, x, tfb[0xba]},
{OP_mov_imm, 0xba0000, "mov", eDX_x, xx, Iv, xx, xx, no, x, tfb[0xbb]},
{OP_mov_imm, 0xbb0000, "mov", eBX_x, xx, Iv, xx, xx, no, x, tfb[0xbc]},
{OP_mov_imm, 0xbc0000, "mov", eSP_x, xx, Iv, xx, xx, no, x, tfb[0xbd]},
{OP_mov_imm, 0xbd0000, "mov", eBP_x, xx, Iv, xx, xx, no, x, tfb[0xbe]},
{OP_mov_imm, 0xbe0000, "mov", eSI_x, xx, Iv, xx, xx, no, x, tfb[0xbf]},
{OP_mov_imm, 0xbf0000, "mov", eDI_x, xx, Iv, xx, xx, no, x, tfb[0xb0]},
/* c0 */
{EXTENSION, 0xc00000, "(group 2a)", Eb, xx, Ib, xx, xx, mrm, x, 3},
{EXTENSION, 0xc10000, "(group 2b)", Ev, xx, Ib, xx, xx, mrm, x, 4},
{OP_ret, 0xc20000, "ret", xsp, xx, Iw, xsp, i_iSP, no, x, tfb[0xc3]},
{OP_ret, 0xc30000, "ret", xsp, xx, xsp, i_iSP, xx, no, x, END_LIST},
{VEX_PREFIX_EXT, 0xc40000, "(vex_prefix_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{VEX_PREFIX_EXT, 0xc50000, "(vex_prefix_ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXTENSION, 0xc60000, "(group 11a)", Eb, xx, Ib, xx, xx, mrm, x, 17},
{EXTENSION, 0xc70000, "(group 11b)", Ev, xx, Iz, xx, xx, mrm, x, 18},
/* c8 */
{OP_enter, 0xc80000, "enter", xsp, i_xSPoN, Iw, Ib, xsp, xop, x, exop[0x05]},
{OP_leave, 0xc90000, "leave", xsp, xbp, xbp, xsp, i_xBP, no, x, END_LIST},
{OP_ret_far, 0xca0000, "lret", xsp, xx, Iw, xsp, i_vSPs2, no, x, tfb[0xcb]},
{OP_ret_far, 0xcb0000, "lret", xsp, xx, xsp, i_vSPs2, xx, no, x, END_LIST},
/* we ignore the operations on the kernel stack */
{OP_int3, 0xcc0000, "int3", xx, xx, xx, xx, xx, no, fINT, END_LIST},
{OP_int, 0xcd0000, "int", xx, xx, Ib, xx, xx, no, fINT, END_LIST},
{OP_into, 0xce0000, "into", xx, xx, xx, xx, xx, i64, fINT, END_LIST},
{OP_iret, 0xcf0000, "iret", xsp, xx, xsp, i_vSPs3, xx, no, fWX, END_LIST},
/* d0 */
{EXTENSION, 0xd00000, "(group 2c)", Eb, xx, c1, xx, xx, mrm, x, 5},
{EXTENSION, 0xd10000, "(group 2d)", Ev, xx, c1, xx, xx, mrm, x, 6},
{EXTENSION, 0xd20000, "(group 2e)", Eb, xx, cl, xx, xx, mrm, x, 7},
{EXTENSION, 0xd30000, "(group 2f)", Ev, xx, cl, xx, xx, mrm, x, 8},
{OP_aam, 0xd40000, "aam", ax, xx, Ib, ax, xx, i64, fW6, END_LIST},
{OP_aad, 0xd50000, "aad", ax, xx, Ib, ax, xx, i64, fW6, END_LIST},
{OP_salc, 0xd60000, "salc", al, xx, xx, xx, xx, i64, fRC, END_LIST},/*undocumented*/
{OP_xlat, 0xd70000, "xlat", al, xx, Zb, xx, xx, no, x, END_LIST},
/* d8 */
{FLOAT_EXT, 0xd80000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},/* all floats need modrm */
{FLOAT_EXT, 0xd90000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xda0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdb0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdc0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdd0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xde0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdf0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
/* e0 */
{OP_loopne,0xe00000, "loopne", axCX, xx, Jb, axCX, xx, no, fRZ, END_LIST},
{OP_loope, 0xe10000, "loope", axCX, xx, Jb, axCX, xx, no, fRZ, END_LIST},
{OP_loop, 0xe20000, "loop", axCX, xx, Jb, axCX, xx, no, x, END_LIST},
{OP_jecxz, 0xe30000, "jecxz", xx, xx, Jb, axCX, xx, no, x, END_LIST},/*16-bit=="jcxz",64-bit="jrcxz"*/
/* FIXME: in & out access "I/O ports", are these memory addresses?
* if so, change Ib to Ob and change dx to i_dx (move to dest for out)
*/
{OP_in, 0xe40000, "in", al, xx, Ib, xx, xx, no, x, tfb[0xed]},
{OP_in, 0xe50000, "in", zAX, xx, Ib, xx, xx, no, x, tfb[0xe4]},
{OP_out, 0xe60000, "out", xx, xx, Ib, al, xx, no, x, tfb[0xef]},
{OP_out, 0xe70000, "out", xx, xx, Ib, zAX, xx, no, x, tfb[0xe6]},
/* e8 */
{OP_call, 0xe80000, "call", xsp, i_iSPo1, Jz, xsp, xx, no, x, END_LIST},
{OP_jmp, 0xe90000, "jmp", xx, xx, Jz, xx, xx, no, x, END_LIST},
{OP_jmp_far, 0xea0000, "ljmp", xx, xx, Ap, xx, xx, i64, x, END_LIST},
{OP_jmp_short, 0xeb0000, "jmp", xx, xx, Jb, xx, xx, no, x, END_LIST},
{OP_in, 0xec0000, "in", al, xx, dx, xx, xx, no, x, END_LIST},
{OP_in, 0xed0000, "in", zAX, xx, dx, xx, xx, no, x, tfb[0xec]},
{OP_out, 0xee0000, "out", xx, xx, al, dx, xx, no, x, END_LIST},
{OP_out, 0xef0000, "out", xx, xx, zAX, dx, xx, no, x, tfb[0xee]},
/* f0 */
    {PREFIX, 0xf00000, "lock", xx, xx, xx, xx, xx, no, x, PREFIX_LOCK},
    /* Also called OP_icebp. Undocumented. I'm assuming it behaves like OP_int* */
{OP_int1, 0xf10000, "int1", xx, xx, xx, xx, xx, no, fINT, END_LIST},
{PREFIX, 0xf20000, "repne", xx, xx, xx, xx, xx, no, x, PREFIX_REPNE},
{PREFIX, 0xf30000, "rep", xx, xx, xx, xx, xx, no, x, PREFIX_REP},
{OP_hlt, 0xf40000, "hlt", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_cmc, 0xf50000, "cmc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{EXTENSION, 0xf60000, "(group 3a)", Eb, xx, xx, xx, xx, mrm, x, 9},
{EXTENSION, 0xf70000, "(group 3b)", Ev, xx, xx, xx, xx, mrm, x, 10},
/* f8 */
{OP_clc, 0xf80000, "clc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{OP_stc, 0xf90000, "stc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{OP_cli, 0xfa0000, "cli", xx, xx, xx, xx, xx, no, fWI, END_LIST},
{OP_sti, 0xfb0000, "sti", xx, xx, xx, xx, xx, no, fWI, END_LIST},
{OP_cld, 0xfc0000, "cld", xx, xx, xx, xx, xx, no, fWD, END_LIST},
{OP_std, 0xfd0000, "std", xx, xx, xx, xx, xx, no, fWD, END_LIST},
{EXTENSION, 0xfe0000, "(group 4)", xx, xx, xx, xx, xx, mrm, x, 11},
{EXTENSION, 0xff0000, "(group 5)", xx, xx, xx, xx, xx, mrm, x, 12},
};
/****************************************************************************
* Two-byte opcodes
* This is from Tables A-4 & A-5
*/
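/* Indexed by the opcode byte that follows the 0x0f escape. */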
const instr_info_t second_byte[] = {
/* 00 */
{EXTENSION, 0x0f0010, "(group 6)", xx, xx, xx, xx, xx, mrm, x, 13},
{EXTENSION, 0x0f0110, "(group 7)", xx, xx, xx, xx, xx, mrm, x, 14},
{OP_lar, 0x0f0210, "lar", Gv, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{OP_lsl, 0x0f0310, "lsl", Gv, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{INVALID, 0x0f0410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX: writes ss and cs */
{OP_syscall, 0x0f0510, "syscall", xcx, xx, xx, xx, xx, no, x, NA}, /* AMD/x64 only */
{OP_clts, 0x0f0610, "clts", xx, xx, xx, xx, xx, no, x, END_LIST},
/* XXX: writes ss and cs */
{OP_sysret, 0x0f0710, "sysret", xx, xx, xx, xx, xx, no, x, NA}, /* AMD/x64 only */
/* 08 */
{OP_invd, 0x0f0810, "invd", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_wbinvd, 0x0f0910, "wbinvd", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f0a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_ud2a, 0x0f0b10, "ud2a", xx, xx, xx, xx, xx, no, x, END_LIST}, /* "undefined instr" instr */
{INVALID, 0x0f0c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXTENSION, 0x0f0d10, "(group amd)", xx, xx, xx, xx, xx, mrm, x, 24}, /* AMD only */
{OP_femms, 0x0f0e10, "femms", xx, xx, xx, xx, xx, no, x, END_LIST},
{SUFFIX_EXT, 0x0f0f10, "(group 3DNow!)", xx, xx, xx, xx, xx, mrm, x, 0},
/* 10 */
{PREFIX_EXT, 0x0f1010, "(prefix ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
{PREFIX_EXT, 0x0f1110, "(prefix ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
{PREFIX_EXT, 0x0f1210, "(prefix ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
{PREFIX_EXT, 0x0f1310, "(prefix ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
{PREFIX_EXT, 0x0f1410, "(prefix ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
{PREFIX_EXT, 0x0f1510, "(prefix ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},
{PREFIX_EXT, 0x0f1610, "(prefix ext 6)", xx, xx, xx, xx, xx, mrm, x, 6},
{PREFIX_EXT, 0x0f1710, "(prefix ext 7)", xx, xx, xx, xx, xx, mrm, x, 7},
/* 18 */
{EXTENSION, 0x0f1810, "(group 16)", xx, xx, xx, xx, xx, mrm, x, 23},
    /* xref case 9862/PR 214297: 0f19-0f1e are "HINT_NOP": valid on P6+.
     * we treat them the same as 0f1f but do not put them on the encoding chain.
     * The operand is ignored, but to support encoding it we must list it.
     * i#453: analysis routines now special-case nop_modrm to ignore the src opnd */
{OP_nop_modrm, 0x0f1910, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1a10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1b10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1c10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1d10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1e10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1f10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
/* 20 */
{OP_mov_priv, 0x0f2010, "mov", Rr, xx, Cr, xx, xx, mrm, fW6, tsb[0x21]},
{OP_mov_priv, 0x0f2110, "mov", Rr, xx, Dr, xx, xx, mrm, fW6, tsb[0x22]},
{OP_mov_priv, 0x0f2210, "mov", Cr, xx, Rr, xx, xx, mrm, fW6, tsb[0x23]},
{OP_mov_priv, 0x0f2310, "mov", Dr, xx, Rr, xx, xx, mrm, fW6, END_LIST},
{INVALID, 0x0f2410, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* FIXME: gdb thinks ok! */
{INVALID, 0x0f2510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f2610, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* FIXME: gdb thinks ok! */
{INVALID, 0x0f2710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 28 */
{PREFIX_EXT, 0x0f2810, "(prefix ext 8)", xx, xx, xx, xx, xx, mrm, x, 8},
{PREFIX_EXT, 0x0f2910, "(prefix ext 9)", xx, xx, xx, xx, xx, mrm, x, 9},
{PREFIX_EXT, 0x0f2a10, "(prefix ext 10)", xx, xx, xx, xx, xx, mrm, x, 10},
{PREFIX_EXT, 0x0f2b10, "(prefix ext 11)", xx, xx, xx, xx, xx, mrm, x, 11},
{PREFIX_EXT, 0x0f2c10, "(prefix ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},
{PREFIX_EXT, 0x0f2d10, "(prefix ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},
{PREFIX_EXT, 0x0f2e10, "(prefix ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},
{PREFIX_EXT, 0x0f2f10, "(prefix ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},
/* 30 */
{OP_wrmsr, 0x0f3010, "wrmsr", xx, xx, edx, eax, ecx, no, x, END_LIST},
{OP_rdtsc, 0x0f3110, "rdtsc", edx, eax, xx, xx, xx, no, x, END_LIST},
{OP_rdmsr, 0x0f3210, "rdmsr", edx, eax, ecx, xx, xx, no, x, END_LIST},
{OP_rdpmc, 0x0f3310, "rdpmc", edx, eax, ecx, xx, xx, no, x, END_LIST},
/* XXX: sysenter writes cs and ss */
{OP_sysenter, 0x0f3410, "sysenter", xsp, xx, xx, xx, xx, no, x, END_LIST},
/* XXX: sysexit writes cs and ss */
{OP_sysexit, 0x0f3510, "sysexit", xsp, xx, xcx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f3610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#1313: various getsec leaf funcs at CPL 0 write to all kinds of
* processor state including eflags and eip. Leaf funcs are indicated by eax
* value, though. Here we only model the CPL > 0 effects, which conditionally
* write to ebx + ecx.
*/
{OP_getsec, 0x0f3710, "getsec", eax, ebx, eax, ebx, xx, xop|predcx, x, exop[13]},
/* 38 */
{ESCAPE_3BYTE_38, 0x0f3810, "(3byte 38)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{ESCAPE_3BYTE_3a, 0x0f3a10, "(3byte 3a)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 40 */
{OP_cmovo, 0x0f4010, "cmovo", Gv, xx, Ev, xx, xx, mrm|predcc, fRO, END_LIST},
{E_VEX_EXT, 0x0f4110, "(e_vex ext 83)", xx, xx, xx, xx, xx, mrm, x, 83},
{E_VEX_EXT, 0x0f4210, "(e_vex ext 84)", xx, xx, xx, xx, xx, mrm, x, 84},
{OP_cmovnb, 0x0f4310, "cmovnb", Gv, xx, Ev, xx, xx, mrm|predcc, fRC, END_LIST},
{E_VEX_EXT, 0x0f4410, "(e_vex ext 86)", xx, xx, xx, xx, xx, mrm, x, 86},
{E_VEX_EXT, 0x0f4510, "(e_vex ext 87)", xx, xx, xx, xx, xx, mrm, x, 87},
{E_VEX_EXT, 0x0f4610, "(e_vex ext 88)", xx, xx, xx, xx, xx, mrm, x, 88},
{E_VEX_EXT, 0x0f4710, "(e_vex ext 89)", xx, xx, xx, xx, xx, mrm, x, 89},
/* 48 */
{OP_cmovs, 0x0f4810, "cmovs", Gv, xx, Ev, xx, xx, mrm|predcc, fRS, END_LIST},
{OP_cmovns, 0x0f4910, "cmovns", Gv, xx, Ev, xx, xx, mrm|predcc, fRS, END_LIST},
{E_VEX_EXT, 0x0f4a10, "(e_vex ext 90)", xx, xx, xx, xx, xx, mrm, x, 90},
{E_VEX_EXT, 0x0f4b10, "(e_vex ext 85)", xx, xx, xx, xx, xx, mrm, x, 85},
{OP_cmovl, 0x0f4c10, "cmovl", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO), END_LIST},
{OP_cmovnl, 0x0f4d10, "cmovnl", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO), END_LIST},
{OP_cmovle, 0x0f4e10, "cmovle", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO|fRZ), END_LIST},
{OP_cmovnle,0x0f4f10, "cmovnle",Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO|fRZ), END_LIST},
/* 50 */
{PREFIX_EXT, 0x0f5010, "(prefix ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},
{PREFIX_EXT, 0x0f5110, "(prefix ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},
{PREFIX_EXT, 0x0f5210, "(prefix ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},
{PREFIX_EXT, 0x0f5310, "(prefix ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},
{PREFIX_EXT, 0x0f5410, "(prefix ext 20)", xx, xx, xx, xx, xx, mrm, x, 20},
{PREFIX_EXT, 0x0f5510, "(prefix ext 21)", xx, xx, xx, xx, xx, mrm, x, 21},
{PREFIX_EXT, 0x0f5610, "(prefix ext 22)", xx, xx, xx, xx, xx, mrm, x, 22},
{PREFIX_EXT, 0x0f5710, "(prefix ext 23)", xx, xx, xx, xx, xx, mrm, x, 23},
/* 58 */
{PREFIX_EXT, 0x0f5810, "(prefix ext 24)", xx, xx, xx, xx, xx, mrm, x, 24},
{PREFIX_EXT, 0x0f5910, "(prefix ext 25)", xx, xx, xx, xx, xx, mrm, x, 25},
{PREFIX_EXT, 0x0f5a10, "(prefix ext 26)", xx, xx, xx, xx, xx, mrm, x, 26},
{PREFIX_EXT, 0x0f5b10, "(prefix ext 27)", xx, xx, xx, xx, xx, mrm, x, 27},
{PREFIX_EXT, 0x0f5c10, "(prefix ext 28)", xx, xx, xx, xx, xx, mrm, x, 28},
{PREFIX_EXT, 0x0f5d10, "(prefix ext 29)", xx, xx, xx, xx, xx, mrm, x, 29},
{PREFIX_EXT, 0x0f5e10, "(prefix ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},
{PREFIX_EXT, 0x0f5f10, "(prefix ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},
/* 60 */
{PREFIX_EXT, 0x0f6010, "(prefix ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},
{PREFIX_EXT, 0x0f6110, "(prefix ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},
{PREFIX_EXT, 0x0f6210, "(prefix ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},
{PREFIX_EXT, 0x0f6310, "(prefix ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},
{PREFIX_EXT, 0x0f6410, "(prefix ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},
{PREFIX_EXT, 0x0f6510, "(prefix ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},
{PREFIX_EXT, 0x0f6610, "(prefix ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},
{PREFIX_EXT, 0x0f6710, "(prefix ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},
/* 68 */
{PREFIX_EXT, 0x0f6810, "(prefix ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},
{PREFIX_EXT, 0x0f6910, "(prefix ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},
{PREFIX_EXT, 0x0f6a10, "(prefix ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},
{PREFIX_EXT, 0x0f6b10, "(prefix ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},
{PREFIX_EXT, 0x0f6c10, "(prefix ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},
{PREFIX_EXT, 0x0f6d10, "(prefix ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},
{PREFIX_EXT, 0x0f6e10, "(prefix ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},
{PREFIX_EXT, 0x0f6f10, "(prefix ext 112)", xx, xx, xx, xx, xx, mrm, x, 112},
/* 70 */
{PREFIX_EXT, 0x0f7010, "(prefix ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},
{EXTENSION, 0x0f7110, "(group 12)", xx, xx, xx, xx, xx, mrm, x, 19},
{EXTENSION, 0x0f7210, "(group 13)", xx, xx, xx, xx, xx, mrm, x, 20},
{EXTENSION, 0x0f7310, "(group 14)", xx, xx, xx, xx, xx, mrm, x, 21},
{PREFIX_EXT, 0x0f7410, "(prefix ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},
{PREFIX_EXT, 0x0f7510, "(prefix ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},
{PREFIX_EXT, 0x0f7610, "(prefix ext 50)", xx, xx, xx, xx, xx, mrm, x, 50},
{VEX_L_EXT, 0x0f7710, "(vex L ext 0)", xx, xx, xx, xx, xx, no, x, 0},
/* 78 */
{PREFIX_EXT, 0x0f7810, "(prefix ext 134)", xx, xx, xx, xx, xx, mrm, x, 134},
{PREFIX_EXT, 0x0f7910, "(prefix ext 135)", xx, xx, xx, xx, xx, mrm, x, 135},
{PREFIX_EXT, 0x0f7a10, "(prefix ext 159)", xx, xx, xx, xx, xx, mrm, x, 159},
{PREFIX_EXT, 0x0f7b10, "(prefix ext 158)", xx, xx, xx, xx, xx, mrm, x, 158},
{PREFIX_EXT, 0x0f7c10, "(prefix ext 114)", xx, xx, xx, xx, xx, mrm, x, 114},
{PREFIX_EXT, 0x0f7d10, "(prefix ext 115)", xx, xx, xx, xx, xx, mrm, x, 115},
{PREFIX_EXT, 0x0f7e10, "(prefix ext 51)", xx, xx, xx, xx, xx, mrm, x, 51},
{PREFIX_EXT, 0x0f7f10, "(prefix ext 113)", xx, xx, xx, xx, xx, mrm, x, 113},
/* 80 */
{OP_jo, 0x0f8010, "jo", xx, xx, Jz, xx, xx, no, fRO, END_LIST},
{OP_jno, 0x0f8110, "jno", xx, xx, Jz, xx, xx, no, fRO, END_LIST},
{OP_jb, 0x0f8210, "jb", xx, xx, Jz, xx, xx, no, fRC, END_LIST},
{OP_jnb, 0x0f8310, "jnb", xx, xx, Jz, xx, xx, no, fRC, END_LIST},
{OP_jz, 0x0f8410, "jz", xx, xx, Jz, xx, xx, no, fRZ, END_LIST},
{OP_jnz, 0x0f8510, "jnz", xx, xx, Jz, xx, xx, no, fRZ, END_LIST},
{OP_jbe, 0x0f8610, "jbe", xx, xx, Jz, xx, xx, no, (fRC|fRZ), END_LIST},
{OP_jnbe,0x0f8710, "jnbe",xx, xx, Jz, xx, xx, no, (fRC|fRZ), END_LIST},
/* 88 */
{OP_js, 0x0f8810, "js", xx, xx, Jz, xx, xx, no, fRS, END_LIST},
{OP_jns, 0x0f8910, "jns", xx, xx, Jz, xx, xx, no, fRS, END_LIST},
{OP_jp, 0x0f8a10, "jp", xx, xx, Jz, xx, xx, no, fRP, END_LIST},
{OP_jnp, 0x0f8b10, "jnp", xx, xx, Jz, xx, xx, no, fRP, END_LIST},
{OP_jl, 0x0f8c10, "jl", xx, xx, Jz, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jnl, 0x0f8d10, "jnl", xx, xx, Jz, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jle, 0x0f8e10, "jle", xx, xx, Jz, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
{OP_jnle,0x0f8f10, "jnle",xx, xx, Jz, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
/* 90 */
{E_VEX_EXT, 0x0f9010, "(e_vex ext 79)", xx, xx, xx, xx, xx, mrm, x, 79},
{E_VEX_EXT, 0x0f9110, "(e_vex ext 80)", xx, xx, xx, xx, xx, mrm, x, 80},
{E_VEX_EXT, 0x0f9210, "(e_vex ext 81)", xx, xx, xx, xx, xx, mrm, x, 81},
{E_VEX_EXT, 0x0f9310, "(e_vex ext 82)", xx, xx, xx, xx, xx, mrm, x, 82},
{OP_setz, 0x0f9410, "setz", Eb, xx, xx, xx, xx, mrm, fRZ, END_LIST},
{OP_setnz, 0x0f9510, "setnz", Eb, xx, xx, xx, xx, mrm, fRZ, END_LIST},
{OP_setbe, 0x0f9610, "setbe", Eb, xx, xx, xx, xx, mrm, (fRC|fRZ), END_LIST},
{OP_setnbe,0x0f9710, "setnbe",Eb, xx, xx, xx, xx, mrm, (fRC|fRZ), END_LIST},
/* 98 */
{E_VEX_EXT, 0x0f9810, "(e_vex ext 91)", xx, xx, xx, xx, xx, mrm, x, 91},
{E_VEX_EXT, 0x0f9910, "(e_vex ext 92)", xx, xx, xx, xx, xx, mrm, x, 92},
{OP_setp, 0x0f9a10, "setp", Eb, xx, xx, xx, xx, mrm, fRP, END_LIST},
{OP_setnp, 0x0f9b10, "setnp", Eb, xx, xx, xx, xx, mrm, fRP, END_LIST},
{OP_setl, 0x0f9c10, "setl", Eb, xx, xx, xx, xx, mrm, (fRS|fRO), END_LIST},
{OP_setnl, 0x0f9d10, "setnl", Eb, xx, xx, xx, xx, mrm, (fRS|fRO), END_LIST},
{OP_setle, 0x0f9e10, "setle", Eb, xx, xx, xx, xx, mrm, (fRS|fRO|fRZ), END_LIST},
{OP_setnle,0x0f9f10, "setnle",Eb, xx, xx, xx, xx, mrm, (fRS|fRO|fRZ), END_LIST},
/* a0 */
{OP_push, 0x0fa010, "push", xsp, i_xSPo1, fs, xsp, xx, no, x, tsb[0xa8]},
{OP_pop, 0x0fa110, "pop", fs, xsp, xsp, i_xSP, xx, no, x, tsb[0xa9]},
{OP_cpuid, 0x0fa210, "cpuid", eax, ebx, eax, ecx, xx, xop, x, exop[0x06]},
{OP_bt, 0x0fa310, "bt", xx, xx, Ev, Gv, xx, mrm, fW6, tex[15][4]},
{OP_shld, 0x0fa410, "shld", Ev, xx, Gv, Ib, Ev, mrm, fW6, tsb[0xa5]},
{OP_shld, 0x0fa510, "shld", Ev, xx, Gv, cl, Ev, mrm, fW6, END_LIST},
{INVALID, 0x0fa610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fa710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* a8 */
{OP_push, 0x0fa810, "push", xsp, i_xSPo1, gs, xsp, xx, no, x, END_LIST},
{OP_pop, 0x0fa910, "pop", gs, xsp, xsp, i_xSP, xx, no, x, END_LIST},
{OP_rsm, 0x0faa10, "rsm", xx, xx, xx, xx, xx, no, fWX, END_LIST},
{OP_bts, 0x0fab10, "bts", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][5]},
{OP_shrd, 0x0fac10, "shrd", Ev, xx, Gv, Ib, Ev, mrm, fW6, tsb[0xad]},
{OP_shrd, 0x0fad10, "shrd", Ev, xx, Gv, cl, Ev, mrm, fW6, END_LIST},
{EXTENSION, 0x0fae10, "(group 15)", xx, xx, xx, xx, xx, mrm, x, 22},
{OP_imul, 0x0faf10, "imul", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x69]},
/* b0 */
{OP_cmpxchg, 0x0fb010, "cmpxchg", Eb, al, Gb, Eb, al, mrm, fW6, END_LIST},
{OP_cmpxchg, 0x0fb110, "cmpxchg", Ev, eAX, Gv, Ev, eAX, mrm, fW6, tsb[0xb0]},
{OP_lss, 0x0fb210, "lss", Gv, ss, Mp, xx, xx, mrm, x, END_LIST},
{OP_btr, 0x0fb310, "btr", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][6]},
{OP_lfs, 0x0fb410, "lfs", Gv, fs, Mp, xx, xx, mrm, x, END_LIST},
{OP_lgs, 0x0fb510, "lgs", Gv, gs, Mp, xx, xx, mrm, x, END_LIST},
{OP_movzx, 0x0fb610, "movzx", Gv, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_movzx, 0x0fb710, "movzx", Gv, xx, Ew, xx, xx, mrm, x, tsb[0xb6]},
/* b8 */
{OP_popcnt, 0xf30fb810, "popcnt", Gv, xx, Ev, xx, xx, mrm|reqp, fW6, END_LIST},
/* This is Group 10, but all identical (ud2b) so no reason to split opcode by /reg */
{OP_ud2b, 0x0fb910, "ud2b", xx, xx, xx, xx, xx, no, x, END_LIST},
{EXTENSION, 0x0fba10, "(group 8)", xx, xx, xx, xx, xx, mrm, x, 15},
{OP_btc, 0x0fbb10, "btc", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][7]},
{PREFIX_EXT, 0x0fbc10, "(prefix ext 140)", xx, xx, xx, xx, xx, mrm, x, 140},
{PREFIX_EXT, 0x0fbd10, "(prefix ext 136)", xx, xx, xx, xx, xx, mrm, x, 136},
{OP_movsx, 0x0fbe10, "movsx", Gv, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_movsx, 0x0fbf10, "movsx", Gv, xx, Ew, xx, xx, mrm, x, tsb[0xbe]},
/* c0 */
{OP_xadd, 0x0fc010, "xadd", Eb, Gb, Eb, Gb, xx, mrm, fW6, END_LIST},
{OP_xadd, 0x0fc110, "xadd", Ev, Gv, Ev, Gv, xx, mrm, fW6, tsb[0xc0]},
{PREFIX_EXT, 0x0fc210, "(prefix ext 52)", xx, xx, xx, xx, xx, mrm, x, 52},
{OP_movnti, 0x0fc310, "movnti", Md_q, xx, Gd_q, xx, xx, mrm, x, END_LIST},
{PREFIX_EXT, 0x0fc410, "(prefix ext 53)", xx, xx, xx, xx, xx, mrm, x, 53},
{PREFIX_EXT, 0x0fc510, "(prefix ext 54)", xx, xx, xx, xx, xx, mrm, x, 54},
{PREFIX_EXT, 0x0fc610, "(prefix ext 55)", xx, xx, xx, xx, xx, mrm, x, 55},
{EXTENSION, 0x0fc710, "(group 9)", xx, xx, xx, xx, xx, mrm, x, 16},
/* c8 */
{OP_bswap, 0x0fc810, "bswap", uAX_x, xx, uAX_x, xx, xx, no, x, tsb[0xc9]},
{OP_bswap, 0x0fc910, "bswap", uCX_x, xx, uCX_x, xx, xx, no, x, tsb[0xca]},
{OP_bswap, 0x0fca10, "bswap", uDX_x, xx, uDX_x, xx, xx, no, x, tsb[0xcb]},
{OP_bswap, 0x0fcb10, "bswap", uBX_x, xx, uBX_x, xx, xx, no, x, tsb[0xcc]},
{OP_bswap, 0x0fcc10, "bswap", uSP_x, xx, uSP_x, xx, xx, no, x, tsb[0xcd]},
{OP_bswap, 0x0fcd10, "bswap", uBP_x, xx, uBP_x, xx, xx, no, x, tsb[0xce]},
{OP_bswap, 0x0fce10, "bswap", uSI_x, xx, uSI_x, xx, xx, no, x, tsb[0xcf]},
{OP_bswap, 0x0fcf10, "bswap", uDI_x, xx, uDI_x, xx, xx, no, x, END_LIST},
/* d0 */
{PREFIX_EXT, 0x0fd010, "(prefix ext 116)", xx, xx, xx, xx, xx, mrm, x, 116},
{PREFIX_EXT, 0x0fd110, "(prefix ext 56)", xx, xx, xx, xx, xx, mrm, x, 56},
{PREFIX_EXT, 0x0fd210, "(prefix ext 57)", xx, xx, xx, xx, xx, mrm, x, 57},
{PREFIX_EXT, 0x0fd310, "(prefix ext 58)", xx, xx, xx, xx, xx, mrm, x, 58},
{PREFIX_EXT, 0x0fd410, "(prefix ext 59)", xx, xx, xx, xx, xx, mrm, x, 59},
{PREFIX_EXT, 0x0fd510, "(prefix ext 60)", xx, xx, xx, xx, xx, mrm, x, 60},
{PREFIX_EXT, 0x0fd610, "(prefix ext 61)", xx, xx, xx, xx, xx, mrm, x, 61},
{PREFIX_EXT, 0x0fd710, "(prefix ext 62)", xx, xx, xx, xx, xx, mrm, x, 62},
/* d8 */
{PREFIX_EXT, 0x0fd810, "(prefix ext 63)", xx, xx, xx, xx, xx, mrm, x, 63},
{PREFIX_EXT, 0x0fd910, "(prefix ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},
{PREFIX_EXT, 0x0fda10, "(prefix ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},
{PREFIX_EXT, 0x0fdb10, "(prefix ext 66)", xx, xx, xx, xx, xx, mrm, x, 66},
{PREFIX_EXT, 0x0fdc10, "(prefix ext 67)", xx, xx, xx, xx, xx, mrm, x, 67},
{PREFIX_EXT, 0x0fdd10, "(prefix ext 68)", xx, xx, xx, xx, xx, mrm, x, 68},
{PREFIX_EXT, 0x0fde10, "(prefix ext 69)", xx, xx, xx, xx, xx, mrm, x, 69},
{PREFIX_EXT, 0x0fdf10, "(prefix ext 70)", xx, xx, xx, xx, xx, mrm, x, 70},
/* e0 */
{PREFIX_EXT, 0x0fe010, "(prefix ext 71)", xx, xx, xx, xx, xx, mrm, x, 71},
{PREFIX_EXT, 0x0fe110, "(prefix ext 72)", xx, xx, xx, xx, xx, mrm, x, 72},
{PREFIX_EXT, 0x0fe210, "(prefix ext 73)", xx, xx, xx, xx, xx, mrm, x, 73},
{PREFIX_EXT, 0x0fe310, "(prefix ext 74)", xx, xx, xx, xx, xx, mrm, x, 74},
{PREFIX_EXT, 0x0fe410, "(prefix ext 75)", xx, xx, xx, xx, xx, mrm, x, 75},
{PREFIX_EXT, 0x0fe510, "(prefix ext 76)", xx, xx, xx, xx, xx, mrm, x, 76},
{PREFIX_EXT, 0x0fe610, "(prefix ext 77)", xx, xx, xx, xx, xx, mrm, x, 77},
{PREFIX_EXT, 0x0fe710, "(prefix ext 78)", xx, xx, xx, xx, xx, mrm, x, 78},
/* e8 */
{PREFIX_EXT, 0x0fe810, "(prefix ext 79)", xx, xx, xx, xx, xx, mrm, x, 79},
{PREFIX_EXT, 0x0fe910, "(prefix ext 80)", xx, xx, xx, xx, xx, mrm, x, 80},
{PREFIX_EXT, 0x0fea10, "(prefix ext 81)", xx, xx, xx, xx, xx, mrm, x, 81},
{PREFIX_EXT, 0x0feb10, "(prefix ext 82)", xx, xx, xx, xx, xx, mrm, x, 82},
{PREFIX_EXT, 0x0fec10, "(prefix ext 83)", xx, xx, xx, xx, xx, mrm, x, 83},
{PREFIX_EXT, 0x0fed10, "(prefix ext 84)", xx, xx, xx, xx, xx, mrm, x, 84},
{PREFIX_EXT, 0x0fee10, "(prefix ext 85)", xx, xx, xx, xx, xx, mrm, x, 85},
{PREFIX_EXT, 0x0fef10, "(prefix ext 86)", xx, xx, xx, xx, xx, mrm, x, 86},
/* f0 */
{PREFIX_EXT, 0x0ff010, "(prefix ext 117)", xx, xx, xx, xx, xx, mrm, x, 117},
{PREFIX_EXT, 0x0ff110, "(prefix ext 87)", xx, xx, xx, xx, xx, mrm, x, 87},
{PREFIX_EXT, 0x0ff210, "(prefix ext 88)", xx, xx, xx, xx, xx, mrm, x, 88},
{PREFIX_EXT, 0x0ff310, "(prefix ext 89)", xx, xx, xx, xx, xx, mrm, x, 89},
{PREFIX_EXT, 0x0ff410, "(prefix ext 90)", xx, xx, xx, xx, xx, mrm, x, 90},
{PREFIX_EXT, 0x0ff510, "(prefix ext 91)", xx, xx, xx, xx, xx, mrm, x, 91},
{PREFIX_EXT, 0x0ff610, "(prefix ext 92)", xx, xx, xx, xx, xx, mrm, x, 92},
{PREFIX_EXT, 0x0ff710, "(prefix ext 93)", xx, xx, xx, xx, xx, mrm, x, 93},
/* f8 */
{PREFIX_EXT, 0x0ff810, "(prefix ext 94)", xx, xx, xx, xx, xx, mrm, x, 94},
{PREFIX_EXT, 0x0ff910, "(prefix ext 95)", xx, xx, xx, xx, xx, mrm, x, 95},
{PREFIX_EXT, 0x0ffa10, "(prefix ext 96)", xx, xx, xx, xx, xx, mrm, x, 96},
{PREFIX_EXT, 0x0ffb10, "(prefix ext 97)", xx, xx, xx, xx, xx, mrm, x, 97},
{PREFIX_EXT, 0x0ffc10, "(prefix ext 98)", xx, xx, xx, xx, xx, mrm, x, 98},
{PREFIX_EXT, 0x0ffd10, "(prefix ext 99)", xx, xx, xx, xx, xx, mrm, x, 99},
{PREFIX_EXT, 0x0ffe10, "(prefix ext 100)", xx, xx, xx, xx, xx, mrm, x, 100},
{INVALID, 0x0fff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
};
/****************************************************************************
* Opcode extensions
* This is from Table A-6
*/
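/* One 8-entry row per extension group; within a row the index is the modrm
 * reg field (/0 through /7), visible as the final digit of each encoding
 * (e.g. 0x800020-0x800027 for group 1a).
 */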
const instr_info_t base_extensions[][8] = {
/* group 1a -- first opcode byte 80: all assumed to have Ib */
{ /* extensions[0] */
{OP_add, 0x800020, "add", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][0]},
{OP_or, 0x800021, "or", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][1]},
{OP_adc, 0x800022, "adc", Eb, xx, Ib, Eb, xx, mrm, (fW6|fRC), tex[25][2]},
{OP_sbb, 0x800023, "sbb", Eb, xx, Ib, Eb, xx, mrm, (fW6|fRC), tex[25][3]},
{OP_and, 0x800024, "and", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][4]},
{OP_sub, 0x800025, "sub", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][5]},
{OP_xor, 0x800026, "xor", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][6]},
{OP_cmp, 0x800027, "cmp", xx, xx, Eb, Ib, xx, mrm, fW6, tex[25][7]},
},
/* group 1b -- first opcode byte 81: all assumed to have Iz */
{ /* extensions[1] */
{OP_add, 0x810020, "add", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][0]},
{OP_or, 0x810021, "or", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][1]},
{OP_adc, 0x810022, "adc", Ev, xx, Iz, Ev, xx, mrm, (fW6|fRC), tex[2][2]},
{OP_sbb, 0x810023, "sbb", Ev, xx, Iz, Ev, xx, mrm, (fW6|fRC), tex[2][3]},
{OP_and, 0x810024, "and", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][4]},
{OP_sub, 0x810025, "sub", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][5]},
{OP_xor, 0x810026, "xor", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][6]},
{OP_cmp, 0x810027, "cmp", xx, xx, Ev, Iz, xx, mrm, fW6, tex[2][7]},
},
/* group 1c -- first opcode byte 83 (for 82, see below "group 1c*"):
* all assumed to have Ib */
{ /* extensions[2] */
{OP_add, 0x830020, "add", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][0]},
{OP_or, 0x830021, "or", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][1]},
{OP_adc, 0x830022, "adc", Ev, xx, Ib, Ev, xx, mrm, (fW6|fRC), tex[0][2]},
{OP_sbb, 0x830023, "sbb", Ev, xx, Ib, Ev, xx, mrm, (fW6|fRC), tex[0][3]},
{OP_and, 0x830024, "and", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][4]},
{OP_sub, 0x830025, "sub", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][5]},
{OP_xor, 0x830026, "xor", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][6]},
{OP_cmp, 0x830027, "cmp", xx, xx, Ev, Ib, xx, mrm, fW6, tex[0][7]},
},
/* group 2a -- first opcode byte c0: all assumed to have Ib */
{ /* extensions[3] */
{OP_rol, 0xc00020, "rol", Eb, xx, Ib, Eb, xx, mrm, (fWC|fWO), tex[5][0]},
{OP_ror, 0xc00021, "ror", Eb, xx, Ib, Eb, xx, mrm, (fWC|fWO), tex[5][1]},
{OP_rcl, 0xc00022, "rcl", Eb, xx, Ib, Eb, xx, mrm, (fRC|fWC|fWO), tex[5][2]},
{OP_rcr, 0xc00023, "rcr", Eb, xx, Ib, Eb, xx, mrm, (fRC|fWC|fWO), tex[5][3]},
{OP_shl, 0xc00024, "shl", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][4]},
{OP_shr, 0xc00025, "shr", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xc00026, "shl", Eb, xx, Ib, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xc00027, "sar", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][7]},
},
/* group 2b -- first opcode byte c1: all assumed to have Ib */
{ /* extensions[4] */
{OP_rol, 0xc10020, "rol", Ev, xx, Ib, Ev, xx, mrm, (fWC|fWO), tex[6][0]},
{OP_ror, 0xc10021, "ror", Ev, xx, Ib, Ev, xx, mrm, (fWC|fWO), tex[6][1]},
{OP_rcl, 0xc10022, "rcl", Ev, xx, Ib, Ev, xx, mrm, (fRC|fWC|fWO), tex[6][2]},
{OP_rcr, 0xc10023, "rcr", Ev, xx, Ib, Ev, xx, mrm, (fRC|fWC|fWO), tex[6][3]},
{OP_shl, 0xc10024, "shl", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][4]},
{OP_shr, 0xc10025, "shr", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xc10026, "shl", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xc10027, "sar", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][7]},
},
/* group 2c -- first opcode byte d0 */
{ /* extensions[5] */
{OP_rol, 0xd00020, "rol", Eb, xx, c1, Eb, xx, mrm, (fWC|fWO), tex[8][0]},
{OP_ror, 0xd00021, "ror", Eb, xx, c1, Eb, xx, mrm, (fWC|fWO), tex[8][1]},
{OP_rcl, 0xd00022, "rcl", Eb, xx, c1, Eb, xx, mrm, (fRC|fWC|fWO), tex[8][2]},
{OP_rcr, 0xd00023, "rcr", Eb, xx, c1, Eb, xx, mrm, (fRC|fWC|fWO), tex[8][3]},
{OP_shl, 0xd00024, "shl", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][4]},
{OP_shr, 0xd00025, "shr", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd00026, "shl", Eb, xx, c1, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd00027, "sar", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][7]},
},
/* group 2d -- first opcode byte d1 */
{ /* extensions[6] */
{OP_rol, 0xd10020, "rol", Ev, xx, c1, Ev, xx, mrm, (fWC|fWO), tex[3][0]},
{OP_ror, 0xd10021, "ror", Ev, xx, c1, Ev, xx, mrm, (fWC|fWO), tex[3][1]},
{OP_rcl, 0xd10022, "rcl", Ev, xx, c1, Ev, xx, mrm, (fRC|fWC|fWO), tex[3][2]},
{OP_rcr, 0xd10023, "rcr", Ev, xx, c1, Ev, xx, mrm, (fRC|fWC|fWO), tex[3][3]},
{OP_shl, 0xd10024, "shl", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][4]},
{OP_shr, 0xd10025, "shr", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd10026, "shl", Ev, xx, c1, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd10027, "sar", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][7]},
},
/* group 2e -- first opcode byte d2 */
{ /* extensions[7] */
{OP_rol, 0xd20020, "rol", Eb, xx, cl, Eb, xx, mrm, (fWC|fWO), END_LIST},
{OP_ror, 0xd20021, "ror", Eb, xx, cl, Eb, xx, mrm, (fWC|fWO), END_LIST},
{OP_rcl, 0xd20022, "rcl", Eb, xx, cl, Eb, xx, mrm, (fRC|fWC|fWO), END_LIST},
{OP_rcr, 0xd20023, "rcr", Eb, xx, cl, Eb, xx, mrm, (fRC|fWC|fWO), END_LIST},
{OP_shl, 0xd20024, "shl", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
{OP_shr, 0xd20025, "shr", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd20026, "shl", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd20027, "sar", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
},
/* group 2f -- first opcode byte d3 */
{ /* extensions[8] */
{OP_rol, 0xd30020, "rol", Ev, xx, cl, Ev, xx, mrm, (fWC|fWO), tex[7][0]},
{OP_ror, 0xd30021, "ror", Ev, xx, cl, Ev, xx, mrm, (fWC|fWO), tex[7][1]},
{OP_rcl, 0xd30022, "rcl", Ev, xx, cl, Ev, xx, mrm, (fRC|fWC|fWO), tex[7][2]},
{OP_rcr, 0xd30023, "rcr", Ev, xx, cl, Ev, xx, mrm, (fRC|fWC|fWO), tex[7][3]},
{OP_shl, 0xd30024, "shl", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][4]},
{OP_shr, 0xd30025, "shr", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd30026, "shl", Ev, xx, cl, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd30027, "sar", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][7]},
},
/* group 3a -- first opcode byte f6 */
{ /* extensions[9] */
{OP_test, 0xf60020, "test", xx, xx, Eb, Ib, xx, mrm, fW6, END_LIST},
/* PR 332254: /1 is an alias for /0; we do not add to encoding chain though */
{OP_test, 0xf60021, "test", xx, xx, Eb, Ib, xx, mrm, fW6, END_LIST},
{OP_not, 0xf60022, "not", Eb, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_neg, 0xf60023, "neg", Eb, xx, Eb, xx, xx, mrm, fW6, END_LIST},
{OP_mul, 0xf60024, "mul", ax, xx, Eb, al, xx, mrm, fW6, END_LIST},
{OP_imul, 0xf60025, "imul", ax, xx, Eb, al, xx, mrm, fW6, tsb[0xaf]},
{OP_div, 0xf60026, "div", ah, al, Eb, ax, xx, mrm, fW6, END_LIST},
{OP_idiv, 0xf60027, "idiv", ah, al, Eb, ax, xx, mrm, fW6, END_LIST},
},
/* group 3b -- first opcode byte f7 */
{ /* extensions[10] */
{OP_test, 0xf70020, "test", xx, xx, Ev, Iz, xx, mrm, fW6, tex[9][0]},
/* PR 332254: /1 is an alias for /0; we do not add to encoding chain though */
{OP_test, 0xf70021, "test", xx, xx, Ev, Iz, xx, mrm, fW6, END_LIST},
{OP_not, 0xf70022, "not", Ev, xx, Ev, xx, xx, mrm, x, tex[9][2]},
{OP_neg, 0xf70023, "neg", Ev, xx, Ev, xx, xx, mrm, fW6, tex[9][3]},
{OP_mul, 0xf70024, "mul", eDX, eAX, Ev, eAX, xx, mrm, fW6, tex[9][4]},
{OP_imul, 0xf70025, "imul", eDX, eAX, Ev, eAX, xx, mrm, fW6, tex[9][5]},
{OP_div, 0xf70026, "div", eDX, eAX, Ev, eDX, eAX, mrm, fW6, tex[9][6]},
{OP_idiv, 0xf70027, "idiv", eDX, eAX, Ev, eDX, eAX, mrm, fW6, tex[9][7]},
},
/* group 4 (first byte fe) */
{ /* extensions[11] */
{OP_inc, 0xfe0020, "inc", Eb, xx, Eb, xx, xx, mrm, (fW6&(~fWC)), END_LIST},
{OP_dec, 0xfe0021, "dec", Eb, xx, Eb, xx, xx, mrm, (fW6&(~fWC)), END_LIST},
{INVALID, 0xfe0022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 5 (first byte ff) */
{ /* extensions[12] */
{OP_inc, 0xff0020, "inc", Ev, xx, Ev, xx, xx, mrm, (fW6&(~fWC)), tex[11][0]},
{OP_dec, 0xff0021, "dec", Ev, xx, Ev, xx, xx, mrm, (fW6&(~fWC)), tex[11][1]},
{OP_call_ind, 0xff0022, "call", xsp, i_iSPo1, i_Exi, xsp, xx, mrm, x, END_LIST},
/* Note how a far call's stack operand size matches far ret rather than call */
{OP_call_far_ind, 0xff0023, "lcall", xsp, i_vSPo2, i_Ep, xsp, xx, mrm, x, END_LIST},
{OP_jmp_ind, 0xff0024, "jmp", xx, xx, i_Exi, xx, xx, mrm, x, END_LIST},
{OP_jmp_far_ind, 0xff0025, "ljmp", xx, xx, i_Ep, xx, xx, mrm, x, END_LIST},
{OP_push, 0xff0026, "push", xsp, i_xSPo1, Esv, xsp, xx, mrm, x, tfb[0x06]},
{INVALID, 0xff0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 6 (first bytes 0f 00) */
{ /* extensions[13] */
{OP_sldt, 0x0f0030, "sldt", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_str, 0x0f0031, "str", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_lldt, 0x0f0032, "lldt", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{OP_ltr, 0x0f0033, "ltr", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{OP_verr, 0x0f0034, "verr", xx, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{OP_verw, 0x0f0035, "verw", xx, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{INVALID, 0x0f0036, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0037, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
},
/* group 7 (first bytes 0f 01) */
{ /* extensions[14] */
{MOD_EXT, 0x0f0130, "(group 7 mod ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{MOD_EXT, 0x0f0131, "(group 7 mod ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{MOD_EXT, 0x0f0132, "(group 7 mod ext 5)", xx, xx, xx, xx, xx, no, x, 5},
{MOD_EXT, 0x0f0133, "(group 7 mod ext 4)", xx, xx, xx, xx, xx, no, x, 4},
{OP_smsw, 0x0f0134, "smsw", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0135, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_lmsw, 0x0f0136, "lmsw", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{MOD_EXT, 0x0f0137, "(group 7 mod ext 2)", xx, xx, xx, xx, xx, no, x, 2},
},
/* group 8 (first bytes 0f ba): all assumed to have Ib */
{ /* extensions[15] */
{INVALID, 0x0fba30, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba31, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba32, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba33, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{OP_bt, 0x0fba34, "bt", xx, xx, Ev, Ib, xx, mrm, fW6, END_LIST},
{OP_bts, 0x0fba35, "bts", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_btr, 0x0fba36, "btr", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_btc, 0x0fba37, "btc", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
},
/* group 9 (first bytes 0f c7) */
{ /* extensions[16] */
{INVALID, 0x0fc730, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_cmpxchg8b, 0x0fc731, "cmpxchg8b", Mq_dq, eAX, Mq_dq, eAX, eDX, mrm_xop, fWZ, exop[0x07]},/*"cmpxchg16b" w/ rex.w*/
{INVALID, 0x0fc732, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fc733, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{REX_W_EXT, 0x0fc734, "(rex.w ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},
{INVALID, 0x0fc735, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{MOD_EXT, 0x0fc736, "(group 9 mod ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},
{MOD_EXT, 0x0fc737, "(mod ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},
},
/* group 10 is all ud2b and is not used by us since identical */
/* group 11a (first byte c6) */
{ /* extensions[17] */
{OP_mov_st, 0xc60020, "mov", Eb, xx, Ib, xx, xx, mrm, x, END_LIST},
{INVALID, 0xc60021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#1314: this also sets eip */
{OP_xabort, 0xf8c60067, "xabort", eax, xx, Ib, xx, xx, mrm, x, END_LIST},
},
/* group 11b (first byte c7) */
{ /* extensions[18] */
/* PR 250397: be aware that mov_imm shares this tail end of mov_st templates */
{OP_mov_st, 0xc70020, "mov", Ev, xx, Iz, xx, xx, mrm, x, tex[17][0]},
{INVALID, 0xc70021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_xbegin, 0xf8c70067, "xbegin", xx, xx, Jz, xx, xx, mrm, x, END_LIST},
},
/* group 12 (first bytes 0f 71): all assumed to have Ib */
{ /* extensions[19] */
{INVALID, 0x0f7130, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7132, "(prefix ext 104)", xx, xx, xx, xx, xx, no, x, 104},
{INVALID, 0x0f7133, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7134, "(prefix ext 105)", xx, xx, xx, xx, xx, no, x, 105},
{INVALID, 0x0f7135, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7136, "(prefix ext 106)", xx, xx, xx, xx, xx, no, x, 106},
{INVALID, 0x0f7137, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 13 (first bytes 0f 72): all assumed to have Ib */
{ /* extensions[20] */
{EVEX_W_EXT, 0x660f7230, "(evex_W ext 119)", xx, xx, xx, xx, xx, mrm|evex, x, 119},
{EVEX_W_EXT, 0x660f7231, "(evex_W ext 117)", xx, xx, xx, xx, xx, mrm|evex, x, 117},
{PREFIX_EXT, 0x0f7232, "(prefix ext 107)", xx, xx, xx, xx, xx, no, x, 107},
{INVALID, 0x0f7233, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7234, "(prefix ext 108)", xx, xx, xx, xx, xx, no, x, 108},
{INVALID, 0x0f7235, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7236, "(prefix ext 109)", xx, xx, xx, xx, xx, no, x, 109},
{INVALID, 0x0f7237, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 14 (first bytes 0f 73): all assumed to have Ib */
{ /* extensions[21] */
{INVALID, 0x0f7330, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7331, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7332, "(prefix ext 110)", xx, xx, xx, xx, xx, no, x, 110},
{PREFIX_EXT, 0x0f7333, "(prefix ext 101)", xx, xx, xx, xx, xx, no, x, 101},
{INVALID, 0x0f7334, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7335, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7336, "(prefix ext 111)", xx, xx, xx, xx, xx, no, x, 111},
{PREFIX_EXT, 0x0f7337, "(prefix ext 102)", xx, xx, xx, xx, xx, no, x, 102},
},
/* group 15 (first bytes 0f ae) */
{ /* extensions[22] */
/* Intel tables imply they may add opcodes in the mod=3 (non-mem) space in future */
{MOD_EXT, 0x0fae30, "(group 15 mod ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},
{MOD_EXT, 0x0fae31, "(group 15 mod ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},
{MOD_EXT, 0x0fae32, "(group 15 mod ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},
{MOD_EXT, 0x0fae33, "(group 15 mod ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},
{REX_W_EXT, 0x0fae34, "(rex.w ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
{MOD_EXT, 0x0fae35, "(group 15 mod ext 6)", xx, xx, xx, xx, xx, no, x, 6},
{MOD_EXT, 0x0fae36, "(group 15 mod ext 7)", xx, xx, xx, xx, xx, no, x, 7},
{MOD_EXT, 0x0fae37, "(group 15 mod ext 3)", xx, xx, xx, xx, xx, no, x, 3},
},
/* group 16 (first bytes 0f 18) */
{ /* extensions[23] */
/* Intel tables imply they may add opcodes in the mod=3 (non-mem) space in future */
{OP_prefetchnta, 0x0f1830, "prefetchnta", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht0, 0x0f1831, "prefetcht0", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht1, 0x0f1832, "prefetcht1", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht2, 0x0f1833, "prefetcht2", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1834, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1835, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1836, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1837, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
},
/* group AMD (first bytes 0f 0d) */
{ /* extensions[24] */
{OP_prefetch, 0x0f0d30, "prefetch", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetchw, 0x0f0d31, "prefetchw", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0d32, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d33, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d34, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d35, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d36, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d37, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 1c* -- first opcode byte 82
* see PR 235092 for the discrepancies in what 0x82 should be: empirically
* and according to recent Intel manuals it matches 0x80, not 0x83 (as old
* Intel manuals implied) or invalid (as gnu tools claim).
* not linked into any encode chain.
*/
{ /* extensions[25]: all assumed to have Ib */
{OP_add, 0x820020, "add", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_or, 0x820021, "or", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_adc, 0x820022, "adc", Eb, xx, Ib, Eb, xx, mrm|i64, (fW6|fRC), END_LIST},
{OP_sbb, 0x820023, "sbb", Eb, xx, Ib, Eb, xx, mrm|i64, (fW6|fRC), END_LIST},
{OP_and, 0x820024, "and", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_sub, 0x820025, "sub", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_xor, 0x820026, "xor", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_cmp, 0x820027, "cmp", xx, xx, Eb, Ib, xx, mrm|i64, fW6, END_LIST},
},
/* group 1d (Intel now calling Group 1A) -- first opcode byte 8f */
{ /* extensions[26] */
{OP_pop, 0x8f0020, "pop", Esv, xsp, xsp, i_xSP, xx, mrm, x, tfb[0x17]},
/* we shouldn't ever get here for these, as this becomes an XOP prefix */
{INVALID, 0x8f0021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 1 */
{ /* extensions[27] */
{INVALID, 0x090138, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blcfill, 0x090139, "blcfill", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsfill, 0x09013a, "blsfill", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blcs, 0x09013b, "blcs", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_tzmsk, 0x09013c, "tzmsk", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blcic, 0x09013d, "blcic", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsic, 0x09013e, "blsic", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_t1mskc, 0x09013f, "t1mskc", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
},
/* XOP group 2 */
{ /* extensions[28] */
{INVALID, 0x090238, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blcmsk, 0x090239, "blcmsk",By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x09023a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blci, 0x09023e, "blci", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x09023f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 3 */
{ /* extensions[29] */
/* XXX i#1311: these instrs implicitly write to memory which we should
* find a way to encode into the IR.
*/
{OP_llwpcb, 0x091238, "llwpcb", xx, xx, Ry, xx, xx, mrm|vex, x, END_LIST},
{OP_slwpcb, 0x091239, "slwpcb", Ry, xx, xx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x09123a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 4: all assumed to have a 4-byte immediate by xop_a_extra[] */
{ /* extensions[30] */
/* XXX i#1311: these instrs implicitly write to memory which we should
* find a way to encode into the IR.
*/
{OP_lwpins, 0x0a1238, "lwpins", xx, xx, By, Ed, Id, mrm|vex, fWC, END_LIST},
{OP_lwpval, 0x0a1239, "lwpval", xx, xx, By, Ed, Id, mrm|vex, x, END_LIST},
{INVALID, 0x0a123a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 17 */
{ /* extensions[31] */
{INVALID, 0x38f338, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blsr, 0x38f339, "blsr", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsmsk, 0x38f33a, "blsmsk", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsi, 0x38f33b, "blsi", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x38f33c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Two-byte instructions that differ depending on presence of
* prefixes, indexed in this order:
* none, 0xf3, 0x66, 0xf2
* A second set is used for vex-encoded instructions, indexed in the
* same order by prefix.
* A third set is used for evex-encoded instructions, indexed in the
* same order by prefix.
*
* N.B.: to avoid having a full entry here when there is only one
* valid opcode prefix, use |reqp in the original entry instead of
* pointing to this table.
*/
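/* The following sketch is illustrative only and is not part of the decode
 * tables: it shows one plausible way a decoder could map the data-size
 * prefix and the encoding class to a column index in
 * prefix_extensions[][12], assuming the column order documented above
 * (none, 0xf3, 0x66, 0xf2, then +4 for the vex set and +8 for the evex
 * set). The helper name and its parameters are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static inline int
prefix_extension_column(int data_prefix /* 0, 0xf3, 0x66, or 0xf2 */,
                        int is_vex, int is_evex)
{
    int col = (data_prefix == 0xf3) ? 1 :
        (data_prefix == 0x66) ? 2 :
        (data_prefix == 0xf2) ? 3 : 0;
    if (is_evex)
        col += 8; /* third set: evex-encoded */
    else if (is_vex)
        col += 4; /* second set: vex-encoded */
    return col;
}
#endif
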
const instr_info_t prefix_extensions[][12] = {
/* prefix extension 0 */
{
{OP_movups, 0x0f1010, "movups", Vps, xx, Wps, xx, xx, mrm, x, tpe[1][0]},
{MOD_EXT, 0xf30f1010, "(mod ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},
{OP_movupd, 0x660f1010, "movupd", Vpd, xx, Wpd, xx, xx, mrm, x, tpe[1][2]},
{MOD_EXT, 0xf20f1010, "(mod ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},
{OP_vmovups, 0x0f1010, "vmovups", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tpe[1][4]},
{MOD_EXT, 0xf30f1010, "(mod ext 8)", xx, xx, xx, xx, xx, mrm|vex, x, 8},
{OP_vmovupd, 0x660f1010, "vmovupd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, tpe[1][6]},
{MOD_EXT, 0xf20f1010, "(mod ext 9)", xx, xx, xx, xx, xx, mrm|vex, x, 9},
{EVEX_W_EXT, 0x0f1010, "(evex_W ext 0)", xx, xx, xx, xx, xx, mrm|evex, x, 0},
{MOD_EXT, 0xf30f1010, "(mod ext 20)", xx, xx, xx, xx, xx, mrm|evex, x, 20},
{EVEX_W_EXT, 0x660f1010, "(evex_W ext 2)", xx, xx, xx, xx, xx, mrm|evex, x, 2},
{MOD_EXT, 0xf20f1010, "(mod ext 21)", xx, xx, xx, xx, xx, mrm|evex, x, 21},
}, /* prefix extension 1 */
{
{OP_movups, 0x0f1110, "movups", Wps, xx, Vps, xx, xx, mrm, x, END_LIST},
{OP_movss, 0xf30f1110, "movss", Wss, xx, Vss, xx, xx, mrm, x, END_LIST},
{OP_movupd, 0x660f1110, "movupd", Wpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{OP_movsd, 0xf20f1110, "movsd", Wsd, xx, Vsd, xx, xx, mrm, x, END_LIST},
{OP_vmovups, 0x0f1110, "vmovups", Wvs, xx, Vvs, xx, xx, mrm|vex, x, tevexw[0][0]},
{MOD_EXT, 0xf30f1110, "(mod ext 10)", xx, xx, xx, xx, xx, mrm|vex, x, 10},
{OP_vmovupd, 0x660f1110, "vmovupd", Wvd, xx, Vvd, xx, xx, mrm|vex, x, tevexw[2][1]},
{MOD_EXT, 0xf20f1110, "(mod ext 11)", xx, xx, xx, xx, xx, mrm|vex, x, 11},
{EVEX_W_EXT, 0x0f1110, "(evex_W ext 1)", xx, xx, xx, xx, xx, mrm|evex, x, 1},
{MOD_EXT, 0xf30f1110, "(mod ext 22)", xx, xx, xx, xx, xx, mrm|evex, x, 22},
{EVEX_W_EXT, 0x660f1110, "(evex_W ext 3)", xx, xx, xx, xx, xx, mrm|evex, x, 3},
{MOD_EXT, 0xf20f1110, "(mod ext 23)", xx, xx, xx, xx, xx, mrm|evex, x, 23},
}, /* prefix extension 2 */
{
    /* i#319: note that the reg-reg form of the load version (0f12) is legal
     * and has a separate mnemonic ("movhlps"), yet the reg-reg form of
     * the store version (0f13) is illegal
     */
{OP_movlps, 0x0f1210, "movlps", Vq_dq, xx, Wq_dq, xx, xx, mrm, x, tpe[3][0]}, /*"movhlps" if reg-reg */
{OP_movsldup, 0xf30f1210, "movsldup", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_movlpd, 0x660f1210, "movlpd", Vq_dq, xx, Mq, xx, xx, mrm, x, tpe[3][2]},
{OP_movddup, 0xf20f1210, "movddup", Vpd, xx, Wq_dq, xx, xx, mrm, x, END_LIST},
{OP_vmovlps, 0x0f1210, "vmovlps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|vex|reqL0, x, tpe[3][4]}, /*"vmovhlps" if reg-reg */
{OP_vmovsldup,0xf30f1210, "vmovsldup", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tevexw[18][0]},
{OP_vmovlpd, 0x660f1210, "vmovlpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|vex, x, tpe[3][6]},
{OP_vmovddup, 0xf20f1210, "vmovddup", Vvd, xx, Wh_x, xx, xx, mrm|vex, x, tevexw[19][1]},
{EVEX_W_EXT, 0x0f1210, "(evex_W ext 14)", xx, xx, xx, xx, xx, mrm|evex, x, 14},
{EVEX_W_EXT, 0xf30f1210, "(evex_W ext 18)", xx, xx, xx, xx, xx, mrm|evex, x, 18},
{EVEX_W_EXT, 0x660f1210, "(evex_W ext 16)", xx, xx, xx, xx, xx, mrm|evex, x, 16},
{EVEX_W_EXT, 0xf20f1210, "(evex_W ext 19)", xx, xx, xx, xx, xx, mrm|evex, x, 19},
}, /* prefix extension 3 */
{
{OP_movlps, 0x0f1310, "movlps", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movlpd, 0x660f1310, "movlpd", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovlps, 0x0f1310, "vmovlps", Mq, xx, Vq_dq, xx, xx, mrm|vex, x, tevexw[14][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovlpd, 0x660f1310, "vmovlpd", Mq, xx, Vq_dq, xx, xx, mrm|vex, x, tevexw[16][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1310, "(evex_W ext 15)", xx, xx, xx, xx, xx, mrm|evex, x, 15},
{INVALID, 0xf30f1310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f1310, "(evex_W ext 17)", xx, xx, xx, xx, xx, mrm|evex, x, 17},
{INVALID, 0xf20f1310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 4 */
{
{OP_unpcklps, 0x0f1410, "unpcklps", Vps, xx, Wq_dq, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_unpcklpd, 0x660f1410, "unpcklpd", Vpd, xx, Wq_dq, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpcklps, 0x0f1410, "vunpcklps", Vvs, xx, Hh_x, Wh_x, xx, mrm|vex, x, tevexw[25][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpcklpd, 0x660f1410, "vunpcklpd", Vvd, xx, Hh_x, Wh_x, xx, mrm|vex, x, tevexw[26][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1410, "(evex_W ext 25)", xx, xx, xx, xx, xx, mrm|evex, x, 25},
{INVALID, 0xf30f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f1410, "(evex_W ext 26)", xx, xx, xx, xx, xx, mrm|evex, x, 26},
{INVALID, 0xf20f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 5 */
{
{OP_unpckhps, 0x0f1510, "unpckhps", Vps, xx, Wq_dq, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_unpckhpd, 0x660f1510, "unpckhpd", Vpd, xx, Wq_dq, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpckhps, 0x0f1510, "vunpckhps", Vvs, xx, Hh_x, Wh_x, xx, mrm|vex, x, tevexw[27][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpckhpd, 0x660f1510, "vunpckhpd", Vvd, xx, Hh_x, Wh_x, xx, mrm|vex, x, tevexw[28][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1510, "(evex_W ext 27)", xx, xx, xx, xx, xx, mrm|evex, x, 27},
{INVALID, 0xf30f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f1510, "(evex_W ext 28)", xx, xx, xx, xx, xx, mrm|evex, x, 28},
{INVALID, 0xf20f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 6 */
{
    /* i#319: note that the reg-reg form of the load version (0f16) is legal
     * and has a separate mnemonic ("movlhps"), yet the reg-reg form of
     * the store version (0f17) is illegal
     */
{OP_movhps, 0x0f1610, "movhps", Vq_dq, xx, Wq_dq, xx, xx, mrm, x, tpe[7][0]}, /*"movlhps" if reg-reg */
{OP_movshdup, 0xf30f1610, "movshdup", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_movhpd, 0x660f1610, "movhpd", Vq_dq, xx, Mq, xx, xx, mrm, x, tpe[7][2]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhps, 0x0f1610, "vmovhps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|vex|reqL0, x, tpe[7][4]}, /*"vmovlhps" if reg-reg */
{OP_vmovshdup, 0xf30f1610, "vmovshdup", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tevexw[24][0]},
{OP_vmovhpd, 0x660f1610, "vmovhpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|vex|reqL0, x, tpe[7][6]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1610, "(evex_W ext 20)", xx, xx, xx, xx, xx, mrm|evex, x, 20},
{EVEX_W_EXT, 0xf30f1610, "(evex_W ext 24)", xx, xx, xx, xx, xx, mrm|evex, x, 24},
{EVEX_W_EXT, 0x660f1610, "(evex_W ext 22)", xx, xx, xx, xx, xx, mrm|evex, x, 22},
{INVALID, 0xf20f1610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 7 */
{
{OP_movhps, 0x0f1710, "movhps", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movhpd, 0x660f1710, "movhpd", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhps, 0x0f1710, "vmovhps", Mq, xx, Vq_dq, xx, xx, mrm|vex|reqL0, x, tevexw[20][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhpd, 0x660f1710, "vmovhpd", Mq, xx, Vq_dq, xx, xx, mrm|vex|reqL0, x, tevexw[22][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1710, "(evex_W ext 21)", xx, xx, xx, xx, xx, mrm|evex, x, 21},
{INVALID, 0xf30f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f1710, "(evex_W ext 23)", xx, xx, xx, xx, xx, mrm|evex, x, 23},
{INVALID, 0xf20f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 8 */
{
{OP_movaps, 0x0f2810, "movaps", Vps, xx, Wps, xx, xx, mrm, x, tpe[9][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movapd, 0x660f2810, "movapd", Vpd, xx, Wpd, xx, xx, mrm, x, tpe[9][2]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovaps, 0x0f2810, "vmovaps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tpe[9][4]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovapd, 0x660f2810, "vmovapd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, tpe[9][6]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2810, "(evex_W ext 4)", xx, xx, xx, xx, xx, mrm|evex, x, 4},
{INVALID, 0xf30f2810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2810, "(evex_W ext 6)", xx, xx, xx, xx, xx, mrm|evex, x, 6},
{INVALID, 0xf20f2810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 9 */
{
{OP_movaps, 0x0f2910, "movaps", Wps, xx, Vps, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movapd, 0x660f2910, "movapd", Wpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovaps, 0x0f2910, "vmovaps", Wvs, xx, Vvs, xx, xx, mrm|vex, x, tevexw[4][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovapd, 0x660f2910, "vmovapd", Wvd, xx, Vvd, xx, xx, mrm|vex, x, tevexw[6][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2910, "(evex_W ext 5)", xx, xx, xx, xx, xx, mrm|evex, x, 5},
{INVALID, 0xf30f2910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2910, "(evex_W ext 7)", xx, xx, xx, xx, xx, mrm|evex, x, 7},
{INVALID, 0xf20f2910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 10 */
{
{OP_cvtpi2ps, 0x0f2a10, "cvtpi2ps", Vq_dq, xx, Qq, xx, xx, mrm, x, END_LIST},
{OP_cvtsi2ss, 0xf30f2a10, "cvtsi2ss", Vss, xx, Ed_q, xx, xx, mrm, x, END_LIST},
{OP_cvtpi2pd, 0x660f2a10, "cvtpi2pd", Vpd, xx, Qq, xx, xx, mrm, x, END_LIST},
{OP_cvtsi2sd, 0xf20f2a10, "cvtsi2sd", Vsd, xx, Ed_q, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsi2ss, 0xf30f2a10, "vcvtsi2ss", Vdq, xx, H12_dq, Ed_q, xx, mrm|vex, x, tevexw[31][0]},
{INVALID, 0x660f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsi2sd, 0xf20f2a10, "vcvtsi2sd", Vdq, xx, Hsd, Ed_q, xx, mrm|vex, x, tevexw[32][0]},
{INVALID, 0x0f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f2a10, "(evex_W ext 31)", xx, xx, xx, xx, xx, mrm|evex, x, 31},
{INVALID, 0x660f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf20f2a10, "(evex_W ext 32)", xx, xx, xx, xx, xx, mrm|evex, x, 32},
}, /* prefix extension 11 */
{
{OP_movntps, 0x0f2b10, "movntps", Mps, xx, Vps, xx, xx, mrm, x, END_LIST},
{OP_movntss, 0xf30f2b10, "movntss", Mss, xx, Vss, xx, xx, mrm, x, END_LIST},
{OP_movntpd, 0x660f2b10, "movntpd", Mpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{OP_movntsd, 0xf20f2b10, "movntsd", Msd, xx, Vsd, xx, xx, mrm, x, END_LIST},
{OP_vmovntps, 0x0f2b10, "vmovntps", Mvs, xx, Vvs, xx, xx, mrm|vex, x, tevexw[33][0]},
/* XXX: AMD doesn't list movntss in their new manual => assuming no vex version */
{INVALID, 0xf30f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovntpd, 0x660f2b10, "vmovntpd", Mvd, xx, Vvd, xx, xx, mrm|vex, x, tevexw[34][1]},
{INVALID, 0xf20f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2b10, "(evex_W ext 33)", xx, xx, xx, xx, xx, mrm|evex, x, 33},
{INVALID, 0xf30f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2b10, "(evex_W ext 34)", xx, xx, xx, xx, xx, mrm|evex, x, 34},
{INVALID, 0xf20f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 12 */
{
{OP_cvttps2pi, 0x0f2c10, "cvttps2pi", Pq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvttss2si, 0xf30f2c10, "cvttss2si", Gd_q, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvttpd2pi, 0x660f2c10, "cvttpd2pi", Pq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvttsd2si, 0xf20f2c10, "cvttsd2si", Gd_q, xx, Wsd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvttss2si, 0xf30f2c10, "vcvttss2si", Gd_q, xx, Wss, xx, xx, mrm|vex, x, tevexw[35][0]},
{INVALID, 0x660f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvttsd2si, 0xf20f2c10, "vcvttsd2si", Gd_q, xx, Wsd, xx, xx, mrm|vex, x, tevexw[36][0]},
{INVALID, 0x0f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f2c10, "(evex_W ext 35)", xx, xx, xx, xx, xx, mrm|evex, x, 35},
{INVALID, 0x660f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf20f2c10, "(evex_W ext 36)", xx, xx, xx, xx, xx, mrm|evex, x, 36},
}, /* prefix extension 13 */
{
{OP_cvtps2pi, 0x0f2d10, "cvtps2pi", Pq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtss2si, 0xf30f2d10, "cvtss2si", Gd_q, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2pi, 0x660f2d10, "cvtpd2pi", Pq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtsd2si, 0xf20f2d10, "cvtsd2si", Gd_q, xx, Wsd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtss2si, 0xf30f2d10, "vcvtss2si", Gd_q, xx, Wss, xx, xx, mrm|vex, x, tevexw[29][0]},
{INVALID, 0x660f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsd2si, 0xf20f2d10, "vcvtsd2si", Gd_q, xx, Wsd, xx, xx, mrm|vex, x, tevexw[30][0]},
{INVALID, 0x0f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f2d10, "(evex_W ext 29)", xx, xx, xx, xx, xx, mrm|evex, x, 29},
{INVALID, 0x660f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf20f2d10, "(evex_W ext 30)", xx, xx, xx, xx, xx, mrm|evex, x, 30},
}, /* prefix extension 14 */
{
{OP_ucomiss, 0x0f2e10, "ucomiss", xx, xx, Vss, Wss, xx, mrm, fW6, END_LIST},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_ucomisd, 0x660f2e10, "ucomisd", xx, xx, Vsd, Wsd, xx, mrm, fW6, END_LIST},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vucomiss, 0x0f2e10, "vucomiss", xx, xx, Vss, Wss, xx, mrm|vex, fW6, tevexw[37][0]},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vucomisd, 0x660f2e10, "vucomisd", xx, xx, Vsd, Wsd, xx, mrm|vex, fW6, tevexw[38][1]},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2e10, "(evex_W ext 37)", xx, xx, xx, xx, xx, mrm|evex, x, 37},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2e10, "(evex_W ext 38)", xx, xx, xx, xx, xx, mrm|evex, x, 38},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 15 */
{
{OP_comiss, 0x0f2f10, "comiss", xx, xx, Vss, Wss, xx, mrm, fW6, END_LIST},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_comisd, 0x660f2f10, "comisd", xx, xx, Vsd, Wsd, xx, mrm, fW6, END_LIST},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcomiss, 0x0f2f10, "vcomiss", xx, xx, Vss, Wss, xx, mrm|vex, fW6, tevexw[39][0]},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vcomisd, 0x660f2f10, "vcomisd", xx, xx, Vsd, Wsd, xx, mrm|vex, fW6, tevexw[40][1]},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2e10, "(evex_W ext 39)", xx, xx, xx, xx, xx, mrm|evex, x, 39},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2e10, "(evex_W ext 40)", xx, xx, xx, xx, xx, mrm|evex, x, 40},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 16 */
{
{OP_movmskps, 0x0f5010, "movmskps", Gr, xx, Ups, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_movmskpd, 0x660f5010, "movmskpd", Gr, xx, Upd, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovmskps, 0x0f5010, "vmovmskps", Gr, xx, Uvs, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovmskpd, 0x660f5010, "vmovmskpd", Gr, xx, Uvd, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 17 */
{
{OP_sqrtps, 0x0f5110, "sqrtps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_sqrtss, 0xf30f5110, "sqrtss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_sqrtpd, 0x660f5110, "sqrtpd", Vpd, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_sqrtsd, 0xf20f5110, "sqrtsd", Vsd, xx, Wsd, xx, xx, mrm, x, END_LIST},
{OP_vsqrtps, 0x0f5110, "vsqrtps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vsqrtss, 0xf30f5110, "vsqrtss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vsqrtpd, 0x660f5110, "vsqrtpd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, END_LIST},
{OP_vsqrtsd, 0xf20f5110, "vsqrtsd", Vdq, xx, Hsd, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 18 */
{
{OP_rsqrtps, 0x0f5210, "rsqrtps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_rsqrtss, 0xf30f5210, "rsqrtss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrsqrtps, 0x0f5210, "vrsqrtps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vrsqrtss, 0xf30f5210, "vrsqrtss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 19 */
{
{OP_rcpps, 0x0f5310, "rcpps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_rcpss, 0xf30f5310, "rcpss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrcpps, 0x0f5310, "vrcpps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vrcpss, 0xf30f5310, "vrcpss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 20 */
{
{OP_andps, 0x0f5410, "andps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_andpd, 0x660f5410, "andpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandps, 0x0f5410, "vandps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[20][8]},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vandpd, 0x660f5410, "vandpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[20][10]},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandps, 0x0f5410, "vandps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandpd, 0x660f5450, "vandpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 21 */
{
{OP_andnps, 0x0f5510, "andnps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_andnpd, 0x660f5510, "andnpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandnps, 0x0f5510, "vandnps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[21][8]},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vandnpd, 0x660f5510, "vandnpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[21][10]},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandnps, 0x0f5510, "vandnps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandnpd, 0x660f5550, "vandnpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 22 */
{
{OP_orps, 0x0f5610, "orps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_orpd, 0x660f5610, "orpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorps, 0x0f5610, "vorps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[22][8]},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vorpd, 0x660f5610, "vorpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[22][10]},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorps, 0x0f5610, "vorps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorpd, 0x660f5650, "vorpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 23 */
{
{OP_xorps, 0x0f5710, "xorps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_xorpd, 0x660f5710, "xorpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vxorps, 0x0f5710, "vxorps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[23][8]},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vxorpd, 0x660f5710, "vxorpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[23][10]},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vxorps, 0x0f5710, "vxorps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vxorpd, 0x660f5750, "vxorpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 24 */
{
{OP_addps, 0x0f5810, "addps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_addss, 0xf30f5810, "addss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_addpd, 0x660f5810, "addpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_addsd, 0xf20f5810, "addsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vaddps, 0x0f5810, "vaddps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[24][8]},
{OP_vaddss, 0xf30f5810, "vaddss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[24][9]},
{OP_vaddpd, 0x660f5810, "vaddpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[24][10]},
{OP_vaddsd, 0xf20f5810, "vaddsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[24][11]},
{OP_vaddps, 0x0f5810, "vaddps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vaddss, 0xf30f5810, "vaddss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vaddpd, 0x660f5850, "vaddpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vaddsd, 0xf20f5850, "vaddsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 25 */
{
{OP_mulps, 0x0f5910, "mulps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_mulss, 0xf30f5910, "mulss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_mulpd, 0x660f5910, "mulpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_mulsd, 0xf20f5910, "mulsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vmulps, 0x0f5910, "vmulps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[25][8]},
{OP_vmulss, 0xf30f5910, "vmulss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[25][9]},
{OP_vmulpd, 0x660f5910, "vmulpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[25][10]},
{OP_vmulsd, 0xf20f5910, "vmulsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[25][11]},
{OP_vmulps, 0x0f5910, "vmulps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vmulss, 0xf30f5910, "vmulss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vmulpd, 0x660f5950, "vmulpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vmulsd, 0xf20f5950, "vmulsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 26 */
{
{OP_cvtps2pd, 0x0f5a10, "cvtps2pd", Vpd, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtss2sd, 0xf30f5a10, "cvtss2sd", Vsd, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2ps, 0x660f5a10, "cvtpd2ps", Vps, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtsd2ss, 0xf20f5a10, "cvtsd2ss", Vss, xx, Wsd, xx, xx, mrm, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtps2pd, 0x0f5a10, "vcvtps2pd", Vvd, xx, Wvs, xx, xx, mrm|vex, x, tpe[26][8]},
{OP_vcvtss2sd, 0xf30f5a10, "vcvtss2sd", Vdq, xx, Hsd, Wss, xx, mrm|vex, x, tpe[26][9]},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtpd2ps, 0x660f5a10, "vcvtpd2ps", Vvs, xx, Wvd, xx, xx, mrm|vex, x, tpe[26][10]},
{OP_vcvtsd2ss, 0xf20f5a10, "vcvtsd2ss", Vdq, xx, H12_dq, Wsd, xx, mrm|vex, x, tpe[26][11]},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtps2pd, 0x0f5a10, "vcvtps2pd", Ved, xx, KEb, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvtss2sd, 0xf30f5a10, "vcvtss2sd", Vdq, xx, KE1b, Hsd, Wss, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtpd2ps, 0x660f5a50, "vcvtpd2ps", Ves, xx, KEw, Wed, xx, mrm|evex, x, END_LIST},
{OP_vcvtsd2ss, 0xf20f5a50, "vcvtsd2ss", Vdq, xx, KE1b, H12_dq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 27 */
{
{OP_cvtdq2ps, 0x0f5b10, "cvtdq2ps", Vps, xx, Wdq, xx, xx, mrm, x, END_LIST},
{OP_cvttps2dq, 0xf30f5b10, "cvttps2dq", Vdq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtps2dq, 0x660f5b10, "cvtps2dq", Vdq, xx, Wps, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtdq2ps, 0x0f5b10, "vcvtdq2ps", Vvs, xx, Wx, xx, xx, mrm|vex, x, tevexw[56][0]},
{OP_vcvttps2dq, 0xf30f5b10, "vcvttps2dq", Vx, xx, Wvs, xx, xx, mrm|vex, x, tpe[27][9]},
{OP_vcvtps2dq, 0x660f5b10, "vcvtps2dq", Vx, xx, Wvs, xx, xx, mrm|vex, x, tpe[27][10]},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f5b10, "(evex_W ext 56)", xx, xx, xx, xx, xx, mrm|evex, x, 56},
{OP_vcvttps2dq, 0xf30f5b10, "vcvttps2dq", Ve, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvtps2dq, 0x660f5b10, "vcvtps2dq", Ve, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 28 */
{
{OP_subps, 0x0f5c10, "subps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_subss, 0xf30f5c10, "subss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_subpd, 0x660f5c10, "subpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_subsd, 0xf20f5c10, "subsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vsubps, 0x0f5c10, "vsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[28][8]},
{OP_vsubss, 0xf30f5c10, "vsubss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[28][9]},
{OP_vsubpd, 0x660f5c10, "vsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[28][10]},
{OP_vsubsd, 0xf20f5c10, "vsubsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[28][11]},
{OP_vsubps, 0x0f5c10, "vsubps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vsubss, 0xf30f5c10, "vsubss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vsubpd, 0x660f5c50, "vsubpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vsubsd, 0xf20f5c50, "vsubsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 29 */
{
{OP_minps, 0x0f5d10, "minps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_minss, 0xf30f5d10, "minss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_minpd, 0x660f5d10, "minpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_minsd, 0xf20f5d10, "minsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vminps, 0x0f5d10, "vminps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[29][8]},
{OP_vminss, 0xf30f5d10, "vminss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[29][9]},
{OP_vminpd, 0x660f5d10, "vminpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[29][10]},
{OP_vminsd, 0xf20f5d10, "vminsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[29][11]},
{OP_vminps, 0x0f5d10, "vminps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vminss, 0xf30f5d10, "vminss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vminpd, 0x660f5d50, "vminpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vminsd, 0xf20f5d50, "vminsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 30 */
{
{OP_divps, 0x0f5e10, "divps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_divss, 0xf30f5e10, "divss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_divpd, 0x660f5e10, "divpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_divsd, 0xf20f5e10, "divsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vdivps, 0x0f5e10, "vdivps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[30][8]},
{OP_vdivss, 0xf30f5e10, "vdivss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[30][9]},
{OP_vdivpd, 0x660f5e10, "vdivpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[30][10]},
{OP_vdivsd, 0xf20f5e10, "vdivsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[30][11]},
{OP_vdivps, 0x0f5e10, "vdivps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vdivss, 0xf30f5e10, "vdivss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vdivpd, 0x660f5e50, "vdivpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vdivsd, 0xf20f5e50, "vdivsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 31 */
{
{OP_maxps, 0x0f5f10, "maxps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_maxss, 0xf30f5f10, "maxss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_maxpd, 0x660f5f10, "maxpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_maxsd, 0xf20f5f10, "maxsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vmaxps, 0x0f5f10, "vmaxps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, tpe[31][8]},
{OP_vmaxss, 0xf30f5f10, "vmaxss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, tpe[31][9]},
{OP_vmaxpd, 0x660f5f10, "vmaxpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, tpe[31][10]},
{OP_vmaxsd, 0xf20f5f10, "vmaxsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, tpe[31][11]},
{OP_vmaxps, 0x0f5f10, "vmaxps", Ves, xx, KEw, Hes, Wes, mrm|evex, x, END_LIST},
{OP_vmaxss, 0xf30f5f10, "vmaxss", Vdq, xx, KE1b, Hdq, Wss, mrm|evex, x, END_LIST},
{OP_vmaxpd, 0x660f5f50, "vmaxpd", Ved, xx, KEb, Hed, Wed, mrm|evex, x, END_LIST},
{OP_vmaxsd, 0xf20f5f50, "vmaxsd", Vdq, xx, KE1b, Hdq, Wsd, mrm|evex, x, END_LIST},
}, /* prefix extension 32 */
{
{OP_punpcklbw, 0x0f6010, "punpcklbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[32][2]},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklbw, 0x660f6010, "punpcklbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklbw, 0x660f6010, "vpunpcklbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[32][10]},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpcklbw, 0x660f6010, "vpunpcklbw", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 33 */
{
{OP_punpcklwd, 0x0f6110, "punpcklwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[33][2]},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklwd, 0x660f6110, "punpcklwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklwd, 0x660f6110, "vpunpcklwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[33][10]},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpcklwd, 0x660f6110, "vpunpcklwd", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 34 */
{
{OP_punpckldq, 0x0f6210, "punpckldq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[34][2]},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckldq, 0x660f6210, "punpckldq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckldq, 0x660f6210, "vpunpckldq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[34][10]},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpckldq, 0x660f6210, "vpunpckldq", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 35 */
{
{OP_packsswb, 0x0f6310, "packsswb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[35][2]},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packsswb, 0x660f6310, "packsswb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpacksswb, 0x660f6310, "vpacksswb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[35][10]},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpacksswb, 0x660f6310, "vpacksswb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 36 */
{
{OP_pcmpgtb, 0x0f6410, "pcmpgtb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[36][2]},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtb, 0x660f6410, "pcmpgtb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtb, 0x660f6410, "vpcmpgtb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[36][10]},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpgtb, 0x660f6410, "vpcmpgtb", KPq, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 37 */
{
{OP_pcmpgtw, 0x0f6510, "pcmpgtw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[37][2]},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtw, 0x660f6510, "pcmpgtw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtw, 0x660f6510, "vpcmpgtw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[37][10]},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpgtw, 0x660f6510, "vpcmpgtw", KPd, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 38 */
{
{OP_pcmpgtd, 0x0f6610, "pcmpgtd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[38][2]},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtd, 0x660f6610, "pcmpgtd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtd, 0x660f6610, "vpcmpgtd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[38][10]},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpgtd, 0x660f6610, "vpcmpgtd", KPb, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 39 */
{
{OP_packuswb, 0x0f6710, "packuswb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[39][2]},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packuswb, 0x660f6710, "packuswb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpackuswb, 0x660f6710, "vpackuswb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[39][10]},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpackuswb, 0x660f6710, "vpackuswb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 40 */
{
{OP_punpckhbw, 0x0f6810, "punpckhbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[40][2]},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhbw, 0x660f6810, "punpckhbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhbw, 0x660f6810, "vpunpckhbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[40][10]},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpckhbw, 0x660f6810, "vpunpckhbw", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 41 */
{
{OP_punpckhwd, 0x0f6910, "punpckhwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[41][2]},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhwd, 0x660f6910, "punpckhwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhwd, 0x660f6910, "vpunpckhwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[41][10]},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpckhwd, 0x660f6910, "vpunpckhwd", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 42 */
{
{OP_punpckhdq, 0x0f6a10, "punpckhdq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[42][2]},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhdq, 0x660f6a10, "punpckhdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhdq, 0x660f6a10, "vpunpckhdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[42][10]},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpckhdq, 0x660f6a10, "vpunpckhdq", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 43 */
{
{OP_packssdw, 0x0f6b10, "packssdw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[43][2]},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packssdw, 0x660f6b10, "packssdw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpackssdw, 0x660f6b10, "vpackssdw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[43][10]},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpackssdw, 0x660f6b10, "vpackssdw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 44 */
{
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklqdq, 0x660f6c10, "punpcklqdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklqdq, 0x660f6c10, "vpunpcklqdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[44][10]},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpcklqdq, 0x660f6c50, "vpunpcklqdq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 45 */
{
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhqdq, 0x660f6d10, "punpckhqdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhqdq, 0x660f6d10, "vpunpckhqdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[45][10]},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpunpckhqdq, 0x660f6d50, "vpunpckhqdq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 46 */
{
/* movd zeroes the top bits when the destination is an mmx or xmm reg */
{OP_movd, 0x0f6e10, "movd", Pq, xx, Ed_q, xx, xx, mrm, x, tpe[46][2]},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
    /* XXX: with rex.w set, this opcode is named movq. */
{OP_movd, 0x660f6e10, "movd", Vdq, xx, Ed_q, xx, xx, mrm, x, tpe[51][0]},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f6e10, "(vex_W ext 108)", xx, xx, xx, xx, xx, mrm|vex, x, 108},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f6e10, "(evex_W ext 135)", xx, xx, xx, xx, xx, mrm|evex, x, 135},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 47: all assumed to have Ib */
{
{OP_pshufw, 0x0f7010, "pshufw", Pq, xx, Qq, Ib, xx, mrm, x, END_LIST},
{OP_pshufhw, 0xf30f7010, "pshufhw", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{OP_pshufd, 0x660f7010, "pshufd", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{OP_pshuflw, 0xf20f7010, "pshuflw", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{INVALID, 0x0f7010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpshufhw, 0xf30f7010, "vpshufhw", Vx, xx, Wx, Ib, xx, mrm|vex, x, tpe[47][9]},
{OP_vpshufd, 0x660f7010, "vpshufd", Vx, xx, Wx, Ib, xx, mrm|vex, x, tpe[47][10]},
{OP_vpshuflw, 0xf20f7010, "vpshuflw", Vx, xx, Wx, Ib, xx, mrm|vex, x, tpe[47][11]},
{INVALID, 0x0f7010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpshufhw, 0xf30f7010, "vpshufhw", Ve, xx, KEd, Ib, We, mrm|evex, x, END_LIST},
{OP_vpshufd, 0x660f7010, "vpshufd", Ve, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{OP_vpshuflw, 0xf20f7010, "vpshuflw", Ve, xx, KEd, Ib, We, mrm|evex, x, END_LIST},
}, /* prefix extension 48 */
{
{OP_pcmpeqb, 0x0f7410, "pcmpeqb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[48][2]},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqb, 0x660f7410, "pcmpeqb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqb, 0x660f7410, "vpcmpeqb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[48][10]},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpeqb, 0x660f7410, "vpcmpeqb", KPq, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 49 */
{
{OP_pcmpeqw, 0x0f7510, "pcmpeqw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[49][2]},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqw, 0x660f7510, "pcmpeqw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqw, 0x660f7510, "vpcmpeqw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[49][10]},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpeqw, 0x660f7510, "vpcmpeqw", KPd, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 50 */
{
{OP_pcmpeqd, 0x0f7610, "pcmpeqd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[50][2]},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqd, 0x660f7610, "pcmpeqd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqd, 0x660f7610, "vpcmpeqd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[50][10]},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpcmpeqd, 0x660f7610, "vpcmpeqd", KPw, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 51 */
{
{OP_movd, 0x0f7e10, "movd", Ed_q, xx, Pd_q, xx, xx, mrm, x, tpe[51][2]},
/* movq zeroes the top bits when the destination is an mmx or xmm reg */
{OP_movq, 0xf30f7e10, "movq", Vdq, xx, Wq_dq, xx, xx, mrm, x, tpe[61][2]},
{OP_movd, 0x660f7e10, "movd", Ed_q, xx, Vd_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovq, 0xf30f7e10, "vmovq", Vdq, xx, Wq_dq, xx, xx, mrm|vex, x, tpe[61][6]},
{VEX_W_EXT, 0x660f7e10, "(vex_W ext 109)", xx, xx, xx, xx, xx, mrm|vex, x, 109},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f7e10, "(evex_W ext 136)", xx, xx, xx, xx, xx, mrm|evex, x, 136},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 52: all assumed to have Ib */
{
{OP_cmpps, 0x0fc210, "cmpps", Vps, xx, Wps, Ib, Vps, mrm, x, END_LIST},
{OP_cmpss, 0xf30fc210, "cmpss", Vss, xx, Wss, Ib, Vss, mrm, x, END_LIST},
{OP_cmppd, 0x660fc210, "cmppd", Vpd, xx, Wpd, Ib, Vpd, mrm, x, END_LIST},
{OP_cmpsd, 0xf20fc210, "cmpsd", Vsd, xx, Wsd, Ib, Vsd, mrm, x, END_LIST},
{OP_vcmpps, 0x0fc210, "vcmpps", Vvs, xx, Hvs, Wvs, Ib, mrm|vex, x, tpe[52][8]},
{OP_vcmpss, 0xf30fc210, "vcmpss", Vdq, xx, Hdq, Wss, Ib, mrm|vex, x, tpe[52][9]},
{OP_vcmppd, 0x660fc210, "vcmppd", Vvd, xx, Hvd, Wvd, Ib, mrm|vex, x, tpe[52][10]},
{OP_vcmpsd, 0xf20fc210, "vcmpsd", Vdq, xx, Hdq, Wsd, Ib, mrm|vex, x, tpe[52][11]},
{OP_vcmpps, 0x0fc210, "vcmpps", KPw, xx, KEw, Ib, Hes, xop|mrm|evex, x, exop[90]},
{OP_vcmpss, 0xf30fc210, "vcmpss", KP1b, xx, KE1b, Ib, Hdq, xop|mrm|evex, x, exop[91]},
{OP_vcmppd, 0x660fc250, "vcmppd", KPb, xx, KEb, Ib, Hed, xop|mrm|evex, x, exop[92]},
{OP_vcmpsd, 0xf20fc250, "vcmpsd", KP1b, xx, KE1b, Ib, Hdq, xop|mrm|evex, x, exop[93]},
}, /* prefix extension 53: all assumed to have Ib */
{ /* note that gnu tools print immed first: pinsrw $0x0,(%esp),%xmm0 */
/* FIXME i#1388: pinsrw actually reads only bottom word of reg */
{OP_pinsrw, 0x0fc410, "pinsrw", Pw_q, xx, Rd_Mw, Ib, xx, mrm, x, tpe[53][2]},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pinsrw, 0x660fc410, "pinsrw", Vw_dq, xx, Rd_Mw, Ib, xx, mrm, x, END_LIST},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpinsrw, 0x660fc410, "vpinsrw", Vdq, xx, H14_dq, Rd_Mw, Ib, mrm|vex, x, tpe[53][10]},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpinsrw, 0x660fc410, "vpinsrw", Vdq, xx, H14_dq, Rd_Mw, Ib, mrm|evex, x, END_LIST},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 54: all assumed to have Ib */
{ /* note that gnu tools print immed first: pextrw $0x7,%xmm7,%edx */
{OP_pextrw, 0x0fc510, "pextrw", Gd, xx, Nw_q, Ib, xx, mrm, x, tpe[54][2]},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pextrw, 0x660fc510, "pextrw", Gd, xx, Uw_dq, Ib, xx, mrm, x, tvex[37][0]},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpextrw, 0x660fc510, "vpextrw", Gd, xx, Uw_dq, Ib, xx, mrm|vex, x, tvex[37][1]},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpextrw, 0x660fc510, "vpextrw", Gd, xx, Uw_dq, Ib, xx, mrm|evex, x, tvex[37][2]},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 55: all assumed to have Ib */
{
{OP_shufps, 0x0fc610, "shufps", Vps, xx, Wps, Ib, Vps, mrm, x, END_LIST},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_shufpd, 0x660fc610, "shufpd", Vpd, xx, Wpd, Ib, Vpd, mrm, x, END_LIST},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshufps, 0x0fc610, "vshufps", Vvs, xx, Hvs, Wvs, Ib, mrm|vex, x, tpe[55][8]},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vshufpd, 0x660fc610, "vshufpd", Vvd, xx, Hvd, Wvd, Ib, mrm|vex, x, tpe[55][10]},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshufps, 0x0fc610, "vshufps", Ves, xx, KEw, Ib, Hes, xop|mrm|evex, x, exop[94]},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshufpd, 0x660fc650, "vshufpd", Ved, xx, KEb, Ib, Hed, xop|mrm|evex, x, exop[95]},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 56 */
{
{OP_psrlw, 0x0fd110, "psrlw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[56][2]},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrlw, 0x660fd110, "psrlw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[104][0]},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrlw, 0x660fd110, "vpsrlw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[104][6]},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlw, 0x660fd110, "vpsrlw", Ve, xx, KEd, He, We, mrm|evex, x, tpe[104][10]},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 57 */
{
{OP_psrld, 0x0fd210, "psrld", Pq, xx, Qq, Pq, xx, mrm, x, tpe[57][2]},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrld, 0x660fd210, "psrld", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[107][0]},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrld, 0x660fd210, "vpsrld", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[107][6]},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fd210, "(evex_W ext 122)", xx, xx, xx, xx, xx, mrm|evex, x, 122},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 58 */
{
{OP_psrlq, 0x0fd310, "psrlq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[58][2]},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrlq, 0x660fd310, "psrlq", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[110][0]},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrlq, 0x660fd310, "vpsrlq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[110][6]},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fd310, "(evex_W ext 124)", xx, xx, xx, xx, xx, mrm|evex, x, 124},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 59 */
{
{OP_paddq, 0x0fd410, "paddq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[59][2]},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_paddq, 0x660fd410, "paddq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddq, 0x660fd410, "vpaddq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[59][10]},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddq, 0x660fd450, "vpaddq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 60 */
{
{OP_pmullw, 0x0fd510, "pmullw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[60][2]},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmullw, 0x660fd510, "pmullw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmullw, 0x660fd510, "vpmullw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[60][10]},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmullw, 0x660fd510, "vpmullw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 61 */
{
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movq2dq, 0xf30fd610, "movq2dq", Vdq, xx, Nq, xx, xx, mrm, x, END_LIST},
{OP_movq, 0x660fd610, "movq", Wq_dq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{OP_movdq2q, 0xf20fd610, "movdq2q", Pq, xx, Uq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovq, 0x660fd610, "vmovq", Wq_dq, xx, Vq_dq, xx, xx, mrm|vex, x, tvexw[108][1]},
{INVALID, 0xf20fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 62 */
{
{OP_pmovmskb, 0x0fd710, "pmovmskb", Gd, xx, Nq, xx, xx, mrm, x, tpe[62][2]},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmovmskb, 0x660fd710, "pmovmskb", Gd, xx, Udq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovmskb, 0x660fd710, "vpmovmskb", Gd, xx, Ux, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 63 */
{
{OP_psubusb, 0x0fd810, "psubusb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[63][2]},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubusb, 0x660fd810, "psubusb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubusb, 0x660fd810, "vpsubusb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[63][10]},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubusb, 0x660fd810, "vpsubusb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 64 */
{
{OP_psubusw, 0x0fd910, "psubusw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[64][2]},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubusw, 0x660fd910, "psubusw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubusw, 0x660fd910, "vpsubusw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[64][10]},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubusw, 0x660fd910, "vpsubusw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 65 */
{
{OP_pminub, 0x0fda10, "pminub", Pq, xx, Qq, Pq, xx, mrm, x, tpe[65][2]},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pminub, 0x660fda10, "pminub", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpminub, 0x660fda10, "vpminub", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[65][10]},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpminub, 0x660fda10, "vpminub", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 66 */
{
{OP_pand, 0x0fdb10, "pand", Pq, xx, Qq, Pq, xx, mrm, x, tpe[66][2]},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pand, 0x660fdb10, "pand", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpand, 0x660fdb10, "vpand", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fdb10, "(evex_W ext 41)", xx, xx, xx, xx, xx, mrm|evex, x, 41},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 67 */
{
{OP_paddusb, 0x0fdc10, "paddusb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[67][2]},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddusb, 0x660fdc10, "paddusb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddusb, 0x660fdc10, "vpaddusb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[67][10]},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddusb, 0x660fdc10, "vpaddusb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 68 */
{
{OP_paddusw, 0x0fdd10, "paddusw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[68][2]},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddusw, 0x660fdd10, "paddusw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddusw, 0x660fdd10, "vpaddusw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[68][10]},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddusw, 0x660fdd10, "vpaddusw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 69 */
{
{OP_pmaxub, 0x0fde10, "pmaxub", Pq, xx, Qq, Pq, xx, mrm, x, tpe[69][2]},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaxub, 0x660fde10, "pmaxub", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaxub, 0x660fde10, "vpmaxub", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[69][10]},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaxub, 0x660fde10, "vpmaxub", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 70 */
{
{OP_pandn, 0x0fdf10, "pandn", Pq, xx, Qq, Pq, xx, mrm, x, tpe[70][2]},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pandn, 0x660fdf10, "pandn", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpandn, 0x660fdf10, "vpandn", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fdf10, "(evex_W ext 42)", xx, xx, xx, xx, xx, mrm|evex, x, 42},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 71 */
{
{OP_pavgb, 0x0fe010, "pavgb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[71][2]},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pavgb, 0x660fe010, "pavgb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpavgb, 0x660fe010, "vpavgb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[71][10]},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpavgb, 0x660fe010, "vpavgb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 72 */
{
{OP_psraw, 0x0fe110, "psraw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[72][2]},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psraw, 0x660fe110, "psraw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[105][0]},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsraw, 0x660fe110, "vpsraw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[105][6]},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsraw, 0x660fe110, "vpsraw", Ve, xx, KEd, He, We, mrm|evex, x, tpe[105][10]},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 73 */
{
{OP_psrad, 0x0fe210, "psrad", Pq, xx, Qq, Pq, xx, mrm, x, tpe[73][2]},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrad, 0x660fe210, "psrad", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[108][0]},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrad, 0x660fe210, "vpsrad", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[108][6]},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fe210, "(evex_W ext 120)", xx, xx, xx, xx, xx, mrm|evex, x, 120},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 74 */
{
{OP_pavgw, 0x0fe310, "pavgw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[74][2]},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pavgw, 0x660fe310, "pavgw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpavgw, 0x660fe310, "vpavgw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[74][10]},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpavgw, 0x660fe310, "vpavgw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 75 */
{
{OP_pmulhuw, 0x0fe410, "pmulhuw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[75][2]},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmulhuw, 0x660fe410, "pmulhuw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmulhuw, 0x660fe410, "vpmulhuw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[75][10]},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmulhuw, 0x660fe410, "vpmulhuw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 76 */
{
{OP_pmulhw, 0x0fe510, "pmulhw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[76][2]},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmulhw, 0x660fe510, "pmulhw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmulhw, 0x660fe510, "vpmulhw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[76][10]},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmulhw, 0x660fe510, "vpmulhw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 77 */
{
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_cvtdq2pd, 0xf30fe610, "cvtdq2pd", Vpd, xx, Wq_dq, xx, xx, mrm, x, END_LIST},
{OP_cvttpd2dq,0x660fe610, "cvttpd2dq", Vdq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2dq, 0xf20fe610, "cvtpd2dq", Vdq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vcvtdq2pd, 0xf30fe610, "vcvtdq2pd", Vvd, xx, Wvq_dq, xx, xx, mrm|vex, x, tevexw[57][0]},
{OP_vcvttpd2dq,0x660fe610, "vcvttpd2dq", Vx, xx, Wvd, xx, xx, mrm|vex, x, tpe[77][10]},
{OP_vcvtpd2dq, 0xf20fe610, "vcvtpd2dq", Vx, xx, Wvd, xx, xx, mrm|vex, x, tpe[77][11]},
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30fe610, "(evex_W ext 57)", xx, xx, xx, xx, xx, mrm|evex, x, 57},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvttpd2dq,0x660fe650, "vcvttpd2dq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtpd2dq, 0xf20fe650, "vcvtpd2dq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, /* prefix extension 78 */
{
{OP_movntq, 0x0fe710, "movntq", Mq, xx, Pq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_movntdq, 0x660fe710, "movntdq", Mdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovntdq, 0x660fe710, "vmovntdq", Mx, xx, Vx, xx, xx, mrm|vex, x, tpe[78][10]},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovntdq, 0x660fe710, "vmovntdq", Me, xx, Ve, xx, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 79 */
{
{OP_psubsb, 0x0fe810, "psubsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[79][2]},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubsb, 0x660fe810, "psubsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubsb, 0x660fe810, "vpsubsb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[79][10]},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubsb, 0x660fe810, "vpsubsb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 80 */
{
{OP_psubsw, 0x0fe910, "psubsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[80][2]},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubsw, 0x660fe910, "psubsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubsw, 0x660fe910, "vpsubsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[80][10]},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubsw, 0x660fe910, "vpsubsw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 81 */
{
{OP_pminsw, 0x0fea10, "pminsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[81][2]},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pminsw, 0x660fea10, "pminsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpminsw, 0x660fea10, "vpminsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[81][10]},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpminsw, 0x660fea10, "vpminsw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 82 */
{
{OP_por, 0x0feb10, "por", Pq, xx, Qq, Pq, xx, mrm, x, tpe[82][2]},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_por, 0x660feb10, "por", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpor, 0x660feb10, "vpor", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660feb10, "(evex_W ext 43)", xx, xx, xx, xx, xx, mrm|evex, x, 43},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 83 */
{
{OP_paddsb, 0x0fec10, "paddsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[83][2]},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddsb, 0x660fec10, "paddsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddsb, 0x660fec10, "vpaddsb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[83][10]},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddsb, 0x660fec10, "vpaddsb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 84 */
{
{OP_paddsw, 0x0fed10, "paddsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[84][2]},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddsw, 0x660fed10, "paddsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddsw, 0x660fed10, "vpaddsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[84][10]},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddsw, 0x660fed10, "vpaddsw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 85 */
{
{OP_pmaxsw, 0x0fee10, "pmaxsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[85][2]},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaxsw, 0x660fee10, "pmaxsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaxsw, 0x660fee10, "vpmaxsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[85][10]},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaxsw, 0x660fee10, "vpmaxsw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 86 */
{
{OP_pxor, 0x0fef10, "pxor", Pq, xx, Qq, Pq, xx, mrm, x, tpe[86][2]},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pxor, 0x660fef10, "pxor", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpxor, 0x660fef10, "vpxor", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660fef10, "(evex_W ext 44)", xx, xx, xx, xx, xx, mrm|evex, x, 44},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 87 */
{
{OP_psllw, 0x0ff110, "psllw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[87][2]},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psllw, 0x660ff110, "psllw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[106][0]},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsllw, 0x660ff110, "vpsllw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[106][6]},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllw, 0x660ff110, "vpsllw", Ve, xx, KEd, He, We, mrm|evex, x, tpe[106][10]},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 88 */
{
{OP_pslld, 0x0ff210, "pslld", Pq, xx, Qq, Pq, xx, mrm, x, tpe[88][2]},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pslld, 0x660ff210, "pslld", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[109][0]},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpslld, 0x660ff210, "vpslld", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[109][6]},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslld, 0x660ff210, "vpslld", Ve, xx, KEw, He, We, mrm|evex, x, tpe[109][10]},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 89 */
{
{OP_psllq, 0x0ff310, "psllq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[89][2]},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psllq, 0x660ff310, "psllq", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[111][0]},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsllq, 0x660ff310, "vpsllq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[111][6]},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllq, 0x660ff350, "vpsllq", Ve, xx, KEb, He, We, mrm|evex, x, tpe[111][10]},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 90 */
{
{OP_pmuludq, 0x0ff410, "pmuludq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[90][2]},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmuludq, 0x660ff410, "pmuludq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmuludq, 0x660ff410, "vpmuludq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[90][10]},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmuludq, 0x660ff450, "vpmuludq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 91 */
{
{OP_pmaddwd, 0x0ff510, "pmaddwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[91][2]},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaddwd, 0x660ff510, "pmaddwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaddwd, 0x660ff510, "vpmaddwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[91][10]},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaddwd, 0x660ff510, "vpmaddwd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 92 */
{
{OP_psadbw, 0x0ff610, "psadbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[92][2]},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psadbw, 0x660ff610, "psadbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsadbw, 0x660ff610, "vpsadbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[92][10]},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsadbw, 0x660ff610, "vpsadbw", Ve, xx, He, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 93 */
{
{OP_maskmovq, 0x0ff710, "maskmovq", Bq, xx, Pq, Nq, xx, mrm|predcx, x, END_LIST}, /* Intel table says "Ppi, Qpi" */
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_maskmovdqu, 0x660ff710, "maskmovdqu", Bdq, xx, Vdq, Udq, xx, mrm|predcx, x, END_LIST},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmaskmovdqu, 0x660ff710, "vmaskmovdqu", Bdq, xx, Vdq, Udq, xx, mrm|vex|reqL0|predcx, x, END_LIST},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 94 */
{
{OP_psubb, 0x0ff810, "psubb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[94][2]},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubb, 0x660ff810, "psubb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubb, 0x660ff810, "vpsubb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[94][10]},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubb, 0x660ff810, "vpsubb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 95 */
{
{OP_psubw, 0x0ff910, "psubw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[95][2]},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubw, 0x660ff910, "psubw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubw, 0x660ff910, "vpsubw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[95][10]},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubw, 0x660ff910, "vpsubw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 96 */
{
{OP_psubd, 0x0ffa10, "psubd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[96][2]},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubd, 0x660ffa10, "psubd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubd, 0x660ffa10, "vpsubd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[96][10]},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubd, 0x660ffa10, "vpsubd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 97 */
{
{OP_psubq, 0x0ffb10, "psubq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[97][2]},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psubq, 0x660ffb10, "psubq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubq, 0x660ffb10, "vpsubq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[97][10]},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubq, 0x660ffb50, "vpsubq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 98 */
{
{OP_paddb, 0x0ffc10, "paddb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[98][2]},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddb, 0x660ffc10, "paddb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddb, 0x660ffc10, "vpaddb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[98][10]},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddb, 0x660ffc10, "vpaddb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 99 */
{
{OP_paddw, 0x0ffd10, "paddw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[99][2]},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddw, 0x660ffd10, "paddw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddw, 0x660ffd10, "vpaddw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[99][10]},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddw, 0x660ffd10, "vpaddw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 100 */
{
{OP_paddd, 0x0ffe10, "paddd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[100][2]},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddd, 0x660ffe10, "paddd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddd, 0x660ffe10, "vpaddd", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[100][10]},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddd, 0x660ffe10, "vpaddd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 101: all assumed to have Ib */
{
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrldq, 0x660f7333, "psrldq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrldq, 0x660f7333, "vpsrldq", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[101][10]},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrldq, 0x660f7333, "vpsrldq", He, xx, Ib, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 102: all assumed to have Ib */
{
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pslldq, 0x660f7337, "pslldq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslldq, 0x660f7337, "vpslldq", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[102][10]},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslldq, 0x660f7337, "vpslldq", He, xx, Ib, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 103 */
{
{REX_B_EXT, 0x900000, "(rex.b ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{OP_pause,0xf3900000, "pause", xx, xx, xx, xx, xx, no, x, END_LIST},
/* we chain these even though encoding won't find them */
{OP_nop, 0x66900000, "nop", xx, xx, xx, xx, xx, no, x, tpe[103][3]},
/* windbg displays as "repne nop" */
{OP_nop, 0xf2900000, "nop", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 104: all assumed to have Ib */
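/* Prefix extensions 104-111 hold the shift-by-immediate forms of the 0f 71-73
 * groups; their chain fields link back to the register-operand forms earlier in
 * the table (e.g., tpe[104][6] chains to tpe[56][10] for vpsrlw).
 */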
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrlw, 0x0f7132, "psrlw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[104][2]},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrlw, 0x660f7132, "psrlw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlw, 0x660f7132, "vpsrlw", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[56][10]},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlw, 0x660f7132, "vpsrlw", He, xx, KEd, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 105: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psraw, 0x0f7134, "psraw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[105][2]},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psraw, 0x660f7134, "psraw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsraw, 0x660f7134, "vpsraw", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[72][10]},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsraw, 0x660f7134, "vpsraw", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 106: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psllw, 0x0f7136, "psllw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[106][2]},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psllw, 0x660f7136, "psllw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllw, 0x660f7136, "vpsllw", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[87][10]},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllw, 0x660f7136, "vpsllw", He, xx, KEd, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 107: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrld, 0x0f7232, "psrld", Nq, xx, Ib, Nq, xx, mrm, x, tpe[107][2]},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrld, 0x660f7232, "psrld", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrld, 0x660f7232, "vpsrld", Hx, xx, Ib, Ux, xx, mrm|vex, x, tevexw[122][0]},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f7232, "(evex_W ext 123)", xx, xx, xx, xx, xx, mrm|evex, x, 123},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 108: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrad, 0x0f7234, "psrad", Nq, xx, Ib, Nq, xx, mrm, x, tpe[108][2]},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrad, 0x660f7234, "psrad", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrad, 0x660f7234, "vpsrad", Hx, xx, Ib, Ux, xx, mrm|vex, x, tevexw[120][0]},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f7234, "(evex_W ext 121)", xx, xx, xx, xx, xx, mrm|evex, x, 121},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 109: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_pslld, 0x0f7236, "pslld", Nq, xx, Ib, Nq, xx, mrm, x, tpe[109][2]},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pslld, 0x660f7236, "pslld", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslld, 0x660f7236, "vpslld", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[88][10]},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslld, 0x660f7236, "vpslld", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 110: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrlq, 0x0f7332, "psrlq", Nq, xx, Ib, Nq, xx, mrm, x, tpe[110][2]},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrlq, 0x660f7332, "psrlq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlq, 0x660f7332, "vpsrlq", Hx, xx, Ib, Ux, xx, mrm|vex, x, tevexw[124][1]},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f7332, "(evex_W ext 125)", xx, xx, xx, xx, xx, mrm|evex, x, 125},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 111: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psllq, 0x0f7336, "psllq", Nq, xx, Ib, Nq, xx, mrm, x, tpe[111][2]},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psllq, 0x660f7336, "psllq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllq, 0x660f7336, "vpsllq", Hx, xx, Ib, Ux, xx, mrm|vex, x, tpe[89][10]},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllq, 0x660f7336, "vpsllq", He, xx, KEb, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 112 */
{
{OP_movq, 0x0f6f10, "movq", Pq, xx, Qq, xx, xx, mrm, x, tpe[113][0]},
{OP_movdqu, 0xf30f6f10, "movdqu", Vdq, xx, Wdq, xx, xx, mrm, x, tpe[113][1]},
{OP_movdqa, 0x660f6f10, "movdqa", Vdq, xx, Wdq, xx, xx, mrm, x, tpe[113][2]},
{INVALID, 0xf20f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovdqu, 0xf30f6f10, "vmovdqu", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[113][5]},
{OP_vmovdqa, 0x660f6f10, "vmovdqa", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[113][6]},
{INVALID, 0xf20f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f6f10, "(evex_W ext 11)", xx, xx, xx, xx, xx, mrm|evex, x, 11},
{EVEX_W_EXT, 0x660f6f10, "(evex_W ext 8)", xx, xx, xx, xx, xx, mrm|evex, x, 8},
{EVEX_W_EXT, 0xf20f6f10, "(evex_W ext 10)", xx, xx, xx, xx, xx, mrm|evex, x, 10},
}, /* prefix extension 113 */
{
{OP_movq, 0x0f7f10, "movq", Qq, xx, Pq, xx, xx, mrm, x, tpe[51][1]},
{OP_movdqu, 0xf30f7f10, "movdqu", Wdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{OP_movdqa, 0x660f7f10, "movdqa", Wdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovdqu, 0xf30f7f10, "vmovdqu", Wx, xx, Vx, xx, xx, mrm|vex, x, END_LIST},
{OP_vmovdqa, 0x660f7f10, "vmovdqa", Wx, xx, Vx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f7f10, "(evex_W ext 13)", xx, xx, xx, xx, xx, mrm|evex, x, 13},
{EVEX_W_EXT, 0x660f7f10, "(evex_W ext 9)", xx, xx, xx, xx, xx, mrm|evex, x, 9},
{EVEX_W_EXT, 0xf20f7f10, "(evex_W ext 12)", xx, xx, xx, xx, xx, mrm|evex, x, 12},
}, /* prefix extension 114 */
{
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_haddpd, 0x660f7c10, "haddpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_haddps, 0xf20f7c10, "haddps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vhaddpd, 0x660f7c10, "vhaddpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vhaddps, 0xf20f7c10, "vhaddps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 115 */
{
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_hsubpd, 0x660f7d10, "hsubpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_hsubps, 0xf20f7d10, "hsubps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vhsubpd, 0x660f7d10, "vhsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vhsubps, 0xf20f7d10, "vhsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 116 */
{
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_addsubpd, 0x660fd010, "addsubpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_addsubps, 0xf20fd010, "addsubps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vaddsubpd, 0x660fd010, "vaddsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vaddsubps, 0xf20fd010, "vaddsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /* prefix extension 117 */
{
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_lddqu, 0xf20ff010, "lddqu", Vdq, xx, Mdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vlddqu, 0xf20ff010, "vlddqu", Vx, xx, Mx, xx, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, /***************************************************
* SSSE3
*/
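/* As with the other prefix extensions, each block below lists the no-prefix,
* 0xf3, 0x66, and 0xf2 forms for the legacy encoding, then the same four for
* VEX, then the same four for EVEX; the last field is the encoding chain for
* OP_ entries and the extension index for the *_EXT entries.
*/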
{ /* prefix extension 118 */
{OP_pshufb, 0x380018, "pshufb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[118][2]},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pshufb, 0x66380018, "pshufb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380018, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpshufb, 0x66380018, "vpshufb", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[118][10]},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpshufb, 0x66380018, "vpshufb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 119 */
{OP_phaddw, 0x380118, "phaddw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[119][2]},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddw, 0x66380118, "phaddw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380118, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddw, 0x66380118, "vphaddw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 120 */
{OP_phaddd, 0x380218, "phaddd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[120][2]},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddd, 0x66380218, "phaddd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380218, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddd, 0x66380218, "vphaddd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 121 */
{OP_phaddsw, 0x380318, "phaddsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[121][2]},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddsw, 0x66380318, "phaddsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380318, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddsw, 0x66380318, "vphaddsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 122 */
{OP_pmaddubsw, 0x380418, "pmaddubsw",Pq, xx, Qq, Pq, xx, mrm, x, tpe[122][2]},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmaddubsw, 0x66380418, "pmaddubsw",Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380418, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaddubsw, 0x66380418, "vpmaddubsw",Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[122][10]},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaddubsw, 0x66380418, "vpmaddubsw",Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 123 */
{OP_phsubw, 0x380518, "phsubw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[123][2]},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubw, 0x66380518, "phsubw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380518, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubw, 0x66380518, "vphsubw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 124 */
{OP_phsubd, 0x380618, "phsubd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[124][2]},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubd, 0x66380618, "phsubd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380618, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubd, 0x66380618, "vphsubd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 125 */
{OP_phsubsw, 0x380718, "phsubsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[125][2]},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubsw, 0x66380718, "phsubsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380718, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubsw, 0x66380718, "vphsubsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 126 */
{OP_psignb, 0x380818, "psignb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[126][2]},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignb, 0x66380818, "psignb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380818, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignb, 0x66380818, "vpsignb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 127 */
{OP_psignw, 0x380918, "psignw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[127][2]},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignw, 0x66380918, "psignw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380918, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignw, 0x66380918, "vpsignw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 128 */
{OP_psignd, 0x380a18, "psignd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[128][2]},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignd, 0x66380a18, "psignd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380a18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignd, 0x66380a18, "vpsignd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 129 */
{OP_pmulhrsw, 0x380b18, "pmulhrsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[129][2]},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmulhrsw, 0x66380b18, "pmulhrsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380b18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmulhrsw, 0x66380b18, "vpmulhrsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[129][10]},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmulhrsw, 0x66380b18, "vpmulhrsw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 130 */
{OP_pabsb, 0x381c18, "pabsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[130][2]},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsb, 0x66381c18, "pabsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381c18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsb, 0x66381c18, "vpabsb", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[130][10]},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsb, 0x66381c18, "vpabsb", Ve, xx, KEq, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 131 */
{OP_pabsw, 0x381d18, "pabsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[131][2]},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsw, 0x66381d18, "pabsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381d18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsw, 0x66381d18, "vpabsw", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[131][10]},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsw, 0x66381d18, "vpabsw", Ve, xx, KEd, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 132 */
{OP_pabsd, 0x381e18, "pabsd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[132][2]},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsd, 0x66381e18, "pabsd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381e18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsd, 0x66381e18, "vpabsd", Vx, xx, Wx, xx, xx, mrm|vex, x, tevexw[145][0]},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x66381e18, "(evex_W ext 145)", xx, xx, xx, xx, xx, mrm|evex, x, 145},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 133: all assumed to have Ib */
{OP_palignr, 0x3a0f18, "palignr", Pq, xx, Qq, Ib, Pq, mrm, x, tpe[133][2]},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_palignr, 0x663a0f18, "palignr", Vdq, xx, Wdq, Ib, Vdq, mrm, x, END_LIST},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x3a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpalignr, 0x663a0f18, "vpalignr", Vx, xx, Hx, Wx, Ib, mrm|vex, x, tpe[133][10]},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x3a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpalignr, 0x663a0f18, "vpalignr", Ve, xx, KEq, Ib, He, xop|mrm|evex, x, exop[100]},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 134 */
{OP_vmread, 0x0f7810, "vmread", Ed_q, xx, Gd_q, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf30f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* FIXME PR 338279: this is listed as /0 but I'm not going to chain it into
* the reg extensions table until I can verify, since gdb thinks it
* does NOT need /0. Waiting for a processor that actually supports it.
* It's ok for DR proper to think a non-cti instr is valid when really it's not,
* though for our decoding library use we should get it right.
*/
{OP_extrq, 0x660f7810, "extrq", Udq, xx, Ib, Ib, xx, mrm, x, tpe[135][2]},
/* FIXME: is src or dst Udq? */
{OP_insertq, 0xf20f7810, "insertq", Vdq, xx, Udq, Ib, Ib, mrm, x, tpe[135][3]},
{INVALID, 0x0f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f7810, "(evex_W ext 49)", xx, xx, xx, xx, xx, mrm|evex, x, 49},
{EVEX_W_EXT, 0xf30f7810, "(evex_W ext 54)", xx, xx, xx, xx, xx, mrm|evex, x, 54},
{EVEX_W_EXT, 0x660f7810, "(evex_W ext 51)", xx, xx, xx, xx, xx, mrm|evex, x, 51},
{EVEX_W_EXT, 0xf20f7810, "(evex_W ext 55)", xx, xx, xx, xx, xx, mrm|evex, x, 55},
}, { /* prefix extension 135 */
{OP_vmwrite, 0x0f7910, "vmwrite", Gd_q, xx, Ed_q, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf30f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* FIXME: is src or dst Udq? */
{OP_extrq, 0x660f7910, "extrq", Vdq, xx, Udq, xx, xx, mrm, x, END_LIST},
{OP_insertq, 0xf20f7910, "insertq", Vdq, xx, Udq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f7910, "(evex_W ext 47)", xx, xx, xx, xx, xx, mrm|evex, x, 47},
{EVEX_W_EXT, 0xf30f7910, "(evex_W ext 52)", xx, xx, xx, xx, xx, mrm|evex, x, 52},
{EVEX_W_EXT, 0x660f7910, "(evex_W ext 48)", xx, xx, xx, xx, xx, mrm|evex, x, 48},
{EVEX_W_EXT, 0xf20f7910, "(evex_W ext 53)", xx, xx, xx, xx, xx, mrm|evex, x, 53},
}, { /* prefix extension 136 */
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, END_LIST},
/* XXX: if cpuid doesn't show lzcnt support, this is treated as bsr */
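/* I.e., on processors without LZCNT, 0xf3 0x0f 0xbd executes as plain bsr,
* with the 0xf3 acting as an ignored rep prefix.
*/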
{OP_lzcnt, 0xf30fbd10, "lzcnt", Gv, xx, Ev, xx, xx, mrm, fW6, END_LIST},
/* This is bsr w/ DATA_PREFIX, which we indicate by omitting 0x66 (i#1118).
* It's not in the encoding chain. Ditto for 0xf2. If we keep the "all
* prefix ext entries marked invalid are really treated as valid" behavior we
* don't need these, but it is better to be explicit where we have to be so we
* can easily remove that behavior.
*/
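/* E.g., 0x66 0x0f 0xbd /r is just a 16-bit-operand bsr rather than a separate
* opcode, which is why the entries below reuse OP_bsr.
*/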
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{INVALID, 0x0fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 137 */
{OP_vmptrld, 0x0fc736, "vmptrld", xx, xx, Mq, xx, xx, mrm|o64, x, END_LIST},
{OP_vmxon, 0xf30fc736, "vmxon", xx, xx, Mq, xx, xx, mrm|o64, x, END_LIST},
{OP_vmclear, 0x660fc736, "vmclear", Mq, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 138 */
{OP_movbe, 0x38f018, "movbe", Gv, xx, Mv, xx, xx, mrm, x, tpe[139][0]},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* really this is the regular data-size prefix */
{OP_movbe, 0x6638f018, "movbe", Gw, xx, Mw, xx, xx, mrm, x, tpe[139][2]},
{OP_crc32, 0xf238f018, "crc32", Gv, xx, Eb, Gv, xx, mrm, x, END_LIST},
{INVALID, 0x38f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 139 */
{OP_movbe, 0x38f118, "movbe", Mv, xx, Gv, xx, xx, mrm, x, tpe[138][2]},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* really this is the regular data-size prefix */
{OP_movbe, 0x6638f118, "movbe", Mw, xx, Gw, xx, xx, mrm, x, END_LIST},
{OP_crc32, 0xf238f118, "crc32", Gv, xx, Ev, Gv, xx, mrm, x, tpe[138][3]},
{INVALID, 0x38f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX: Intel Vol2B Sep2010 decode table claims crc32 has Gd
* instead of Gv, and that f2 f1 has Ey instead of Ev, and that
* there is a separate instruction with both 66 and f2 prefixes!
* But the detail page doesn't corroborate that...
*/
}, { /* prefix extension 140 */
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, END_LIST},
/* XXX: if cpuid doesn't show tzcnt support, this is treated as bsf */
{OP_tzcnt, 0xf30fbc10, "tzcnt", Gv, xx, Ev, xx, xx, mrm, fW6, END_LIST},
/* see OP_bsr comments above -- this is the same but for bsf: */
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{INVALID, 0x0fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 141 */
{INVALID, 0x38f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_bextr, 0x38f718, "bextr", Gy, xx, Ey, By, xx, mrm|vex, fW6, txop[60]},
{OP_sarx, 0xf338f718, "sarx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{OP_shlx, 0x6638f718, "shlx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{OP_shrx, 0xf238f718, "shrx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 142 */
{INVALID, 0x38f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_bzhi, 0x38f518, "bzhi", Gy, xx, Ey, By, xx, mrm|vex, fW6, END_LIST},
{OP_pext, 0xf338f518, "pext", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{INVALID, 0x6638f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pdep, 0xf238f518, "pdep", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 143 */
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_adox, 0xf338f618, "adox", Gy, xx, Ey, Gy, xx, mrm, (fWO|fRO), END_LIST},
{OP_adcx, 0x6638f618, "adcx", Gy, xx, Ey, Gy, xx, mrm, (fWC|fRC), END_LIST},
{INVALID, 0xf238f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_mulx, 0xf238f618, "mulx", By, Gy, Ey, uDX, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 144 */
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9010, "(vex_W ext 74)", xx, xx, xx, xx, xx, mrm|vex, x, 74},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9010, "(vex_W ext 75)", xx, xx, xx, xx, xx, mrm|vex, x, 75},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 145 */
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9110, "(vex_W ext 76)", xx, xx, xx, xx, xx, mrm|vex, x, 76},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9110, "(vex_W ext 77)", xx, xx, xx, xx, xx, mrm|vex, x, 77},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 146 */
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9210, "(vex_W ext 78)", xx, xx, xx, xx, xx, mrm|vex, x, 78},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9210, "(vex_W ext 79)", xx, xx, xx, xx, xx, mrm|vex, x, 79},
{VEX_W_EXT, 0xf20f9210, "(vex_W ext 106)",xx, xx, xx, xx, xx, mrm|vex, x, 106},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 147 */
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9310, "(vex_W ext 80)", xx, xx, xx, xx, xx, mrm|vex, x, 80},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9310, "(vex_W ext 81)", xx, xx, xx, xx, xx, mrm|vex, x, 81},
{VEX_W_EXT, 0xf20f9310, "(vex_W ext 107)",xx, xx, xx, xx, xx, mrm|vex, x, 107},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 148 */
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4110, "(vex_W ext 82)", xx, xx, xx, xx, xx, mrm|vex, x, 82},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4110, "(vex_W ext 83)", xx, xx, xx, xx, xx, mrm|vex, x, 83},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 149 */
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4210, "(vex_W ext 84)", xx, xx, xx, xx, xx, mrm|vex, x, 84},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4210, "(vex_W ext 85)", xx, xx, xx, xx, xx, mrm|vex, x, 85},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 150 */
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4b10, "(vex_W ext 86)", xx, xx, xx, xx, xx, mrm|vex, x, 86},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4b10, "(vex_W ext 87)", xx, xx, xx, xx, xx, mrm|vex, x, 87},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 151 */
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4410, "(vex_W ext 88)", xx, xx, xx, xx, xx, mrm|vex, x, 88},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4410, "(vex_W ext 89)", xx, xx, xx, xx, xx, mrm|vex, x, 89},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 152 */
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4510, "(vex_W ext 90)", xx, xx, xx, xx, xx, mrm|vex, x, 90},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4510, "(vex_W ext 91)", xx, xx, xx, xx, xx, mrm|vex, x, 91},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 153 */
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4610, "(vex_W ext 92)", xx, xx, xx, xx, xx, mrm|vex, x, 92},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4610, "(vex_W ext 93)", xx, xx, xx, xx, xx, mrm|vex, x, 93},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 154 */
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4710, "(vex_W ext 94)", xx, xx, xx, xx, xx, mrm|vex, x, 94},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4710, "(vex_W ext 95)", xx, xx, xx, xx, xx, mrm|vex, x, 95},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 155 */
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4a10, "(vex_W ext 96)", xx, xx, xx, xx, xx, mrm|vex, x, 96},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4a10, "(vex_W ext 97)", xx, xx, xx, xx, xx, mrm|vex, x, 97},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 156 */
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9810, "(vex_W ext 98)", xx, xx, xx, xx, xx, mrm|vex, x, 98},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9810, "(vex_W ext 99)", xx, xx, xx, xx, xx, mrm|vex, x, 99},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 157 */
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9910, "(vex_W ext 104)", xx, xx, xx, xx, xx, mrm|vex, x, 104},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9910, "(vex_W ext 105)", xx, xx, xx, xx, xx, mrm|vex, x, 105},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 158 */
{INVALID, 0x0f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f7b10, "(evex_W ext 58)", xx, xx, xx, xx, xx, mrm|evex, x, 58},
{EVEX_W_EXT, 0x660f7b10, "(evex_W ext 46)", xx, xx, xx, xx, xx, mrm|evex, x, 46},
{EVEX_W_EXT, 0xf20f7b10, "(evex_W ext 59)", xx, xx, xx, xx, xx, mrm|evex, x, 59},
}, { /* prefix extension 159 */
{INVALID, 0x0f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f7a10, "(evex_W ext 61)", xx, xx, xx, xx, xx, mrm|evex, x, 61},
{EVEX_W_EXT, 0x660f7a10, "(evex_W ext 50)", xx, xx, xx, xx, xx, mrm|evex, x, 50},
{EVEX_W_EXT, 0xf20f7a10, "(evex_W ext 60)", xx, xx, xx, xx, xx, mrm|evex, x, 60},
}, { /* prefix extension 160 */
{INVALID, 0x383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovqb, 0xf3383218, "vpmovqb", Wj_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovzxbq, 0x66383218, "vpmovzxbq", Ve, xx, KEb, Wj_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 161 */
{INVALID, 0x382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovsqb, 0xf3382218, "vpmovsqb", Wj_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovsxbq, 0x66382218, "vpmovsxbq", Ve, xx, KEb, Wj_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 162 */
{INVALID, 0x381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovusqb, 0xf3381218, "vpmovusqb", Wj_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
{EVEX_W_EXT, 0x66381218, "(evex_W ext 129)", xx, xx, xx, xx, xx, mrm|evex, x, 129},
{INVALID, 0xf2381218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 163 */
{INVALID, 0x383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovqw, 0xf3383418, "vpmovqw", Wi_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovzxwq, 0x66383418, "vpmovzxwq", Ve, xx, KEb, Wi_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 164 */
{INVALID, 0x382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovsqw, 0xf3382418, "vpmovsqw", Wi_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovsxwq, 0x66382418, "vpmovsxwq", Ve, xx, KEb, Wi_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 165 */
{INVALID, 0x381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovusqw, 0xf3381418, "vpmovusqw", Wi_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
{EVEX_W_EXT, 0x66381418, "(evex_W ext 118)", xx, xx, xx, xx, xx, mrm|evex, x, 118},
{INVALID, 0xf2381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 166 */
{INVALID, 0x383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovqd, 0xf3383518, "vpmovqd", Wh_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovzxdq, 0x66383518, "vpmovzxdq", Ve, xx, KEb, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 167 */
{INVALID, 0x382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovsqd, 0xf3382518, "vpmovsqd", Wh_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovsxdq, 0x66382518, "vpmovsxdq", Ve, xx, KEb, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 168 */
{INVALID, 0x381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovusqd, 0xf3381518, "vpmovusqd", Wh_e, xx, KEb, Ve, xx, mrm|evex, x, END_LIST},
{EVEX_W_EXT, 0x66381518, "(evex_W ext 116)", xx, xx, xx, xx, xx, mrm|evex, x, 116},
{INVALID, 0xf2381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 169 */
{INVALID, 0x383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovdb, 0xf3383118, "vpmovdb", Wi_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovzxbd, 0x66383118, "vpmovzxbd", Ve, xx, KEw, Wi_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 170 */
{INVALID, 0x382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovsdb, 0xf3382118, "vpmovsdb", Wi_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovsxbd, 0x66382118, "vpmovsxbd", Ve, xx, KEw, Wi_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 171 */
{INVALID, 0x381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovusdb, 0xf3381118, "vpmovusdb", Wi_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{EVEX_W_EXT, 0x66381118, "(evex_W ext 126)", xx, xx, xx, xx, xx, mrm|evex, x, 126},/*127*/
{INVALID, 0xf2381118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 172 */
{INVALID, 0x383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovdw, 0xf3383318, "vpmovdw", Wh_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovzxwd, 0x66383318, "vpmovzxwd", Ve, xx, KEw, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 173 */
{INVALID, 0x382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovsdw, 0xf3382318, "vpmovsdw", Wh_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovsxwd, 0x66382318, "vpmovsxwd", Ve, xx, KEw, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 174 */
{INVALID, 0x381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovusdw, 0xf3381318, "vpmovusdw", Wh_e, xx, KEw, Ve, xx, mrm|evex, x, END_LIST},
{OP_vcvtph2ps, 0x66381318, "vcvtph2ps", Ve, xx, KEw, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 175 */
{INVALID, 0x383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovwb, 0xf3383018, "vpmovwb", Wh_e, xx, KEd, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovzxbw, 0x66383018, "vpmovzxbw", Ve, xx, KEd, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 176 */
{INVALID, 0x382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovswb, 0xf3382018, "vpmovswb", Wh_e, xx, KEd, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpmovsxbw, 0x66382018, "vpmovsxbw", Ve, xx, KEd, Wh_e, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 177 */
{INVALID, 0x381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vpmovuswb, 0xf3381018, "vpmovuswb", Wh_e, xx, KEd, Ve, xx, mrm|evex, x, END_LIST},
{OP_vpsrlvw, 0x66381058, "vpsrlvw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 178 */
{INVALID, 0x382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3382818, "(evex_W ext 137)", xx, xx, xx, xx, xx, mrm, x, 137},
{OP_vpmuldq, 0x66382858, "vpmuldq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 179 */
{INVALID, 0x383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3383818, "(evex_W ext 138)", xx, xx, xx, xx, xx, mrm, x, 138},
{OP_vpminsb, 0x66383818, "vpminsb", Ve, xx, KEq, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 180 */
{INVALID, 0x382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3382918, "(evex_W ext 139)", xx, xx, xx, xx, xx, mrm, x, 139},
{OP_vpcmpeqq, 0x66382958, "vpcmpeqq", KPb, xx, KEb, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 181 */
{INVALID, 0x383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3383918, "(evex_W ext 140)", xx, xx, xx, xx, xx, mrm, x, 140},
{EVEX_W_EXT, 0x66383918, "(evex_W ext 112)", xx, xx, xx, xx, xx, mrm|evex, x, 112},
{INVALID, 0xf2383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 182 */
{INVALID, 0x382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3382618, "(evex_W ext 170)", xx, xx, xx, xx, xx, mrm, x, 170},
{EVEX_W_EXT, 0x66382618, "(evex_W ext 168)", xx, xx, xx, xx, xx, mrm, x, 168},
{INVALID, 0xf2382618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 183 */
{INVALID, 0x382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf3382718, "(evex_W ext 171)", xx, xx, xx, xx, xx, mrm, x, 171},
{EVEX_W_EXT, 0x66382718, "(evex_W ext 169)", xx, xx, xx, xx, xx, mrm, x, 169},
{INVALID, 0xf2382718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 184 */
{INVALID, 0x382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastmb2q, 0xf3382a58, "vpbroadcastmb2q", Ve, xx, KQb, xx, xx, mrm|evex, x, NA},
{OP_vmovntdqa, 0x66382a18, "vmovntdqa", Me, xx, Ve, xx, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf2382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* prefix extension 185 */
{INVALID, 0x383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastmw2d, 0xf3383a18, "vpbroadcastmw2d", Ve, xx, KQw, xx, xx, mrm|evex, x, NA},
{OP_vpminuw, 0x66383a18, "vpminuw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
{INVALID, 0xf2383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Instructions that differ based on whether vex-encoded or not.
 * Most of these require a 0x66 prefix but we use reqp for that
* so there's nothing inherent here about prefixes.
* TODO i#1312: A third row has been added for AVX-512 w/ EVEX prefix. Most or all
* EVEX instructions seem to resemble their corresponding VEX version. If we add
 * a decode_table entry here, we currently can't test them through instr_create macros,
* unless we force the creation of EVEX versions.
*/
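/* Illustrative note (editor's sketch, not original documentation): each row below is
 * selected by encoding flavor -- [0] = non-VEX (SSE), [1] = VEX, [2] = EVEX.  For
 * example, e_vex ext 3 maps 66 0F 38 17 /r to ptest with no VEX/EVEX prefix, to
 * vptest under VEX, and to invalid under EVEX.
 */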
const instr_info_t e_vex_extensions[][3] = {
{ /* e_vex ext 0 */
{INVALID, 0x663a4a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vblendvps, 0x663a4a18, "vblendvps", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 1 */
{INVALID, 0x663a4b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vblendvpd, 0x663a4b18, "vblendvpd", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 2 */
{INVALID, 0x663a4c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpblendvb, 0x663a4c18, "vpblendvb", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 3 */
{OP_ptest, 0x66381718, "ptest", xx, xx, Vdq,Wdq, xx, mrm|reqp, fW6, END_LIST},
{OP_vptest, 0x66381718, "vptest", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66381718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 4 */
{OP_pmovsxbw, 0x66382018, "pmovsxbw", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbw, 0x66382018, "vpmovsxbw", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[176][10]},
{PREFIX_EXT, 0x382018, "(prefix ext 176)", xx, xx, xx, xx, xx, mrm|evex, x, 176},
}, { /* e_vex ext 5 */
{OP_pmovsxbd, 0x66382118, "pmovsxbd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbd, 0x66382118, "vpmovsxbd", Vx, xx, Wi_x, xx, xx, mrm|vex|reqp, x, tpe[170][10]},
{PREFIX_EXT, 0x382118, "(prefix ext 170)", xx, xx, xx, xx, xx, mrm|evex, x, 170},
}, { /* e_vex ext 6 */
/* XXX i#1312: the SSE and VEX table entries could get moved to prefix_extensions and
* this table here re-numbered.
*/
{OP_pmovsxbq, 0x66382218, "pmovsxbq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbq, 0x66382218, "vpmovsxbq", Vx, xx, Wj_x, xx, xx, mrm|vex|reqp, x, tpe[161][10]},
{PREFIX_EXT, 0x382218, "(prefix ext 161)", xx, xx, xx, xx, xx, mrm|evex, x, 161},
}, { /* e_vex ext 7 */
{OP_pmovsxwd, 0x66382318, "pmovsxwd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxwd, 0x66382318, "vpmovsxwd", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[173][10]},
{PREFIX_EXT, 0x382318, "(prefix ext 173)", xx, xx, xx, xx, xx, mrm|evex, x, 173},
}, { /* e_vex ext 8 */
{OP_pmovsxwq, 0x66382418, "pmovsxwq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxwq, 0x66382418, "vpmovsxwq", Vx, xx, Wi_x, xx, xx, mrm|vex|reqp, x, tpe[164][10]},
{PREFIX_EXT, 0x382418, "(prefix ext 164)", xx, xx, xx, xx, xx, mrm|evex, x, 164},
}, { /* e_vex ext 9 */
{OP_pmovsxdq, 0x66382518, "pmovsxdq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxdq, 0x66382518, "vpmovsxdq", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[167][10]},
{PREFIX_EXT, 0x382518, "(prefix ext 167)", xx, xx, xx, xx, xx, mrm|evex, x, 167},
}, { /* e_vex ext 10 */
{OP_pmuldq, 0x66382818, "pmuldq", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmuldq, 0x66382818, "vpmuldq", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tpe[178][10]},
{PREFIX_EXT, 0x382818, "(prefix ext 178)", xx, xx, xx, xx, xx, mrm|evex, x, 178},
}, { /* e_vex ext 11 */
{OP_pcmpeqq, 0x66382918, "pcmpeqq", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpcmpeqq, 0x66382918, "vpcmpeqq", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tpe[180][10]},
{PREFIX_EXT, 0x382918, "(prefix ext 180)", xx, xx, xx, xx, xx, mrm|evex, x, 180},
}, { /* e_vex ext 12 */
{OP_movntdqa, 0x66382a18, "movntdqa", Mdq, xx, Vdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vmovntdqa, 0x66382a18, "vmovntdqa", Mx, xx, Vx, xx, xx, mrm|vex|reqp, x, tpe[184][10]},
{PREFIX_EXT, 0x382a18, "(prefix ext 184)", xx, xx, xx, xx, xx, mrm|evex, x, 184},
}, { /* e_vex ext 13 */
{OP_packusdw, 0x66382b18, "packusdw", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpackusdw, 0x66382b18, "vpackusdw", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[13][2]},
{OP_vpackusdw, 0x66382b18, "vpackusdw", Ve, xx, KEd, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 14 */
{OP_pmovzxbw, 0x66383018, "pmovzxbw", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbw, 0x66383018, "vpmovzxbw", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[175][10]},
{PREFIX_EXT, 0x383018, "(prefix ext 175)", xx, xx, xx, xx, xx, mrm|evex, x, 175},
}, { /* e_vex ext 15 */
{OP_pmovzxbd, 0x66383118, "pmovzxbd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbd, 0x66383118, "vpmovzxbd", Vx, xx, Wi_x, xx, xx, mrm|vex|reqp, x, tpe[169][10]},
{PREFIX_EXT, 0x383118, "(prefix ext 169)", xx, xx, xx, xx, xx, mrm|evex, x, 169},
}, { /* e_vex ext 16 */
/* XXX i#1312: the SSE and VEX table entries could get moved to prefix_extensions and
* this table here re-numbered.
*/
{OP_pmovzxbq, 0x66383218, "pmovzxbq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbq, 0x66383218, "vpmovzxbq", Vx, xx, Wj_x, xx, xx, mrm|vex|reqp, x, tpe[160][10]},
{PREFIX_EXT, 0x383218, "(prefix ext 160)", xx, xx, xx, xx, xx, mrm|evex, x, 160},
}, { /* e_vex ext 17 */
{OP_pmovzxwd, 0x66383318, "pmovzxwd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxwd, 0x66383318, "vpmovzxwd", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[172][10]},
{PREFIX_EXT, 0x383318, "(prefix ext 172)", xx, xx, xx, xx, xx, mrm|evex, x, 172},
}, { /* e_vex ext 18 */
{OP_pmovzxwq, 0x66383418, "pmovzxwq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxwq, 0x66383418, "vpmovzxwq", Vx, xx, Wi_x, xx, xx, mrm|vex|reqp, x, tpe[163][10]},
{PREFIX_EXT, 0x383418, "(prefix ext 163)", xx, xx, xx, xx, xx, mrm|evex, x, 163},
}, { /* e_vex ext 19 */
{OP_pmovzxdq, 0x66383518, "pmovzxdq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxdq, 0x66383518, "vpmovzxdq", Vx, xx, Wh_x, xx, xx, mrm|vex|reqp, x, tpe[166][10]},
{PREFIX_EXT, 0x383518, "(prefix ext 166)", xx, xx, xx, xx, xx, mrm|evex, x, 166},
}, { /* e_vex ext 20 */
{OP_pcmpgtq, 0x66383718, "pcmpgtq", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpcmpgtq, 0x66383718, "vpcmpgtq", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[20][2]},
{OP_vpcmpgtq, 0x66383758, "vpcmpgtq", KPb, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 21 */
{OP_pminsb, 0x66383818, "pminsb", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminsb, 0x66383818, "vpminsb", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tpe[179][10]},
{PREFIX_EXT, 0x383818, "(prefix ext 179)", xx, xx, xx, xx, xx, mrm|evex, x, 179},
}, { /* e_vex ext 22 */
{OP_pminsd, 0x66383918, "pminsd", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminsd, 0x66383918, "vpminsd", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tevexw[112][0]},
{PREFIX_EXT, 0x383918, "(prefix ext 181)", xx, xx, xx, xx, xx, mrm|evex, x, 181},
}, { /* e_vex ext 23 */
{OP_pminuw, 0x66383a18, "pminuw", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminuw, 0x66383a18, "vpminuw", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tpe[185][10]},
{PREFIX_EXT, 0x383a18, "(prefix ext 185)", xx, xx, xx, xx, xx, mrm|evex, x, 185},
}, { /* e_vex ext 24 */
{OP_pminud, 0x66383b18, "pminud", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminud, 0x66383b18, "vpminud", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tevexw[114][0]},
{EVEX_W_EXT, 0x66383b18, "(evex_W ext 114)", xx, xx, xx, xx, xx, mrm|evex, x, 114},
}, { /* e_vex ext 25 */
{OP_pmaxsb, 0x66383c18, "pmaxsb", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxsb, 0x66383c18, "vpmaxsb", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[25][2]},
{OP_vpmaxsb, 0x66383c18, "vpmaxsb", Ve, xx, KEq, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 26 */
{OP_pmaxsd, 0x66383d18, "pmaxsd", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxsd, 0x66383d18, "vpmaxsd", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tevexw[113][0]},
{EVEX_W_EXT, 0x66383d18, "(evex_W ext 113)", xx, xx, xx, xx, xx, mrm|evex, x, 113},
}, { /* e_vex ext 27 */
{OP_pmaxuw, 0x66383e18, "pmaxuw", Vdq, xx, Wdq, Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxuw, 0x66383e18, "vpmaxuw", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[27][2]},
{OP_vpmaxuw, 0x66383e18, "vpmaxuw", Ve, xx, KEd, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 28 */
{OP_pmaxud, 0x66383f18, "pmaxud", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxud, 0x66383f18, "vpmaxud", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tevexw[115][0]},
{EVEX_W_EXT, 0x66383f18, "(evex_W ext 115)", xx, xx, xx, xx, xx, mrm|evex, x, 115},
}, { /* e_vex ext 29 */
{OP_pmulld, 0x66384018, "pmulld", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmulld, 0x66384018, "vpmulld", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, tevexw[45][0]},
{EVEX_W_EXT, 0x66384018, "(evex_W ext 45)", xx, xx, xx, xx, xx, mrm|evex, x, 45},
}, { /* e_vex ext 30 */
{OP_phminposuw, 0x66384118,"phminposuw",Vdq,xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vphminposuw, 0x66384118,"vphminposuw",Vdq,xx, Wdq, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66384118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 31 */
{OP_aesimc, 0x6638db18, "aesimc", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vaesimc, 0x6638db18, "vaesimc", Vdq, xx, Wdq, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638db18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 32 */
{OP_aesenc, 0x6638dc18, "aesenc", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vaesenc, 0x6638dc18, "vaesenc", Vdq, xx, Hdq,Wdq, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638dc18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 33 */
{OP_aesenclast, 0x6638dd18,"aesenclast",Vdq,xx,Wdq,Vdq,xx, mrm|reqp, x, END_LIST},
{OP_vaesenclast, 0x6638dd18,"vaesenclast",Vdq,xx,Hdq,Wdq,xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638dd18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 34 */
{OP_aesdec, 0x6638de18, "aesdec", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vaesdec, 0x6638de18, "vaesdec", Vdq, xx, Hdq,Wdq, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638de18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 35 */
{OP_aesdeclast, 0x6638df18,"aesdeclast",Vdq,xx,Wdq,Vdq,xx, mrm|reqp, x, END_LIST},
{OP_vaesdeclast, 0x6638df18,"vaesdeclast",Vdq,xx,Hdq,Wdq,xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638df18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 36 */
{OP_pextrb, 0x663a1418, "pextrb", Rd_Mb, xx, Vb_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpextrb, 0x663a1418, "vpextrb", Rd_Mb, xx, Vb_dq, Ib, xx, mrm|vex|reqp, x, tvex[36][2]},
{OP_vpextrb, 0x663a1418, "vpextrb", Rd_Mb, xx, Vb_dq, Ib, xx, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 37 */
{OP_pextrw, 0x663a1518, "pextrw", Rd_Mw, xx, Vw_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpextrw, 0x663a1518, "vpextrw", Rd_Mw, xx, Vw_dq, Ib, xx, mrm|vex|reqp, x, tpe[54][10]},
{OP_vpextrw, 0x663a1518, "vpextrw", Rd_Mw, xx, Vw_dq, Ib, xx, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 38 */
{OP_pextrd, 0x663a1618, "pextrd", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|reqp, x, END_LIST},/*"pextrq" with rex.w*/
{OP_vpextrd, 0x663a1618, "vpextrd", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|vex|reqp, x, tevexw[144][0]},/*"vpextrq" with rex.w*/
{EVEX_W_EXT, 0x663a1618, "(evex_W ext 144)", xx, xx, xx, xx, xx, mrm|evex, x, 144},
}, { /* e_vex ext 39 */
{OP_extractps, 0x663a1718, "extractps", Ed, xx, Vd_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vextractps, 0x663a1718, "vextractps", Ed, xx, Ib, Vd_dq, xx, mrm|vex|reqp, x, tvex[39][2]},
{OP_vextractps, 0x663a1718, "vextractps", Ed, xx, Ib, Vd_dq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 40 */
{OP_roundps, 0x663a0818, "roundps", Vdq, xx, Wdq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundps, 0x663a0818, "vroundps", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, END_LIST},
{OP_vrndscaleps, 0x663a0818, "vrndscaleps", Ve, xx, KEw, Ib, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 41 */
{OP_roundpd, 0x663a0918, "roundpd", Vdq, xx, Wdq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundpd, 0x663a0918, "vroundpd", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, END_LIST},
{OP_vrndscalepd, 0x663a0958, "vrndscalepd", Ve, xx, KEb, Ib, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 42 */
{OP_roundss, 0x663a0a18, "roundss", Vss, xx, Wss, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundss, 0x663a0a18, "vroundss", Vdq, xx, H12_dq, Wss, Ib, mrm|vex|reqp, x, END_LIST},
{OP_vrndscaless, 0x663a0a18, "vrndscaless", Vdq, xx, KE1b, Ib, H12_dq, xop|mrm|evex|reqp, x, exop[115]},
}, { /* e_vex ext 43 */
{OP_roundsd, 0x663a0b18, "roundsd", Vsd, xx, Wsd, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundsd, 0x663a0b18, "vroundsd", Vdq, xx, Hsd, Wsd, Ib, mrm|vex|reqp, x, END_LIST},
{OP_vrndscalesd, 0x663a0b58, "vrndscalesd", Vdq, xx, KE1b, Ib, Hsd, xop|mrm|evex|reqp, x, exop[116]},
}, { /* e_vex ext 44 */
{OP_blendps, 0x663a0c18, "blendps", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vblendps, 0x663a0c18, "vblendps", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 45 */
{OP_blendpd, 0x663a0d18, "blendpd", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vblendpd, 0x663a0d18, "vblendpd", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 46 */
{OP_pblendw, 0x663a0e18, "pblendw", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vpblendw, 0x663a0e18, "vpblendw", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 47 */
/* FIXME i#1388: pinsrb actually reads only bottom byte of reg */
{OP_pinsrb, 0x663a2018, "pinsrb", Vb_dq, xx, Rd_Mb, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpinsrb, 0x663a2018, "vpinsrb", Vdq, xx, H15_dq, Rd_Mb, Ib, mrm|vex|reqp, x, tvex[47][2]},
{OP_vpinsrb, 0x663a2018, "vpinsrb", Vdq, xx, H15_dq, Rd_Mb, Ib, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 48 */
{OP_insertps, 0x663a2118, "insertps", Vdq, xx, Udq_Md, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vinsertps,0x663a2118, "vinsertps", Vdq, xx, Ib, Hdq, Udq_Md, mrm|vex|reqp|reqL0, x, tvex[48][2]},
{OP_vinsertps,0x663a2118, "vinsertps", Vdq, xx, Ib, Hdq, Udq_Md, mrm|evex|reqp|reqL0, x, END_LIST},
}, { /* e_vex ext 49 */
{OP_pinsrd, 0x663a2218, "pinsrd", Vd_q_dq, xx, Ed_q,Ib, xx, mrm|reqp, x, END_LIST},/*"pinsrq" with rex.w*/
{OP_vpinsrd, 0x663a2218, "vpinsrd", Vdq, xx, H12_8_dq, Ed_q, Ib, mrm|vex|reqp, x, tevexw[143][0]},/*"vpinsrq" with rex.w*/
{EVEX_W_EXT, 0x663a2218, "(evex_W ext 143)", xx, xx, xx, xx, xx, mrm|evex, x, 143},
}, { /* e_vex ext 50 */
{OP_dpps, 0x663a4018, "dpps", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vdpps, 0x663a4018, "vdpps", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 51 */
{OP_dppd, 0x663a4118, "dppd", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vdppd, 0x663a4118, "vdppd", Vdq, xx, Hdq, Wdq, Ib, mrm|vex|reqp|reqL0, x, END_LIST},
{INVALID, 0x663a4118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 52 */
{OP_mpsadbw, 0x663a4218, "mpsadbw", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vmpsadbw, 0x663a4218, "vmpsadbw", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{OP_vdbpsadbw, 0x663a4218, "vdbpsadbw", Ve, xx, KEd, Ib, He, xop|mrm|evex|reqp, x, exop[117]},
}, { /* e_vex ext 53 */
{OP_pcmpestrm, 0x663a6018, "pcmpestrm",xmm0, xx, Vdq, Wdq, Ib, mrm|reqp|xop, fW6, exop[8]},
{OP_vpcmpestrm,0x663a6018, "vpcmpestrm",xmm0, xx, Vdq, Wdq, Ib, mrm|vex|reqp|xop, fW6, exop[11]},
{INVALID, 0x663a6018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 54 */
{OP_pcmpestri, 0x663a6118, "pcmpestri",ecx, xx, Vdq, Wdq, Ib, mrm|reqp|xop, fW6, exop[9]},
{OP_vpcmpestri,0x663a6118, "vpcmpestri",ecx, xx, Vdq, Wdq, Ib, mrm|vex|reqp|xop, fW6, exop[12]},
{INVALID, 0x663a6118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 55 */
{OP_pcmpistrm, 0x663a6218, "pcmpistrm",xmm0, xx, Vdq, Wdq, Ib, mrm|reqp, fW6, END_LIST},
{OP_vpcmpistrm,0x663a6218, "vpcmpistrm",xmm0, xx, Vdq, Wdq, Ib, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x663a6218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 56 */
{OP_pcmpistri, 0x663a6318, "pcmpistri",ecx, xx, Vdq, Wdq, Ib, mrm|reqp, fW6, END_LIST},
{OP_vpcmpistri,0x663a6318, "vpcmpistri",ecx, xx, Vdq, Wdq, Ib, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x663a6318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 57 */
{OP_pclmulqdq, 0x663a4418, "pclmulqdq", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vpclmulqdq,0x663a4418, "vpclmulqdq", Vdq, xx, Hdq, Wdq, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 58 */
{OP_aeskeygenassist, 0x663adf18, "aeskeygenassist",Vdq,xx,Wdq,Ib,xx,mrm|reqp,x,END_LIST},
{OP_vaeskeygenassist,0x663adf18, "vaeskeygenassist",Vdq,xx,Wdq,Ib,xx,mrm|vex|reqp,x,END_LIST},
{INVALID, 0x663adf18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 59 */
{INVALID, 0x66380e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtestps, 0x66380e18, "vtestps", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66380e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 60 */
{INVALID, 0x66380f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtestpd, 0x66380f18, "vtestpd", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66380f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 61 */
{OP_ldmxcsr, 0x0fae32, "ldmxcsr", xx, xx, Md, xx, xx, mrm, x, END_LIST},
{OP_vldmxcsr, 0x0fae32, "vldmxcsr", xx, xx, Md, xx, xx, mrm|vex|reqL0, x, END_LIST},
{INVALID, 0x0fae32, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 62 */
{OP_stmxcsr, 0x0fae33, "stmxcsr", Md, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_vstmxcsr, 0x0fae33, "vstmxcsr", Md, xx, xx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x0fae33, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 63 */
{INVALID, 0x66381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtph2ps, 0x66381318, "vcvtph2ps", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, tpe[174][10]},
{PREFIX_EXT, 0x381318, "(prefix ext 174)", xx, xx, xx, xx, xx, mrm|evex, x, 174},
}, { /* e_vex ext 64 */
{INVALID, 0x66381818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastss, 0x66381818, "vbroadcastss", Vx, xx, Wd_dq, xx, xx, mrm|vex|reqp, x, tvex[64][2]},
{OP_vbroadcastss, 0x66381818, "vbroadcastss", Ve, xx, KEw, Wd_dq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 65 */
{INVALID, 0x66381918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastsd, 0x66381918, "vbroadcastsd", Vqq, xx, Wq_dq, xx, xx, mrm|vex|reqp|reqL1, x, tevexw[147][1]},
{EVEX_W_EXT, 0x66381918, "(evex_W ext 147)", xx, xx, xx, xx, xx, mrm|evex, x, 147},
}, { /* e_vex ext 66 */
{INVALID, 0x66381a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastf128, 0x66381a18, "vbroadcastf128", Vqq, xx, Mdq, xx, xx, mrm|vex|reqp|reqL1, x, END_LIST},
{EVEX_W_EXT, 0x66381a18, "(evex_W ext 148)", xx, xx, xx, xx, xx, mrm|evex, x, 148},
}, { /* e_vex ext 67 */
{INVALID, 0x66382c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovps, 0x66382c18, "vmaskmovps", Vx, xx, Hx,Mx, xx, mrm|vex|reqp|predcx, x, tvex[69][1]},
{EVEX_W_EXT, 0x66382c18, "(evex_W ext 180)", xx, xx, xx, xx, xx, mrm|evex, x, 180},
}, { /* e_vex ext 68 */
{INVALID, 0x66382d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovpd, 0x66382d18, "vmaskmovpd", Vx, xx, Hx,Mx, xx, mrm|vex|reqp|predcx, x, tvex[70][1]},
{EVEX_W_EXT, 0x66382d18, "(evex_W ext 181)", xx, xx, xx, xx, xx, mrm|evex, x, 181},
}, { /* e_vex ext 69 */
{INVALID, 0x66382e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovps, 0x66382e18, "vmaskmovps", Mx, xx, Hx,Vx, xx, mrm|vex|reqp|predcx, x, END_LIST},
{INVALID, 0x66382e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 70 */
{INVALID, 0x66382f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovpd, 0x66382f18, "vmaskmovpd", Mx, xx, Hx,Vx, xx, mrm|vex|reqp|predcx, x, END_LIST},
{INVALID, 0x66382f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 71 */
{INVALID, 0x663a0418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilps, 0x663a0418, "vpermilps", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, tvex[71][2]},
{OP_vpermilps, 0x663a0418, "vpermilps", Ve, xx, KEw, We, Ib, mrm|evex|reqp, x, tvex[77][1]},
}, { /* e_vex ext 72 */
{INVALID, 0x663a0518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilpd, 0x663a0518, "vpermilpd", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, tvex[72][2]},
{OP_vpermilpd, 0x663a0558, "vpermilpd", Ve, xx, KEb, We, Ib, mrm|evex|reqp, x, tvex[78][1]},
}, { /* e_vex ext 73 */
{INVALID, 0x663a0618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vperm2f128, 0x663a0618, "vperm2f128", Vqq, xx, Hqq, Wqq, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 74 */
{INVALID, 0x663a1818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vinsertf128, 0x663a1818, "vinsertf128", Vqq, xx, Hqq, Wdq, Ib, mrm|vex|reqp, x, END_LIST},
{EVEX_W_EXT, 0x663a1818, "(evex_W ext 104)", xx, xx, xx, xx, xx, mrm|evex, x, 104},
}, { /* e_vex ext 75 */
{INVALID, 0x663a1918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vextractf128, 0x663a1918, "vextractf128", Wdq, xx, Vdq_qq, Ib, xx, mrm|vex|reqp|reqL1, x, END_LIST},
{EVEX_W_EXT, 0x663a1918, "(evex_W ext 100)", xx, xx, xx, xx, xx, mrm|evex, x, 100},
}, { /* e_vex ext 76 */
{INVALID, 0x663a1d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtps2ph, 0x663a1d18, "vcvtps2ph", Wx, xx, Vx, Ib, xx, mrm|vex|reqp, x, tvex[76][2]},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtps2ph, 0x663a1d18, "vcvtps2ph", We, xx, KEw, Ve, Ib, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 77 */
{INVALID, 0x66380c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilps, 0x66380c18, "vpermilps", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[77][2]},
{OP_vpermilps, 0x66380c18, "vpermilps", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 78 */
{INVALID, 0x66380d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilpd, 0x66380d18, "vpermilpd", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tvex[78][2]},
{OP_vpermilpd, 0x66380d58, "vpermilpd", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* e_vex ext 79 */
{OP_seto, 0x0f9010, "seto", Eb, xx, xx, xx, xx, mrm, fRO, END_LIST},
{PREFIX_EXT, 0x0f9010, "(prefix ext 144)", xx, xx, xx, xx, xx, mrm, x, 144},
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 80 */
{OP_setno, 0x0f9110, "setno", Eb, xx, xx, xx, xx, mrm, fRO, END_LIST},
{PREFIX_EXT, 0x0f9110, "(prefix ext 145)", xx, xx, xx, xx, xx, mrm, x, 145},
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 81 */
{OP_setb, 0x0f9210, "setb", Eb, xx, xx, xx, xx, mrm, fRC, END_LIST},
{PREFIX_EXT, 0x0f9210, "(prefix ext 146)", xx, xx, xx, xx, xx, mrm, x, 146},
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 82 */
{OP_setnb, 0x0f9310, "setnb", Eb, xx, xx, xx, xx, mrm, fRC, END_LIST},
{PREFIX_EXT, 0x0f9310, "(prefix ext 147)", xx, xx, xx, xx, xx, mrm, x, 147},
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 83 */
{OP_cmovno, 0x0f4110, "cmovno", Gv, xx, Ev, xx, xx, mrm|predcc, fRO, END_LIST},
{PREFIX_EXT, 0x0f4110, "(prefix ext 148)", xx, xx, xx, xx, xx, mrm, x, 148},
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 84 */
{OP_cmovb, 0x0f4210, "cmovb", Gv, xx, Ev, xx, xx, mrm|predcc, fRC, END_LIST},
{PREFIX_EXT, 0x0f4210, "(prefix ext 149)", xx, xx, xx, xx, xx, mrm, x, 149},
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 85 */
{OP_cmovnp, 0x0f4b10, "cmovnp", Gv, xx, Ev, xx, xx, mrm|predcc, fRP, END_LIST},
{PREFIX_EXT, 0x0f4b10, "(prefix ext 150)", xx, xx, xx, xx, xx, mrm, x, 150},
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 86 */
{OP_cmovz, 0x0f4410, "cmovz", Gv, xx, Ev, xx, xx, mrm|predcc, fRZ, END_LIST},
{PREFIX_EXT, 0x0f4410, "(prefix ext 151)", xx, xx, xx, xx, xx, mrm, x, 151},
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 87 */
{OP_cmovnz, 0x0f4510, "cmovnz", Gv, xx, Ev, xx, xx, mrm|predcc, fRZ, END_LIST},
{PREFIX_EXT, 0x0f4510, "(prefix ext 152)", xx, xx, xx, xx, xx, mrm, x, 152},
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 88 */
{OP_cmovbe, 0x0f4610, "cmovbe", Gv, xx, Ev, xx, xx, mrm|predcc, (fRC|fRZ), END_LIST},
{PREFIX_EXT, 0x0f4610, "(prefix ext 153)", xx, xx, xx, xx, xx, mrm, x, 153},
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 89 */
{OP_cmovnbe, 0x0f4710, "cmovnbe", Gv, xx, Ev, xx, xx, mrm|predcc, (fRC|fRZ), END_LIST},
{PREFIX_EXT, 0x0f4710, "(prefix ext 154)", xx, xx, xx, xx, xx, mrm, x, 154},
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 90 */
{OP_cmovp, 0x0f4a10, "cmovp", Gv, xx, Ev, xx, xx, mrm|predcc, fRP, END_LIST},
{PREFIX_EXT, 0x0f4a10, "(prefix ext 155)", xx, xx, xx, xx, xx, mrm, x, 155},
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 91 */
{OP_sets, 0x0f9810, "sets", Eb, xx, xx, xx, xx, mrm, fRS, END_LIST},
{PREFIX_EXT, 0x0f9810, "(prefix ext 156)", xx, xx, xx, xx, xx, mrm, x, 156},
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 92 */
{OP_setns, 0x0f9910, "setns", Eb, xx, xx, xx, xx, mrm, fRS, END_LIST},
{PREFIX_EXT, 0x0f9910, "(prefix ext 157)", xx, xx, xx, xx, xx, mrm, x, 157},
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 93 */
{INVALID, 0x66389810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389818, "(vex_W ext 0)", xx, xx, xx, xx, xx, mrm|vex, x, 0},
{EVEX_W_EXT, 0x66389818, "(evex_W ext 62)", xx, xx, xx, xx, xx, mrm|evex, x, 62},
}, { /* e_vex ext 94 */
{INVALID, 0x6638a810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638a818, "(vex_W ext 1)", xx, xx, xx, xx, xx, mrm|vex, x, 1},
{EVEX_W_EXT, 0x6638a818, "(evex_W ext 63)", xx, xx, xx, xx, xx, mrm|evex, x, 63},
}, { /* e_vex ext 95 */
{INVALID, 0x6638b810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638b818, "(vex_W ext 2)", xx, xx, xx, xx, xx, mrm|vex, x, 2},
{EVEX_W_EXT, 0x6638b818, "(evex_W ext 64)", xx, xx, xx, xx, xx, mrm|evex, x, 64},
}, { /* e_vex ext 96 */
{INVALID, 0x66389910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389918, "(vex_W ext 3)", xx, xx, xx, xx, xx, mrm|vex, x, 3},
{EVEX_W_EXT, 0x66389918, "(evex_W ext 65)", xx, xx, xx, xx, xx, mrm|evex, x, 65},
}, { /* e_vex ext 97 */
{INVALID, 0x6638a910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638a918, "(vex_W ext 4)", xx, xx, xx, xx, xx, mrm|vex, x, 4},
{EVEX_W_EXT, 0x6638a918, "(evex_W ext 66)", xx, xx, xx, xx, xx, mrm|evex, x, 66},
}, { /* e_vex ext 98 */
{INVALID, 0x6638b910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638b918, "(vex_W ext 5)", xx, xx, xx, xx, xx, mrm|vex, x, 5},
{EVEX_W_EXT, 0x6638b918, "(evex_W ext 67)", xx, xx, xx, xx, xx, mrm|evex, x, 67},
}, { /* e_vex ext 99 */
{INVALID, 0x66389610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389618, "(vex_W ext 6)", xx, xx, xx, xx, xx, mrm|vex, x, 6},
{EVEX_W_EXT, 0x66389618, "(evex_W ext 68)", xx, xx, xx, xx, xx, mrm|evex, x, 68},
}, { /* e_vex ext 100 */
{INVALID, 0x6638a610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638a618, "(vex_W ext 7)", xx, xx, xx, xx, xx, mrm|vex, x, 7},
{EVEX_W_EXT, 0x6638a618, "(evex_W ext 69)", xx, xx, xx, xx, xx, mrm|evex, x, 69},
}, { /* e_vex ext 101 */
{INVALID, 0x6638b610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638b618, "(vex_W ext 8)", xx, xx, xx, xx, xx, mrm|vex, x, 8},
{EVEX_W_EXT, 0x6638b618, "(evex_W ext 70)", xx, xx, xx, xx, xx, mrm|evex, x, 70},
}, { /* e_vex ext 102 */
{INVALID, 0x66389710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389718, "(vex_W ext 9)", xx, xx, xx, xx, xx, mrm|vex, x, 9},
{EVEX_W_EXT, 0x66389718, "(evex_W ext 71)", xx, xx, xx, xx, xx, mrm|evex, x, 71},
}, { /* e_vex ext 103 */
{INVALID, 0x6638a710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638a718, "(vex_W ext 10)", xx, xx, xx, xx, xx, mrm|vex, x, 10},
{EVEX_W_EXT, 0x6638a718, "(evex_W ext 72)", xx, xx, xx, xx, xx, mrm|evex, x, 72},
}, { /* e_vex ext 104 */
{INVALID, 0x6638b710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638b718, "(vex_W ext 11)", xx, xx, xx, xx, xx, mrm|vex, x, 11},
{EVEX_W_EXT, 0x6638b718, "(evex_W ext 73)", xx, xx, xx, xx, xx, mrm|evex, x, 73},
}, { /* e_vex ext 105 */
{INVALID, 0x66389a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389a18, "(vex_W ext 12)", xx, xx, xx, xx, xx, mrm|vex, x, 12},
{EVEX_W_EXT, 0x66389a18, "(evex_W ext 74)", xx, xx, xx, xx, xx, mrm|evex, x, 74},
}, { /* e_vex ext 106 */
{INVALID, 0x6638aa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638aa18, "(vex_W ext 13)", xx, xx, xx, xx, xx, mrm|vex, x, 13},
{EVEX_W_EXT, 0x6638aa18, "(evex_W ext 75)", xx, xx, xx, xx, xx, mrm|evex, x, 75},
}, { /* e_vex ext 107 */
{INVALID, 0x6638ba10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638ba18, "(vex_W ext 14)", xx, xx, xx, xx, xx, mrm|vex, x, 14},
{EVEX_W_EXT, 0x6638ba18, "(evex_W ext 76)", xx, xx, xx, xx, xx, mrm|evex, x, 76},
}, { /* e_vex ext 108 */
{INVALID, 0x66389b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389b18, "(vex_W ext 15)", xx, xx, xx, xx, xx, mrm|vex, x, 15},
{EVEX_W_EXT, 0x66389b18, "(evex_W ext 77)", xx, xx, xx, xx, xx, mrm|evex, x, 77},
}, { /* e_vex ext 109 */
{INVALID, 0x6638ab10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638ab18, "(vex_W ext 16)", xx, xx, xx, xx, xx, mrm|vex, x, 16},
{EVEX_W_EXT, 0x6638ab18, "(evex_W ext 78)", xx, xx, xx, xx, xx, mrm|evex, x, 78},
}, { /* e_vex ext 110 */
{INVALID, 0x6638bb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638bb18, "(vex_W ext 17)", xx, xx, xx, xx, xx, mrm|vex, x, 17},
{EVEX_W_EXT, 0x6638bb18, "(evex_W ext 79)", xx, xx, xx, xx, xx, mrm|evex, x, 79},
}, { /* e_vex ext 111 */
{INVALID, 0x66389c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389c18, "(vex_W ext 18)", xx, xx, xx, xx, xx, mrm|vex, x, 18},
{EVEX_W_EXT, 0x66389c18, "(evex_W ext 80)", xx, xx, xx, xx, xx, mrm|evex, x, 80},
}, { /* e_vex ext 112 */
{INVALID, 0x6638ac10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638ac18, "(vex_W ext 19)", xx, xx, xx, xx, xx, mrm|vex, x, 19},
{EVEX_W_EXT, 0x6638ac18, "(evex_W ext 81)", xx, xx, xx, xx, xx, mrm|evex, x, 81},
}, { /* e_vex ext 113 */
{INVALID, 0x6638bc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638bc18, "(vex_W ext 20)", xx, xx, xx, xx, xx, mrm|vex, x, 20},
{EVEX_W_EXT, 0x6638bc18, "(evex_W ext 82)", xx, xx, xx, xx, xx, mrm|evex, x, 82},
}, { /* e_vex ext 114 */
{INVALID, 0x66389d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389d18, "(vex_W ext 21)", xx, xx, xx, xx, xx, mrm|vex, x, 21},
{EVEX_W_EXT, 0x66389d18, "(evex_W ext 83)", xx, xx, xx, xx, xx, mrm|evex, x, 83},
}, { /* e_vex ext 115 */
{INVALID, 0x6638ad10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638ad18, "(vex_W ext 22)", xx, xx, xx, xx, xx, mrm|vex, x, 22},
{EVEX_W_EXT, 0x6638ad18, "(evex_W ext 84)", xx, xx, xx, xx, xx, mrm|evex, x, 84},
}, { /* e_vex ext 116 */
{INVALID, 0x6638bd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638bd18, "(vex_W ext 23)", xx, xx, xx, xx, xx, mrm|vex, x, 23},
{EVEX_W_EXT, 0x6638bd18, "(evex_W ext 85)", xx, xx, xx, xx, xx, mrm|evex, x, 85},
}, { /* e_vex ext 117 */
{INVALID, 0x66389e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389e18, "(vex_W ext 24)", xx, xx, xx, xx, xx, mrm|vex, x, 24},
{EVEX_W_EXT, 0x66389e18, "(evex_W ext 86)", xx, xx, xx, xx, xx, mrm|evex, x, 86},
}, { /* e_vex ext 118 */
{INVALID, 0x6638ae10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638ae18, "(vex_W ext 25)", xx, xx, xx, xx, xx, mrm|vex, x, 25},
{EVEX_W_EXT, 0x6638ae18, "(evex_W ext 87)", xx, xx, xx, xx, xx, mrm|evex, x, 87},
}, { /* e_vex ext 119 */
{INVALID, 0x6638be10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638be18, "(vex_W ext 26)", xx, xx, xx, xx, xx, mrm|vex, x, 26},
{EVEX_W_EXT, 0x6638be18, "(evex_W ext 88)", xx, xx, xx, xx, xx, mrm|evex, x, 88},
}, { /* e_vex ext 120 */
{INVALID, 0x66389f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66389f18, "(vex_W ext 27)", xx, xx, xx, xx, xx, mrm|vex, x, 27},
{EVEX_W_EXT, 0x66389f18, "(evex_W ext 89)", xx, xx, xx, xx, xx, mrm|evex, x, 89},
}, { /* e_vex ext 121 */
{INVALID, 0x6638af10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638af18, "(vex_W ext 28)", xx, xx, xx, xx, xx, mrm|vex, x, 28},
{EVEX_W_EXT, 0x6638af18, "(evex_W ext 90)", xx, xx, xx, xx, xx, mrm|evex, x, 90},
}, { /* e_vex ext 122 */
{INVALID, 0x6638bf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x6638bf18, "(vex_W ext 29)", xx, xx, xx, xx, xx, mrm|vex, x, 29},
{EVEX_W_EXT, 0x6638bf18, "(evex_W ext 91)", xx, xx, xx, xx, xx, mrm|evex, x, 91},
}, { /* e_vex ext 123 */
{INVALID, 0x66383610, "(bad)", xx, xx, xx, xx, xx, no, x, NA },
{OP_vpermd, 0x66383618, "vpermd", Vqq, xx, Hqq, Wqq, xx, mrm|vex|reqp, x, tevexw[92][0]},
{EVEX_W_EXT, 0x66383618, "(evex_W ext 92)", xx, xx, xx, xx, xx, mrm|evex, x, 92},
}, { /* e_vex ext 124 */
{INVALID, 0x66381610, "(bad)", xx, xx, xx, xx, xx, no, x, NA },
{OP_vpermps, 0x66381618, "vpermps", Vqq, xx, Hqq, Wqq, xx, mrm|vex|reqp, x, tevexw[93][0]},
{EVEX_W_EXT, 0x66381618, "(evex_W ext 93)", xx, xx, xx, xx, xx, mrm|evex, x, 93 },
}, { /* e_vex ext 125 */
{INVALID, 0x663a0010, "(bad)", xx, xx, xx, xx, xx, no, x, NA },
{OP_vpermq, 0x663a0058, "vpermq", Vqq, xx, Wqq, Ib, xx, mrm|vex|reqp, x, tvex[125][2]},
{OP_vpermq, 0x663a0058, "vpermq", Vf, xx, KEb, Wf, Ib, mrm|evex|reqp, x, tevexw[92][1]},
}, { /* e_vex ext 126 */
{INVALID, 0x663a0010, "(bad)", xx, xx, xx, xx, xx, no, x, NA },
{OP_vpermpd, 0x663a0158, "vpermpd", Vqq, xx, Wqq, Ib, xx, mrm|vex|reqp, x, tvex[126][2]},
{OP_vpermpd, 0x663a0158, "vpermpd", Vf, xx, KEw, Wf, Ib, mrm|evex|reqp, x, tevexw[93][1]},
}, { /* e_vex ext 127 */
{INVALID, 0x663a3918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vextracti128,0x663a3918, "vextracti128", Wdq, xx, Vqq, Ib, xx, mrm|vex|reqp, x, END_LIST},
{EVEX_W_EXT, 0x663a3918, "(evex_W ext 102)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 102},
}, { /* e_vex ext 128 */
{INVALID, 0x663a3818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vinserti128, 0x663a3818, "vinserti128", Vqq, xx, Hqq, Wdq, Ib, mrm|vex|reqp, x, END_LIST},
{EVEX_W_EXT, 0x663a3818, "(evex_W ext 106)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 106},
}, { /* e_vex ext 129 */
{OP_blendvpd, 0x66381518, "blendvpd", Vdq, xx, Wdq, xmm0, Vdq, mrm|reqp, x, END_LIST},
{INVALID, 0x66381518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x381518, "(prefix ext 168)", xx, xx, xx, xx, xx, mrm|evex, x, 168},
}, { /* e_vex ext 130 */
{OP_blendvps, 0x66381418, "blendvps", Vdq, xx, Wdq, xmm0, Vdq, mrm|reqp, x, END_LIST},
{INVALID, 0x66381418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x381418, "(prefix ext 165)", xx, xx, xx, xx, xx, mrm, x, 165},
}, { /* e_vex ext 131 */
{INVALID, 0x66384618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsravd, 0x66384618, "vpsravd", Vx, xx, Hx, Wx, xx, mrm|vex|reqp, x, tevexw[127][0]},
{EVEX_W_EXT, 0x66384618, "(evex_W ext 127)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 127},
}, { /* e_vex ext 132 */
{OP_pblendvb, 0x66381018, "pblendvb", Vdq, xx, Wdq, xmm0, Vdq, mrm|reqp,x, END_LIST},
{INVALID, 0x66381018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x381018, "(prefix ext 177)", xx, xx, xx, xx, xx, mrm|evex, x, 177},
}, { /* e_vex ext 133 */
{INVALID, 0x66384518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66384518, "(vex_W ext 72)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 72},
{EVEX_W_EXT, 0x66384518, "(evex_W ext 128)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 128},
}, { /* e_vex ext 134 */
{INVALID, 0x66384718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x66384718, "(vex_W ext 73)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 73},
{EVEX_W_EXT, 0x66384718, "(evex_W ext 130)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 130},
}, { /* e_vex ext 135 */
{INVALID, 0x66387818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastb, 0x66387818, "vpbroadcastb", Vx, xx, Wb_dq, xx, xx, mrm|vex|reqp, x, tvex[135][2]},
{OP_vpbroadcastb, 0x66387818, "vpbroadcastb", Ve, xx, KEq, Wb_dq, xx, mrm|evex|reqp, x, t38[135]},
}, { /* e_vex ext 136 */
{INVALID, 0x66387918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastw, 0x66387918, "vpbroadcastw", Vx, xx, Ww_dq, xx, xx, mrm|vex|reqp, x, tvex[136][2]},
{OP_vpbroadcastw, 0x66387918, "vpbroadcastw", Ve, xx, KEd, Ww_dq, xx, mrm|evex|reqp, x, t38[136]},
}, { /* e_vex ext 137 */
{INVALID, 0x66385818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastd, 0x66385818, "vpbroadcastd", Vx, xx, Wd_dq, xx, xx, mrm|vex|reqp, x, tvex[137][2]},
{OP_vpbroadcastd, 0x66385818, "vpbroadcastd", Ve, xx, KEw, Wd_dq, xx, mrm|evex|reqp, x, tevexw[150][0]},
}, { /* e_vex ext 138 */
{INVALID, 0x66385918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpbroadcastq, 0x66385918, "vpbroadcastq", Vx, xx, Wq_dq, xx, xx, mrm|vex|reqp, x, tevexw[151][1]},
{EVEX_W_EXT, 0x66385918, "(evex_W ext 151)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 151},
}, { /* e_vex ext 139 */
{INVALID, 0x66385a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcasti128, 0x66385a18, "vbroadcasti128", Vqq, xx, Mdq, xx, xx, mrm|vex|reqp, x, END_LIST},
{EVEX_W_EXT, 0x66385a18, "(evex_W ext 152)", xx, xx, xx, xx, xx, mrm|evex|reqp, x, 152},
},
};
/****************************************************************************
* Instructions that differ depending on mod and rm bits in modrm byte
* For mod, entry 0 is all mem ref mod values (0,1,2) while entry 1 is 3.
* For the mem ref, we give just one of the 3 possible modrm bytes
* (we only use it when encoding so we don't need all 3).
*/
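/* Illustrative note (editor's sketch, not original documentation): the modrm byte is
 * mod[7:6] reg[5:3] rm[2:0], and only the mod field selects the row here.  E.g. for
 * mod extension 3 below, 0F AE /7 with a memory operand (mod != 3) decodes as
 * clflush, while the register form 0F AE F8 (mod == 3) decodes as sfence.
 */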
const instr_info_t mod_extensions[][2] = {
{ /* mod extension 0 */
{OP_sgdt, 0x0f0130, "sgdt", Ms, xx, xx, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0171, "(group 7 mod + rm ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
},
{ /* mod extension 1 */
{OP_sidt, 0x0f0131, "sidt", Ms, xx, xx, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0171, "(group 7 mod + rm ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
},
{ /* mod extension 2 */
{OP_invlpg, 0x0f0137, "invlpg", xx, xx, Mm, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0177, "(group 7 mod + rm ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
},
{ /* mod extension 3 */
{OP_clflush, 0x0fae37, "clflush", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_sfence, 0xf80fae77, "sfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 4 */
{OP_lidt, 0x0f0133, "lidt", xx, xx, Ms, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0173, "(group 7 mod + rm ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
},
{ /* mod extension 5 */
{OP_lgdt, 0x0f0132, "lgdt", xx, xx, Ms, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0172, "(group 7 mod + rm ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
},
{ /* mod extension 6 */
{REX_W_EXT, 0x0fae35, "(rex.w ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
/* note that gdb thinks e9-ef are "lfence (bad)" (PR 239920) */
{OP_lfence, 0xe80fae75, "lfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 7 */
{REX_W_EXT, 0x0fae36, "(rex.w ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
{OP_mfence, 0xf00fae76, "mfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 8 */
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, Wss, xx, xx, mrm|vex, x, modx[10][0]},
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, H12_dq, Uss, xx, mrm|vex, x, modx[10][1]},
},
{ /* mod extension 9 */
{OP_vmovsd, 0xf20f1010, "vmovsd", Vdq, xx, Wsd, xx, xx, mrm|vex, x, modx[11][0]},
{OP_vmovsd, 0xf20f1010, "vmovsd", Vdq, xx, Hsd, Usd, xx, mrm|vex, x, modx[11][1]},
},
{ /* mod extension 10 */
{OP_vmovss, 0xf30f1110, "vmovss", Wss, xx, Vss, xx, xx, mrm|vex, x, modx[ 8][1]},
{OP_vmovss, 0xf30f1110, "vmovss", Udq, xx, H12_dq, Vss, xx, mrm|vex, x, modx[20][0]},
},
{ /* mod extension 11 */
{OP_vmovsd, 0xf20f1110, "vmovsd", Wsd, xx, Vsd, xx, xx, mrm|vex, x, modx[ 9][1]},
{OP_vmovsd, 0xf20f1110, "vmovsd", Udq, xx, Hsd, Vsd, xx, mrm|vex, x, modx[21][0]},
},
{ /* mod extension 12 */
{PREFIX_EXT, 0x0fc736, "(prefix ext 137)", xx, xx, xx, xx, xx, no, x, 137},
{OP_rdrand, 0x0fc736, "rdrand", Rv, xx, xx, xx, xx, mrm, fW6, END_LIST},
},
{ /* mod extension 13 */
/* The latest Intel table implies that a 0x66 prefix makes this an invalid instr,
 * but it's not worth explicitly encoding that until we have more information.
*/
{OP_vmptrst, 0x0fc737, "vmptrst", Mq, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_rdseed, 0x0fc737, "rdseed", Rv, xx, xx, xx, xx, mrm, fW6, END_LIST},
},
{ /* mod extension 14 */
{REX_W_EXT, 0x0fae30, "(rex.w ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
/* Using reqp to avoid having to create a whole prefix_ext entry for one opcode.
* Ditto below.
*/
{OP_rdfsbase,0xf30fae30, "rdfsbase", Ry, xx, xx, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 15 */
{REX_W_EXT, 0x0fae31, "(rex.w ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
{OP_rdgsbase,0xf30fae31, "rdgsbase", Ry, xx, xx, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 16 */
{E_VEX_EXT, 0x0fae32, "(e_vex ext 61)", xx, xx, xx, xx, xx, mrm, x, 61},
{OP_wrfsbase,0xf30fae32, "wrfsbase", xx, xx, Ry, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 17 */
{E_VEX_EXT, 0x0fae33, "(e_vex ext 62)", xx, xx, xx, xx, xx, mrm, x, 62},
{OP_wrgsbase,0xf30fae33, "wrgsbase", xx, xx, Ry, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 18 */
/* load from memory zeroes top bits */
{OP_movss, 0xf30f1010, "movss", Vdq, xx, Mss, xx, xx, mrm, x, modx[18][1]},
{OP_movss, 0xf30f1010, "movss", Vss, xx, Uss, xx, xx, mrm, x, tpe[1][1]},
},
{ /* mod extension 19 */
/* load from memory zeroes top bits */
{OP_movsd, 0xf20f1010, "movsd", Vdq, xx, Msd, xx, xx, mrm, x, modx[19][1]},
{OP_movsd, 0xf20f1010, "movsd", Vsd, xx, Usd, xx, xx, mrm, x, tpe[1][3]},
},
{ /* mod extension 20 */
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, KE1b, Wss, xx, mrm|evex, x, modx[22][0]},
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, KE1b, H12_dq, Uss, mrm|evex, x, modx[22][1]},
},
{ /* mod extension 21 */
{OP_vmovsd, 0xf20f1050, "vmovsd", Vdq, xx, KE1b, Wsd, xx, mrm|evex, x, modx[23][0]},
{OP_vmovsd, 0xf20f1050, "vmovsd", Vdq, xx, KE1b, Hsd, Usd, mrm|evex, x, modx[23][1]},
},
{ /* mod extension 22 */
{OP_vmovss, 0xf30f1110, "vmovss", Wss, xx, KE1b, Vss, xx, mrm|evex, x, modx[20][1]},
{OP_vmovss, 0xf30f1110, "vmovss", Udq, xx, KE1b, H12_dq, Vss, mrm|evex, x, END_LIST},
},
{ /* mod extension 23 */
{OP_vmovsd, 0xf20f1150, "vmovsd", Wsd, xx, KE1b, Vsd, xx, mrm|evex, x, modx[21][1]},
{OP_vmovsd, 0xf20f1150, "vmovsd", Udq, xx, KE1b, Hsd, Vsd, mrm|evex, x, END_LIST},
},
};
/* Naturally all of these have modrm bytes even if they have no explicit operands */
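/* Illustrative note (editor's sketch, not original documentation): these rows are
 * indexed by the rm field (bits 2:0) of the modrm byte once mod == 3 has been
 * matched.  E.g. in rm extension 0 below, 0F 01 C1 (rm == 1) decodes as vmcall and
 * 0F 01 C2 (rm == 2) as vmlaunch.
 */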
const instr_info_t rm_extensions[][8] = {
{ /* rm extension 0 */
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmcall, 0xc10f0171, "vmcall", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmlaunch, 0xc20f0171, "vmlaunch", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmresume, 0xc30f0171, "vmresume", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmxoff, 0xc40f0171, "vmxoff", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 1 */
{OP_monitor, 0xc80f0171, "monitor", xx, xx, eax, ecx, edx, mrm, x, END_LIST},
{OP_mwait, 0xc90f0171, "mwait", xx, xx, eax, ecx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 2 */
{OP_swapgs, 0xf80f0177, "swapgs", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_rdtscp, 0xf90f0177, "rdtscp", edx, eax, xx, xx, xx, mrm|xop, x, exop[10]},/*AMD-only*/
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 3 */
{OP_vmrun, 0xd80f0173, "vmrun", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_vmmcall,0xd90f0173, "vmmcall", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_vmload, 0xda0f0173, "vmload", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_vmsave, 0xdb0f0173, "vmsave", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_stgi, 0xdc0f0173, "stgi", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_clgi, 0xdd0f0173, "clgi", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_skinit, 0xde0f0173, "skinit", xx, xx, eax, xx, xx, mrm, x, END_LIST},
{OP_invlpga,0xdf0f0173, "invlpga", xx, xx, axAX, ecx, xx, mrm, x, END_LIST},
},
{ /* rm extension 4 */
{OP_xgetbv, 0xd00f0172, "xgetbv", edx, eax, ecx, xx, xx, mrm, x, END_LIST},
{OP_xsetbv, 0xd10f0172, "xsetbv", xx, xx, ecx, edx, eax, mrm, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmfunc, 0xd40f0172, "vmfunc", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
/* Only if the transaction fails does xend write to eax => predcx.
* XXX i#1314: on failure eip is also written to.
*/
{OP_xend, 0xd50f0172, "xend", eax, xx, xx, xx, xx, mrm|predcx, x, NA},
{OP_xtest, 0xd60f0172, "xtest", xx, xx, xx, xx, xx, mrm, fW6, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Instructions that differ depending on whether in 64-bit mode
*/
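/* Illustrative note (editor's sketch, not original documentation): row [0] gives the
 * non-64-bit (i64) meaning and row [1] the 64-bit (o64) one.  E.g. byte 0x40 below
 * decodes as "inc eax" outside 64-bit mode but is consumed as a REX prefix in
 * 64-bit mode.
 */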
const instr_info_t x64_extensions[][2] = {
{ /* x64_ext 0 */
{OP_inc, 0x400000, "inc", zAX, xx, zAX, xx, xx, i64, (fW6&(~fWC)), t64e[1][0]},
{PREFIX, 0x400000, "rex", xx, xx, xx, xx, xx, no, x, PREFIX_REX_GENERAL},
}, { /* x64_ext 1 */
{OP_inc, 0x410000, "inc", zCX, xx, zCX, xx, xx, i64, (fW6&(~fWC)), t64e[2][0]},
{PREFIX, 0x410000, "rex.b", xx, xx, xx, xx, xx, no, x, PREFIX_REX_B},
}, { /* x64_ext 2 */
{OP_inc, 0x420000, "inc", zDX, xx, zDX, xx, xx, i64, (fW6&(~fWC)), t64e[3][0]},
{PREFIX, 0x420000, "rex.x", xx, xx, xx, xx, xx, no, x, PREFIX_REX_X},
}, { /* x64_ext 3 */
{OP_inc, 0x430000, "inc", zBX, xx, zBX, xx, xx, i64, (fW6&(~fWC)), t64e[4][0]},
{PREFIX, 0x430000, "rex.xb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 4 */
{OP_inc, 0x440000, "inc", zSP, xx, zSP, xx, xx, i64, (fW6&(~fWC)), t64e[5][0]},
{PREFIX, 0x440000, "rex.r", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R},
}, { /* x64_ext 5 */
{OP_inc, 0x450000, "inc", zBP, xx, zBP, xx, xx, i64, (fW6&(~fWC)), t64e[6][0]},
{PREFIX, 0x450000, "rex.rb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_B},
}, { /* x64_ext 6 */
{OP_inc, 0x460000, "inc", zSI, xx, zSI, xx, xx, i64, (fW6&(~fWC)), t64e[7][0]},
{PREFIX, 0x460000, "rex.rx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_X},
}, { /* x64_ext 7 */
{OP_inc, 0x470000, "inc", zDI, xx, zDI, xx, xx, i64, (fW6&(~fWC)), tex[12][0]},
{PREFIX, 0x470000, "rex.rxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 8 */
{OP_dec, 0x480000, "dec", zAX, xx, zAX, xx, xx, i64, (fW6&(~fWC)), t64e[9][0]},
{PREFIX, 0x480000, "rex.w", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W},
}, { /* x64_ext 9 */
{OP_dec, 0x490000, "dec", zCX, xx, zCX, xx, xx, i64, (fW6&(~fWC)), t64e[10][0]},
{PREFIX, 0x490000, "rex.wb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_B},
}, { /* x64_ext 10 */
{OP_dec, 0x4a0000, "dec", zDX, xx, zDX, xx, xx, i64, (fW6&(~fWC)), t64e[11][0]},
{PREFIX, 0x4a0000, "rex.wx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_X},
}, { /* x64_ext 11 */
{OP_dec, 0x4b0000, "dec", zBX, xx, zBX, xx, xx, i64, (fW6&(~fWC)), t64e[12][0]},
{PREFIX, 0x4b0000, "rex.wxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 12 */
{OP_dec, 0x4c0000, "dec", zSP, xx, zSP, xx, xx, i64, (fW6&(~fWC)), t64e[13][0]},
{PREFIX, 0x4c0000, "rex.wr", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R},
}, { /* x64_ext 13 */
{OP_dec, 0x4d0000, "dec", zBP, xx, zBP, xx, xx, i64, (fW6&(~fWC)), t64e[14][0]},
{PREFIX, 0x4d0000, "rex.wrb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_B},
}, { /* x64_ext 14 */
{OP_dec, 0x4e0000, "dec", zSI, xx, zSI, xx, xx, i64, (fW6&(~fWC)), t64e[15][0]},
{PREFIX, 0x4e0000, "rex.wrx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_X},
}, { /* x64_ext 15 */
{OP_dec, 0x4f0000, "dec", zDI, xx, zDI, xx, xx, i64, (fW6&(~fWC)), tex[12][1]},
{PREFIX, 0x4f0000, "rex.wrxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 16 */
{OP_arpl, 0x630000, "arpl", Ew, xx, Gw, xx, xx, mrm|i64, fWZ, END_LIST},
{OP_movsxd, 0x630000, "movsxd", Gv, xx, Ed, xx, xx, mrm|o64, x, END_LIST},
},
};
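/* Illustrative sketch only (hypothetical helper): each x64_extensions row pairs
 * the 32-bit interpretation (index 0, e.g. inc/dec) with the 64-bit
 * interpretation (index 1, e.g. a rex prefix), so selection reduces to whether
 * we are decoding 64-bit code.  The in_x64_mode parameter is assumed here.
 */
static inline const instr_info_t *
example_x64_extension_lookup(int row, int in_x64_mode)
{
    return &x64_extensions[row][in_x64_mode ? 1 : 0];
}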
/****************************************************************************
* Instructions that differ depending on the first two bits of the 2nd byte,
* or whether in x64 mode.
*/
const instr_info_t vex_prefix_extensions[][2] = {
{ /* vex_prefix_ext 0 */
{OP_les, 0xc40000, "les", Gz, es, Mp, xx, xx, mrm|i64, x, END_LIST},
{PREFIX, 0xc40000, "vex+2b", xx, xx, xx, xx, xx, no, x, PREFIX_VEX_3B},
}, { /* vex_prefix_ext 1 */
{OP_lds, 0xc50000, "lds", Gz, ds, Mp, xx, xx, mrm|i64, x, END_LIST},
{PREFIX, 0xc50000, "vex+1b", xx, xx, xx, xx, xx, no, x, PREFIX_VEX_2B},
},
};
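/* Illustrative sketch only (hypothetical helper): 0xc4/0xc5 is a vex prefix in
 * 64-bit mode, and in 32-bit mode only when the top two bits of the following
 * byte are both 1 (an les/lds modrm with mod==3 would be invalid, which is why
 * that encoding space could be reused).  Index 0 = legacy les/lds, index 1 = vex.
 */
static inline const instr_info_t *
example_vex_prefix_lookup(int row, int in_x64_mode, byte next_byte)
{
    int is_vex = in_x64_mode || (next_byte & 0xc0) == 0xc0;
    return &vex_prefix_extensions[row][is_vex ? 1 : 0];
}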
/****************************************************************************
* Instructions that differ depending on bits 4 and 5 of the 2nd byte.
*/
const instr_info_t xop_prefix_extensions[][2] = {
{ /* xop_prefix_ext 0 */
{EXTENSION, 0x8f0000, "(group 1d)", xx, xx, xx, xx, xx, mrm, x, 26},
{PREFIX, 0x8f0000, "xop", xx, xx, xx, xx, xx, no, x, PREFIX_XOP},
},
};
/****************************************************************************
* Instructions that differ depending on whether vex-encoded and vex.L
* Index 0 = no vex, 1 = vex and vex.L=0, 2 = vex and vex.L=1
*/
const instr_info_t vex_L_extensions[][3] = {
{ /* vex_L_ext 0 */
{OP_emms, 0x0f7710, "emms", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vzeroupper, 0x0f7710, "vzeroupper", xx, xx, xx, xx, xx, vex, x, END_LIST},
{OP_vzeroall, 0x0f7790, "vzeroall", xx, xx, xx, xx, xx, vex, x, END_LIST},
},
};
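/* Illustrative sketch only (hypothetical helper): vex_L_extensions rows hold
 * the non-vex instruction at index 0 and the vex encodings at index 1 (L=0)
 * and index 2 (L=1), assuming has_vex and vex_l were already extracted from
 * the prefix bytes (e.g. emms vs. vzeroupper vs. vzeroall above).
 */
static inline const instr_info_t *
example_vex_L_lookup(int row, int has_vex, int vex_l)
{
    int idx = !has_vex ? 0 : (vex_l ? 2 : 1);
    return &vex_L_extensions[row][idx];
}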
/****************************************************************************
* Instructions that differ depending on whether evex-encoded.
* Index 0 = no evex, 1 = evex
*/
const instr_info_t evex_prefix_extensions[][2] = {
{ /* evex_prefix_ext */
{OP_bound, 0x620000, "bound", xx, xx, Gv, Ma, xx, mrm|i64, x, END_LIST},
{PREFIX, 0x620000, "(evex prefix)", xx, xx, xx, xx, xx, no, x, PREFIX_EVEX},
},
};
/****************************************************************************
* Instructions that differ depending on whether a rex prefix is present.
*/
/* Instructions that differ depending on whether rex.b is present.
* The table is indexed by rex.b: index 0 is for no rex.b.
*/
const instr_info_t rex_b_extensions[][2] = {
{ /* rex.b extension 0 */
{OP_nop, 0x900000, "nop", xx, xx, xx, xx, xx, no, x, tpe[103][2]},
/* For decoding we avoid needing new operand types by only getting
* here if rex.b is set. For encode, we would need either to take
* REQUIRES_REX + OPCODE_SUFFIX or a new operand type for registers that
* must be extended (could also try to list r8 instead of eax but
* have to make sure all decode/encode routines can handle that as most
* assume the registers listed here are 32-bit base): that's too
* much effort for a corner case that we're not 100% certain works on
* all x64 processors, so we just don't list in the encoding chain.
*/
{OP_xchg, 0x900000, "xchg", eAX_x, eAX, eAX_x, eAX, xx, o64, x, END_LIST},
},
};
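/* Illustrative encodings for the rex.b split above (sketch only): a plain 0x90
 * is the nop at index 0, while prepending a rex.b prefix selects index 1,
 * because the eax slot in the xchg-with-eax encoding is promoted to r8d:
 *   90       -> nop
 *   41 90    -> xchg r8d, eax   (0x41 = rex.b)
 */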
/* Instructions that differ depending on whether rex.w is present.
* The table is indexed by rex.w: index 0 is for no rex.w.
*/
const instr_info_t rex_w_extensions[][2] = {
{ /* rex.w extension 0 */
{OP_fxsave32, 0x0fae30, "fxsave", Moq, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fxsave64, 0x0fae30, "fxsave64", Moq, xx, xx, xx, xx, mrm|rex, x, END_LIST},
},
{ /* rex.w extension 1 */
{OP_fxrstor32, 0x0fae31, "fxrstor", xx, xx, Moq, xx, xx, mrm, x, END_LIST},
{OP_fxrstor64, 0x0fae31, "fxrstor64", xx, xx, Moq, xx, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 2 */
{OP_xsave32, 0x0fae34, "xsave", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsave64, 0x0fae34, "xsave64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 3 */
{OP_xrstor32, 0x0fae35, "xrstor", xx, xx, Mxsave, edx, eax, mrm, x, END_LIST},
{OP_xrstor64, 0x0fae35, "xrstor64", xx, xx, Mxsave, edx, eax, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 4 */
{OP_xsaveopt32, 0x0fae36, "xsaveopt", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsaveopt64, 0x0fae36, "xsaveopt64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 5 */
{OP_xsavec32, 0x0fc734, "xsavec", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsavec64, 0x0fc734, "xsavec64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
};
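/* Illustrative encodings for the rex.w split above (sketch only): the index 1
 * forms are reached by adding a rex.w prefix to the same opcode/modrm bytes,
 * e.g. for rex.w extensions 0 and 2:
 *   0f ae /0      -> fxsave        48 0f ae /0   -> fxsave64
 *   0f ae /4      -> xsave         48 0f ae /4   -> xsave64
 */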
/****************************************************************************
* 3-byte-opcode instructions: 0x0f 0x38 and 0x0f 0x3a.
* SSSE3 and SSE4.
*
* XXX: if they add more 2nd byte possibilities, we could switch to one
* large table here and one extension type with indices into which subtable.
* For now we have two separate tables.
*
* N.B.: if any are added here that do not take modrm bytes, or whose
* size can vary based on data16 or addr16, we need to modify our
* decode_fast table assumptions!
*
* Many of these only come in Vdq,Wdq forms, yet still require the 0x66 prefix.
 * Rather than waste space in the prefix_extensions table on 4 entries, 3 of which
 * are invalid, and add another layer of lookup, we use the new REQUIRES_PREFIX
 * flag ("reqp").
*
* Since large parts of the opcode space are empty, we save space by having a
* table of 256 indices instead of 256 instr_info_t structs.
*/
const byte third_byte_38_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 96, 97, 56, 57, /* 0 */
16,127,128, 88, 17, 18,111, 19, 89, 90, 91,134, 13, 14, 15,133, /* 1 */
20, 21, 22, 23, 24, 25,148,149, 26, 27, 28, 29, 92, 93, 94, 95, /* 2 */
30, 31, 32, 33, 34, 35,112, 36, 37, 38, 39, 40, 41, 42, 43, 44, /* 3 */
45, 46,142,143, 156,113,114,115, 0, 0, 0, 0, 129,130,150,151, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 118,119,108,138, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 145,139,144, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0,123,122,121, 116,117,135,136, 137,124,125,126, /* 7 */
49, 50,103, 0, 0, 0, 0, 0, 141,147,140,146, 109,120,110, 0, /* 8 */
104,105,106,107, 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, /* 9 */
0, 0, 0, 0, 0, 0, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, /* A */
0, 0, 0, 0, 157,158, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, /* B */
0, 0, 0, 0, 155, 0, 0, 0, 154, 0,131,132, 152,153, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 52, 53, 54, 55, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
47, 48,100, 99, 0,101,102, 98, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t third_byte_38[] = {
{INVALID, 0x38ff18, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/**** SSSE3 ****/
{PREFIX_EXT, 0x380018, "(prefix ext 118)", xx, xx, xx, xx, xx, mrm, x, 118},/* 1*/
{PREFIX_EXT, 0x380118, "(prefix ext 119)", xx, xx, xx, xx, xx, mrm, x, 119},/* 2*/
{PREFIX_EXT, 0x380218, "(prefix ext 120)", xx, xx, xx, xx, xx, mrm, x, 120},/* 3*/
{PREFIX_EXT, 0x380318, "(prefix ext 121)", xx, xx, xx, xx, xx, mrm, x, 121},/* 4*/
{PREFIX_EXT, 0x380418, "(prefix ext 122)", xx, xx, xx, xx, xx, mrm, x, 122},/* 5*/
{PREFIX_EXT, 0x380518, "(prefix ext 123)", xx, xx, xx, xx, xx, mrm, x, 123},/* 6*/
{PREFIX_EXT, 0x380618, "(prefix ext 124)", xx, xx, xx, xx, xx, mrm, x, 124},/* 7*/
{PREFIX_EXT, 0x380718, "(prefix ext 125)", xx, xx, xx, xx, xx, mrm, x, 125},/* 8*/
{PREFIX_EXT, 0x380818, "(prefix ext 126)", xx, xx, xx, xx, xx, mrm, x, 126},/* 9*/
{PREFIX_EXT, 0x380918, "(prefix ext 127)", xx, xx, xx, xx, xx, mrm, x, 127},/*10*/
{PREFIX_EXT, 0x380a18, "(prefix ext 128)", xx, xx, xx, xx, xx, mrm, x, 128},/*11*/
{PREFIX_EXT, 0x380b18, "(prefix ext 129)", xx, xx, xx, xx, xx, mrm, x, 129},/*12*/
{PREFIX_EXT, 0x381c18, "(prefix ext 130)", xx, xx, xx, xx, xx, mrm, x, 130},/*13*/
{PREFIX_EXT, 0x381d18, "(prefix ext 131)", xx, xx, xx, xx, xx, mrm, x, 131},/*14*/
{PREFIX_EXT, 0x381e18, "(prefix ext 132)", xx, xx, xx, xx, xx, mrm, x, 132},/*15*/
/**** SSE4 ****/
{E_VEX_EXT, 0x66381018, "(e_vex ext 132)", xx, xx, xx, xx, xx, mrm, x, 132},/*16*/
{E_VEX_EXT, 0x381418, "(e_vex ext 130)", xx, xx, xx, xx, xx, mrm, x, 130},/*17*/
{E_VEX_EXT, 0x66381518, "(e_vex ext 129)", xx, xx, xx, xx, xx, mrm, x, 129},/*18*/
{E_VEX_EXT, 0x66381718, "(e_vex ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},/*19*/
/* 20 */
{E_VEX_EXT, 0x66382018, "(e_vex ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},/*20*/
{E_VEX_EXT, 0x66382118, "(e_vex ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},/*21*/
{E_VEX_EXT, 0x66382218, "(e_vex ext 6)", xx, xx, xx, xx, xx, mrm, x, 6},/*22*/
{E_VEX_EXT, 0x66382318, "(e_vex ext 7)", xx, xx, xx, xx, xx, mrm, x, 7},/*23*/
{E_VEX_EXT, 0x66382418, "(e_vex ext 8)", xx, xx, xx, xx, xx, mrm, x, 8},/*24*/
{E_VEX_EXT, 0x66382518, "(e_vex ext 9)", xx, xx, xx, xx, xx, mrm, x, 9},/*25*/
{E_VEX_EXT, 0x66382818, "(e_vex ext 10)", xx, xx, xx, xx, xx, mrm, x, 10},/*26*/
{E_VEX_EXT, 0x66382918, "(e_vex ext 11)", xx, xx, xx, xx, xx, mrm, x, 11},/*27*/
{E_VEX_EXT, 0x66382a18, "(e_vex ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},/*28*/
{E_VEX_EXT, 0x66382b18, "(e_vex ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},/*29*/
/* 30 */
{E_VEX_EXT, 0x66383018, "(e_vex ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},/*30*/
{E_VEX_EXT, 0x66383118, "(e_vex ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},/*31*/
{E_VEX_EXT, 0x66383218, "(e_vex ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},/*32*/
{E_VEX_EXT, 0x66383318, "(e_vex ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},/*33*/
{E_VEX_EXT, 0x66383418, "(e_vex ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},/*34*/
{E_VEX_EXT, 0x66383518, "(e_vex ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},/*35*/
{E_VEX_EXT, 0x66383718, "(e_vex ext 20)", xx, xx, xx, xx, xx, mrm, x, 20},/*36*/
{E_VEX_EXT, 0x66383818, "(e_vex ext 21)", xx, xx, xx, xx, xx, mrm, x, 21},/*37*/
{E_VEX_EXT, 0x66383918, "(e_vex ext 22)", xx, xx, xx, xx, xx, mrm, x, 22},/*38*/
{E_VEX_EXT, 0x66383a18, "(e_vex ext 23)", xx, xx, xx, xx, xx, mrm, x, 23},/*39*/
{E_VEX_EXT, 0x66383b18, "(e_vex ext 24)", xx, xx, xx, xx, xx, mrm, x, 24},/*40*/
{E_VEX_EXT, 0x66383c18, "(e_vex ext 25)", xx, xx, xx, xx, xx, mrm, x, 25},/*41*/
{E_VEX_EXT, 0x66383d18, "(e_vex ext 26)", xx, xx, xx, xx, xx, mrm, x, 26},/*42*/
{E_VEX_EXT, 0x66383e18, "(e_vex ext 27)", xx, xx, xx, xx, xx, mrm, x, 27},/*43*/
{E_VEX_EXT, 0x66383f18, "(e_vex ext 28)", xx, xx, xx, xx, xx, mrm, x, 28},/*44*/
/* 40 */
{E_VEX_EXT, 0x66384018, "(e_vex ext 29)", xx, xx, xx, xx, xx, mrm, x, 29},/*45*/
{E_VEX_EXT, 0x66384118, "(e_vex ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},/*46*/
/* f0 */
{PREFIX_EXT, 0x38f018, "(prefix ext 138)", xx, xx, xx, xx, xx, mrm, x, 138},/*47*/
{PREFIX_EXT, 0x38f118, "(prefix ext 139)", xx, xx, xx, xx, xx, mrm, x, 139},/*48*/
/* 80 */
{OP_invept, 0x66388018, "invept", xx, xx, Gr, Mdq, xx, mrm|reqp, x, END_LIST},/*49*/
{OP_invvpid, 0x66388118, "invvpid", xx, xx, Gr, Mdq, xx, mrm|reqp, x, END_LIST},/*50*/
/* db-df */
{E_VEX_EXT, 0x6638db18, "(e_vex ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},/*51*/
{E_VEX_EXT, 0x6638dc18, "(e_vex ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},/*52*/
{E_VEX_EXT, 0x6638dd18, "(e_vex ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},/*53*/
{E_VEX_EXT, 0x6638de18, "(e_vex ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},/*54*/
{E_VEX_EXT, 0x6638df18, "(e_vex ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},/*55*/
/* AVX */
{E_VEX_EXT, 0x66380e18, "(e_vex ext 59)", xx, xx, xx, xx, xx, mrm, x, 59},/*56*/
{E_VEX_EXT, 0x66380f18, "(e_vex ext 60)", xx, xx, xx, xx, xx, mrm, x, 60},/*57*/
/* FMA 96-9f */
{E_VEX_EXT, 0x389618, "(e_vex ext 99)", xx, xx, xx, xx, xx, mrm, x, 99},/*58*/
{E_VEX_EXT, 0x389718, "(e_vex ext 102)", xx, xx, xx, xx, xx, mrm, x, 102},/*59*/
{E_VEX_EXT, 0x389818, "(e_vex ext 93)", xx, xx, xx, xx, xx, mrm, x, 93},/*60*/
{E_VEX_EXT, 0x389918, "(e_vex ext 96)", xx, xx, xx, xx, xx, mrm, x, 96},/*61*/
{E_VEX_EXT, 0x389a18, "(e_vex ext 105)", xx, xx, xx, xx, xx, mrm, x, 105},/*62*/
{E_VEX_EXT, 0x389b18, "(e_vex ext 108)", xx, xx, xx, xx, xx, mrm, x, 108},/*63*/
{E_VEX_EXT, 0x389c18, "(e_vex ext 111)", xx, xx, xx, xx, xx, mrm, x, 111},/*64*/
{E_VEX_EXT, 0x389d18, "(e_vex ext 114)", xx, xx, xx, xx, xx, mrm, x, 114},/*65*/
{E_VEX_EXT, 0x389e18, "(e_vex ext 117)", xx, xx, xx, xx, xx, mrm, x, 117},/*66*/
{E_VEX_EXT, 0x389f18, "(e_vex ext 120)", xx, xx, xx, xx, xx, mrm, x, 120},/*67*/
/* FMA a6-af */
{E_VEX_EXT, 0x38a618, "(e_vex ext 100)", xx, xx, xx, xx, xx, mrm, x, 100},/*68*/
{E_VEX_EXT, 0x38a718, "(e_vex ext 103)", xx, xx, xx, xx, xx, mrm, x, 103},/*69*/
{E_VEX_EXT, 0x38a818, "(e_vex ext 94)", xx, xx, xx, xx, xx, mrm, x, 94},/*70*/
{E_VEX_EXT, 0x38a918, "(e_vex ext 97)", xx, xx, xx, xx, xx, mrm, x, 97},/*71*/
{E_VEX_EXT, 0x38aa18, "(e_vex ext 106)", xx, xx, xx, xx, xx, mrm, x, 106},/*72*/
{E_VEX_EXT, 0x38ab18, "(e_vex ext 109)", xx, xx, xx, xx, xx, mrm, x, 109},/*73*/
{E_VEX_EXT, 0x38ac18, "(e_vex ext 112)", xx, xx, xx, xx, xx, mrm, x, 112},/*74*/
{E_VEX_EXT, 0x38ad18, "(e_vex ext 115)", xx, xx, xx, xx, xx, mrm, x, 115},/*75*/
{E_VEX_EXT, 0x38ae18, "(e_vex ext 118)", xx, xx, xx, xx, xx, mrm, x, 118},/*76*/
{E_VEX_EXT, 0x38af18, "(e_vex ext 121)", xx, xx, xx, xx, xx, mrm, x, 121},/*77*/
/* FMA b6-bf */
{E_VEX_EXT, 0x38b618, "(e_vex ext 101)", xx, xx, xx, xx, xx, mrm, x, 101},/*78*/
{E_VEX_EXT, 0x38b718, "(e_vex ext 104)", xx, xx, xx, xx, xx, mrm, x, 104},/*79*/
{E_VEX_EXT, 0x38b818, "(e_vex ext 95)", xx, xx, xx, xx, xx, mrm, x, 95},/*80*/
{E_VEX_EXT, 0x38b918, "(e_vex ext 98)", xx, xx, xx, xx, xx, mrm, x, 98},/*81*/
{E_VEX_EXT, 0x38ba18, "(e_vex ext 107)", xx, xx, xx, xx, xx, mrm, x, 107},/*82*/
{E_VEX_EXT, 0x38bb18, "(e_vex ext 110)", xx, xx, xx, xx, xx, mrm, x, 110},/*83*/
{E_VEX_EXT, 0x38bc18, "(e_vex ext 113)", xx, xx, xx, xx, xx, mrm, x, 113},/*84*/
{E_VEX_EXT, 0x38bd18, "(e_vex ext 116)", xx, xx, xx, xx, xx, mrm, x, 116},/*85*/
{E_VEX_EXT, 0x38be18, "(e_vex ext 119)", xx, xx, xx, xx, xx, mrm, x, 119},/*86*/
{E_VEX_EXT, 0x38bf18, "(e_vex ext 122)", xx, xx, xx, xx, xx, mrm, x, 122},/*87*/
/* AVX overlooked in original pass */
{E_VEX_EXT, 0x66381318, "(e_vex ext 63)", xx, xx, xx, xx, xx, mrm, x, 63},/*88*/
{E_VEX_EXT, 0x66381818, "(e_vex ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},/*89*/
{E_VEX_EXT, 0x66381918, "(e_vex ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},/*90*/
{E_VEX_EXT, 0x66381a18, "(e_vex ext 66)", xx, xx, xx, xx, xx, mrm, x, 66},/*91*/
{E_VEX_EXT, 0x66382c18, "(e_vex ext 67)", xx, xx, xx, xx, xx, mrm, x, 67},/*92*/
{E_VEX_EXT, 0x66382d18, "(e_vex ext 68)", xx, xx, xx, xx, xx, mrm, x, 68},/*93*/
{E_VEX_EXT, 0x66382e18, "(e_vex ext 69)", xx, xx, xx, xx, xx, mrm, x, 69},/*94*/
{E_VEX_EXT, 0x66382f18, "(e_vex ext 70)", xx, xx, xx, xx, xx, mrm, x, 70},/*95*/
{E_VEX_EXT, 0x66380c18, "(e_vex ext 77)", xx, xx, xx, xx, xx, mrm, x, 77},/*96*/
{E_VEX_EXT, 0x66380d18, "(e_vex ext 78)", xx, xx, xx, xx, xx, mrm, x, 78},/*97*/
/* TBM */
{PREFIX_EXT, 0x38f718, "(prefix ext 141)", xx, xx, xx, xx, xx, mrm, x, 141}, /*98*/
/* BMI1 */
{EXTENSION, 0x38f318, "(group 17)", By, xx, Ey, xx, xx, mrm|vex, x, 31}, /*99*/
/* marked reqp b/c it should have no prefix (prefixes for future opcodes) */
{OP_andn, 0x38f218, "andn", Gy, xx, By, Ey, xx, mrm|vex|reqp, fW6, END_LIST},/*100*/
/* BMI2 */
{PREFIX_EXT, 0x38f518, "(prefix ext 142)", xx, xx, xx, xx, xx, mrm, x, 142}, /*101*/
{PREFIX_EXT, 0x38f618, "(prefix ext 143)", xx, xx, xx, xx, xx, mrm, x, 143}, /*102*/
{OP_invpcid, 0x66388218, "invpcid", xx, xx, Gy, Mdq, xx, mrm|reqp, x, END_LIST},/*103*/
/* AVX2 */
{VEX_W_EXT, 0x66389018, "(vex_W ext 66)", xx, xx, xx, xx, xx, mrm|vex, x, 66},/*104*/
{VEX_W_EXT, 0x66389118, "(vex_W ext 67)", xx, xx, xx, xx, xx, mrm|vex, x, 67},/*105*/
{VEX_W_EXT, 0x66389218, "(vex_W ext 68)", xx, xx, xx, xx, xx, mrm|vex, x, 68},/*106*/
{VEX_W_EXT, 0x66389318, "(vex_W ext 69)", xx, xx, xx, xx, xx, mrm|vex, x, 69},/*107*/
{E_VEX_EXT, 0x66385a18, "(e_vex ext 139)", xx, xx, xx, xx, xx, mrm, x, 139},/*108*/
{VEX_W_EXT, 0x66388c18, "(vex_W ext 70)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 70},/*109*/
{VEX_W_EXT, 0x66388e18, "(vex_W ext 71)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 71},/*110*/
/* Following Intel and not marking as packed float vs ints: just "qq". */
{E_VEX_EXT, 0x66381618, "(e_vex ext 124)", xx, xx, xx, xx, xx, mrm|reqp, x, 124},/*111*/
{E_VEX_EXT, 0x66383618, "(e_vex ext 123)", xx, xx, xx, xx, xx, mrm|reqp, x, 123},/*112*/
{E_VEX_EXT, 0x66384518, "(e_vex ext 133)", xx, xx, xx, xx, xx, mrm|reqp, x, 133},/*113*/
{E_VEX_EXT, 0x66384618, "(e_vex ext 131)", xx, xx, xx, xx, xx, mrm|reqp, x, 131},/*114*/
{E_VEX_EXT, 0x66384718, "(e_vex ext 134)", xx, xx, xx, xx, xx, mrm|reqp, x, 134},/*115*/
{E_VEX_EXT, 0x66387818, "(e_vex ext 135)", xx, xx, xx, xx, xx, mrm|reqp, x, 135},/*116*/
{E_VEX_EXT, 0x66387918, "(e_vex ext 136)", xx, xx, xx, xx, xx, mrm|reqp, x, 136},/*117*/
{E_VEX_EXT, 0x66385818, "(e_vex ext 137)", xx, xx, xx, xx, xx, mrm|reqp, x, 137},/*118*/
{E_VEX_EXT, 0x66385918, "(e_vex ext 138)", xx, xx, xx, xx, xx, mrm|reqp, x, 138},/*119*/
{OP_vpermw, 0x66388d58, "vpermw", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},/*120*/
{EVEX_W_EXT, 0x66387718, "(evex_W ext 94)", xx, xx, xx, xx, xx, mrm|reqp, x, 94},/*121*/
{EVEX_W_EXT, 0x66387618, "(evex_W ext 95)", xx, xx, xx, xx, xx, mrm|reqp, x, 95},/*122*/
{EVEX_W_EXT, 0x66387518, "(evex_W ext 96)", xx, xx, xx, xx, xx, mrm|reqp, x, 96},/*123*/
{EVEX_W_EXT, 0x66387d18, "(evex_W ext 97)", xx, xx, xx, xx, xx, mrm|reqp, x, 97},/*124*/
{EVEX_W_EXT, 0x66387e18, "(evex_W ext 98)", xx, xx, xx, xx, xx, mrm|reqp, x, 98},/*125*/
{EVEX_W_EXT, 0x66387f18, "(evex_W ext 99)", xx, xx, xx, xx, xx, mrm|reqp, x, 99},/*126*/
{PREFIX_EXT, 0x381118, "(prefix ext 171)", xx, xx, xx, xx, xx, mrm, x, 171}, /*127*/
{PREFIX_EXT, 0x381218, "(prefix ext 162)", xx, xx, xx, xx, xx, mrm, x, 162}, /*128*/
{EVEX_W_EXT, 0x66384c18, "(evex_W ext 131)", xx, xx, xx, xx, xx, mrm|reqp, x, 131},/*129*/
{EVEX_W_EXT, 0x66384d18, "(evex_W ext 132)", xx, xx, xx, xx, xx, mrm|reqp, x, 132},/*130*/
{EVEX_W_EXT, 0x6638ca18, "(evex_W ext 133)", xx, xx, xx, xx, xx, mrm|reqp, x, 133},/*131*/
{EVEX_W_EXT, 0x6638cb18, "(evex_W ext 134)", xx, xx, xx, xx, xx, mrm|reqp, x, 134},/*132*/
{EVEX_W_EXT, 0x66381f58, "(evex_W ext 146)", xx, xx, xx, xx, xx, mrm|reqp, x, 146},/*133*/
{EVEX_W_EXT, 0x66381b18, "(evex_W ext 149)", xx, xx, xx, xx, xx, mrm|reqp, x, 149},/*134*/
{OP_vpbroadcastb, 0x66387a18, "vpbroadcastb", Ve, xx, KEq, Ed, xx, mrm|evex|reqp, x, END_LIST},/*135*/
{OP_vpbroadcastw, 0x66387b18, "vpbroadcastw", Ve, xx, KEd, Ed, xx, mrm|evex|reqp, x, END_LIST},/*136*/
{EVEX_W_EXT, 0x66387c18, "(evex_W ext 150)", xx, xx, xx, xx, xx, mrm|reqp, x, 150},/*137*/
{EVEX_W_EXT, 0x66385b18, "(evex_W ext 153)", xx, xx, xx, xx, xx, mrm|reqp, x, 153},/*138*/
{EVEX_W_EXT, 0x66386518, "(evex_W ext 155)", xx, xx, xx, xx, xx, mrm|reqp, x, 155},/*139*/
{EVEX_W_EXT, 0x66388a18, "(evex_W ext 156)", xx, xx, xx, xx, xx, mrm|reqp, x, 156},/*140*/
{EVEX_W_EXT, 0x66388818, "(evex_W ext 157)", xx, xx, xx, xx, xx, mrm|reqp, x, 157},/*141*/
{EVEX_W_EXT, 0x66384218, "(evex_W ext 160)", xx, xx, xx, xx, xx, mrm|reqp, x, 160},/*142*/
{EVEX_W_EXT, 0x66384318, "(evex_W ext 161)", xx, xx, xx, xx, xx, mrm|reqp, x, 161},/*143*/
{EVEX_W_EXT, 0x66386618, "(evex_W ext 164)", xx, xx, xx, xx, xx, mrm|reqp, x, 164},/*144*/
{EVEX_W_EXT, 0x66386418, "(evex_W ext 165)", xx, xx, xx, xx, xx, mrm|reqp, x, 165},/*145*/
{EVEX_W_EXT, 0x66388b18, "(evex_W ext 166)", xx, xx, xx, xx, xx, mrm|reqp, x, 166},/*146*/
{EVEX_W_EXT, 0x66388918, "(evex_W ext 167)", xx, xx, xx, xx, xx, mrm|reqp, x, 167},/*147*/
{PREFIX_EXT, 0x382618, "(prefix ext 182)", xx, xx, xx, xx, xx, mrm, x, 182},/*148*/
{PREFIX_EXT, 0x382718, "(prefix ext 183)", xx, xx, xx, xx, xx, mrm, x, 183},/*149*/
{EVEX_W_EXT, 0x66384e18, "(evex_W ext 176)", xx, xx, xx, xx, xx, mrm|reqp, x, 176},/*150*/
{EVEX_W_EXT, 0x66384f18, "(evex_W ext 177)", xx, xx, xx, xx, xx, mrm|reqp, x, 177},/*151*/
{EVEX_W_EXT, 0x6638cc18, "(evex_W ext 178)", xx, xx, xx, xx, xx, mrm|reqp, x, 178},/*152*/
{EVEX_W_EXT, 0x6638cd18, "(evex_W ext 179)", xx, xx, xx, xx, xx, mrm|reqp, x, 179},/*153*/
{EVEX_W_EXT, 0x6638c818, "(evex_W ext 184)", xx, xx, xx, xx, xx, mrm|reqp, x, 184},/*154*/
{EVEX_W_EXT, 0x6638c418, "(evex_W ext 185)", xx, xx, xx, xx, xx, mrm|reqp, x, 185},/*155*/
{EVEX_W_EXT, 0x66384418, "(evex_W ext 186)", xx, xx, xx, xx, xx, mrm|reqp, x, 186},/*156*/
{OP_vpmadd52luq, 0x6638b458, "vpmadd52luq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},/*157*/
{OP_vpmadd52huq, 0x6638b558, "vpmadd52huq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},/*158*/
};
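/* Illustrative sketch only (hypothetical helper, not the decoder's real code
 * path): the 256-entry third_byte_38_index array maps the third opcode byte to
 * a slot in the dense third_byte_38 table above, so the mostly-empty opcode
 * space costs one byte per entry instead of a full instr_info_t.  b3 is
 * assumed to be the byte following 0x0f 0x38.
 */
static inline const instr_info_t *
example_third_byte_38_lookup(byte b3)
{
    return &third_byte_38[third_byte_38_index[b3]];
}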
/* N.B.: every 0x3a instr so far has an immediate. If a version w/o an immed
* comes along we'll have to add a threebyte_3a_vex_extra[] table to decode_fast.c.
*/
const byte third_byte_3a_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
59,60,61,77, 28,29,30, 0, 6, 7, 8, 9, 10,11,12, 1, /* 0 */
0, 0, 0, 0, 2, 3, 4, 5, 31,32,69,67, 0,33,73,74, /* 1 */
13,14,15,75, 0,88,80,81, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
63,64,65,66, 0, 0, 0, 0, 57,58,70,68, 0, 0,71,72, /* 3 */
16,17,18,76, 23, 0,62, 0, 54,55,25,26, 27, 0, 0, 0, /* 4 */
82,83, 0, 0, 78,79,84,85, 0, 0, 0, 0, 34,35,36,37, /* 5 */
19,20,21,22, 0, 0,86,87, 38,39,40,41, 42,43,44,45, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 46,47,48,49, 50,51,52,53, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,24, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t third_byte_3a[] = {
{INVALID, 0x3aff18, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/**** SSSE3 ****/
{PREFIX_EXT, 0x3a0f18, "(prefix ext 133)", xx, xx, xx, xx, xx, mrm, x, 133}, /* 1*/
/**** SSE4 ****/
{E_VEX_EXT, 0x663a1418, "(e_vex ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},/* 2*/
{E_VEX_EXT, 0x663a1518, "(e_vex ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},/* 3*/
{E_VEX_EXT, 0x663a1618, "(e_vex ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},/* 4*/
{E_VEX_EXT, 0x663a1718, "(e_vex ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},/* 5*/
{E_VEX_EXT, 0x663a0818, "(e_vex ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},/* 6*/
{E_VEX_EXT, 0x663a0918, "(e_vex ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},/* 7*/
{E_VEX_EXT, 0x663a0a18, "(e_vex ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},/* 8*/
{E_VEX_EXT, 0x663a0b18, "(e_vex ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},/* 9*/
{E_VEX_EXT, 0x663a0c18, "(e_vex ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},/*10*/
{E_VEX_EXT, 0x663a0d18, "(e_vex ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},/*11*/
{E_VEX_EXT, 0x663a0e18, "(e_vex ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},/*12*/
/* 20 */
{E_VEX_EXT, 0x663a2018, "(e_vex ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},/*13*/
{E_VEX_EXT, 0x663a2118, "(e_vex ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},/*14*/
{E_VEX_EXT, 0x663a2218, "(e_vex ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},/*15*/
/* 40 */
{E_VEX_EXT, 0x663a4018, "(e_vex ext 50)", xx, xx, xx, xx, xx, mrm, x, 50},/*16*/
{E_VEX_EXT, 0x663a4118, "(e_vex ext 51)", xx, xx, xx, xx, xx, mrm, x, 51},/*17*/
{E_VEX_EXT, 0x663a4218, "(e_vex ext 52)", xx, xx, xx, xx, xx, mrm, x, 52},/*18*/
/* 60 */
{E_VEX_EXT, 0x663a6018, "(e_vex ext 53)", xx, xx, xx, xx, xx, mrm, x, 53},/*19*/
{E_VEX_EXT, 0x663a6118, "(e_vex ext 54)", xx, xx, xx, xx, xx, mrm, x, 54},/*20*/
{E_VEX_EXT, 0x663a6218, "(e_vex ext 55)", xx, xx, xx, xx, xx, mrm, x, 55},/*21*/
{E_VEX_EXT, 0x663a6318, "(e_vex ext 56)", xx, xx, xx, xx, xx, mrm, x, 56},/*22*/
{E_VEX_EXT, 0x663a4418, "(e_vex ext 57)", xx, xx, xx, xx, xx, mrm, x, 57},/*23*/
{E_VEX_EXT, 0x663adf18, "(e_vex ext 58)", xx, xx, xx, xx, xx, mrm, x, 58},/*24*/
/* AVX overlooked in original pass */
{E_VEX_EXT, 0x663a4a18, "(e_vex ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},/*25*/
{E_VEX_EXT, 0x663a4b18, "(e_vex ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},/*26*/
{E_VEX_EXT, 0x663a4c18, "(e_vex ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},/*27*/
{E_VEX_EXT, 0x663a0418, "(e_vex ext 71)", xx, xx, xx, xx, xx, mrm, x, 71},/*28*/
{E_VEX_EXT, 0x663a0518, "(e_vex ext 72)", xx, xx, xx, xx, xx, mrm, x, 72},/*29*/
{E_VEX_EXT, 0x663a0618, "(e_vex ext 73)", xx, xx, xx, xx, xx, mrm, x, 73},/*30*/
{E_VEX_EXT, 0x663a1818, "(e_vex ext 74)", xx, xx, xx, xx, xx, mrm, x, 74},/*31*/
{E_VEX_EXT, 0x663a1918, "(e_vex ext 75)", xx, xx, xx, xx, xx, mrm, x, 75},/*32*/
{E_VEX_EXT, 0x663a1d18, "(e_vex ext 76)", xx, xx, xx, xx, xx, mrm, x, 76},/*33*/
/* FMA4 */
{VEX_W_EXT,0x663a5c18, "(vex_W ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},/*34*/
{VEX_W_EXT,0x663a5d18, "(vex_W ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},/*35*/
{VEX_W_EXT,0x663a5e18, "(vex_W ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},/*36*/
{VEX_W_EXT,0x663a5f18, "(vex_W ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},/*37*/
{VEX_W_EXT,0x663a6818, "(vex_W ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},/*38*/
{VEX_W_EXT,0x663a6918, "(vex_W ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},/*39*/
{VEX_W_EXT,0x663a6a18, "(vex_W ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},/*40*/
{VEX_W_EXT,0x663a6b18, "(vex_W ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},/*41*/
{VEX_W_EXT,0x663a6c18, "(vex_W ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},/*42*/
{VEX_W_EXT,0x663a6d18, "(vex_W ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},/*43*/
{VEX_W_EXT,0x663a6e18, "(vex_W ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},/*44*/
{VEX_W_EXT,0x663a6f18, "(vex_W ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},/*45*/
{VEX_W_EXT,0x663a7818, "(vex_W ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},/*46*/
{VEX_W_EXT,0x663a7918, "(vex_W ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},/*47*/
{VEX_W_EXT,0x663a7a18, "(vex_W ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},/*48*/
{VEX_W_EXT,0x663a7b18, "(vex_W ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},/*49*/
{VEX_W_EXT,0x663a7c18, "(vex_W ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},/*50*/
{VEX_W_EXT,0x663a7d18, "(vex_W ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},/*51*/
{VEX_W_EXT,0x663a7e18, "(vex_W ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},/*52*/
{VEX_W_EXT,0x663a7f18, "(vex_W ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},/*53*/
/* XOP */
{VEX_W_EXT,0x663a4818, "(vex_W ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},/*54*/
{VEX_W_EXT,0x663a4918, "(vex_W ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},/*55*/
/* BMI2 */
{OP_rorx, 0xf23af018, "rorx", Gy, xx, Ey, Ib, xx, mrm|vex|reqp, x, END_LIST},/*56*/
/* AVX2 */
{E_VEX_EXT, 0x663a3818, "(e_vex ext 128)", xx, xx, xx, xx, xx, mrm, x, 128},/*57*/
{E_VEX_EXT, 0x663a3918, "(e_vex ext 127)", xx, xx, xx, xx, xx, mrm, x, 127},/*58*/
{E_VEX_EXT, 0x663a0058, "(e_vex ext 125)", xx, xx, xx, xx, xx, mrm, x, 125},/*59*/
/* Following Intel and not marking as packed float vs ints: just "qq". */
{E_VEX_EXT, 0x663a0158, "(e_vex ext 126)", xx, xx, xx, xx, xx, mrm, x, 126},/*60*/
{OP_vpblendd,0x663a0218,"vpblendd",Vx,xx,Hx,Wx,Ib, mrm|vex|reqp,x,END_LIST},/*61*/
{OP_vperm2i128,0x663a4618,"vperm2i128",Vqq,xx,Hqq,Wqq,Ib, mrm|vex|reqp,x,END_LIST},/*62*/
/* AVX-512 */
{VEX_W_EXT, 0x663a3010, "(vex_W ext 102)", xx, xx, xx, xx, xx, mrm|reqp, x, 102 },/*63*/
{VEX_W_EXT, 0x663a3110, "(vex_W ext 103)", xx, xx, xx, xx, xx, mrm|reqp, x, 103 },/*64*/
{VEX_W_EXT, 0x663a3210, "(vex_W ext 100)", xx, xx, xx, xx, xx, mrm|reqp, x, 100 },/*65*/
{VEX_W_EXT, 0x663a3310, "(vex_W ext 101)", xx, xx, xx, xx, xx, mrm|reqp, x, 101 },/*66*/
{EVEX_W_EXT, 0x663a1b18, "(evex_W ext 101)", xx, xx, xx, xx, xx, mrm, x, 101},/*67*/
{EVEX_W_EXT, 0x663a3b18, "(evex_W ext 103)", xx, xx, xx, xx, xx, mrm, x, 103},/*68*/
{EVEX_W_EXT, 0x663a1a18, "(evex_W ext 105)", xx, xx, xx, xx, xx, mrm, x, 105},/*69*/
{EVEX_W_EXT, 0x663a3a18, "(evex_W ext 107)", xx, xx, xx, xx, xx, mrm, x, 107},/*70*/
{EVEX_W_EXT, 0x663a3e18, "(evex_W ext 108)", xx, xx, xx, xx, xx, mrm, x, 108},/*71*/
{EVEX_W_EXT, 0x663a3f18, "(evex_W ext 109)", xx, xx, xx, xx, xx, mrm, x, 109},/*72*/
{EVEX_W_EXT, 0x663a1e18, "(evex_W ext 110)", xx, xx, xx, xx, xx, mrm, x, 110},/*73*/
{EVEX_W_EXT, 0x663a1f18, "(evex_W ext 111)", xx, xx, xx, xx, xx, mrm, x, 111},/*74*/
{EVEX_W_EXT, 0x663a2318, "(evex_W ext 141)", xx, xx, xx, xx, xx, mrm, x, 141},/*75*/
{EVEX_W_EXT, 0x663a4318, "(evex_W ext 142)", xx, xx, xx, xx, xx, mrm, x, 142},/*76*/
{EVEX_W_EXT, 0x663a0318, "(evex_W ext 154)", xx, xx, xx, xx, xx, mrm, x, 154},/*77*/
{EVEX_W_EXT, 0x663a5418, "(evex_W ext 158)", xx, xx, xx, xx, xx, mrm, x, 158},/*78*/
{EVEX_W_EXT, 0x663a5518, "(evex_W ext 159)", xx, xx, xx, xx, xx, mrm, x, 159},/*79*/
{EVEX_W_EXT, 0x663a2618, "(evex_W ext 162)", xx, xx, xx, xx, xx, mrm, x, 162},/*80*/
{EVEX_W_EXT, 0x663a2718, "(evex_W ext 163)", xx, xx, xx, xx, xx, mrm, x, 163},/*81*/
{EVEX_W_EXT, 0x663a5018, "(evex_W ext 172)", xx, xx, xx, xx, xx, mrm, x, 172},/*82*/
{EVEX_W_EXT, 0x663a5118, "(evex_W ext 173)", xx, xx, xx, xx, xx, mrm, x, 173},/*83*/
{EVEX_W_EXT, 0x663a5618, "(evex_W ext 174)", xx, xx, xx, xx, xx, mrm, x, 174},/*84*/
{EVEX_W_EXT, 0x663a5718, "(evex_W ext 175)", xx, xx, xx, xx, xx, mrm, x, 175},/*85*/
{EVEX_W_EXT, 0x663a6618, "(evex_W ext 182)", xx, xx, xx, xx, xx, mrm, x, 182},/*86*/
{EVEX_W_EXT, 0x663a6718, "(evex_W ext 183)", xx, xx, xx, xx, xx, mrm, x, 183},/*87*/
{EVEX_W_EXT, 0x663a2518, "(evex_W ext 187)", xx, xx, xx, xx, xx, mrm, x, 187},/*88*/
};
/****************************************************************************
* Instructions that differ depending on vex.W
* Index is vex.W value
*/
const instr_info_t vex_W_extensions[][2] = {
{ /* vex_W_ext 0 */
{OP_vfmadd132ps,0x66389818,"vfmadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[62][0]},
{OP_vfmadd132pd,0x66389858,"vfmadd132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[62][1]},
}, { /* vex_W_ext 1 */
{OP_vfmadd213ps,0x6638a818,"vfmadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[63][0]},
{OP_vfmadd213pd,0x6638a858,"vfmadd213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[63][1]},
}, { /* vex_W_ext 2 */
{OP_vfmadd231ps,0x6638b818,"vfmadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[64][0]},
{OP_vfmadd231pd,0x6638b858,"vfmadd231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[64][1]},
}, { /* vex_W_ext 3 */
{OP_vfmadd132ss,0x66389918,"vfmadd132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[65][0]},
{OP_vfmadd132sd,0x66389958,"vfmadd132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[65][1]},
}, { /* vex_W_ext 4 */
{OP_vfmadd213ss,0x6638a918,"vfmadd213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[66][0]},
{OP_vfmadd213sd,0x6638a958,"vfmadd213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[66][1]},
}, { /* vex_W_ext 5 */
{OP_vfmadd231ss,0x6638b918,"vfmadd231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[67][0]},
{OP_vfmadd231sd,0x6638b958,"vfmadd231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[67][1]},
}, { /* vex_W_ext 6 */
{OP_vfmaddsub132ps,0x66389618,"vfmaddsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[68][0]},
{OP_vfmaddsub132pd,0x66389658,"vfmaddsub132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[68][1]},
}, { /* vex_W_ext 7 */
{OP_vfmaddsub213ps,0x6638a618,"vfmaddsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[69][0]},
{OP_vfmaddsub213pd,0x6638a658,"vfmaddsub213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[69][1]},
}, { /* vex_W_ext 8 */
{OP_vfmaddsub231ps,0x6638b618,"vfmaddsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[70][0]},
{OP_vfmaddsub231pd,0x6638b658,"vfmaddsub231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[70][1]},
}, { /* vex_W_ext 9 */
{OP_vfmsubadd132ps,0x66389718,"vfmsubadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[71][0]},
{OP_vfmsubadd132pd,0x66389758,"vfmsubadd132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[71][1]},
}, { /* vex_W_ext 10 */
{OP_vfmsubadd213ps,0x6638a718,"vfmsubadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[72][0]},
{OP_vfmsubadd213pd,0x6638a758,"vfmsubadd213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[72][1]},
}, { /* vex_W_ext 11 */
{OP_vfmsubadd231ps,0x6638b718,"vfmsubadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[73][0]},
{OP_vfmsubadd231pd,0x6638b758,"vfmsubadd231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[73][1]},
}, { /* vex_W_ext 12 */
{OP_vfmsub132ps,0x66389a18,"vfmsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[74][0]},
{OP_vfmsub132pd,0x66389a58,"vfmsub132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[74][1]},
}, { /* vex_W_ext 13 */
{OP_vfmsub213ps,0x6638aa18,"vfmsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[75][0]},
{OP_vfmsub213pd,0x6638aa58,"vfmsub213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[75][1]},
}, { /* vex_W_ext 14 */
{OP_vfmsub231ps,0x6638ba18,"vfmsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[76][0]},
{OP_vfmsub231pd,0x6638ba58,"vfmsub231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[76][1]},
}, { /* vex_W_ext 15 */
{OP_vfmsub132ss,0x66389b18,"vfmsub132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[77][0]},
{OP_vfmsub132sd,0x66389b58,"vfmsub132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[77][1]},
}, { /* vex_W_ext 16 */
{OP_vfmsub213ss,0x6638ab18,"vfmsub213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[78][0]},
{OP_vfmsub213sd,0x6638ab58,"vfmsub213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[78][1]},
}, { /* vex_W_ext 17 */
{OP_vfmsub231ss,0x6638bb18,"vfmsub231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[79][0]},
{OP_vfmsub231sd,0x6638bb58,"vfmsub231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[79][1]},
}, { /* vex_W_ext 18 */
{OP_vfnmadd132ps,0x66389c18,"vfnmadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[80][0]},
{OP_vfnmadd132pd,0x66389c58,"vfnmadd132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[80][1]},
}, { /* vex_W_ext 19 */
{OP_vfnmadd213ps,0x6638ac18,"vfnmadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[81][0]},
{OP_vfnmadd213pd,0x6638ac58,"vfnmadd213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[81][1]},
}, { /* vex_W_ext 20 */
{OP_vfnmadd231ps,0x6638bc18,"vfnmadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[82][0]},
{OP_vfnmadd231pd,0x6638bc58,"vfnmadd231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[82][1]},
}, { /* vex_W_ext 21 */
{OP_vfnmadd132ss,0x66389d18,"vfnmadd132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[83][0]},
{OP_vfnmadd132sd,0x66389d58,"vfnmadd132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[83][1]},
}, { /* vex_W_ext 22 */
{OP_vfnmadd213ss,0x6638ad18,"vfnmadd213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[84][0]},
{OP_vfnmadd213sd,0x6638ad58,"vfnmadd213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[84][1]},
}, { /* vex_W_ext 23 */
{OP_vfnmadd231ss,0x6638bd18,"vfnmadd231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[85][0]},
{OP_vfnmadd231sd,0x6638bd58,"vfnmadd231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[85][1]},
}, { /* vex_W_ext 24 */
{OP_vfnmsub132ps,0x66389e18,"vfnmsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[86][0]},
{OP_vfnmsub132pd,0x66389e58,"vfnmsub132pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[86][1]},
}, { /* vex_W_ext 25 */
{OP_vfnmsub213ps,0x6638ae18,"vfnmsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[87][0]},
{OP_vfnmsub213pd,0x6638ae58,"vfnmsub213pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[87][1]},
}, { /* vex_W_ext 26 */
{OP_vfnmsub231ps,0x6638be18,"vfnmsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,tevexw[88][0]},
{OP_vfnmsub231pd,0x6638be58,"vfnmsub231pd",Vvd,xx,Hvd,Wvd,Vvd,mrm|vex|reqp,x,tevexw[88][1]},
}, { /* vex_W_ext 27 */
{OP_vfnmsub132ss,0x66389f18,"vfnmsub132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[89][0]},
{OP_vfnmsub132sd,0x66389f58,"vfnmsub132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[89][1]},
}, { /* vex_W_ext 28 */
{OP_vfnmsub213ss,0x6638af18,"vfnmsub213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[90][0]},
{OP_vfnmsub213sd,0x6638af58,"vfnmsub213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[90][1]},
}, { /* vex_W_ext 29 */
{OP_vfnmsub231ss,0x6638bf18,"vfnmsub231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,tevexw[91][0]},
{OP_vfnmsub231sd,0x6638bf58,"vfnmsub231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,tevexw[91][1]},
}, { /* vex_W_ext 30 */
{OP_vfmaddsubps,0x663a5c18,"vfmaddsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[30][1]},
{OP_vfmaddsubps,0x663a5c58,"vfmaddsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 31 */
{OP_vfmaddsubpd,0x663a5d18,"vfmaddsubpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[31][1]},
{OP_vfmaddsubpd,0x663a5d58,"vfmaddsubpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 32 */
{OP_vfmsubaddps,0x663a5e18,"vfmsubaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[32][1]},
{OP_vfmsubaddps,0x663a5e58,"vfmsubaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 33 */
{OP_vfmsubaddpd,0x663a5f18,"vfmsubaddpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[33][1]},
{OP_vfmsubaddpd,0x663a5f58,"vfmsubaddpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 34 */
{OP_vfmaddps,0x663a6818,"vfmaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[34][1]},
{OP_vfmaddps,0x663a6858,"vfmaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 35 */
{OP_vfmaddpd,0x663a6918,"vfmaddpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[35][1]},
{OP_vfmaddpd,0x663a6958,"vfmaddpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 36 */
{OP_vfmaddss,0x663a6a18,"vfmaddss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[36][1]},
{OP_vfmaddss,0x663a6a58,"vfmaddss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 37 */
{OP_vfmaddsd,0x663a6b18,"vfmaddsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[37][1]},
{OP_vfmaddsd,0x663a6b58,"vfmaddsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 38 */
{OP_vfmsubps,0x663a6c18,"vfmsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[38][1]},
{OP_vfmsubps,0x663a6c58,"vfmsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 39 */
{OP_vfmsubpd,0x663a6d18,"vfmsubpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[39][1]},
{OP_vfmsubpd,0x663a6d58,"vfmsubpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 40 */
{OP_vfmsubss,0x663a6e18,"vfmsubss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[40][1]},
{OP_vfmsubss,0x663a6e58,"vfmsubss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 41 */
{OP_vfmsubsd,0x663a6f18,"vfmsubsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[41][1]},
{OP_vfmsubsd,0x663a6f58,"vfmsubsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 42 */
{OP_vfnmaddps,0x663a7818,"vfnmaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[42][1]},
{OP_vfnmaddps,0x663a7858,"vfnmaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 43 */
{OP_vfnmaddpd,0x663a7918,"vfnmaddpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[43][1]},
{OP_vfnmaddpd,0x663a7958,"vfnmaddpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 44 */
{OP_vfnmaddss,0x663a7a18,"vfnmaddss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[44][1]},
{OP_vfnmaddss,0x663a7a58,"vfnmaddss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 45 */
{OP_vfnmaddsd,0x663a7b18,"vfnmaddsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[45][1]},
{OP_vfnmaddsd,0x663a7b58,"vfnmaddsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 46 */
{OP_vfnmsubps,0x663a7c18,"vfnmsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[46][1]},
{OP_vfnmsubps,0x663a7c58,"vfnmsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 47 */
{OP_vfnmsubpd,0x663a7d18,"vfnmsubpd",Vvd,xx,Lvd,Wvd,Hvd,mrm|vex|reqp,x,tvexw[47][1]},
{OP_vfnmsubpd,0x663a7d58,"vfnmsubpd",Vvd,xx,Lvd,Hvd,Wvd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 48 */
{OP_vfnmsubss,0x663a7e18,"vfnmsubss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[48][1]},
{OP_vfnmsubss,0x663a7e58,"vfnmsubss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 49 */
{OP_vfnmsubsd,0x663a7f18,"vfnmsubsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[49][1]},
{OP_vfnmsubsd,0x663a7f58,"vfnmsubsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 50 */
{OP_vpcmov, 0x08a218,"vpcmov", Vvs,xx,Hvs,Wvs,Lvs,mrm|vex,x,tvexw[50][1]},
{OP_vpcmov, 0x08a258,"vpcmov", Vvs,xx,Hvs,Lvs,Wvs,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 51 */
{OP_vpperm, 0x08a318,"vpperm", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,tvexw[51][1]},
{OP_vpperm, 0x08a358,"vpperm", Vdq,xx,Hdq,Ldq,Wdq,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 52 */
{OP_vprotb, 0x099018,"vprotb", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[52][1]},
{OP_vprotb, 0x099058,"vprotb", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 53 */
{OP_vprotw, 0x099118,"vprotw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[53][1]},
{OP_vprotw, 0x099158,"vprotw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 54 */
{OP_vprotd, 0x099218,"vprotd", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[54][1]},
{OP_vprotd, 0x099258,"vprotd", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 55 */
{OP_vprotq, 0x099318,"vprotq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[55][1]},
{OP_vprotq, 0x099358,"vprotq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 56 */
{OP_vpshlb, 0x099418,"vpshlb", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[56][1]},
{OP_vpshlb, 0x099458,"vpshlb", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 57 */
{OP_vpshlw, 0x099518,"vpshlw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[57][1]},
{OP_vpshlw, 0x099558,"vpshlw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 58 */
{OP_vpshld, 0x099618,"vpshld", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[58][1]},
{OP_vpshld, 0x099658,"vpshld", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 59 */
{OP_vpshlq, 0x099718,"vpshlq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[59][1]},
{OP_vpshlq, 0x099758,"vpshlq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 60 */
{OP_vpshab, 0x099818,"vpshab", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[60][1]},
{OP_vpshab, 0x099858,"vpshab", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 61 */
{OP_vpshaw, 0x099918,"vpshaw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[61][1]},
{OP_vpshaw, 0x099958,"vpshaw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 62 */
{OP_vpshad, 0x099a18,"vpshad", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[62][1]},
{OP_vpshad, 0x099a58,"vpshad", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 63 */
{OP_vpshaq, 0x099b18,"vpshaq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[63][1]},
{OP_vpshaq, 0x099b58,"vpshaq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 64 */
{OP_vpermil2ps,0x663a4818,"vpermil2ps",Vvs,xx,Hvs,Wvs,Lvs,mrm|vex|reqp,x,tvexw[64][1]},
{OP_vpermil2ps,0x663a4858,"vpermil2ps",Vvs,xx,Hvs,Lvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 65 */
{OP_vpermil2pd,0x663a4918,"vpermil2pd",Vvs,xx,Hvs,Wvs,Lvs,mrm|vex|reqp,x,tvexw[65][1]},
{OP_vpermil2pd,0x663a4958,"vpermil2pd",Vvs,xx,Hvs,Lvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 66 */
/* XXX: OP_v*gather* raise #UD if any pair of the index, mask, or destination
* registers are identical. We don't bother trying to detect that.
*/
{OP_vpgatherdd,0x66389018,"vpgatherdd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpgatherdq,0x66389058,"vpgatherdq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 67 */
{OP_vpgatherqd,0x66389118,"vpgatherqd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpgatherqq,0x66389158,"vpgatherqq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 68 */
{OP_vgatherdps,0x66389218,"vgatherdps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vgatherdpd,0x66389258,"vgatherdpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 69 */
{OP_vgatherqps,0x66389318,"vgatherqps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vgatherqpd,0x66389358,"vgatherqpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 70 */
{OP_vpmaskmovd,0x66388c18,"vpmaskmovd",Vx,xx,Hx,Mx,xx, mrm|vex|reqp|predcx,x,tvexw[71][0]},
{OP_vpmaskmovq,0x66388c58,"vpmaskmovq",Vx,xx,Hx,Mx,xx, mrm|vex|reqp|predcx,x,tvexw[71][1]},
}, { /* vex_W_ext 71 */
/* Conditional store => predcx */
{OP_vpmaskmovd,0x66388e18,"vpmaskmovd",Mx,xx,Vx,Hx,xx, mrm|vex|reqp|predcx,x,END_LIST},
{OP_vpmaskmovq,0x66388e58,"vpmaskmovq",Mx,xx,Vx,Hx,xx, mrm|vex|reqp|predcx,x,END_LIST},
}, { /* vex_W_ext 72 */
{OP_vpsrlvd,0x66384518,"vpsrlvd",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,tevexw[128][0]},
{OP_vpsrlvq,0x66384558,"vpsrlvq",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,tevexw[128][1]},
}, { /* vex_W_ext 73 */
{OP_vpsllvd,0x66384718,"vpsllvd",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,tevexw[130][0]},
{OP_vpsllvq,0x66384758,"vpsllvq",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,tevexw[130][1]},
}, { /* vex_W_ext 74 */
{OP_kmovw,0x0f9010,"kmovw",KPw,xx,KQw,xx,xx, mrm|vex,x,tvexw[76][0]},
{OP_kmovq,0x0f9050,"kmovq",KPq,xx,KQq,xx,xx, mrm|vex,x,tvexw[76][1]},
}, { /* vex_W_ext 75 */
{OP_kmovb,0x660f9010,"kmovb",KPb,xx,KQb,xx,xx, mrm|vex,x,tvexw[77][0]},
{OP_kmovd,0x660f9050,"kmovd",KPd,xx,KQd,xx,xx, mrm|vex,x,tvexw[77][1]},
}, { /* vex_W_ext 76 */
{OP_kmovw,0x0f9110,"kmovw",KQw,xx,KPw,xx,xx, mrm|vex,x,tvexw[78][0]},
{OP_kmovq,0x0f9150,"kmovq",KQq,xx,KPq,xx,xx, mrm|vex,x,tvexw[106][1]},
}, { /* vex_W_ext 77 */
{OP_kmovb,0x660f9110,"kmovb",KQb,xx,KPb,xx,xx, mrm|vex,x,tvexw[79][0]},
{OP_kmovd,0x660f9150,"kmovd",KQd,xx,KPd,xx,xx, mrm|vex,x,tvexw[106][0]},
}, { /* vex_W_ext 78 */
{OP_kmovw,0x0f9210,"kmovw",KPw,xx,Ry,xx,xx, mrm|vex,x,tvexw[80][0]},
{INVALID, 0x0f9250,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 79 */
{OP_kmovb,0x660f9210,"kmovb",KPb,xx,Ry,xx,xx, mrm|vex,x,tvexw[81][0]},
{INVALID, 0x660f9250,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 80 */
{OP_kmovw,0x0f9310,"kmovw", Gd,xx,KRw,xx,xx, mrm|vex,x,END_LIST},
{INVALID, 0x0f9350,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 81 */
{OP_kmovb,0x660f9310,"kmovb",Gd,xx,KRb,xx,xx, mrm|vex,x,END_LIST},
{INVALID, 0x660f9350,"(bad)",xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 82 */
{OP_kandw,0x0f4110,"kandw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kandq,0x0f4150,"kandq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 83 */
{OP_kandb,0x660f4110,"kandb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kandd,0x660f4150,"kandd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 84 */
{OP_kandnw,0x0f4210,"kandnw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kandnq,0x0f4250,"kandnq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 85 */
{OP_kandnb,0x660f4210,"kandnb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kandnd,0x660f4250,"kandnd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 86 */
{OP_kunpckwd,0x0f4b10,"kunpckwd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
{OP_kunpckdq,0x0f4b50,"kunpckdq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 87 */
{OP_kunpckbw,0x660f4b10,"kunpckbw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{INVALID, 0x660f4b50, "(bad)", xx,xx, xx, xx,xx, no,x,NA},
}, { /* vex_W_ext 88 */
{OP_knotw,0x0f4410,"knotw",KPw,xx,KRw,xx,xx, mrm|vex,x,END_LIST},
{OP_knotq,0x0f4450,"knotq",KPq,xx,KRq,xx,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 89 */
{OP_knotb,0x660f4410,"knotb",KPb,xx,KRb,xx,xx, mrm|vex,x,END_LIST},
{OP_knotd,0x660f4450,"knotd",KPd,xx,KRd,xx,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 90 */
{OP_korw,0x0f4510,"korw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_korq,0x0f4550,"korq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 91 */
{OP_korb,0x660f4510,"korb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kord,0x660f4550,"kord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 92 */
{OP_kxnorw,0x0f4610,"kxnorw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kxnorq,0x0f4650,"kxnorq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 93 */
{OP_kxnorb,0x660f4610,"kxnorb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kxnord,0x660f4650,"kxnord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 94 */
{OP_kxorw,0x0f4710,"kxorw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kxorq,0x0f4750,"kxorq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 95 */
{OP_kxorb,0x660f4710,"kxorb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kxord,0x660f4750,"kxord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 96 */
{OP_kaddw,0x0f4a10,"kaddw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kaddq,0x0f4a50,"kaddq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 97 */
{OP_kaddb,0x660f4a10,"kaddb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kaddd,0x660f4a50,"kaddd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 98 */
{OP_kortestw,0x0f9810,"kortestw",KPw,xx,KRw,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
{OP_kortestq,0x0f9850,"kortestq",KPq,xx,KRq,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
}, { /* vex_W_ext 99 */
{OP_kortestb,0x660f9810,"kortestb",KPb,xx,KRb,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
{OP_kortestd,0x660f9850,"kortestd",KPd,xx,KRd,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
}, { /* vex_W_ext 100 */
{OP_kshiftlb,0x663a3208,"kshiftlb",KPb,xx,KRb,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftlw,0x663a3248,"kshiftlw",KPw,xx,KRw,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 101 */
{OP_kshiftld,0x663a3308,"kshiftld",KPd,xx,KRd,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftlq,0x663a3348,"kshiftlq",KPq,xx,KRq,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 102 */
{OP_kshiftrb,0x663a3008,"kshiftrb",KPb,xx,KRb,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftrw,0x663a3048,"kshiftrw",KPw,xx,KRw,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 103 */
{OP_kshiftrd,0x663a3108,"kshiftrd",KPd,xx,KRd,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftrq,0x663a3148,"kshiftrq",KPq,xx,KRq,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 104 */
{OP_ktestw,0x0f9910,"ktestw",KPw,xx,KRw,xx,xx, mrm|vex,fW6,END_LIST},
{OP_ktestq,0x0f9950,"ktestq",KPq,xx,KRq,xx,xx, mrm|vex,fW6,END_LIST},
}, { /* vex_W_ext 105 */
{OP_ktestb,0x660f9910,"ktestb",KPb,xx,KRb,xx,xx, mrm|vex,fW6,END_LIST},
{OP_ktestd,0x660f9950,"ktestd",KPd,xx,KRd,xx,xx, mrm|vex,fW6,END_LIST},
}, { /* vex_W_ext 106 */
{OP_kmovd,0xf20f9210,"kmovd",KPd,xx,Ry,xx,xx, mrm|vex,x,tvexw[107][0]},
{OP_kmovq,0xf20f9250,"kmovq",KPq,xx,Ry,xx,xx, mrm|vex,x,tvexw[107][1]},
}, { /* vex_W_ext 107 */
{OP_kmovd,0xf20f9310,"kmovd", Gd,xx,KRd,xx,xx, mrm|vex,x,END_LIST},
{OP_kmovq,0xf20f9350,"kmovq",Gd_q,xx,KRq,xx,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 108 */
{OP_vmovd, 0x660f6e10, "vmovd", Vdq, xx, Ed, xx, xx, mrm|vex, x, tvexw[109][0]},
{OP_vmovq, 0x660f6e50, "vmovq", Vdq, xx, Ed_q, xx, xx, mrm|vex, x, tvexw[109][1]},
}, { /* vex_W_ext 109 */
{OP_vmovd, 0x660f7e10, "vmovd", Ed, xx, Vd_dq, xx, xx, mrm|vex, x, tevexw[135][0]},
{OP_vmovq, 0x660f7e50, "vmovq", Ed_q, xx, Vq_dq, xx, xx, mrm|vex, x, tevexw[135][1]},
},
};
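/* Illustrative sketch only (hypothetical helper): vex_W_extensions rows are
 * indexed directly by the vex.W bit, e.g. selecting vpgatherdd (W=0) versus
 * vpgatherdq (W=1) in vex_W_ext 66 above.
 */
static inline const instr_info_t *
example_vex_W_lookup(int row, int vex_w)
{
    return &vex_W_extensions[row][vex_w ? 1 : 0];
}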
/****************************************************************************
* Instructions that differ depending on evex.W.
* Index is evex.W value
*/
const instr_info_t evex_W_extensions[][2] = {
{ /* evex_W_ext 0 */
{OP_vmovups, 0x0f1010,"vmovups", Ves,xx,KEd,Wes,xx,mrm|evex,x,tevexw[1][0]},
{INVALID, 0x0f1050,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 1 */
{OP_vmovups, 0x0f1110,"vmovups", Wes,xx,KEd,Ves,xx,mrm|evex,x,END_LIST},
{INVALID, 0x0f1150,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 2 */
{INVALID, 0x660f1010,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovupd, 0x660f1050,"vmovupd", Ved,xx,KEd,Wed,xx,mrm|evex,x,tevexw[3][1]},
}, { /* evex_W_ext 3 */
{INVALID, 0x660f1110,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovupd, 0x660f1150,"vmovupd", Wed,xx,KEd,Ved,xx,mrm|evex,x,END_LIST},
}, { /* evex_W_ext 4 */
{OP_vmovaps, 0x0f2810,"vmovaps", Ves,xx,KEd,Wes,xx,mrm|evex,x,tevexw[5][0]},
{INVALID, 0x0f2850,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 5 */
{OP_vmovaps, 0x0f2910,"vmovaps", Wes,xx,KEd,Ves,xx,mrm|evex,x,END_LIST},
{INVALID, 0x0f2950,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 6 */
{INVALID, 0x660f2810,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovapd, 0x660f2850,"vmovapd", Ved,xx,KEd,Wed,xx,mrm|evex,x,tevexw[7][1]},
}, { /* evex_W_ext 7 */
{INVALID, 0x660f2910,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovapd, 0x660f2950,"vmovapd", Wed,xx,KEd,Ved,xx,mrm|evex,x,END_LIST},
}, { /* evex_W_ext 8 */
{OP_vmovdqa32, 0x660f6f10,"vmovdqa32",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[9][0]},
{OP_vmovdqa64, 0x660f6f50,"vmovdqa64",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[9][1]},
}, { /* evex_W_ext 9 */
{OP_vmovdqa32, 0x660f7f10,"vmovdqa32",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
{OP_vmovdqa64, 0x660f7f50,"vmovdqa64",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
}, { /* evex_W_ext 10 */
{OP_vmovdqu8, 0xf20f6f10,"vmovdqu8",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[12][0]},
{OP_vmovdqu16, 0xf20f6f50,"vmovdqu16",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[12][1]},
}, { /* evex_W_ext 11 */
{OP_vmovdqu32, 0xf30f6f10,"vmovdqu32",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[13][0]},
{OP_vmovdqu64, 0xf30f6f50,"vmovdqu64",Ve,xx,KEw,We,xx,mrm|evex,x,tevexw[13][1]},
}, { /* evex_W_ext 12 */
{OP_vmovdqu8, 0xf20f7f10,"vmovdqu8",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
{OP_vmovdqu16, 0xf20f7f50,"vmovdqu16",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
}, { /* evex_W_ext 13 */
{OP_vmovdqu32, 0xf30f7f10,"vmovdqu32",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
{OP_vmovdqu64, 0xf30f7f50,"vmovdqu64",We,xx,KEw,Ve,xx,mrm|evex,x,END_LIST},
}, { /* evex_W_ext 14 */
{OP_vmovlps, 0x0f1210, "vmovlps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|evex|reqL0|reqLL0, x, tevexw[15][0]}, /*"vmovhlps" if reg-reg */
{INVALID, 0x0f1250,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 15 */
{OP_vmovlps, 0x0f1310, "vmovlps", Mq, xx, Vq_dq, xx, xx, mrm|evex, x, END_LIST},
{INVALID, 0x0f1350,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 16 */
{INVALID, 0x660f1210,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovlpd, 0x660f1250, "vmovlpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|evex|reqL0|reqLL0, x, tevexw[17][1]},
}, { /* evex_W_ext 17 */
{INVALID, 0x660f1310,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovlpd, 0x660f1350, "vmovlpd", Mq, xx, Vq_dq, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 18 */
{OP_vmovsldup,0xf30f1210, "vmovsldup", Ves, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf30f1250,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 19 */
{INVALID, 0xf20f1210,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovddup, 0xf20f1250, "vmovddup", Ved, xx, KEb, Wh_e, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 20 */
{OP_vmovhps, 0x0f1610, "vmovhps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|evex|reqL0|reqLL0, x, tevexw[21][0]}, /*"vmovlhps" if reg-reg */
{INVALID, 0x0f1650,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 21 */
{OP_vmovhps, 0x0f1710, "vmovhps", Mq, xx, Vq_dq, xx, xx, mrm|evex|reqL0|reqLL0, x, END_LIST},
{INVALID, 0x0f1750,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 22 */
{INVALID, 0x660f1610,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovhpd, 0x660f1650, "vmovhpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|evex|reqL0|reqLL0, x, tevexw[23][1]},
}, { /* evex_W_ext 23 */
{INVALID, 0x660f1710,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovhpd, 0x660f1750, "vmovhpd", Mq, xx, Vq_dq, xx, xx, mrm|evex|reqL0|reqLL0, x, END_LIST},
}, { /* evex_W_ext 24 */
{OP_vmovshdup, 0xf30f1610, "vmovshdup", Ves, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
{INVALID, 0xf30f1650,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 25 */
{OP_vunpcklps, 0x0f1410, "vunpcklps", Ves, xx, KEw, Hh_e, Wh_e, mrm|evex, x, END_LIST},
{INVALID, 0x0f1450,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 26 */
{INVALID, 0x660f1410,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vunpcklpd, 0x660f1450, "vunpcklpd", Ved, xx, KEb, Hh_e, Wh_e, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 27 */
{OP_vunpckhps, 0x0f1510, "vunpckhps", Ves, xx, KEw, Hh_e, Wh_e, mrm|evex, x, END_LIST},
{INVALID, 0x0f1550,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 28 */
{INVALID, 0x660f1510,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vunpckhpd, 0x660f1550, "vunpckhpd", Ved, xx, KEb, Hh_e, Wh_e, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 29 */
{OP_vcvtss2si, 0xf30f2d10, "vcvtss2si", Gd, xx, Wss, xx, xx, mrm|evex, x, tevexw[29][1]},
{OP_vcvtss2si, 0xf30f2d50, "vcvtss2si", Gd_q, xx, Wss, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 30 */
{OP_vcvtsd2si, 0xf20f2d10, "vcvtsd2si", Gd, xx, Wsd, xx, xx, mrm|evex, x, tevexw[30][1]},
{OP_vcvtsd2si, 0xf20f2d50, "vcvtsd2si", Gd_q, xx, Wsd, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 31 */
{OP_vcvtsi2ss, 0xf30f2a10, "vcvtsi2ss", Vdq, xx, H12_dq, Ed, xx, mrm|evex, x, tevexw[31][1]},
{OP_vcvtsi2ss, 0xf30f2a50, "vcvtsi2ss", Vdq, xx, H12_dq, Ed_q, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 32 */
{OP_vcvtsi2sd, 0xf20f2a10, "vcvtsi2sd", Vdq, xx, Hsd, Ed, xx, mrm|evex, x, tevexw[32][1]},
{OP_vcvtsi2sd, 0xf20f2a50, "vcvtsi2sd", Vdq, xx, Hsd, Ed_q, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 33 */
{OP_vmovntps, 0x0f2b10, "vmovntps", Mes, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{INVALID, 0x0f2b50,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 34 */
{INVALID, 0x660f2b10,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovntpd, 0x660f2b50, "vmovntpd", Med, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 35 */
{OP_vcvttss2si, 0xf30f2c10, "vcvttss2si", Gd, xx, Wss, xx, xx, mrm|evex, x, tevexw[35][1]},
{OP_vcvttss2si, 0xf30f2c50, "vcvttss2si", Gd_q, xx, Wss, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 36 */
{OP_vcvttsd2si, 0xf20f2c10, "vcvttsd2si", Gd, xx, Wsd, xx, xx, mrm|evex, x, tevexw[36][1]},
{OP_vcvttsd2si, 0xf20f2c50, "vcvttsd2si", Gd_q, xx, Wsd, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 37 */
{OP_vucomiss, 0x0f2e10, "vucomiss", xx, xx, Vss, Wss, xx, mrm|evex, fW6, END_LIST},
{INVALID, 0x0f2e50,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 38 */
{INVALID, 0x660f2e10,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vucomisd, 0x660f2e50, "vucomisd", xx, xx, Vsd, Wsd, xx, mrm|evex, fW6, END_LIST},
}, { /* evex_W_ext 39 */
{OP_vcomiss, 0x0f2f10, "vcomiss", xx, xx, Vss, Wss, xx, mrm|evex, fW6, END_LIST},
{INVALID, 0x0f2f50,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 40 */
{INVALID, 0x660f2f10,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vcomisd, 0x660f2f50, "vcomisd", xx, xx, Vsd, Wsd, xx, mrm|evex, fW6, END_LIST},
}, { /* evex_W_ext 41 */
{OP_vpandd, 0x660fdb10, "vpandd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vpandq, 0x660fdb50, "vpandq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 42 */
{OP_vpandnd, 0x660fdf10, "vpandnd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vpandnq, 0x660fdf50, "vpandnq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 43 */
{OP_vpord, 0x660feb10, "vpord", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vporq, 0x660feb50, "vporq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 44 */
{OP_vpxord, 0x660fef10, "vpxord", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vpxorq, 0x660fef50, "vpxorq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 45 */
{OP_vpmulld, 0x66384018, "vpmulld", Ve, xx, KEw,He,We, mrm|evex|reqp, x, END_LIST},
{OP_vpmullq, 0x66384058, "vpmullq", Ve, xx, KEb,He,We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 46 */
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtps2qq, 0x660f7b10, "vcvtps2qq", Ve, xx, KEb, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvtpd2qq, 0x660f7b50, "vcvtpd2qq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 47 */
{OP_vcvtps2udq, 0x0f7910, "vcvtps2udq", Ve, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvtpd2udq, 0x0f7950, "vcvtpd2udq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 48 */
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtps2uqq, 0x660f7910, "vcvtps2uqq", Ve, xx, KEb, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvtpd2uqq, 0x660f7950, "vcvtpd2uqq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 49 */
{OP_vcvttps2udq, 0x0f7810, "vcvttps2udq", Ve, xx, KEw, Wes, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvttpd2udq, 0x0f7850, "vcvttpd2udq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 50 */
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvttps2qq,0x660f7a10, "vcvttps2qq", Ve, xx, KEb, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvttpd2qq,0x660f7a50, "vcvttpd2qq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 51 */
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvttps2uqq, 0x660f7810, "vcvttps2uqq", Ve, xx, KEb, Wes, xx, mrm|evex, x, END_LIST},
{OP_vcvttpd2uqq, 0x660f7850, "vcvttpd2uqq", Ve, xx, KEb, Wed, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 52 */
{OP_vcvtss2usi, 0xf30f7910, "vcvtss2usi", Gd, xx, Wss, xx, xx, mrm|evex, x, tevexw[52][1]},
{OP_vcvtss2usi, 0xf30f7950, "vcvtss2usi", Gd_q, xx, Wss, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 53 */
{OP_vcvtsd2usi, 0xf20f7910, "vcvtsd2usi", Gd, xx, Wsd, xx, xx, mrm|evex, x, tevexw[53][1]},
{OP_vcvtsd2usi, 0xf20f7950, "vcvtsd2usi", Gd_q, xx, Wsd, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 54 */
{OP_vcvttss2usi, 0xf30f7810, "vcvttss2usi", Gd, xx, Wss, xx, xx, mrm|evex, x, tevexw[54][1]},
{OP_vcvttss2usi, 0xf30f7850, "vcvttss2usi", Gd_q, xx, Wss, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 55 */
{OP_vcvttsd2usi, 0xf20f7810, "vcvttsd2usi", Gd, xx, Wsd, xx, xx, mrm|evex, x, tevexw[55][1]},
{OP_vcvttsd2usi, 0xf20f7850, "vcvttsd2usi", Gd_q, xx, Wsd, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 56 */
{OP_vcvtdq2ps, 0x0f5b10, "vcvtdq2ps", Ves, xx, KEw, We, xx, mrm|evex, x, END_LIST},
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtqq2ps, 0x0f5b50, "vcvtqq2ps", Ves, xx, KEb, We, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 57 */
/* XXX i#3639: tools tend to accept different source/destination register mnemonics.
* This also affects the existing VEX version if it exists.
*/
{OP_vcvtdq2pd, 0xf30fe610, "vcvtdq2pd", Ved, xx, KEb, We, xx, mrm|evex, x, END_LIST},
{OP_vcvtqq2pd, 0xf30fe650, "vcvtqq2pd", Ved, xx, KEb, We, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 58 */
{OP_vcvtusi2ss, 0xf30f7b10, "vcvtusi2ss", Vdq, xx, H12_dq, Ed, xx, mrm|evex, x, tevexw[58][1]},
{OP_vcvtusi2ss, 0xf30f7b50, "vcvtusi2ss", Vdq, xx, H12_dq, Ed_q, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 59 */
{OP_vcvtusi2sd, 0xf20f7b10, "vcvtusi2sd", Vdq, xx, Hsd, Ed, xx, mrm|evex, x, tevexw[59][1]},
{OP_vcvtusi2sd, 0xf20f7b50, "vcvtusi2sd", Vdq, xx, Hsd, Ed_q, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 60 */
{OP_vcvtudq2ps, 0xf20f7a10, "vcvtudq2ps", Ve, xx, KEw, We, xx, mrm|evex, x, END_LIST},
{OP_vcvtuqq2ps, 0xf20f7a50, "vcvtuqq2ps", Ve, xx, KEb, We, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 61 */
{OP_vcvtudq2pd, 0xf30f7a10, "vcvtudq2pd", Ve, xx, KEb, We, xx, mrm|evex, x, END_LIST},
{OP_vcvtuqq2pd, 0xf30f7a50, "vcvtuqq2pd", Ve, xx, KEb, We, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 62 */
{OP_vfmadd132ps,0x66389818,"vfmadd132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[14]},
{OP_vfmadd132pd,0x66389858,"vfmadd132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[15]},
}, { /* evex_W_ext 63 */
{OP_vfmadd213ps,0x6638a818,"vfmadd213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[16]},
{OP_vfmadd213pd,0x6638a858,"vfmadd213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[17]},
}, { /* evex_W_ext 64 */
{OP_vfmadd231ps,0x6638b818,"vfmadd231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[18]},
{OP_vfmadd231pd,0x6638b858,"vfmadd231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[19]},
}, { /* evex_W_ext 65 */
{OP_vfmadd132ss,0x66389918,"vfmadd132ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[20]},
{OP_vfmadd132sd,0x66389958,"vfmadd132sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[21]},
}, { /* evex_W_ext 66 */
{OP_vfmadd213ss,0x6638a918,"vfmadd213ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[22]},
{OP_vfmadd213sd,0x6638a958,"vfmadd213sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[23]},
}, { /* evex_W_ext 67 */
{OP_vfmadd231ss,0x6638b918,"vfmadd231ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[24]},
{OP_vfmadd231sd,0x6638b958,"vfmadd231sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[25]},
}, { /* evex_W_ext 68 */
{OP_vfmaddsub132ps,0x66389618,"vfmaddsub132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[26]},
{OP_vfmaddsub132pd,0x66389658,"vfmaddsub132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[27]},
}, { /* evex_W_ext 69 */
{OP_vfmaddsub213ps,0x6638a618,"vfmaddsub213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[28]},
{OP_vfmaddsub213pd,0x6638a658,"vfmaddsub213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[29]},
}, { /* evex_W_ext 70 */
{OP_vfmaddsub231ps,0x6638b618,"vfmaddsub231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[30]},
{OP_vfmaddsub231pd,0x6638b658,"vfmaddsub231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[31]},
}, { /* evex_W_ext 71 */
{OP_vfmsubadd132ps,0x66389718,"vfmsubadd132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[32]},
{OP_vfmsubadd132pd,0x66389758,"vfmsubadd132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[33]},
}, { /* evex_W_ext 72 */
{OP_vfmsubadd213ps,0x6638a718,"vfmsubadd213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[34]},
{OP_vfmsubadd213pd,0x6638a758,"vfmsubadd213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[35]},
}, { /* evex_W_ext 73 */
{OP_vfmsubadd231ps,0x6638b718,"vfmsubadd231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[36]},
{OP_vfmsubadd231pd,0x6638b758,"vfmsubadd231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[37]},
}, { /* evex_W_ext 74 */
{OP_vfmsub132ps,0x66389a18,"vfmsub132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[38]},
{OP_vfmsub132pd,0x66389a58,"vfmsub132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[39]},
}, { /* evex_W_ext 75 */
{OP_vfmsub213ps,0x6638aa18,"vfmsub213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[40]},
{OP_vfmsub213pd,0x6638aa58,"vfmsub213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[41]},
}, { /* evex_W_ext 76 */
{OP_vfmsub231ps,0x6638ba18,"vfmsub231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[42]},
{OP_vfmsub231pd,0x6638ba58,"vfmsub231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[43]},
}, { /* evex_W_ext 77 */
{OP_vfmsub132ss,0x66389b18,"vfmsub132ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[44]},
{OP_vfmsub132sd,0x66389b58,"vfmsub132sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[45]},
}, { /* evex_W_ext 78 */
{OP_vfmsub213ss,0x6638ab18,"vfmsub213ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[46]},
{OP_vfmsub213sd,0x6638ab58,"vfmsub213sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[47]},
}, { /* evex_W_ext 79 */
{OP_vfmsub231ss,0x6638bb18,"vfmsub231ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[48]},
{OP_vfmsub231sd,0x6638bb58,"vfmsub231sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[49]},
}, { /* evex_W_ext 80 */
{OP_vfnmadd132ps,0x66389c18,"vfnmadd132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[50]},
{OP_vfnmadd132pd,0x66389c58,"vfnmadd132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[51]},
}, { /* evex_W_ext 81 */
{OP_vfnmadd213ps,0x6638ac18,"vfnmadd213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[52]},
{OP_vfnmadd213pd,0x6638ac58,"vfnmadd213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[53]},
}, { /* evex_W_ext 82 */
{OP_vfnmadd231ps,0x6638bc18,"vfnmadd231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[54]},
{OP_vfnmadd231pd,0x6638bc58,"vfnmadd231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[55]},
}, { /* evex_W_ext 83 */
{OP_vfnmadd132ss,0x66389d18,"vfnmadd132ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[56]},
{OP_vfnmadd132sd,0x66389d58,"vfnmadd132sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[57]},
}, { /* evex_W_ext 84 */
{OP_vfnmadd213ss,0x6638ad18,"vfnmadd213ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[58]},
{OP_vfnmadd213sd,0x6638ad58,"vfnmadd213sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[59]},
}, { /* evex_W_ext 85 */
{OP_vfnmadd231ss,0x6638bd18,"vfnmadd231ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[60]},
{OP_vfnmadd231sd,0x6638bd58,"vfnmadd231sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[61]},
}, { /* evex_W_ext 86 */
{OP_vfnmsub132ps,0x66389e18,"vfnmsub132ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[62]},
{OP_vfnmsub132pd,0x66389e58,"vfnmsub132pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[63]},
}, { /* evex_W_ext 87 */
{OP_vfnmsub213ps,0x6638ae18,"vfnmsub213ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[64]},
{OP_vfnmsub213pd,0x6638ae58,"vfnmsub213pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[65]},
}, { /* evex_W_ext 88 */
{OP_vfnmsub231ps,0x6638be18,"vfnmsub231ps",Ves,xx,KEw,Hes,Wes,xop|mrm|evex|reqp,x,exop[66]},
{OP_vfnmsub231pd,0x6638be58,"vfnmsub231pd",Ved,xx,KEb,Hed,Wed,xop|mrm|evex|reqp,x,exop[67]},
}, { /* evex_W_ext 89 */
{OP_vfnmsub132ss,0x66389f18,"vfnmsub132ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[68]},
{OP_vfnmsub132sd,0x66389f58,"vfnmsub132sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[69]},
}, { /* evex_W_ext 90 */
{OP_vfnmsub213ss,0x6638af18,"vfnmsub213ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[70]},
{OP_vfnmsub213sd,0x6638af58,"vfnmsub213sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[71]},
}, { /* evex_W_ext 91 */
{OP_vfnmsub231ss,0x6638bf18,"vfnmsub231ss",Vss,xx,KE1b,Hss,Wss,xop|mrm|evex|reqp,x,exop[72]},
{OP_vfnmsub231sd,0x6638bf58,"vfnmsub231sd",Vsd,xx,KE1b,Hsd,Wsd,xop|mrm|evex|reqp,x,exop[73]},
}, { /* evex_W_ext 92 */
{OP_vpermd,0x66383618,"vpermd",Vf,xx,KEw,Hf,Wf,mrm|evex|reqp,x,END_LIST},
{OP_vpermq,0x66383658,"vpermq",Vf,xx,KEb,Hf,Wf,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 93 */
{OP_vpermps,0x66381618,"vpermps",Vf,xx,KEw,Hf,Wf,mrm|evex|reqp,x,END_LIST},
{OP_vpermpd,0x66381658,"vpermpd",Vf,xx,KEb,Hf,Wf,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 94 */
{OP_vpermi2ps,0x66387718,"vpermi2ps",Ve,xx,KEw,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermi2pd,0x66387758,"vpermi2pd",Ve,xx,KEb,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 95 */
{OP_vpermi2d,0x66387618,"vpermi2d",Ve,xx,KEw,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermi2q,0x66387658,"vpermi2q",Ve,xx,KEb,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 96 */
{OP_vpermi2b,0x66387518,"vpermi2b",Ve,xx,KEq,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermi2w,0x66387558,"vpermi2w",Ve,xx,KEd,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 97 */
{OP_vpermt2b,0x66387d18,"vpermt2b",Ve,xx,KEq,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermt2w,0x66387d58,"vpermt2w",Ve,xx,KEd,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 98 */
{OP_vpermt2d,0x66387e18,"vpermt2d",Ve,xx,KEw,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermt2q,0x66387e58,"vpermt2q",Ve,xx,KEb,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 99 */
{OP_vpermt2ps,0x66387f18,"vpermt2ps",Ve,xx,KEw,He,We,mrm|evex|reqp,x,END_LIST},
{OP_vpermt2pd,0x66387f58,"vpermt2pd",Ve,xx,KEb,He,We,mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 100 */
{OP_vextractf32x4, 0x663a1918, "vextractf32x4", Wdq, xx, KE4b, Ib, Vdq_f, mrm|evex|reqp, x, END_LIST},
{OP_vextractf64x2, 0x663a1958, "vextractf64x2", Wdq, xx, KE2b, Ib, Vdq_f, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 101 */
{OP_vextractf32x8, 0x663a1b18, "vextractf32x8", Wqq, xx, KEb, Ib, Vqq_oq, mrm|evex|reqp, x, END_LIST},
{OP_vextractf64x4, 0x663a1b58, "vextractf64x4", Wqq, xx, KE4b, Ib, Vqq_oq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 102 */
{OP_vextracti32x4, 0x663a3918, "vextracti32x4", Wdq, xx, KE4b, Ib, Vdq_f, mrm|evex|reqp, x, END_LIST},
{OP_vextracti64x2, 0x663a3958, "vextracti64x2", Wdq, xx, KE2b, Ib, Vdq_f, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 103 */
{OP_vextracti32x8, 0x663a3b18, "vextracti32x8", Wqq, xx, KEb, Ib, Vqq_oq, mrm|evex|reqp, x, END_LIST},
{OP_vextracti64x4, 0x663a3b58, "vextracti64x4", Wqq, xx, KE4b, Ib, Vqq_oq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 104 */
{OP_vinsertf32x4, 0x663a1818, "vinsertf32x4", Vf, xx, KEw, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[74]},
{OP_vinsertf64x2, 0x663a1858, "vinsertf64x2", Vf, xx, KEb, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[75]},
}, { /* evex_W_ext 105 */
{OP_vinsertf32x8, 0x663a1a18, "vinsertf32x8", Voq, xx, KEw, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[76]},
{OP_vinsertf64x4, 0x663a1a58, "vinsertf64x4", Voq, xx, KEb, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[77]},
}, { /* evex_W_ext 106 */
{OP_vinserti32x4, 0x663a3818, "vinserti32x4", Vf, xx, KEw, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[78]},
{OP_vinserti64x2, 0x663a3858, "vinserti64x2", Vf, xx, KEb, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[79]},
}, { /* evex_W_ext 107 */
{OP_vinserti32x8, 0x663a3a18, "vinserti32x8", Voq, xx, KEw, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[80]},
{OP_vinserti64x4, 0x663a3a58, "vinserti64x4", Voq, xx, KEb, Ib, Hdq_f, xop|mrm|evex|reqp, x, exop[81]},
}, { /* evex_W_ext 108 */
{OP_vpcmpub, 0x663a3e18, "vpcmpub", KPq, xx, KEq, Ib, He, xop|evex|mrm|reqp, x, exop[82]},
{OP_vpcmpuw, 0x663a3e58, "vpcmpuw", KPd, xx, KEd, Ib, He, xop|evex|mrm|reqp, x, exop[84]},
}, { /* evex_W_ext 109 */
{OP_vpcmpb, 0x663a3f18, "vpcmpb", KPq, xx, KEq, Ib, He, xop|evex|mrm|reqp, x, exop[83]},
{OP_vpcmpw, 0x663a3f58, "vpcmpw", KPd, xx, KEd, Ib, He, xop|evex|mrm|reqp, x, exop[85]},
}, { /* evex_W_ext 110 */
{OP_vpcmpud, 0x663a1e18, "vpcmpud", KPw, xx, KEw, Ib, He, xop|evex|mrm|reqp, x, exop[86]},
{OP_vpcmpuq, 0x663a1e58, "vpcmpuq", KPb, xx, KEb, Ib, He, xop|evex|mrm|reqp, x, exop[87]},
}, { /* evex_W_ext 111 */
{OP_vpcmpd, 0x663a1f18, "vpcmpd", KPw, xx, KEw, Ib, He, xop|evex|mrm|reqp, x, exop[88]},
{OP_vpcmpq, 0x663a1f58, "vpcmpq", KPb, xx, KEb, Ib, He, xop|evex|mrm|reqp, x, exop[89]},
}, { /* evex_W_ext 112 */
{OP_vpminsd, 0x66383918, "vpminsd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vpminsq, 0x66383958, "vpminsq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 113 */
{OP_vpmaxsd, 0x66383d18, "vpmaxsd", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpmaxsq, 0x66383d58, "vpmaxsq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 114 */
{OP_vpminud, 0x66383b18, "vpminud", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpminuq, 0x66383b58, "vpminuq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 115 */
{OP_vpmaxud, 0x66383f18, "vpmaxud", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpmaxuq, 0x66383f58, "vpmaxuq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 116 */
{OP_vprolvd, 0x66381518, "vprolvd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vprolvq, 0x66381558, "vprolvq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 117 */
{OP_vprold, 0x660f7231, "vprold", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{OP_vprolq, 0x660f7271, "vprolq", He, xx, KEb, Ib, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 118 */
{OP_vprorvd, 0x66381418, "vprorvd", Ve, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vprorvq, 0x66381458, "vprorvq", Ve, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 119 */
{OP_vprord, 0x660f7230, "vprord", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{OP_vprorq, 0x660f7270, "vprorq", He, xx, KEb, Ib, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 120 */
{OP_vpsrad, 0x660fe210, "vpsrad", Ve, xx, KEw, He, We, mrm|evex, x, tevexw[121][0]},
{OP_vpsraq, 0x660fe250, "vpsraq", Ve, xx, KEb, He, We, mrm|evex, x, tevexw[121][1]},
}, { /* evex_W_ext 121 */
{OP_vpsrad, 0x660f7234, "vpsrad", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{OP_vpsraq, 0x660f7274, "vpsraq", He, xx, KEb, Ib, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 122 */
{OP_vpsrld, 0x660fd210, "vpsrld", Ve, xx, KEw, He, We, mrm|evex, x, tevexw[123][0]},
{INVALID, 0x660fd250,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 123 */
{OP_vpsrld, 0x660f7232, "vpsrld", He, xx, KEw, Ib, We, mrm|evex, x, END_LIST},
{INVALID, 0x660f7272,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 124 */
{INVALID, 0x660fd310,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vpsrlq, 0x660fd350, "vpsrlq", Ve, xx, KEb, He, We, mrm|evex, x, tevexw[125][1]},
}, { /* evex_W_ext 125 */
{INVALID, 0x660f7332,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vpsrlq, 0x660f7372, "vpsrlq", He, xx, KEb, Ib, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 126 */
{INVALID, 0x66381118,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vpsravw, 0x66381158, "vpsravw", Ve, xx, KEd, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 127 */
{OP_vpsravd, 0x66384618, "vpsravd", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpsravq, 0x66384658, "vpsravq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 128 */
{OP_vpsrlvd,0x66384518, "vpsrlvd", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpsrlvq,0x66384558, "vpsrlvq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 129 */
{INVALID, 0x66381218,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vpsllvw, 0x66381258,"vpsllvw", Ve, xx, KEd, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 130 */
{OP_vpsllvd, 0x66384718, "vpsllvd", Ve, xx, KEw, He, We, mrm|evex|reqp,x,END_LIST},
{OP_vpsllvq, 0x66384758, "vpsllvq", Ve, xx, KEb, He, We, mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 131 */
{OP_vrcp14ps, 0x66384c18, "vrcp14ps", Ve, xx, KEw, We, xx, mrm|evex|reqp,x,END_LIST},
{OP_vrcp14pd, 0x66384c58, "vrcp14pd", Ve, xx, KEb, We, xx, mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 132 */
{OP_vrcp14ss, 0x66384d18, "vrcp14ss", Vdq, xx, KE1b, H12_dq, Wss, mrm|evex|reqp,x,END_LIST},
{OP_vrcp14sd, 0x66384d58, "vrcp14sd", Vdq, xx, KE1b, Hsd, Wsd, mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 133 */
{OP_vrcp28ps, 0x6638ca18, "vrcp28ps", Voq, xx, KEw, Woq, xx, mrm|evex|reqp,x,END_LIST},
{OP_vrcp28pd, 0x6638ca58, "vrcp28pd", Voq, xx, KEb, Woq, xx, mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 134 */
{OP_vrcp28ss, 0x6638cb18, "vrcp28ss", Vdq, xx, KE1b, H12_dq, Wss, mrm|evex|reqp,x,END_LIST},
{OP_vrcp28sd, 0x6638cb58, "vrcp28sd", Vdq, xx, KE1b, Hsd, Wsd, mrm|evex|reqp,x,END_LIST},
}, { /* evex_W_ext 135 */
{OP_vmovd, 0x660f6e10, "vmovd", Vdq, xx, Ed, xx, xx, mrm|evex, x, tevexw[136][0]},
{OP_vmovq, 0x660f6e50, "vmovq", Vdq, xx, Ed_q, xx, xx, mrm|evex, x, tevexw[136][1]},
}, { /* evex_W_ext 136 */
{OP_vmovd, 0x660f7e10, "vmovd", Ed, xx, Vd_dq, xx, xx, mrm|evex, x, END_LIST},
{OP_vmovq, 0x660f7e50, "vmovq", Ed_q, xx, Vq_dq, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 137 */
{OP_vpmovm2b, 0xf3382818, "vpmovm2b", Ve, xx, KQq, xx, xx, mrm|evex, x, END_LIST},
{OP_vpmovm2w, 0xf3382858, "vpmovm2w", Ve, xx, KQd, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 138 */
{OP_vpmovm2d, 0xf3383818, "vpmovm2d", Ve, xx, KQw, xx, xx, mrm|evex, x, END_LIST},
{OP_vpmovm2q, 0xf3383858, "vpmovm2q", Ve, xx, KQb, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 139 */
{OP_vpmovb2m, 0xf3382918, "vpmovb2m", KPq, xx, Ue, xx, xx, mrm|evex, x, END_LIST},
{OP_vpmovw2m, 0xf3382958, "vpmovw2m", KPd, xx, Ue, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 140 */
{OP_vpmovd2m, 0xf3383918, "vpmovd2m", KPw, xx, Ue, xx, xx, mrm|evex, x, END_LIST},
{OP_vpmovq2m, 0xf3383958, "vpmovq2m", KPb, xx, Ue, xx, xx, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 141 */
{OP_vshuff32x4, 0x663a2318, "vshuff32x4", Vfs, xx, KEw, Ib, Hfs, xop|mrm|evex|reqp, x, exop[96]},
{OP_vshuff64x2, 0x663a2358, "vshuff64x2", Vfd, xx, KEb, Ib, Hfd, xop|mrm|evex|reqp, x, exop[97]},
}, { /* evex_W_ext 142 */
{OP_vshufi32x4, 0x663a4318, "vshufi32x4", Vfs, xx, KEw, Ib, Hfs, xop|mrm|evex|reqp, x, exop[98]},
{OP_vshufi64x2, 0x663a4358, "vshufi64x2", Vfd, xx, KEb, Ib, Hfd, xop|mrm|evex|reqp, x, exop[99]},
}, { /* evex_W_ext 143 */
{OP_vpinsrd, 0x663a2218, "vpinsrd", Vdq, xx, H12_8_dq, Ed_q, Ib, mrm|evex|reqp, x, END_LIST},
{OP_vpinsrq, 0x663a2258, "vpinsrq", Vdq, xx, H12_8_dq, Ed_q, Ib, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 144 */
{OP_vpextrd, 0x663a1618, "vpextrd", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpextrq, 0x663a1658, "vpextrq", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 145 */
{OP_vpabsd, 0x66381e18, "vpabsd", Ve, xx, KEw, We, xx, mrm|evex, x, END_LIST},
{INVALID, 0x66381e58,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
}, { /* evex_W_ext 146 */
{INVALID, 0x66381f18,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vpabsq, 0x66381f58, "vpabsq", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 147 */
{OP_vbroadcastf32x2, 0x66381918, "vbroadcastf32x2", Vf, xx, KEb, Wq_dq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vbroadcastsd, 0x66381958, "vbroadcastsd", Vf, xx, KEb, Wq_dq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 148 */
{OP_vbroadcastf32x4, 0x66381a18, "vbroadcastf32x4", Vf, xx, KEw, Mdq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vbroadcastf64x2, 0x66381a58, "vbroadcastf64x2", Vf, xx, KEb, Mdq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 149 */
{OP_vbroadcastf32x8, 0x66381b18, "vbroadcastf32x8", Voq, xx, KEw, Mqq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vbroadcastf64x4, 0x66381b58, "vbroadcastf64x4", Voq, xx, KEb, Mqq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 150 */
{OP_vpbroadcastd, 0x66387c18, "vpbroadcastd", Ve, xx, KEw, Ed, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpbroadcastq, 0x66387c58, "vpbroadcastq", Ve, xx, KEb, Eq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 151 */
{OP_vbroadcasti32x2, 0x66385918, "vbroadcasti32x2", Ve, xx, KEb, Wq_dq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpbroadcastq, 0x66385958, "vpbroadcastq", Ve, xx, KEb, Wq_dq, xx, mrm|evex|reqp, x, tevexw[150][1]},
}, { /* evex_W_ext 152 */
{OP_vbroadcasti32x4, 0x66385a18, "vbroadcasti32x4", Vf, xx, KEw, Mdq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vbroadcasti64x2, 0x66385a58, "vbroadcasti64x2", Vf, xx, KEb, Mdq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 153 */
{OP_vbroadcasti32x8, 0x66385b18, "vbroadcasti32x8", Vf, xx, KEw, Mqq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vbroadcasti64x4, 0x66385b58, "vbroadcasti64x4", Vf, xx, KEb, Mqq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 154 */
{OP_valignd, 0x663a0318, "valignd", Ve, xx, KEw, Ib, He, xop|mrm|evex|reqp, x, exop[101]},
{OP_valignq, 0x663a0358, "valignq", Ve, xx, KEb, Ib, He, xop|mrm|evex|reqp, x, exop[102]},
}, { /* evex_W_ext 155 */
{OP_vblendmps, 0x66386518, "vblendmps", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vblendmpd, 0x66386558, "vblendmpd", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 156 */
{OP_vcompressps, 0x66388a18, "vcompressps", We, xx, KEw, Ve, xx, mrm|evex|reqp, x, END_LIST},
{OP_vcompresspd, 0x66388a58, "vcompresspd", We, xx, KEb, Ve, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 157 */
{OP_vexpandps, 0x66388818, "vexpandps", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vexpandpd, 0x66388858, "vexpandpd", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 158 */
{OP_vfixupimmps, 0x663a5418, "vfixupimmps", Ve, xx, KEw, Ib, He, xop|mrm|evex|reqp, x, exop[103]},
{OP_vfixupimmpd, 0x663a5458, "vfixupimmpd", Ve, xx, KEb, Ib, He, xop|mrm|evex|reqp, x, exop[104]},
}, { /* evex_W_ext 159 */
{OP_vfixupimmss, 0x663a5518, "vfixupimmss", Vdq, xx, KE1b, Ib, Hdq, xop|mrm|evex|reqp, x, exop[105]},
{OP_vfixupimmsd, 0x663a5558, "vfixupimmsd", Vdq, xx, KE1b, Ib, Hdq, xop|mrm|evex|reqp, x, exop[106]},
}, { /* evex_W_ext 160 */
{OP_vgetexpps, 0x66384218, "vgetexpps", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vgetexppd, 0x66384258, "vgetexppd", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 161 */
{OP_vgetexpss, 0x66384318, "vgetexpss", Vdq, xx, KE1b, H12_dq, Wd_dq, mrm|evex|reqp, x, END_LIST},
{OP_vgetexpsd, 0x66384358, "vgetexpsd", Vdq, xx, KE1b, Hsd, Wq_dq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 162 */
{OP_vgetmantps, 0x663a2618, "vgetmantps", Ve, xx, KEw, Ib, We, mrm|evex|reqp, x, END_LIST},
{OP_vgetmantpd, 0x663a2658, "vgetmantpd", Ve, xx, KEb, Ib, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 163 */
{OP_vgetmantss, 0x663a2718, "vgetmantss", Vdq, xx, KE1b, Ib, H12_dq, xop|mrm|evex|reqp, x, exop[107]},
{OP_vgetmantsd, 0x663a2758, "vgetmantsd", Vdq, xx, KE1b, Ib, Hsd, xop|mrm|evex|reqp, x, exop[108]},
}, { /* evex_W_ext 164 */
{OP_vpblendmb, 0x66386618, "vpblendmb", Ve, xx, KEq, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpblendmw, 0x66386658, "vpblendmw", Ve, xx, KEd, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 165 */
{OP_vpblendmd, 0x66386418, "vpblendmd", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vpblendmq, 0x66386458, "vpblendmq", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 166 */
{OP_vpcompressd, 0x66388b18, "vpcompressd", We, xx, KEw, Ve, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpcompressq, 0x66388b58, "vpcompressq", We, xx, KEb, Ve, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 167 */
{OP_vpexpandd, 0x66388918, "vpexpandd", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpexpandq, 0x66388958, "vpexpandq", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 168 */
{OP_vptestmb, 0x66382618, "vptestmb", KPq, xx, KEq, He, We, mrm|evex, x, END_LIST},
{OP_vptestmw, 0x66382658, "vptestmw", KPd, xx, KEd, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 169 */
{OP_vptestmd, 0x66382718, "vptestmd", KPw, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vptestmq, 0x66382758, "vptestmq", KPb, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 170 */
{OP_vptestnmb, 0xf3382618, "vptestnmb", KPq, xx, KEq, He, We, mrm|evex, x, END_LIST},
{OP_vptestnmw, 0xf3382658, "vptestnmw", KPd, xx, KEd, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 171 */
{OP_vptestnmd, 0xf3382718, "vptestnmd", KPw, xx, KEw, He, We, mrm|evex, x, END_LIST},
{OP_vptestnmq, 0xf3382758, "vptestnmq", KPb, xx, KEb, He, We, mrm|evex, x, END_LIST},
}, { /* evex_W_ext 172 */
{OP_vrangeps, 0x663a5018, "vrangeps", Ve, xx, KEw, Ib, He, xop|mrm|evex|reqp, x, exop[109]},
{OP_vrangepd, 0x663a5058, "vrangepd", Ve, xx, KEb, Ib, He, xop|mrm|evex|reqp, x, exop[110]},
}, { /* evex_W_ext 173 */
{OP_vrangess, 0x663a5118, "vrangess", Vdq, xx, KE1b, Ib, H12_dq, xop|mrm|evex|reqp, x, exop[111]},
{OP_vrangesd, 0x663a5158, "vrangesd", Vdq, xx, KE1b, Ib, Hsd, xop|mrm|evex|reqp, x, exop[112]},
}, { /* evex_W_ext 174 */
{OP_vreduceps, 0x663a5618, "vreduceps", Ve, xx, KEw, Ib, We, mrm|evex|reqp, x, END_LIST},
{OP_vreducepd, 0x663a5658, "vreducepd", Ve, xx, KEb, Ib, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 175 */
{OP_vreducess, 0x663a5718, "vreducess", Vdq, xx, KE1b, Ib, H12_dq, xop|mrm|evex|reqp, x, exop[113]},
{OP_vreducesd, 0x663a5758, "vreducesd", Vdq, xx, KE1b, Ib, Hsd, xop|mrm|evex|reqp, x, exop[114]},
}, { /* evex_W_ext 176 */
{OP_vrsqrt14ps, 0x66384e18, "vrsqrt14ps", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vrsqrt14pd, 0x66384e58, "vrsqrt14pd", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 177 */
{OP_vrsqrt14ss, 0x66384f18, "vrsqrt14ss", Vdq, xx, KE1b, H12_dq, Wd_dq, mrm|evex|reqp, x, END_LIST},
{OP_vrsqrt14sd, 0x66384f58, "vrsqrt14sd", Vdq, xx, KE1b, Hsd, Wq_dq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 178 */
{OP_vrsqrt28ps, 0x6638cc18, "vrsqrt28ps", Voq, xx, KEw, Woq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vrsqrt28pd, 0x6638cc58, "vrsqrt28pd", Voq, xx, KEb, Woq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 179 */
{OP_vrsqrt28ss, 0x6638cd18, "vrsqrt28ss", Vdq, xx, KE1b, H12_dq, Wd_dq, mrm|evex|reqp, x, END_LIST},
{OP_vrsqrt28sd, 0x6638cd58, "vrsqrt28sd", Vdq, xx, KE1b, Hsd, Wq_dq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 180 */
{OP_vscalefps, 0x66382c18, "vscalefps", Ve, xx, KEw, He, We, mrm|evex|reqp, x, END_LIST},
{OP_vscalefpd, 0x66382c58, "vscalefpd", Ve, xx, KEb, He, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 181 */
{OP_vscalefss, 0x66382d18, "vscalefss", Vdq, xx, KE1b, H12_dq, Wd_dq, mrm|evex|reqp, x, END_LIST},
{OP_vscalefsd, 0x66382d58, "vscalefsd", Vdq, xx, KE1b, Hsd, Wq_dq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 182 */
{OP_vfpclassps, 0x663a6618, "vfpclassps", KPw, xx, KEw, Ib, We, mrm|evex|reqp, x, END_LIST},
{OP_vfpclasspd, 0x663a6658, "vfpclasspd", KPb, xx, KEb, Ib, We, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 183 */
{OP_vfpclassss, 0x663a6718, "vfpclassss", KP1b, xx, KE1b, Ib, Wd_dq, mrm|evex|reqp, x, END_LIST},
{OP_vfpclasssd, 0x663a6758, "vfpclasssd", KP1b, xx, KE1b, Ib, Wq_dq, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 184 */
{OP_vexp2ps, 0x6638c818, "vexp2ps", Voq, xx, KEw, Woq, xx, mrm|evex|reqp, x, END_LIST},
{OP_vexp2pd, 0x6638c858, "vexp2pd", Voq, xx, KEb, Woq, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 185 */
{OP_vpconflictd, 0x6638c418, "vpconflictd", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vpconflictq, 0x6638c458, "vpconflictq", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 186 */
{OP_vplzcntd, 0x66384418, "vplzcntd", Ve, xx, KEw, We, xx, mrm|evex|reqp, x, END_LIST},
{OP_vplzcntq, 0x66384458, "vplzcntq", Ve, xx, KEb, We, xx, mrm|evex|reqp, x, END_LIST},
}, { /* evex_W_ext 187 */
{OP_vpternlogd, 0x663a2518, "vpternlogd", Ve, xx, KEw, Ib, He, xop|mrm|evex|reqp, x, exop[118]},
{OP_vpternlogq, 0x663a2558, "vpternlogq", Ve, xx, KEb, Ib, He, xop|mrm|evex|reqp, x, exop[119]},
},
};
/****************************************************************************
* XOP instructions
*
* Since large parts of the opcode space are empty, we save space by having
 * tables of 256 indices instead of tables of 256 instr_info_t structs
 * (an illustrative lookup sketch follows xop_extensions[] below).
 */
/* N.B.: all XOP 0x08 are assumed to have an immediate. If this becomes
* untrue we'll have to add an xop_8_extra[] table in decode_fast.c.
*/
const byte xop_8_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 4, 5, /* 8 */
0, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0, 0, 0, 0, 9,10, /* 9 */
0, 0,11,12, 0, 0,13, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0,14, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
15,16,17,18, 0, 0, 0, 0, 0, 0, 0, 0, 19,20,21,22, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23,24,25,26, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const byte xop_9_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0,58,59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0,61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
27,28,29,30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
31,32,33,34, 35,36,37,38, 39,40,41,42, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0,43,44,45, 0, 0,46,47, 0, 0, 0,48, 0, 0, 0, 0, /* C */
0,49,50,51, 0, 0,52,53, 0, 0, 0,54, 0, 0, 0, 0, /* D */
0,55,56,57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
/* N.B.: nothing here for initial XOP but upcoming TBM and LWP have opcodes here */
const byte xop_a_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
60, 0,62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t xop_extensions[] = {
{INVALID, 0x000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/* We are out of flags, and we want to share a lot of REQUIRES_VEX, so to
* distinguish XOP we just rely on the XOP.map_select being disjoint from
* the VEX.m-mmm field.
*/
/* XOP.map_select = 0x08 */
{OP_vpmacssww, 0x088518,"vpmacssww", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 1*/
{OP_vpmacsswd, 0x088618,"vpmacsswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 2*/
{OP_vpmacssdql,0x088718,"vpmacssdql",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 3*/
{OP_vpmacssdd, 0x088e18,"vpmacssdd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 4*/
{OP_vpmacssdqh,0x088f18,"vpmacssdqh",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 5*/
{OP_vpmacsww, 0x089518,"vpmacsww", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 6*/
{OP_vpmacswd, 0x089618,"vpmacswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 7*/
{OP_vpmacsdql, 0x089718,"vpmacsdql", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 8*/
{OP_vpmacsdd, 0x089e18,"vpmacsdd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 9*/
{OP_vpmacsdqh, 0x089f18,"vpmacsdqh", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*10*/
{VEX_W_EXT, 0x08a218, "(vex_W ext 50)", xx,xx,xx,xx,xx, mrm|vex, x, 50}, /*11*/
{VEX_W_EXT, 0x08a318, "(vex_W ext 51)", xx,xx,xx,xx,xx, mrm|vex, x, 51}, /*12*/
{OP_vpmadcsswd,0x08a618,"vpmadcsswd",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*13*/
{OP_vpmadcswd, 0x08b618,"vpmadcswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*14*/
{OP_vprotb, 0x08c018,"vprotb", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[52][0]},/*15*/
{OP_vprotw, 0x08c118,"vprotw", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[53][0]},/*16*/
{OP_vprotd, 0x08c218,"vprotd", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[54][0]},/*17*/
{OP_vprotq, 0x08c318,"vprotq", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[55][0]},/*18*/
{OP_vpcomb, 0x08cc18,"vpcomb", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*19*/
{OP_vpcomw, 0x08cd18,"vpcomw", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*20*/
{OP_vpcomd, 0x08ce18,"vpcomd", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*21*/
{OP_vpcomq, 0x08cf18,"vpcomq", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*22*/
{OP_vpcomub, 0x08ec18,"vpcomub", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*23*/
{OP_vpcomuw, 0x08ed18,"vpcomuw", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*24*/
{OP_vpcomud, 0x08ee18,"vpcomud", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*25*/
{OP_vpcomuq, 0x08ef18,"vpcomuq", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*26*/
/* XOP.map_select = 0x09 */
{OP_vfrczps, 0x098018,"vfrczps", Vvs,xx,Wvs,xx,xx,mrm|vex,x,END_LIST}, /*27*/
{OP_vfrczpd, 0x098118,"vfrczpd", Vvs,xx,Wvs,xx,xx,mrm|vex,x,END_LIST}, /*28*/
{OP_vfrczss, 0x098218,"vfrczss", Vss,xx,Wss,xx,xx,mrm|vex,x,END_LIST}, /*29*/
{OP_vfrczsd, 0x098318,"vfrczsd", Vsd,xx,Wsd,xx,xx,mrm|vex,x,END_LIST}, /*30*/
{VEX_W_EXT, 0x099018, "(vex_W ext 52)", xx,xx,xx,xx,xx, mrm|vex, x, 52}, /*31*/
{VEX_W_EXT, 0x099118, "(vex_W ext 53)", xx,xx,xx,xx,xx, mrm|vex, x, 53}, /*32*/
{VEX_W_EXT, 0x099218, "(vex_W ext 54)", xx,xx,xx,xx,xx, mrm|vex, x, 54}, /*33*/
{VEX_W_EXT, 0x099318, "(vex_W ext 55)", xx,xx,xx,xx,xx, mrm|vex, x, 55}, /*34*/
{VEX_W_EXT, 0x099418, "(vex_W ext 56)", xx,xx,xx,xx,xx, mrm|vex, x, 56}, /*35*/
{VEX_W_EXT, 0x099518, "(vex_W ext 57)", xx,xx,xx,xx,xx, mrm|vex, x, 57}, /*36*/
{VEX_W_EXT, 0x099618, "(vex_W ext 58)", xx,xx,xx,xx,xx, mrm|vex, x, 58}, /*37*/
{VEX_W_EXT, 0x099718, "(vex_W ext 59)", xx,xx,xx,xx,xx, mrm|vex, x, 59}, /*38*/
{VEX_W_EXT, 0x099818, "(vex_W ext 60)", xx,xx,xx,xx,xx, mrm|vex, x, 60}, /*39*/
{VEX_W_EXT, 0x099918, "(vex_W ext 61)", xx,xx,xx,xx,xx, mrm|vex, x, 61}, /*40*/
{VEX_W_EXT, 0x099a18, "(vex_W ext 62)", xx,xx,xx,xx,xx, mrm|vex, x, 62}, /*41*/
{VEX_W_EXT, 0x099b18, "(vex_W ext 63)", xx,xx,xx,xx,xx, mrm|vex, x, 63}, /*42*/
{OP_vphaddbw, 0x09c118,"vphaddbw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*43*/
{OP_vphaddbd, 0x09c218,"vphaddbd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*44*/
{OP_vphaddbq, 0x09c318,"vphaddbq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*45*/
{OP_vphaddwd, 0x09c618,"vphaddwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*46*/
{OP_vphaddwq, 0x09c718,"vphaddwq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*47*/
{OP_vphadddq, 0x09cb18,"vphadddq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*48*/
/* AMD decode table erroneously lists this as "vphaddubwd" */
{OP_vphaddubw, 0x09d118,"vphaddubw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*49*/
{OP_vphaddubd, 0x09d218,"vphaddubd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*50*/
{OP_vphaddubq, 0x09d318,"vphaddubq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*51*/
{OP_vphadduwd, 0x09d618,"vphadduwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*52*/
{OP_vphadduwq, 0x09d718,"vphadduwq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*53*/
{OP_vphaddudq, 0x09db18,"vphaddudq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*54*/
{OP_vphsubbw, 0x09e118,"vphsubbw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*55*/
{OP_vphsubwd, 0x09e218,"vphsubwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*56*/
{OP_vphsubdq, 0x09e318,"vphsubdq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*57*/
{EXTENSION, 0x090118, "(XOP group 1)", xx,xx, xx,xx,xx, mrm|vex, x, 27}, /*58*/
{EXTENSION, 0x090218, "(XOP group 2)", xx,xx, xx,xx,xx, mrm|vex, x, 28}, /*59*/
/* XOP.map_select = 0x0a */
{OP_bextr, 0x0a1018, "bextr", Gy,xx,Ey,Id,xx, mrm|vex, fW6, END_LIST}, /*60*/
/* Later-added instrs, from various tables */
{EXTENSION, 0x091218, "(XOP group 3)", xx,xx, xx,xx,xx, mrm|vex, x, 29}, /*61*/
{EXTENSION, 0x0a1218, "(XOP group 4)", xx,xx, xx,xx,xx, mrm|vex, x, 30}, /*62*/
};
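/* Illustrative sketch only, not used by the decoder itself: how the two-level
 * scheme above is consumed.  The 256-entry byte table maps an opcode byte to a
 * small index into xop_extensions[], so an empty slot costs one byte instead
 * of sizeof(instr_info_t), and index 0 is the shared INVALID entry.  The
 * helper name is hypothetical.
 */
static inline const instr_info_t *
xop_8_lookup_sketch(byte opcode_byte)
{
    /* Index 0 selects the INVALID entry at xop_extensions[0]. */
    return &xop_extensions[xop_8_index[opcode_byte]];
}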
/****************************************************************************
* String instructions that differ depending on rep/repne prefix
*
* Note that Intel manuals prior to May 2011 claim that for x64 the count
* register for ins and outs is rcx by default, but for all other rep* is ecx.
* The AMD manual, and experimental evidence, contradicts this and has rcx
* as the default count register for all rep*.
* Furthermore, the Intel manual implies that w/o rex.w edi/esi are used
 * rather than rdi/rsi, which again the AMD manual and experimental
* evidence contradict.
*/
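/* Minimal illustration of the count-register behavior described above
 * (GCC/Clang x86-64 inline asm; the helper is hypothetical and is not used by
 * the tables below): rep movsb takes its count from rcx and its pointers from
 * rsi/rdi even without rex.w, which is what the axCX/axSI/axDI operands below
 * encode.
 */
static inline void
rep_movsb_sketch(void *dst, const void *src, unsigned long count)
{
    __asm__ volatile("rep movsb"
                     : "+D"(dst), "+S"(src), "+c"(count)
                     :
                     : "memory");
}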
const instr_info_t rep_extensions[][4] = {
/* FIXME: ins and outs access "I/O ports", are these memory addresses?
* if so, change Ib to Ob and change dx to i_dx (move to dest for outs)
*/
{ /* rep extension 0 */
{OP_ins, 0x6c0000, "ins", Yb, axDI, dx, axDI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_ins, 0xf36c0000, "rep ins", Yb, axDI, dx, axDI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf36c0000, "rep ins", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 1 */
{OP_ins, 0x6d0000, "ins", Yz, axDI, dx, axDI, xx, no, fRD, tre[0][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_ins, 0xf36d0000, "rep ins", Yz, axDI, dx, axDI, axCX, xop_next, fRD, tre[0][2]},
{OP_CONTD, 0xf36d0000, "rep ins", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 2 */
{OP_outs, 0x6e0000, "outs", axSI, xx, Xb, dx, axSI, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_outs, 0xf36e0000, "rep outs", axSI, axCX, Xb, dx, axSI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf36e0000, "rep outs", xx, xx, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 3 */
{OP_outs, 0x6f0000, "outs", axSI, xx, Xz, dx, axSI, no, fRD, tre[2][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_outs, 0xf36f0000, "rep outs", axSI, axCX, Xz, dx, axSI, xop_next, fRD, tre[2][2]},
{OP_CONTD, 0xf36f0000, "rep outs", xx, xx, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 4 */
{OP_movs, 0xa40000, "movs", Yb, axSI, Xb, axSI, axDI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xa40000, "movs", axDI, xx, xx, xx, xx, no, fRD, END_LIST},
{OP_rep_movs, 0xf3a40000, "rep movs", Yb, axSI, Xb, axSI, axDI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3a40000, "rep movs", axDI, axCX, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 5 */
{OP_movs, 0xa50000, "movs", Yv, axSI, Xv, axSI, axDI, xop_next, fRD, tre[4][0]},
{OP_CONTD, 0xa50000, "movs", axDI, xx, xx, xx, xx, no, fRD, END_LIST},
{OP_rep_movs, 0xf3a50000, "rep movs", Yv, axSI, Xv, axSI, axDI, xop_next, fRD, tre[4][2]},
{OP_CONTD, 0xf3a50000, "rep movs", axDI, axCX, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 6 */
{OP_stos, 0xaa0000, "stos", Yb, axDI, al, axDI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_stos, 0xf3aa0000, "rep stos", Yb, axDI, al, axDI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3aa0000, "rep stos", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 7 */
{OP_stos, 0xab0000, "stos", Yv, axDI, eAX, axDI, xx, no, fRD, tre[6][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_stos, 0xf3ab0000, "rep stos", Yv, axDI, eAX, axDI, axCX, xop_next, fRD, tre[6][2]},
{OP_CONTD, 0xf3ab0000, "rep stos", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 8 */
{OP_lods, 0xac0000, "lods", al, axSI, Xb, axSI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_lods, 0xf3ac0000, "rep lods", al, axSI, Xb, axSI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3ac0000, "rep lods", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 9 */
{OP_lods, 0xad0000, "lods", eAX, axSI, Xv, axSI, xx, no, fRD, tre[8][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_lods, 0xf3ad0000, "rep lods", eAX, axSI, Xv, axSI, axCX, xop_next, fRD, tre[8][2]},
{OP_CONTD, 0xf3ad0000, "rep lods", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
};
const instr_info_t repne_extensions[][6] = {
{ /* repne extension 0 */
{OP_cmps, 0xa60000, "cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD), END_LIST},
{OP_CONTD, 0xa60000, "cmps", xx, xx, axDI, xx, xx, no, (fW6|fRD), END_LIST},
{OP_rep_cmps, 0xf3a60000, "rep cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf3a60000, "rep cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
{OP_repne_cmps, 0xf2a60000, "repne cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf2a60000, "repne cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 1 */
{OP_cmps, 0xa70000, "cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD), tne[0][0]},
{OP_CONTD, 0xa70000, "cmps", xx, xx, axDI, xx, xx, no, (fW6|fRD), END_LIST},
{OP_rep_cmps, 0xf3a70000, "rep cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD|fRZ), tne[0][2]},
{OP_CONTD, 0xf3a70000, "rep cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
{OP_repne_cmps, 0xf2a70000, "repne cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD|fRZ), tne[0][4]},
{OP_CONTD, 0xf2a70000, "repne cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 2 */
{OP_scas, 0xae0000, "scas", axDI, xx, Yb, al, axDI, no, (fW6|fRD), END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_scas, 0xf3ae0000, "rep scas", axDI, axCX, Yb, al, axDI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf3ae0000, "rep scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
{OP_repne_scas, 0xf2ae0000, "repne scas", axDI, axCX, Yb, al, axDI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf2ae0000, "repne scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 3 */
{OP_scas, 0xaf0000, "scas", axDI, xx, Yv, eAX, axDI, no, (fW6|fRD), tne[2][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_scas, 0xf3af0000, "rep scas", axDI, axCX, Yv, eAX, axDI, xop_next, (fW6|fRD|fRZ), tne[2][2]},
{OP_CONTD, 0xf3af0000, "rep scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
{OP_repne_scas, 0xf2af0000, "repne scas", axDI, axCX, Yv, eAX, axDI, xop_next, (fW6|fRD|fRZ), tne[2][4]},
{OP_CONTD, 0xf2af0000, "repne scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
}
};
/****************************************************************************
* Float instructions with ModR/M from 0x00 to 0xbf
* This is from Tables A-7, A-9, A-11, A-13, A-15, A-17, A-19, A-21
* I've added my own symbol '+' to indicate a float, and:
* 'x' to indicate extended real (80 bits)
* 'y' to indicate 14/28 byte value in memory
 * 'z' to indicate 98/108 byte value in memory
 * (an illustrative index-computation sketch follows the table below)
 */
/* FIXME: I ignore fp stack changes, should we model that? */
const instr_info_t float_low_modrm[] = {
/* d8 */
{OP_fadd, 0xd80020, "fadd", st0, xx, Fd, st0, xx, mrm, x, tfl[0x20]}, /* 00 */
{OP_fmul, 0xd80021, "fmul", st0, xx, Fd, st0, xx, mrm, x, tfl[0x21]},
{OP_fcom, 0xd80022, "fcom", xx, xx, Fd, st0, xx, mrm, x, tfl[0x22]},
{OP_fcomp, 0xd80023, "fcomp", xx, xx, Fd, st0, xx, mrm, x, tfl[0x23]},
{OP_fsub, 0xd80024, "fsub", st0, xx, Fd, st0, xx, mrm, x, tfl[0x24]},
{OP_fsubr, 0xd80025, "fsubr", st0, xx, Fd, st0, xx, mrm, x, tfl[0x25]},
{OP_fdiv, 0xd80026, "fdiv", st0, xx, Fd, st0, xx, mrm, x, tfl[0x26]},
{OP_fdivr, 0xd80027, "fdivr", st0, xx, Fd, st0, xx, mrm, x, tfl[0x27]},
/* d9 */
{OP_fld, 0xd90020, "fld", st0, xx, Fd, xx, xx, mrm, x, tfl[0x1d]}, /* 08 */
{INVALID, 0xd90021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fst, 0xd90022, "fst", Fd, xx, st0, xx, xx, mrm, x, tfl[0x2a]},
{OP_fstp, 0xd90023, "fstp", Fd, xx, st0, xx, xx, mrm, x, tfl[0x1f]},
{OP_fldenv, 0xd90024, "fldenv", xx, xx, Fy, xx, xx, mrm, x, END_LIST},
{OP_fldcw, 0xd90025, "fldcw", xx, xx, Fw, xx, xx, mrm, x, END_LIST},
{OP_fnstenv, 0xd90026, "fnstenv", Fy, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, this is "fstenv"*/
{OP_fnstcw, 0xd90027, "fnstcw", Fw, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, this is "fstcw"*/
/* da */
{OP_fiadd, 0xda0020, "fiadd", st0, xx, Md, st0, xx, mrm, x, tfl[0x30]}, /* 10 */
{OP_fimul, 0xda0021, "fimul", st0, xx, Md, st0, xx, mrm, x, tfl[0x31]},
{OP_ficom, 0xda0022, "ficom", st0, xx, Md, st0, xx, mrm, x, tfl[0x32]},
{OP_ficomp, 0xda0023, "ficomp", st0, xx, Md, st0, xx, mrm, x, tfl[0x33]},
{OP_fisub, 0xda0024, "fisub", st0, xx, Md, st0, xx, mrm, x, tfl[0x34]},
{OP_fisubr, 0xda0025, "fisubr", st0, xx, Md, st0, xx, mrm, x, tfl[0x35]},
{OP_fidiv, 0xda0026, "fidiv", st0, xx, Md, st0, xx, mrm, x, tfl[0x36]},
{OP_fidivr, 0xda0027, "fidivr", st0, xx, Md, st0, xx, mrm, x, tfl[0x37]},
/* db */
{OP_fild, 0xdb0020, "fild", st0, xx, Md, xx, xx, mrm, x, tfl[0x38]}, /* 18 */
{OP_fisttp, 0xdb0021, "fisttp", Md, xx, st0, xx, xx, no, x, tfl[0x39]},
{OP_fist, 0xdb0022, "fist", Md, xx, st0, xx, xx, mrm, x, tfl[0x3a]},
{OP_fistp, 0xdb0023, "fistp", Md, xx, st0, xx, xx, mrm, x, tfl[0x3b]},
{INVALID, 0xdb0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fld, 0xdb0025, "fld", st0, xx, Fx, xx, xx, mrm, x, tfl[0x28]},
{INVALID, 0xdb0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fstp, 0xdb0027, "fstp", Fx, xx, st0, xx, xx, mrm, x, tfl[0x2b]},
/* dc */
{OP_fadd, 0xdc0020, "fadd", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x00]}, /* 20 */
{OP_fmul, 0xdc0021, "fmul", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x08]},
{OP_fcom, 0xdc0022, "fcom", xx, xx, Fq, st0, xx, mrm, x, tfh[0][0x10]},
{OP_fcomp, 0xdc0023, "fcomp", xx, xx, Fq, st0, xx, mrm, x, tfh[0][0x18]},
{OP_fsub, 0xdc0024, "fsub", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x20]},
{OP_fsubr, 0xdc0025, "fsubr", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x28]},
{OP_fdiv, 0xdc0026, "fdiv", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x30]},
{OP_fdivr, 0xdc0027, "fdivr", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x38]},
/* dd */
{OP_fld, 0xdd0020, "fld", st0, xx, Fq, xx, xx, mrm, x, tfh[1][0x00]}, /* 28 */
{OP_fisttp, 0xdd0021, "fisttp", Mq, xx, st0, xx, xx, no, x, tfl[0x19]},
{OP_fst, 0xdd0022, "fst", Fq, xx, st0, xx, xx, mrm, x, tfh[5][0x10]},
{OP_fstp, 0xdd0023, "fstp", Fq, xx, st0, xx, xx, mrm, x, tfh[5][0x18]},
{OP_frstor,0xdd0024, "frstor", xx, xx, Fz, xx, xx, mrm, x, END_LIST},
{INVALID, 0xdd0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fnsave, 0xdd0026, "fnsave", Fz, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME:w/ preceding fwait instr, this is "fsave"*/
{OP_fnstsw, 0xdd0027, "fnstsw", Fw, xx, xx, xx, xx, mrm, x, tfh[7][0x20]},/*FIXME:w/ preceding fwait instr, this is "fstsw"*/
/* de */
{OP_fiadd, 0xde0020, "fiadd", st0, xx, Fw, st0, xx, mrm, x, END_LIST}, /* 30 */
{OP_fimul, 0xde0021, "fimul", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_ficom, 0xde0022, "ficom", xx, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_ficomp, 0xde0023, "ficomp", xx, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fisub, 0xde0024, "fisub", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fisubr, 0xde0025, "fisubr", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fidiv, 0xde0026, "fidiv", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fidivr, 0xde0027, "fidivr", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
/* df */
{OP_fild, 0xdf0020, "fild", st0, xx, Fw, xx, xx, mrm, x, tfl[0x3d]}, /* 38 */
{OP_fisttp, 0xdf0021, "fisttp", Mw, xx, st0, xx, xx, no, x, END_LIST},
{OP_fist, 0xdf0022, "fist", Fw, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fistp, 0xdf0023, "fistp", Fw, xx, st0, xx, xx, mrm, x, tfl[0x3f]},
{OP_fbld, 0xdf0024, "fbld", st0, xx, Fx, xx, xx, mrm, x, END_LIST},
{OP_fild, 0xdf0025, "fild", st0, xx, Fq, xx, xx, mrm, x, END_LIST},
{OP_fbstp, 0xdf0026, "fbstp", Fx, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fistp, 0xdf0027, "fistp", Fq, xx, st0, xx, xx, mrm, x, END_LIST},
};
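/* Illustrative sketch only (hypothetical helper): the table above is laid out
 * as 8 rows of 8 entries, one row per escape opcode 0xd8-0xdf and one entry
 * per ModR/M reg field, which is how the tfl[] cross-references used
 * throughout these tables are computed.
 */
static inline const instr_info_t *
float_low_lookup_sketch(byte escape_opc /* 0xd8..0xdf */, byte modrm /* < 0xc0 */)
{
    uint idx = (uint)(((escape_opc - 0xd8) << 3) | ((modrm >> 3) & 0x7));
    return &float_low_modrm[idx];
}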
/****************************************************************************
* Float instructions with ModR/M above 0xbf
* This is from Tables A-8, A-10, A-12, A-14, A-16, A-18, A-20, A-22
*/
const instr_info_t float_high_modrm[][64] = {
{ /* d8 = [0] */
{OP_fadd, 0xd8c010, "fadd", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x01]}, /* c0 = [0x00] */
{OP_fadd, 0xd8c110, "fadd", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x02]},
{OP_fadd, 0xd8c210, "fadd", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x03]},
{OP_fadd, 0xd8c310, "fadd", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x04]},
{OP_fadd, 0xd8c410, "fadd", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x05]},
{OP_fadd, 0xd8c510, "fadd", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x06]},
{OP_fadd, 0xd8c610, "fadd", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x07]},
{OP_fadd, 0xd8c710, "fadd", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x00]},
{OP_fmul, 0xd8c810, "fmul", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x09]}, /* c8 = [0x08] */
{OP_fmul, 0xd8c910, "fmul", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x0a]},
{OP_fmul, 0xd8ca10, "fmul", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x0b]},
{OP_fmul, 0xd8cb10, "fmul", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x0c]},
{OP_fmul, 0xd8cc10, "fmul", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x0d]},
{OP_fmul, 0xd8cd10, "fmul", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x0e]},
{OP_fmul, 0xd8ce10, "fmul", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x0f]},
{OP_fmul, 0xd8cf10, "fmul", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x08]},
{OP_fcom, 0xd8d010, "fcom", xx, xx, st0, st0, xx, mrm, x, tfh[0][0x11]}, /* d0 = [0x10] */
{OP_fcom, 0xd8d110, "fcom", xx, xx, st0, st1, xx, mrm, x, tfh[0][0x12]},
{OP_fcom, 0xd8d210, "fcom", xx, xx, st0, st2, xx, mrm, x, tfh[0][0x13]},
{OP_fcom, 0xd8d310, "fcom", xx, xx, st0, st3, xx, mrm, x, tfh[0][0x14]},
{OP_fcom, 0xd8d410, "fcom", xx, xx, st0, st4, xx, mrm, x, tfh[0][0x15]},
{OP_fcom, 0xd8d510, "fcom", xx, xx, st0, st5, xx, mrm, x, tfh[0][0x16]},
{OP_fcom, 0xd8d610, "fcom", xx, xx, st0, st6, xx, mrm, x, tfh[0][0x17]},
{OP_fcom, 0xd8d710, "fcom", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fcomp, 0xd8d810, "fcomp", xx, xx, st0, st0, xx, mrm, x, tfh[0][0x19]}, /* d8 = [0x18] */
{OP_fcomp, 0xd8d910, "fcomp", xx, xx, st0, st1, xx, mrm, x, tfh[0][0x1a]},
{OP_fcomp, 0xd8da10, "fcomp", xx, xx, st0, st2, xx, mrm, x, tfh[0][0x1b]},
{OP_fcomp, 0xd8db10, "fcomp", xx, xx, st0, st3, xx, mrm, x, tfh[0][0x1c]},
{OP_fcomp, 0xd8dc10, "fcomp", xx, xx, st0, st4, xx, mrm, x, tfh[0][0x1d]},
{OP_fcomp, 0xd8dd10, "fcomp", xx, xx, st0, st5, xx, mrm, x, tfh[0][0x1e]},
{OP_fcomp, 0xd8de10, "fcomp", xx, xx, st0, st6, xx, mrm, x, tfh[0][0x1f]},
{OP_fcomp, 0xd8df10, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsub, 0xd8e010, "fsub", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x21]}, /* e0 = [0x20] */
{OP_fsub, 0xd8e110, "fsub", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x22]},
{OP_fsub, 0xd8e210, "fsub", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x23]},
{OP_fsub, 0xd8e310, "fsub", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x24]},
{OP_fsub, 0xd8e410, "fsub", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x25]},
{OP_fsub, 0xd8e510, "fsub", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x26]},
{OP_fsub, 0xd8e610, "fsub", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x27]},
{OP_fsub, 0xd8e710, "fsub", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x28]},
{OP_fsubr, 0xd8e810, "fsubr", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x29]}, /* e8 = [0x28] */
{OP_fsubr, 0xd8e910, "fsubr", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x2a]},
{OP_fsubr, 0xd8ea10, "fsubr", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x2b]},
{OP_fsubr, 0xd8eb10, "fsubr", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x2c]},
{OP_fsubr, 0xd8ec10, "fsubr", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x2d]},
{OP_fsubr, 0xd8ed10, "fsubr", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x2e]},
{OP_fsubr, 0xd8ee10, "fsubr", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x2f]},
{OP_fsubr, 0xd8ef10, "fsubr", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x20]},
{OP_fdiv, 0xd8f010, "fdiv", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x31]}, /* f0 = [0x30] */
{OP_fdiv, 0xd8f110, "fdiv", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x32]},
{OP_fdiv, 0xd8f210, "fdiv", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x33]},
{OP_fdiv, 0xd8f310, "fdiv", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x34]},
{OP_fdiv, 0xd8f410, "fdiv", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x35]},
{OP_fdiv, 0xd8f510, "fdiv", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x36]},
{OP_fdiv, 0xd8f610, "fdiv", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x37]},
{OP_fdiv, 0xd8f710, "fdiv", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x38]},
{OP_fdivr, 0xd8f810, "fdivr", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x39]}, /* f8 = [0x38] */
{OP_fdivr, 0xd8f910, "fdivr", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x3a]},
{OP_fdivr, 0xd8fa10, "fdivr", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x3b]},
{OP_fdivr, 0xd8fb10, "fdivr", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x3c]},
{OP_fdivr, 0xd8fc10, "fdivr", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x3d]},
{OP_fdivr, 0xd8fd10, "fdivr", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x3e]},
{OP_fdivr, 0xd8fe10, "fdivr", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x3f]},
{OP_fdivr, 0xd8ff10, "fdivr", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x30]},
},
{ /* d9 = [1] */
{OP_fld, 0xd9c010, "fld", st0, xx, st0, xx, xx, mrm, x, tfh[1][0x01]}, /* c0 = [0x00] */
{OP_fld, 0xd9c110, "fld", st0, xx, st1, xx, xx, mrm, x, tfh[1][0x02]},
{OP_fld, 0xd9c210, "fld", st0, xx, st2, xx, xx, mrm, x, tfh[1][0x03]},
{OP_fld, 0xd9c310, "fld", st0, xx, st3, xx, xx, mrm, x, tfh[1][0x04]},
{OP_fld, 0xd9c410, "fld", st0, xx, st4, xx, xx, mrm, x, tfh[1][0x05]},
{OP_fld, 0xd9c510, "fld", st0, xx, st5, xx, xx, mrm, x, tfh[1][0x06]},
{OP_fld, 0xd9c610, "fld", st0, xx, st6, xx, xx, mrm, x, tfh[1][0x07]},
{OP_fld, 0xd9c710, "fld", st0, xx, st7, xx, xx, mrm, x, END_LIST},
{OP_fxch, 0xd9c810, "fxch", st0, st0, st0, st0, xx, mrm, x, tfh[1][0x09]}, /* c8 = [0x08] */
{OP_fxch, 0xd9c910, "fxch", st0, st1, st0, st1, xx, mrm, x, tfh[1][0x0a]},
{OP_fxch, 0xd9ca10, "fxch", st0, st2, st0, st2, xx, mrm, x, tfh[1][0x0b]},
{OP_fxch, 0xd9cb10, "fxch", st0, st3, st0, st3, xx, mrm, x, tfh[1][0x0c]},
{OP_fxch, 0xd9cc10, "fxch", st0, st4, st0, st4, xx, mrm, x, tfh[1][0x0d]},
{OP_fxch, 0xd9cd10, "fxch", st0, st5, st0, st5, xx, mrm, x, tfh[1][0x0e]},
{OP_fxch, 0xd9ce10, "fxch", st0, st6, st0, st6, xx, mrm, x, tfh[1][0x0f]},
{OP_fxch, 0xd9cf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
{OP_fnop, 0xd9d010, "fnop", xx, xx, xx, xx, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{INVALID, 0xd9d110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* Undocumented. On sandpile.org as "fstp1". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xd9d810, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fstp, 0xd9d910, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9da10, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9db10, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9dc10, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9dd10, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9de10, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9df10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fchs, 0xd9e010, "fchs", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* e0 = [0x20] */
{OP_fabs, 0xd9e110, "fabs", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9e210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9e310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_ftst, 0xd9e410, "ftst", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fxam, 0xd9e510, "fxam", xx, xx, st0, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9e610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9e710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fld1, 0xd9e810, "fld1", st0, xx, cF, xx, xx, mrm, x, END_LIST}, /* e8 = [0x28] */
{OP_fldl2t, 0xd9e910, "fldl2t", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldl2e, 0xd9ea10, "fldl2e", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldpi, 0xd9eb10, "fldpi", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldlg2, 0xd9ec10, "fldlg2", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldln2, 0xd9ed10, "fldln2", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldz, 0xd9ee10, "fldz", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9ef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_f2xm1, 0xd9f010, "f2xm1", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* f0 = [0x30] */
{OP_fyl2x, 0xd9f110, "fyl2x", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fptan, 0xd9f210, "fptan", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fpatan, 0xd9f310, "fpatan", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxtract,0xd9f410, "fxtract",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fprem1, 0xd9f510, "fprem1", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fdecstp,0xd9f610, "fdecstp", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fincstp,0xd9f710, "fincstp", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fprem, 0xd9f810, "fprem", st0, st1, st0, st1, xx, mrm, x, END_LIST}, /* f8 = [0x38] */
{OP_fyl2xp1,0xd9f910, "fyl2xp1",st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fsqrt, 0xd9fa10, "fsqrt", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fsincos,0xd9fb10, "fsincos",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_frndint,0xd9fc10, "frndint",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fscale, 0xd9fd10, "fscale", st0, xx, st1, st0, xx, mrm, x, END_LIST},
{OP_fsin, 0xd9fe10, "fsin", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fcos, 0xd9ff10, "fcos", st0, xx, st0, xx, xx, mrm, x, END_LIST},
},
{ /* da = [2] */
{OP_fcmovb, 0xdac010, "fcmovb", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x01]}, /* c0 = [0x00] */
{OP_fcmovb, 0xdac110, "fcmovb", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x02]},
{OP_fcmovb, 0xdac210, "fcmovb", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x03]},
{OP_fcmovb, 0xdac310, "fcmovb", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x04]},
{OP_fcmovb, 0xdac410, "fcmovb", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x05]},
{OP_fcmovb, 0xdac510, "fcmovb", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x06]},
{OP_fcmovb, 0xdac610, "fcmovb", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x07]},
{OP_fcmovb, 0xdac710, "fcmovb", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmove, 0xdac810, "fcmove", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x09]}, /* c8 = [0x08] */
{OP_fcmove, 0xdac910, "fcmove", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0a]},
{OP_fcmove, 0xdaca10, "fcmove", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0b]},
{OP_fcmove, 0xdacb10, "fcmove", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0c]},
{OP_fcmove, 0xdacc10, "fcmove", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0d]},
{OP_fcmove, 0xdacd10, "fcmove", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0e]},
{OP_fcmove, 0xdace10, "fcmove", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0f]},
{OP_fcmove, 0xdacf10, "fcmove", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovbe, 0xdad010, "fcmovbe", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x11]}, /* d0 = [0x10] */
{OP_fcmovbe, 0xdad110, "fcmovbe", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x12]},
{OP_fcmovbe, 0xdad210, "fcmovbe", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x13]},
{OP_fcmovbe, 0xdad310, "fcmovbe", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x14]},
{OP_fcmovbe, 0xdad410, "fcmovbe", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x15]},
{OP_fcmovbe, 0xdad510, "fcmovbe", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x16]},
{OP_fcmovbe, 0xdad610, "fcmovbe", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x17]},
{OP_fcmovbe, 0xdad710, "fcmovbe", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovu, 0xdad810, "fcmovu", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x19]}, /* d8 = [0x18] */
{OP_fcmovu, 0xdad910, "fcmovu", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1a]},
{OP_fcmovu, 0xdada10, "fcmovu", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1b]},
{OP_fcmovu, 0xdadb10, "fcmovu", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1c]},
{OP_fcmovu, 0xdadc10, "fcmovu", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1d]},
{OP_fcmovu, 0xdadd10, "fcmovu", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1e]},
{OP_fcmovu, 0xdade10, "fcmovu", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1f]},
{OP_fcmovu, 0xdadf10, "fcmovu", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{INVALID, 0xdae010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e0 = [0x20] */
{INVALID, 0xdae110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e8 = [0x28] */
{OP_fucompp, 0xdae910, "fucompp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{INVALID, 0xdaea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaeb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f0 = [0x30] */
{INVALID, 0xdaf110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdaf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* db = [3] */
{OP_fcmovnb, 0xdbc010, "fcmovnb", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x01]}, /* c0 = [0x00] */
{OP_fcmovnb, 0xdbc110, "fcmovnb", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x02]},
{OP_fcmovnb, 0xdbc210, "fcmovnb", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x03]},
{OP_fcmovnb, 0xdbc310, "fcmovnb", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x04]},
{OP_fcmovnb, 0xdbc410, "fcmovnb", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x05]},
{OP_fcmovnb, 0xdbc510, "fcmovnb", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x06]},
{OP_fcmovnb, 0xdbc610, "fcmovnb", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x07]},
{OP_fcmovnb, 0xdbc710, "fcmovnb", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovne, 0xdbc810, "fcmovne", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x09]}, /* c8 = [0x08] */
{OP_fcmovne, 0xdbc910, "fcmovne", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0a]},
{OP_fcmovne, 0xdbca10, "fcmovne", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0b]},
{OP_fcmovne, 0xdbcb10, "fcmovne", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0c]},
{OP_fcmovne, 0xdbcc10, "fcmovne", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0d]},
{OP_fcmovne, 0xdbcd10, "fcmovne", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0e]},
{OP_fcmovne, 0xdbce10, "fcmovne", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0f]},
{OP_fcmovne, 0xdbcf10, "fcmovne", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
    {OP_fcmovnbe, 0xdbd010, "fcmovnbe", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x11]}, /* d0 = [0x10] */
{OP_fcmovnbe, 0xdbd110, "fcmovnbe", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x12]},
{OP_fcmovnbe, 0xdbd210, "fcmovnbe", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x13]},
{OP_fcmovnbe, 0xdbd310, "fcmovnbe", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x14]},
{OP_fcmovnbe, 0xdbd410, "fcmovnbe", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x15]},
{OP_fcmovnbe, 0xdbd510, "fcmovnbe", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x16]},
{OP_fcmovnbe, 0xdbd610, "fcmovnbe", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x17]},
{OP_fcmovnbe, 0xdbd710, "fcmovnbe", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovnu, 0xdbd810, "fcmovnu", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x19]}, /* d8 = [0x18] */
{OP_fcmovnu, 0xdbd910, "fcmovnu", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1a]},
{OP_fcmovnu, 0xdbda10, "fcmovnu", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1b]},
{OP_fcmovnu, 0xdbdb10, "fcmovnu", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1c]},
{OP_fcmovnu, 0xdbdc10, "fcmovnu", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1d]},
{OP_fcmovnu, 0xdbdd10, "fcmovnu", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1e]},
{OP_fcmovnu, 0xdbde10, "fcmovnu", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1f]},
{OP_fcmovnu, 0xdbdf10, "fcmovnu", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{INVALID, 0xdbe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e0 = [0x20] */
{INVALID, 0xdbe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fnclex, 0xdbe210, "fnclex", xx, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, called "fclex"*/
{OP_fninit, 0xdbe310, "fninit", xx, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, called "finit"*/
{INVALID, 0xdbe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fucomi, 0xdbe810, "fucomi", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x29]}, /* e8 = [0x28] */
{OP_fucomi, 0xdbe910, "fucomi", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2a]},
{OP_fucomi, 0xdbea10, "fucomi", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2b]},
{OP_fucomi, 0xdbeb10, "fucomi", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2c]},
{OP_fucomi, 0xdbec10, "fucomi", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2d]},
{OP_fucomi, 0xdbed10, "fucomi", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2e]},
{OP_fucomi, 0xdbee10, "fucomi", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2f]},
{OP_fucomi, 0xdbef10, "fucomi", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{OP_fcomi, 0xdbf010, "fcomi", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x31]}, /* f0 = [0x30] */
{OP_fcomi, 0xdbf110, "fcomi", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x32]},
{OP_fcomi, 0xdbf210, "fcomi", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x33]},
{OP_fcomi, 0xdbf310, "fcomi", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x34]},
{OP_fcomi, 0xdbf410, "fcomi", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x35]},
{OP_fcomi, 0xdbf510, "fcomi", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x36]},
{OP_fcomi, 0xdbf610, "fcomi", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x37]},
{OP_fcomi, 0xdbf710, "fcomi", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{INVALID, 0xdbf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdbf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* dc = [4] */
{OP_fadd, 0xdcc010, "fadd", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x01]}, /* c0 = [0x00] */
{OP_fadd, 0xdcc110, "fadd", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x02]},
{OP_fadd, 0xdcc210, "fadd", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x03]},
{OP_fadd, 0xdcc310, "fadd", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x04]},
{OP_fadd, 0xdcc410, "fadd", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x05]},
{OP_fadd, 0xdcc510, "fadd", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x06]},
{OP_fadd, 0xdcc610, "fadd", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x07]},
{OP_fadd, 0xdcc710, "fadd", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fmul, 0xdcc810, "fmul", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x09]}, /* c8 = [0x08] */
{OP_fmul, 0xdcc910, "fmul", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x0a]},
{OP_fmul, 0xdcca10, "fmul", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x0b]},
{OP_fmul, 0xdccb10, "fmul", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x0c]},
{OP_fmul, 0xdccc10, "fmul", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x0d]},
{OP_fmul, 0xdccd10, "fmul", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x0e]},
{OP_fmul, 0xdcce10, "fmul", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x0f]},
{OP_fmul, 0xdccf10, "fmul", st7, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcom2". We assume an alias for fcom
* and do not include in the encode chain.
*/
{OP_fcom, 0xdcd010, "fcom", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fcom, 0xdcd110, "fcom", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd210, "fcom", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd310, "fcom", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd410, "fcom", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd510, "fcom", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd610, "fcom", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd710, "fcom", xx, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcomp3". We assume an alias for fcomp
* and do not include in the encode chain.
*/
{OP_fcomp, 0xdcd810, "fcomp", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fcomp, 0xdcd910, "fcomp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcda10, "fcomp", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdb10, "fcomp", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdc10, "fcomp", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdd10, "fcomp", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcde10, "fcomp", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdf10, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsubr, 0xdce010, "fsubr", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x21]}, /* e0 = [0x20] */
{OP_fsubr, 0xdce110, "fsubr", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x22]},
{OP_fsubr, 0xdce210, "fsubr", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x23]},
{OP_fsubr, 0xdce310, "fsubr", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x24]},
{OP_fsubr, 0xdce410, "fsubr", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x25]},
{OP_fsubr, 0xdce510, "fsubr", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x26]},
{OP_fsubr, 0xdce610, "fsubr", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x27]},
{OP_fsubr, 0xdce710, "fsubr", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsub, 0xdce810, "fsub", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x29]}, /* e8 = [0x28] */
{OP_fsub, 0xdce910, "fsub", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x2a]},
{OP_fsub, 0xdcea10, "fsub", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x2b]},
{OP_fsub, 0xdceb10, "fsub", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x2c]},
{OP_fsub, 0xdcec10, "fsub", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x2d]},
{OP_fsub, 0xdced10, "fsub", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x2e]},
{OP_fsub, 0xdcee10, "fsub", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x2f]},
{OP_fsub, 0xdcef10, "fsub", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivr, 0xdcf010, "fdivr", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x31]}, /* f0 = [0x30] */
{OP_fdivr, 0xdcf110, "fdivr", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x32]},
{OP_fdivr, 0xdcf210, "fdivr", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x33]},
{OP_fdivr, 0xdcf310, "fdivr", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x34]},
{OP_fdivr, 0xdcf410, "fdivr", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x35]},
{OP_fdivr, 0xdcf510, "fdivr", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x36]},
{OP_fdivr, 0xdcf610, "fdivr", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x37]},
{OP_fdivr, 0xdcf710, "fdivr", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdiv, 0xdcf810, "fdiv", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x39]}, /* f8 = [0x38] */
{OP_fdiv, 0xdcf910, "fdiv", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x3a]},
{OP_fdiv, 0xdcfa10, "fdiv", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x3b]},
{OP_fdiv, 0xdcfb10, "fdiv", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x3c]},
{OP_fdiv, 0xdcfc10, "fdiv", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x3d]},
{OP_fdiv, 0xdcfd10, "fdiv", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x3e]},
{OP_fdiv, 0xdcfe10, "fdiv", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x3f]},
{OP_fdiv, 0xdcff10, "fdiv", st7, xx, st0, st7, xx, mrm, x, END_LIST},
},
{ /* dd = [5] */
{OP_ffree, 0xddc010, "ffree", st0, xx, xx, xx, xx, mrm, x, tfh[5][0x01]}, /* c0 = [0x00] */
{OP_ffree, 0xddc110, "ffree", st1, xx, xx, xx, xx, mrm, x, tfh[5][0x02]},
{OP_ffree, 0xddc210, "ffree", st2, xx, xx, xx, xx, mrm, x, tfh[5][0x03]},
{OP_ffree, 0xddc310, "ffree", st3, xx, xx, xx, xx, mrm, x, tfh[5][0x04]},
{OP_ffree, 0xddc410, "ffree", st4, xx, xx, xx, xx, mrm, x, tfh[5][0x05]},
{OP_ffree, 0xddc510, "ffree", st5, xx, xx, xx, xx, mrm, x, tfh[5][0x06]},
{OP_ffree, 0xddc610, "ffree", st6, xx, xx, xx, xx, mrm, x, tfh[5][0x07]},
{OP_ffree, 0xddc710, "ffree", st7, xx, xx, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fxch4". We assume an alias for fxch
* and do not include in the encode chain.
*/
{OP_fxch, 0xddc810, "fxch", st0, st0, st0, st0, xx, mrm, x, END_LIST}, /* c8 = [0x08] */
{OP_fxch, 0xddc910, "fxch", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxch, 0xddca10, "fxch", st0, st2, st0, st2, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcb10, "fxch", st0, st3, st0, st3, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcc10, "fxch", st0, st4, st0, st4, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcd10, "fxch", st0, st5, st0, st5, xx, mrm, x, END_LIST},
{OP_fxch, 0xddce10, "fxch", st0, st6, st0, st6, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
{OP_fst, 0xddd010, "fst", st0, xx, st0, xx, xx, mrm, x, tfh[5][0x11]}, /* d0 = [0x10] */
{OP_fst, 0xddd110, "fst", st1, xx, st0, xx, xx, mrm, x, tfh[5][0x12]},
{OP_fst, 0xddd210, "fst", st2, xx, st0, xx, xx, mrm, x, tfh[5][0x13]},
{OP_fst, 0xddd310, "fst", st3, xx, st0, xx, xx, mrm, x, tfh[5][0x14]},
{OP_fst, 0xddd410, "fst", st4, xx, st0, xx, xx, mrm, x, tfh[5][0x15]},
{OP_fst, 0xddd510, "fst", st5, xx, st0, xx, xx, mrm, x, tfh[5][0x16]},
{OP_fst, 0xddd610, "fst", st6, xx, st0, xx, xx, mrm, x, tfh[5][0x17]},
{OP_fst, 0xddd710, "fst", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xddd810, "fstp", st0, xx, st0, xx, xx, mrm, x, tfh[5][0x19]}, /* d8 = [0x18] */
{OP_fstp, 0xddd910, "fstp", st1, xx, st0, xx, xx, mrm, x, tfh[5][0x1a]},
{OP_fstp, 0xddda10, "fstp", st2, xx, st0, xx, xx, mrm, x, tfh[5][0x1b]},
{OP_fstp, 0xdddb10, "fstp", st3, xx, st0, xx, xx, mrm, x, tfh[5][0x1c]},
{OP_fstp, 0xdddc10, "fstp", st4, xx, st0, xx, xx, mrm, x, tfh[5][0x1d]},
{OP_fstp, 0xdddd10, "fstp", st5, xx, st0, xx, xx, mrm, x, tfh[5][0x1e]},
{OP_fstp, 0xddde10, "fstp", st6, xx, st0, xx, xx, mrm, x, tfh[5][0x1f]},
{OP_fstp, 0xdddf10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fucom, 0xdde010, "fucom", xx, xx, st0, st0, xx, mrm, x, tfh[5][0x21]}, /* e0 = [0x20] */
{OP_fucom, 0xdde110, "fucom", xx, xx, st1, st0, xx, mrm, x, tfh[5][0x22]},
{OP_fucom, 0xdde210, "fucom", xx, xx, st2, st0, xx, mrm, x, tfh[5][0x23]},
{OP_fucom, 0xdde310, "fucom", xx, xx, st3, st0, xx, mrm, x, tfh[5][0x24]},
{OP_fucom, 0xdde410, "fucom", xx, xx, st4, st0, xx, mrm, x, tfh[5][0x25]},
{OP_fucom, 0xdde510, "fucom", xx, xx, st5, st0, xx, mrm, x, tfh[5][0x26]},
{OP_fucom, 0xdde610, "fucom", xx, xx, st6, st0, xx, mrm, x, tfh[5][0x27]},
{OP_fucom, 0xdde710, "fucom", xx, xx, st7, st0, xx, mrm, x, END_LIST},
{OP_fucomp, 0xdde810, "fucomp", xx, xx, st0, st0, xx, mrm, x, tfh[5][0x29]}, /* e8 = [0x28] */
{OP_fucomp, 0xdde910, "fucomp", xx, xx, st1, st0, xx, mrm, x, tfh[5][0x2a]},
{OP_fucomp, 0xddea10, "fucomp", xx, xx, st2, st0, xx, mrm, x, tfh[5][0x2b]},
{OP_fucomp, 0xddeb10, "fucomp", xx, xx, st3, st0, xx, mrm, x, tfh[5][0x2c]},
{OP_fucomp, 0xddec10, "fucomp", xx, xx, st4, st0, xx, mrm, x, tfh[5][0x2d]},
{OP_fucomp, 0xdded10, "fucomp", xx, xx, st5, st0, xx, mrm, x, tfh[5][0x2e]},
{OP_fucomp, 0xddee10, "fucomp", xx, xx, st6, st0, xx, mrm, x, tfh[5][0x2f]},
{OP_fucomp, 0xddef10, "fucomp", xx, xx, st7, st0, xx, mrm, x, END_LIST},
{INVALID, 0xddf010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f0 = [0x30] */
{INVALID, 0xddf110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xddf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* de = [6]*/
{OP_faddp, 0xdec010, "faddp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x01]}, /* c0 = [0x00] */
{OP_faddp, 0xdec110, "faddp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x02]},
{OP_faddp, 0xdec210, "faddp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x03]},
{OP_faddp, 0xdec310, "faddp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x04]},
{OP_faddp, 0xdec410, "faddp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x05]},
{OP_faddp, 0xdec510, "faddp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x06]},
{OP_faddp, 0xdec610, "faddp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x07]},
{OP_faddp, 0xdec710, "faddp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fmulp, 0xdec810, "fmulp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x09]}, /* c8 = [0x08] */
{OP_fmulp, 0xdec910, "fmulp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x0a]},
{OP_fmulp, 0xdeca10, "fmulp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x0b]},
{OP_fmulp, 0xdecb10, "fmulp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x0c]},
{OP_fmulp, 0xdecc10, "fmulp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x0d]},
{OP_fmulp, 0xdecd10, "fmulp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x0e]},
{OP_fmulp, 0xdece10, "fmulp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x0f]},
{OP_fmulp, 0xdecf10, "fmulp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcomp5". We assume an alias for fcomp
* and do not include in the encode chain.
*/
{OP_fcomp, 0xded010, "fcomp", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fcomp, 0xded110, "fcomp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded210, "fcomp", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded310, "fcomp", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded410, "fcomp", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded510, "fcomp", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded610, "fcomp", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded710, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{INVALID, 0xded810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* d8 = [0x18] */
{OP_fcompp, 0xded910, "fcompp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{INVALID, 0xdeda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdede10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fsubrp, 0xdee010, "fsubrp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x21]}, /* e0 = [0x20] */
{OP_fsubrp, 0xdee110, "fsubrp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x22]},
{OP_fsubrp, 0xdee210, "fsubrp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x23]},
{OP_fsubrp, 0xdee310, "fsubrp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x24]},
{OP_fsubrp, 0xdee410, "fsubrp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x25]},
{OP_fsubrp, 0xdee510, "fsubrp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x26]},
{OP_fsubrp, 0xdee610, "fsubrp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x27]},
{OP_fsubrp, 0xdee710, "fsubrp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsubp, 0xdee810, "fsubp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x29]}, /* e8 = [0x28] */
{OP_fsubp, 0xdee910, "fsubp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x2a]},
{OP_fsubp, 0xdeea10, "fsubp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x2b]},
{OP_fsubp, 0xdeeb10, "fsubp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x2c]},
{OP_fsubp, 0xdeec10, "fsubp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x2d]},
{OP_fsubp, 0xdeed10, "fsubp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x2e]},
{OP_fsubp, 0xdeee10, "fsubp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x2f]},
{OP_fsubp, 0xdeef10, "fsubp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivrp, 0xdef010, "fdivrp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x31]}, /* f0 = [0x30] */
{OP_fdivrp, 0xdef110, "fdivrp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x32]},
{OP_fdivrp, 0xdef210, "fdivrp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x33]},
{OP_fdivrp, 0xdef310, "fdivrp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x34]},
{OP_fdivrp, 0xdef410, "fdivrp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x35]},
{OP_fdivrp, 0xdef510, "fdivrp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x36]},
{OP_fdivrp, 0xdef610, "fdivrp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x37]},
{OP_fdivrp, 0xdef710, "fdivrp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivp, 0xdef810, "fdivp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x39]}, /* f8 = [0x38] */
{OP_fdivp, 0xdef910, "fdivp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x3a]},
{OP_fdivp, 0xdefa10, "fdivp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x3b]},
{OP_fdivp, 0xdefb10, "fdivp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x3c]},
{OP_fdivp, 0xdefc10, "fdivp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x3d]},
{OP_fdivp, 0xdefd10, "fdivp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x3e]},
{OP_fdivp, 0xdefe10, "fdivp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x3f]},
{OP_fdivp, 0xdeff10, "fdivp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
},
{ /* df = [7] */
/* Undocumented by Intel, but is on p152 of "AMD Athlon
* Processor x86 Code Optimization Guide."
*/
{OP_ffreep, 0xdfc010, "ffreep", st0, xx, xx, xx, xx, mrm, x, tfh[7][0x01]}, /* c0 = [0x00] */
{OP_ffreep, 0xdfc110, "ffreep", st1, xx, xx, xx, xx, mrm, x, tfh[7][0x02]},
{OP_ffreep, 0xdfc210, "ffreep", st2, xx, xx, xx, xx, mrm, x, tfh[7][0x03]},
{OP_ffreep, 0xdfc310, "ffreep", st3, xx, xx, xx, xx, mrm, x, tfh[7][0x04]},
{OP_ffreep, 0xdfc410, "ffreep", st4, xx, xx, xx, xx, mrm, x, tfh[7][0x05]},
{OP_ffreep, 0xdfc510, "ffreep", st5, xx, xx, xx, xx, mrm, x, tfh[7][0x06]},
{OP_ffreep, 0xdfc610, "ffreep", st6, xx, xx, xx, xx, mrm, x, tfh[7][0x07]},
{OP_ffreep, 0xdfc710, "ffreep", st7, xx, xx, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fxch7". We assume an alias for fxch
* and do not include in the encode chain.
*/
{OP_fxch, 0xdfc810, "fxch", st0, st0, st0, st0, xx, mrm, x, END_LIST}, /* c8 = [0x08] */
{OP_fxch, 0xdfc910, "fxch", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfca10, "fxch", st0, st2, st0, st2, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcb10, "fxch", st0, st3, st0, st3, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcc10, "fxch", st0, st4, st0, st4, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcd10, "fxch", st0, st5, st0, st5, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfce10, "fxch", st0, st6, st0, st6, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fstp8". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xdfd010, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fstp, 0xdfd110, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd210, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd310, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd410, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd510, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd610, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd710, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fstp9". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xdfd810, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fstp, 0xdfd910, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfda10, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdb10, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdc10, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdd10, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfde10, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdf10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fnstsw, 0xdfe010, "fnstsw", ax, xx, xx, xx, xx, mrm, x, END_LIST}, /* e0 = [0x20] */ /*FIXME:w/ preceding fwait instr, this is "fstsw"*/
{INVALID, 0xdfe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fucomip, 0xdfe810, "fucomip", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x29]}, /* e8 = [0x28] */
{OP_fucomip, 0xdfe910, "fucomip", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2a]},
{OP_fucomip, 0xdfea10, "fucomip", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2b]},
{OP_fucomip, 0xdfeb10, "fucomip", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2c]},
{OP_fucomip, 0xdfec10, "fucomip", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2d]},
{OP_fucomip, 0xdfed10, "fucomip", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2e]},
{OP_fucomip, 0xdfee10, "fucomip", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2f]},
{OP_fucomip, 0xdfef10, "fucomip", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{OP_fcomip, 0xdff010, "fcomip", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x31]}, /* f0 = [0x30] */
{OP_fcomip, 0xdff110, "fcomip", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x32]},
{OP_fcomip, 0xdff210, "fcomip", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x33]},
{OP_fcomip, 0xdff310, "fcomip", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x34]},
{OP_fcomip, 0xdff410, "fcomip", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x35]},
{OP_fcomip, 0xdff510, "fcomip", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x36]},
{OP_fcomip, 0xdff610, "fcomip", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x37]},
{OP_fcomip, 0xdff710, "fcomip", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{INVALID, 0xdff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Suffix extensions: 3DNow! and 3DNow!+
* Since there are only 24 of them, we save space by having a
* table of 256 indices instead of 256 instr_info_t structs.
*/
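/* Lookup sketch (illustrative assumption, not part of the tables themselves):
 * a decoder holding the 3DNow! suffix byte would do roughly
 *     info = &suffix_extensions[suffix_index[suffix_byte]];
 * so any suffix value not assigned below falls through index 0, which is the
 * OP_unknown_3dnow entry at the top of suffix_extensions.
 */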
const byte suffix_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20,18, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21,19, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,22, 0, 0, 0,23, 0, /* 8 */
4, 0, 0, 0, 7, 0,10,13, 0, 0,16, 0, 0, 0, 2, 0, /* 9 */
5, 0, 0, 0, 8, 0,11,14, 0, 0,17, 0, 0, 0, 3, 0, /* A */
6, 0, 0, 0, 9, 0,12,15, 0, 0, 0,24, 0, 0, 0, 1, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t suffix_extensions[] = {
/* Rather than forging an exception let's anticipate future additions: we know
* (pretty sure anyway) that they'll have the same length and operand structure.
* Won't encode properly from Level 4 but that's ok.
*/
{OP_unknown_3dnow, 0x000f0f90, "unknown 3DNow",
Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 0*/
{OP_pavgusb , 0xbf0f0f90, "pavgusb", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 1*/
{OP_pfadd , 0x9e0f0f90, "pfadd", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 2*/
{OP_pfacc , 0xae0f0f90, "pfacc", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 3*/
{OP_pfcmpge , 0x900f0f90, "pfcmpge", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 4*/
{OP_pfcmpgt , 0xa00f0f90, "pfcmpgt", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 5*/
{OP_pfcmpeq , 0xb00f0f90, "pfcmpeq", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 6*/
{OP_pfmin , 0x940f0f90, "pfmin" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 7*/
{OP_pfmax , 0xa40f0f90, "pfmax" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 8*/
{OP_pfmul , 0xb40f0f90, "pfmul" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 9*/
{OP_pfrcp , 0x960f0f90, "pfrcp" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*10*/
{OP_pfrcpit1, 0xa60f0f90, "pfrcpit1", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*11*/
{OP_pfrcpit2, 0xb60f0f90, "pfrcpit2", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*12*/
{OP_pfrsqrt , 0x970f0f90, "pfrsqrt", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*13*/
{OP_pfrsqit1, 0xa70f0f90, "pfrsqit1", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*14*/
{OP_pmulhrw , 0xb70f0f90, "pmulhrw", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*15*/
{OP_pfsub , 0x9a0f0f90, "pfsub" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*16*/
{OP_pfsubr , 0xaa0f0f90, "pfsubr" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*17*/
{OP_pi2fd , 0x0d0f0f90, "pi2fd" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*18*/
{OP_pf2id , 0x1d0f0f90, "pf2id", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*19*/
{OP_pi2fw , 0x0c0f0f90, "pi2fw" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*20*/
{OP_pf2iw , 0x1c0f0f90, "pf2iw", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*21*/
{OP_pfnacc , 0x8a0f0f90, "pfnacc" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*22*/
{OP_pfpnacc , 0x8e0f0f90, "pfpnacc", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*23*/
{OP_pswapd , 0xbb0f0f90, "pswapd" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*24*/
};
/****************************************************************************
* To handle more than 2 dests or 3 sources we chain on extra instructions.
* All cases where we have extra operands are single-encoding-only instructions,
* so we use the list field to point to here.
* N.B.: the size of this table is hardcoded in decode.c.
* Also, only implicit operands are in these instruction extensions!!!
*/
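/* Chaining sketch drawn from the first entries below: pusha has more implicit
 * sources than one instr_info_t can hold, so its continuation entry at index 0
 * lists eCX/eDX/eBP and links through exop[0x01] to a second OP_CONTD entry
 * holding eSI/eDI, which terminates the chain with END_LIST.
 */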
const instr_info_t extra_operands[] =
{
/* 0 */
{OP_CONTD, 0x000000, "<pusha cont'd>", xx, xx, eCX, eDX, eBP, xop, x, exop[0x01]},
{OP_CONTD, 0x000000, "<pusha cont'd>", xx, xx, eSI, eDI, xx, no, x, END_LIST},
/* 2 */
{OP_CONTD, 0x000000, "<popa cont'd>", eBX, eCX, xx, xx, xx, xop, x, exop[0x03]},
{OP_CONTD, 0x000000, "<popa cont'd>", eDX, eBP, xx, xx, xx, xop, x, exop[0x04]},
{OP_CONTD, 0x000000, "<popa cont'd>", eSI, eDI, xx, xx, xx, no, x, END_LIST},
/* 5 */
{OP_CONTD, 0x000000, "<enter cont'd>", xbp, xx, xbp, xx, xx, no, x, END_LIST},
/* 6 */
{OP_CONTD, 0x000000, "<cpuid cont'd>", ecx, edx, xx, xx, xx, no, x, END_LIST},
/* 7 */
{OP_CONTD, 0x000000, "<cmpxchg8b cont'd>", eDX, xx, eCX, eBX, xx, mrm, fWZ, END_LIST},
{OP_CONTD,0x663a6018, "<pcmpestrm cont'd", xx, xx, eax, edx, xx, mrm|reqp, fW6, END_LIST},
{OP_CONTD,0x663a6018, "<pcmpestri cont'd", xx, xx, eax, edx, xx, mrm|reqp, fW6, END_LIST},
/* 10 */
{OP_CONTD,0xf90f0177, "<rdtscp cont'd>", ecx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_CONTD,0x663a6018, "<vpcmpestrm cont'd", xx, xx, eax, edx, xx, mrm|vex|reqp, fW6, END_LIST},
{OP_CONTD,0x663a6018, "<vpcmpestri cont'd", xx, xx, eax, edx, xx, mrm|vex|reqp, fW6, END_LIST},
{OP_CONTD,0x0f3710, "<getsec cont'd", ecx, xx, xx, xx, xx, predcx, x, END_LIST},
/* 14 */
{OP_CONTD,0x66389818, "<vfmadd132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389858, "<vfmadd132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 16 */
{OP_CONTD,0x6638a818, "<vfmadd213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638a858, "<vfmadd213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 18 */
{OP_CONTD,0x6638b818, "<vfmadd231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638b858, "<vfmadd231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 20 */
{OP_CONTD,0x66389918, "<vfmadd132ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389958, "<vfmadd132sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 22 */
{OP_CONTD,0x6638a918, "<vfmadd213ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638a958, "<vfmadd213sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 24 */
{OP_CONTD,0x6638b918, "<vfmadd231ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638b958, "<vfmadd231sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 26 */
{OP_CONTD,0x66389618, "<vfmaddsub132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389658, "<vfmaddsub132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 28 */
{OP_CONTD,0x6638a618, "<vfmaddsub213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638a658, "<vfmaddsub213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 30 */
{OP_CONTD,0x6638b618, "<vfmaddsub231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638b658, "<vfmaddsub231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 32 */
{OP_CONTD,0x66389718, "<vfmsubadd132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389758, "<vfmsubadd132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 34 */
{OP_CONTD,0x6638a718, "<vfmsubadd213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638a758, "<vfmsubadd213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 36 */
{OP_CONTD,0x6638b718, "<vfmsubadd231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638b758, "<vfmsubadd231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 38 */
{OP_CONTD,0x66389a18, "<vfmsub132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389a58, "<vfmsub132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 40 */
{OP_CONTD,0x6638aa18, "<vfmsub213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638aa58, "<vfmsub213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 42 */
{OP_CONTD,0x6638ba18, "<vfmsub231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638ba58, "<vfmsub231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 44 */
{OP_CONTD,0x66389b18, "<vfmsub132ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389b58, "<vfmsub132sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 46 */
{OP_CONTD,0x6638ab18, "<vfmsub213ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638ab58, "<vfmsub213sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 48 */
{OP_CONTD,0x6638bb18, "<vfmsub231ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638bb58, "<vfmsub231sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 50 */
{OP_CONTD,0x66389c18, "<vfnmadd132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389c58, "<vfnmadd132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 52 */
{OP_CONTD,0x6638ac18, "<vfnmadd213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638ac58, "<vfnmadd213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 54 */
{OP_CONTD,0x6638bc18, "<vfnmadd231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638bc58, "<vfnmadd231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 56 */
{OP_CONTD,0x66389d18, "<vfnmadd132ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389d58, "<vfnmadd132sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 58 */
{OP_CONTD,0x6638ad18, "<vfnmadd213ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638ad58, "<vfnmadd213sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 60 */
{OP_CONTD,0x6638bd18, "<vfnmadd231ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638bd58, "<vfnmadd231sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 62 */
{OP_CONTD,0x66389e18, "<vfnmsub132ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389e58, "<vfnmsub132pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 64 */
{OP_CONTD,0x6638ae18, "<vfnmsub213ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638ae58, "<vfnmsub213pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 66 */
{OP_CONTD,0x6638be18, "<vfnmsub231ps cont'd", xx, xx, Ves, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638be58, "<vfnmsub231pd cont'd", xx, xx, Ved, xx, xx, mrm|evex, x, END_LIST},
/* 68 */
{OP_CONTD,0x66389f18, "<vfnmsub132ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x66389f58, "<vfnmsub132sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 70 */
{OP_CONTD,0x6638af18, "<vfnmsub213ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638af58, "<vfnmsub213sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 72 */
{OP_CONTD,0x6638bf18, "<vfnmsub231ss cont'd", xx, xx, Vss, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD,0x6638bf58, "<vfnmsub231sd cont'd", xx, xx, Vsd, xx, xx, mrm|evex, x, END_LIST},
/* 74 */
{OP_CONTD, 0x663a1818, "vinsertf32x4 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD, 0x663a1858, "vinsertf64x2 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
/* 76 */
{OP_CONTD, 0x663a1a18, "vinsertf32x8 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD, 0x663a1a58, "vinsertf64x4 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
/* 78 */
{OP_CONTD, 0x663a3818, "vinserti32x4 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD, 0x663a3858, "vinserti64x2 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
/* 80 */
{OP_CONTD, 0x663a3a18, "vinserti32x8 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD, 0x663a3a58, "vinserti64x4 cont'd", xx, xx, Wdq, xx, xx, mrm|evex, x, END_LIST},
/* 82 */
{OP_CONTD, 0x663a3e18, "vpcmpub cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a3f18, "vpcmpb cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
/* 84 */
{OP_CONTD, 0x663a3e18, "vpcmpuw cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a3f18, "vpcmpw cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
/* 86 */
{OP_CONTD, 0x663a1e18, "vpcmpud cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a1f18, "vpcmpd cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
/* 88 */
{OP_CONTD, 0x663a1e18, "vpcmpuq cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a1f18, "vpcmpq cont'd", xx, xx, We, xx, xx, evex|mrm, x, END_LIST},
/* 90 */
{OP_CONTD, 0x0fc210, "vcmpps cont'd", xx, xx, Wes, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0xf30fc210, "vcmpss cont'd", xx, xx, Wss, xx, xx, evex|mrm, x, END_LIST},
/* 92 */
{OP_CONTD, 0x660fc210, "vcmppd cont'd", xx, xx, Wed, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0xf20fc210, "vcmpsd cont'd", xx, xx, Wsd, xx, xx, evex|mrm, x, END_LIST},
/* 94 */
{OP_CONTD, 0x0fc610, "vshufps cont'd", xx, xx, Wes, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x660fc650, "vshufpd cont'd", xx, xx, Wed, xx, xx, evex|mrm, x, END_LIST},
/* 96 */
{OP_CONTD, 0x663a2318, "vshuff32x4 cont'd", xx, xx, Wfs, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a2358, "vshuff64x2 cont'd", xx, xx, Wfd, xx, xx, evex|mrm, x, END_LIST},
/* 98 */
{OP_CONTD, 0x663a4318, "vshufi32x4 cont'd", xx, xx, Wfs, xx, xx, evex|mrm, x, END_LIST},
{OP_CONTD, 0x663a4358, "vshufi64x2 cont'd", xx, xx, Wfd, xx, xx, evex|mrm, x, END_LIST},
/* 100 */
{OP_CONTD, 0x663a0f18, "vpalignr cont'd", xx, xx, We, xx, xx, mrm|evex, x, END_LIST},
{OP_CONTD, 0x663a0318, "valignd cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 102 */
{OP_CONTD, 0x663a0358, "valignq cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a5418, "vfixupimmps cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 104 */
{OP_CONTD, 0x663a5458, "vfixupimmpd cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a5518, "vfixupimmss cont'd", xx, xx, Wd_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 106 */
{OP_CONTD, 0x663a5558, "vfixupimmsd cont'd", xx, xx, Wq_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a2718, "vgetmantss cont'd", xx, xx, Wd_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 108 */
{OP_CONTD, 0x663a2758, "vgetmantsd cont'd", xx, xx, Wq_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a5018, "vrangeps cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 110 */
{OP_CONTD, 0x663a5058, "vrangepd cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a5118, "vrangess cont'd", xx, xx, Wd_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 112 */
{OP_CONTD, 0x663a5158, "vrangesd cont'd", xx, xx, Wq_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a5718, "vreducess cont'd", xx, xx, Wd_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 114 */
{OP_CONTD, 0x663a5758, "vreducesd cont'd", xx, xx, Wq_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a0a18, "vrndscaless cont'd", xx, xx, Wd_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 116 */
{OP_CONTD, 0x663a0b18, "vrndscalesd cont'd", xx, xx, Wq_dq, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a4218, "vdbpsadbw cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
/* 118 */
{OP_CONTD, 0x663a2518, "vpternlogd cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
{OP_CONTD, 0x663a2558, "vpternlogq cont'd", xx, xx, We, xx, xx, mrm|evex|reqp, x, END_LIST},
};
/* clang-format on */
| 1 | 17,308 | First, any changes here should be synchronized with instr_compute_VSIB_index(). Second, the original looks correct to me: the first letter of the opcode name suffix is the index size, while the second is the memory size. So "vpgatherdq" is a d-sized index and q-sized memory. The operand size we store for memory accesses is always the memory size, not the address size, and for VSIB we store a single memory element size. It looks like we have no inside-opnd_t storage of the index size: that's done by dispatch on opcode it seems. I have a bunch of notes of discussions on how to handle VSIB: we decided to bail on encoding too much inside the opnd_t I guess. | DynamoRIO-dynamorio | c |
@@ -118,6 +118,9 @@ class ExternalProgramTask(luigi.Task):
file_object.seek(0)
return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
+ def build_tracking_url(self, logs_output):
+ return logs_output
+
def run(self):
args = list(map(str, self.program_args()))
| 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Template tasks for running external programs as luigi tasks.
This module is primarily intended for when you need to call a single external
program or shell script, and it's enough to specify program arguments and
environment variables.
If you need to run multiple commands, chain them together or pipe output
from one command to the next, you're probably better off using something like
`plumbum`_, and wrapping plumbum commands in normal luigi
:py:class:`~luigi.task.Task` s.
.. _plumbum: https://plumbum.readthedocs.io/
"""
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from multiprocessing import Process
from time import sleep
import luigi
from luigi.parameter import ParameterVisibility
logger = logging.getLogger('luigi-interface')
class ExternalProgramTask(luigi.Task):
"""
Template task for running an external program in a subprocess
The program is run using :py:class:`subprocess.Popen`, with ``args`` passed
as a list, generated by :py:meth:`program_args` (where the first element should
be the executable). See :py:class:`subprocess.Popen` for details.
Your must override :py:meth:`program_args` to specify the arguments you want,
and you can optionally override :py:meth:`program_environment` if you want to
control the environment variables (see :py:class:`ExternalPythonProgramTask`
for an example).
By default, the output (stdout and stderr) of the run external program
is being captured and displayed after the execution has ended. This
behaviour can be overridden by passing ``--capture-output False``
"""
capture_output = luigi.BoolParameter(default=True, significant=False, positional=False)
stream_for_searching_tracking_url = luigi.parameter.ChoiceParameter(
var_type=str, choices=['none', 'stdout', 'stderr'], default='none',
significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
description="Stream for searching tracking URL")
"""
Used for defining which stream should be tracked for URL, may be set to 'stdout', 'stderr' or 'none'.
Default value is 'none', so URL tracking is not performed.
"""
tracking_url_pattern = luigi.OptionalParameter(
default=None, significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
description="Regex pattern used for searching URL in the logs of the external program")
"""
Regex pattern used for searching URL in the logs of the external program.
If a log line matches the regex, the first group in the matching is set as the tracking URL
for the job in the web UI. Example: 'Job UI is here: (https?://.*)'.
Default value is None, so URL tracking is not performed.
"""
def program_args(self):
"""
Override this method to map your task parameters to the program arguments
:return: list to pass as ``args`` to :py:class:`subprocess.Popen`
"""
raise NotImplementedError
def program_environment(self):
"""
Override this method to control environment variables for the program
:return: dict mapping environment variable names to values
"""
env = os.environ.copy()
return env
@property
def always_log_stderr(self):
"""
When True, stderr will be logged even if program execution succeeded
Override to False to log stderr only when program execution fails.
"""
return True
def _clean_output_file(self, file_object):
file_object.seek(0)
return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
def run(self):
args = list(map(str, self.program_args()))
logger.info('Running command: %s', ' '.join(args))
env = self.program_environment()
kwargs = {'env': env}
tmp_stdout, tmp_stderr = None, None
if self.capture_output:
tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile()
kwargs.update({'stdout': tmp_stdout, 'stderr': tmp_stderr})
try:
if self.stream_for_searching_tracking_url != 'none' and self.tracking_url_pattern is not None:
with self._proc_with_tracking_url_context(proc_args=args, proc_kwargs=kwargs) as proc:
proc.wait()
else:
proc = subprocess.Popen(args, **kwargs)
with ExternalProgramRunContext(proc):
proc.wait()
success = proc.returncode == 0
if self.capture_output:
stdout = self._clean_output_file(tmp_stdout)
stderr = self._clean_output_file(tmp_stderr)
if stdout:
logger.info('Program stdout:\n{}'.format(stdout))
if stderr:
if self.always_log_stderr or not success:
logger.info('Program stderr:\n{}'.format(stderr))
else:
stdout, stderr = None, None
if not success:
raise ExternalProgramRunError(
'Program failed with return code={}:'.format(proc.returncode),
args, env=env, stdout=stdout, stderr=stderr)
finally:
if self.capture_output:
tmp_stderr.close()
tmp_stdout.close()
@contextmanager
def _proc_with_tracking_url_context(self, proc_args, proc_kwargs):
time_to_sleep = 0.5
file_to_write = proc_kwargs.get(self.stream_for_searching_tracking_url)
proc_kwargs.update({self.stream_for_searching_tracking_url: subprocess.PIPE})
main_proc = subprocess.Popen(proc_args, **proc_kwargs)
pipe_to_read = main_proc.stderr if self.stream_for_searching_tracking_url == 'stderr' else main_proc.stdout
def _track_url_by_pattern():
"""
Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent.
If tmp_stdout is passed, also appends lines to this file.
"""
pattern = re.compile(self.tracking_url_pattern)
for new_line in iter(pipe_to_read.readline, ''):
if new_line:
if file_to_write:
file_to_write.write(new_line)
match = re.search(pattern, new_line.decode('utf-8'))
if match:
self.set_tracking_url(match.group(1))
else:
sleep(time_to_sleep)
track_proc = Process(target=_track_url_by_pattern)
try:
track_proc.start()
with ExternalProgramRunContext(main_proc):
yield main_proc
finally:
# need to wait a bit to let the subprocess read the last lines
track_proc.join(time_to_sleep * 2)
if track_proc.is_alive():
track_proc.terminate()
pipe_to_read.close()
class ExternalProgramRunContext(object):
def __init__(self, proc):
self.proc = proc
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
def kill_job(self, captured_signal=None, stack_frame=None):
self.proc.kill()
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
class ExternalProgramRunError(RuntimeError):
def __init__(self, message, args, env=None, stdout=None, stderr=None):
super(ExternalProgramRunError, self).__init__(message, args, env, stdout, stderr)
self.message = message
self.args = args
self.env = env
self.out = stdout
self.err = stderr
def __str__(self):
info = self.message
info += '\nCOMMAND: {}'.format(' '.join(self.args))
info += '\nSTDOUT: {}'.format(self.out or '[empty]')
info += '\nSTDERR: {}'.format(self.err or '[empty]')
env_string = None
if self.env:
env_string = ' '.join(['='.join([k, '\'{}\''.format(v)]) for k, v in self.env.items()])
info += '\nENVIRONMENT: {}'.format(env_string or '[empty]')
# reset terminal color in case the ENVIRONMENT changes colors
info += '\033[m'
return info
class ExternalPythonProgramTask(ExternalProgramTask):
"""
Template task for running an external Python program in a subprocess
Simple extension of :py:class:`ExternalProgramTask`, adding two
:py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for
extending the ``PYTHONPATH``.
"""
virtualenv = luigi.Parameter(
default=None,
positional=False,
description='path to the virtualenv directory to use. It should point to '
'the directory containing the ``bin/activate`` file used for '
'enabling the virtualenv.')
extra_pythonpath = luigi.Parameter(
default=None,
positional=False,
description='extend the search path for modules by prepending this '
'value to the ``PYTHONPATH`` environment variable.')
def program_environment(self):
env = super(ExternalPythonProgramTask, self).program_environment()
if self.extra_pythonpath:
pythonpath = ':'.join([self.extra_pythonpath, env.get('PYTHONPATH', '')])
env.update({'PYTHONPATH': pythonpath})
if self.virtualenv:
# Make the same changes to the env that a normal venv/bin/activate script would
path = ':'.join(['{}/bin'.format(self.virtualenv), env.get('PATH', '')])
env.update({
'PATH': path,
'VIRTUAL_ENV': self.virtualenv
})
# remove PYTHONHOME env variable, if it exists
env.pop('PYTHONHOME', None)
return env
| 1 | 19,579 | Sorry for going back and forth. Adding docstring here would be very helpful for others to understand the need of this method. | spotify-luigi | py |
@@ -121,10 +121,7 @@ bool Creature::canSee(const Position& pos) const
bool Creature::canSeeCreature(const Creature* creature) const
{
- if (!canSeeInvisibility() && creature->isInvisible()) {
- return false;
- }
- return true;
+ return canSeeInvisibility() && creature->isInvisible();
}
void Creature::setSkull(Skulls_t newSkull) | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2016 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "creature.h"
#include "game.h"
#include "monster.h"
#include "configmanager.h"
#include "scheduler.h"
double Creature::speedA = 857.36;
double Creature::speedB = 261.29;
double Creature::speedC = -4795.01;
extern Game g_game;
extern ConfigManager g_config;
extern CreatureEvents* g_creatureEvents;
Creature::Creature() :
localMapCache(), isInternalRemoved(false)
{
referenceCounter = 0;
id = 0;
tile = nullptr;
direction = DIRECTION_SOUTH;
master = nullptr;
lootDrop = true;
skillLoss = true;
health = 1000;
healthMax = 1000;
mana = 0;
lastStep = 0;
lastStepCost = 1;
baseSpeed = 220;
varSpeed = 0;
followCreature = nullptr;
hasFollowPath = false;
eventWalk = 0;
cancelNextWalk = false;
forceUpdateFollowPath = false;
isMapLoaded = false;
isUpdatingPath = false;
attackedCreature = nullptr;
lastHitCreatureId = 0;
blockCount = 0;
blockTicks = 0;
walkUpdateTicks = 0;
creatureCheck = false;
inCheckCreaturesVector = false;
scriptEventsBitField = 0;
hiddenHealth = false;
skull = SKULL_NONE;
onIdleStatus();
}
Creature::~Creature()
{
for (Creature* summon : summons) {
summon->setAttackedCreature(nullptr);
summon->setMaster(nullptr);
summon->decrementReferenceCounter();
}
for (Condition* condition : conditions) {
condition->endCondition(this);
delete condition;
}
}
bool Creature::canSee(const Position& myPos, const Position& pos, int32_t viewRangeX, int32_t viewRangeY)
{
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (pos.z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (Position::getDistanceZ(myPos, pos) > 2) {
return false;
}
}
const int_fast32_t offsetz = myPos.getZ() - pos.getZ();
return (pos.getX() >= myPos.getX() - viewRangeX + offsetz) && (pos.getX() <= myPos.getX() + viewRangeX + offsetz)
&& (pos.getY() >= myPos.getY() - viewRangeY + offsetz) && (pos.getY() <= myPos.getY() + viewRangeY + offsetz);
}
bool Creature::canSee(const Position& pos) const
{
return canSee(getPosition(), pos, Map::maxViewportX, Map::maxViewportY);
}
bool Creature::canSeeCreature(const Creature* creature) const
{
if (!canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
void Creature::setSkull(Skulls_t newSkull)
{
skull = newSkull;
g_game.updateCreatureSkull(this);
}
int64_t Creature::getTimeSinceLastMove() const
{
if (lastStep) {
return OTSYS_TIME() - lastStep;
}
return std::numeric_limits<int64_t>::max();
}
int32_t Creature::getWalkDelay(Direction dir) const
{
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration(dir);
return stepDuration - (ct - lastStep);
}
int32_t Creature::getWalkDelay() const
{
//Used for auto-walking
if (lastStep == 0) {
return 0;
}
int64_t ct = OTSYS_TIME();
int64_t stepDuration = getStepDuration() * lastStepCost;
return stepDuration - (ct - lastStep);
}
void Creature::onThink(uint32_t interval)
{
if (!isMapLoaded && useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
if (followCreature && master != followCreature && !canSeeCreature(followCreature)) {
onCreatureDisappear(followCreature, false);
}
if (attackedCreature && master != attackedCreature && !canSeeCreature(attackedCreature)) {
onCreatureDisappear(attackedCreature, false);
}
blockTicks += interval;
if (blockTicks >= 1000) {
blockCount = std::min<uint32_t>(blockCount + 1, 2);
blockTicks = 0;
}
if (followCreature) {
walkUpdateTicks += interval;
if (forceUpdateFollowPath || walkUpdateTicks >= 2000) {
walkUpdateTicks = 0;
forceUpdateFollowPath = false;
isUpdatingPath = true;
}
}
if (isUpdatingPath) {
isUpdatingPath = false;
goToFollowCreature();
}
//scripting event - onThink
const CreatureEventList& thinkEvents = getCreatureEvents(CREATURE_EVENT_THINK);
for (CreatureEvent* thinkEvent : thinkEvents) {
thinkEvent->executeOnThink(this, interval);
}
}
void Creature::onAttacking(uint32_t interval)
{
if (!attackedCreature) {
return;
}
onAttacked();
attackedCreature->onAttacked();
if (g_game.isSightClear(getPosition(), attackedCreature->getPosition(), true)) {
doAttacking(interval);
}
}
void Creature::onIdleStatus()
{
if (getHealth() > 0) {
damageMap.clear();
lastHitCreatureId = 0;
}
}
void Creature::onWalk()
{
if (getWalkDelay() <= 0) {
Direction dir;
uint32_t flags = FLAG_IGNOREFIELDDAMAGE;
if (getNextStep(dir, flags)) {
ReturnValue ret = g_game.internalMoveCreature(this, dir, flags);
if (ret != RETURNVALUE_NOERROR) {
if (Player* player = getPlayer()) {
player->sendCancelMessage(ret);
player->sendCancelWalk();
}
forceUpdateFollowPath = true;
}
} else {
if (listWalkDir.empty()) {
onWalkComplete();
}
stopEventWalk();
}
}
if (cancelNextWalk) {
listWalkDir.clear();
onWalkAborted();
cancelNextWalk = false;
}
if (eventWalk != 0) {
eventWalk = 0;
addEventWalk();
}
}
void Creature::onWalk(Direction& dir)
{
if (hasCondition(CONDITION_DRUNK)) {
uint32_t r = uniform_random(0, 20);
if (r <= DIRECTION_DIAGONAL_MASK) {
if (r < DIRECTION_DIAGONAL_MASK) {
dir = static_cast<Direction>(r);
}
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_SAY, "Hicks!", false);
}
}
}
bool Creature::getNextStep(Direction& dir, uint32_t&)
{
if (listWalkDir.empty()) {
return false;
}
dir = listWalkDir.front();
listWalkDir.pop_front();
onWalk(dir);
return true;
}
void Creature::startAutoWalk(const std::forward_list<Direction>& listDir)
{
listWalkDir = listDir;
size_t size = 0;
for (auto it = listDir.begin(); it != listDir.end() && size <= 1; ++it) {
size++;
}
addEventWalk(size == 1);
}
void Creature::addEventWalk(bool firstStep)
{
cancelNextWalk = false;
if (getStepSpeed() <= 0) {
return;
}
if (eventWalk != 0) {
return;
}
int64_t ticks = getEventStepTicks(firstStep);
if (ticks <= 0) {
return;
}
// Take first step right away, but still queue the next
if (ticks == 1) {
g_game.checkCreatureWalk(getID());
}
eventWalk = g_scheduler.addEvent(createSchedulerTask(ticks, std::bind(&Game::checkCreatureWalk, &g_game, getID())));
}
void Creature::stopEventWalk()
{
if (eventWalk != 0) {
g_scheduler.stopEvent(eventWalk);
eventWalk = 0;
}
}
void Creature::updateMapCache()
{
Tile* tile;
const Position& myPos = getPosition();
Position pos(0, 0, myPos.z);
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
pos.x = myPos.getX() + x;
pos.y = myPos.getY() + y;
tile = g_game.map.getTile(pos);
updateTileCache(tile, pos);
}
}
}
void Creature::updateTileCache(const Tile* tile, int32_t dx, int32_t dy)
{
if (std::abs(dx) <= maxWalkCacheWidth && std::abs(dy) <= maxWalkCacheHeight) {
localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx] = tile && tile->queryAdd(0, *this, 1, FLAG_PATHFINDING | FLAG_IGNOREFIELDDAMAGE) == RETURNVALUE_NOERROR;
}
}
void Creature::updateTileCache(const Tile* tile, const Position& pos)
{
const Position& myPos = getPosition();
if (pos.z == myPos.z) {
int32_t dx = Position::getOffsetX(pos, myPos);
int32_t dy = Position::getOffsetY(pos, myPos);
updateTileCache(tile, dx, dy);
}
}
int32_t Creature::getWalkCache(const Position& pos) const
{
if (!useCacheMap()) {
return 2;
}
const Position& myPos = getPosition();
if (myPos.z != pos.z) {
return 0;
}
if (pos == myPos) {
return 1;
}
int32_t dx = Position::getOffsetX(pos, myPos);
if (std::abs(dx) <= maxWalkCacheWidth) {
int32_t dy = Position::getOffsetY(pos, myPos);
if (std::abs(dy) <= maxWalkCacheHeight) {
if (localMapCache[maxWalkCacheHeight + dy][maxWalkCacheWidth + dx]) {
return 1;
} else {
return 0;
}
}
}
//out of range
return 2;
}
void Creature::onAddTileItem(const Tile* tile, const Position& pos)
{
if (isMapLoaded && pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
void Creature::onUpdateTileItem(const Tile* tile, const Position& pos, const Item*,
const ItemType& oldType, const Item*, const ItemType& newType)
{
if (!isMapLoaded) {
return;
}
if (oldType.blockSolid || oldType.blockPathFind || newType.blockPathFind || newType.blockSolid) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType, const Item*)
{
if (!isMapLoaded) {
return;
}
if (iType.blockSolid || iType.blockPathFind || iType.isGroundTile()) {
if (pos.z == getPosition().z) {
updateTileCache(tile, pos);
}
}
}
void Creature::onCreatureAppear(Creature* creature, bool)
{
if (creature == this) {
if (useCacheMap()) {
isMapLoaded = true;
updateMapCache();
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onRemoveCreature(Creature* creature, bool)
{
onCreatureDisappear(creature, true);
if (creature == this) {
if (master && !master->isRemoved()) {
master->removeSummon(this);
}
} else if (isMapLoaded) {
if (creature->getPosition().z == getPosition().z) {
updateTileCache(creature->getTile(), creature->getPosition());
}
}
}
void Creature::onCreatureDisappear(const Creature* creature, bool isLogout)
{
if (attackedCreature == creature) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(isLogout);
}
if (followCreature == creature) {
setFollowCreature(nullptr);
onFollowCreatureDisappear(isLogout);
}
}
void Creature::onChangeZone(ZoneType_t zone)
{
if (attackedCreature && zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
onCreatureDisappear(attackedCreature, false);
}
}
void Creature::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
if (creature == this) {
lastStep = OTSYS_TIME();
lastStepCost = 1;
if (!teleport) {
if (oldPos.z != newPos.z) {
//floor change extra cost
lastStepCost = 2;
} else if (Position::getDistanceX(newPos, oldPos) >= 1 && Position::getDistanceY(newPos, oldPos) >= 1) {
//diagonal extra cost
lastStepCost = 3;
}
} else {
stopEventWalk();
}
if (!summons.empty()) {
//check if any of our summons is out of range (+/- 2 floors or 30 tiles away)
std::forward_list<Creature*> despawnList;
for (Creature* summon : summons) {
const Position& pos = summon->getPosition();
if (Position::getDistanceZ(newPos, pos) > 2 || (std::max<int32_t>(Position::getDistanceX(newPos, pos), Position::getDistanceY(newPos, pos)) > 30)) {
despawnList.push_front(summon);
}
}
for (Creature* despawnCreature : despawnList) {
g_game.removeCreature(despawnCreature, true);
}
}
if (newTile->getZone() != oldTile->getZone()) {
onChangeZone(getZone());
}
//update map cache
if (isMapLoaded) {
if (teleport || oldPos.z != newPos.z) {
updateMapCache();
} else {
Tile* tile;
const Position& myPos = getPosition();
Position pos;
if (oldPos.y > newPos.y) { //north
//shift y south
for (int32_t y = mapWalkHeight - 1; --y >= 0;) {
memcpy(localMapCache[y + 1], localMapCache[y], sizeof(localMapCache[y]));
}
//update 0
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
tile = g_game.map.getTile(myPos.getX() + x, myPos.getY() - maxWalkCacheHeight, myPos.z);
updateTileCache(tile, x, -maxWalkCacheHeight);
}
} else if (oldPos.y < newPos.y) { // south
//shift y north
for (int32_t y = 0; y <= mapWalkHeight - 2; ++y) {
memcpy(localMapCache[y], localMapCache[y + 1], sizeof(localMapCache[y]));
}
//update mapWalkHeight - 1
for (int32_t x = -maxWalkCacheWidth; x <= maxWalkCacheWidth; ++x) {
tile = g_game.map.getTile(myPos.getX() + x, myPos.getY() + maxWalkCacheHeight, myPos.z);
updateTileCache(tile, x, maxWalkCacheHeight);
}
}
if (oldPos.x < newPos.x) { // east
//shift y west
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = 0; x <= mapWalkWidth - 2; ++x) {
localMapCache[y][x] = localMapCache[y][x + 1];
}
}
//update mapWalkWidth - 1
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
tile = g_game.map.getTile(myPos.x + maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(tile, maxWalkCacheWidth, y);
}
} else if (oldPos.x > newPos.x) { // west
//shift y east
int32_t starty = 0;
int32_t endy = mapWalkHeight - 1;
int32_t dy = Position::getDistanceY(oldPos, newPos);
if (dy < 0) {
endy += dy;
} else if (dy > 0) {
starty = dy;
}
for (int32_t y = starty; y <= endy; ++y) {
for (int32_t x = mapWalkWidth - 1; --x >= 0;) {
localMapCache[y][x + 1] = localMapCache[y][x];
}
}
//update 0
for (int32_t y = -maxWalkCacheHeight; y <= maxWalkCacheHeight; ++y) {
tile = g_game.map.getTile(myPos.x - maxWalkCacheWidth, myPos.y + y, myPos.z);
updateTileCache(tile, -maxWalkCacheWidth, y);
}
}
updateTileCache(oldTile, oldPos);
}
}
} else {
if (isMapLoaded) {
const Position& myPos = getPosition();
if (newPos.z == myPos.z) {
updateTileCache(newTile, newPos);
}
if (oldPos.z == myPos.z) {
updateTileCache(oldTile, oldPos);
}
}
}
if (creature == followCreature || (creature == this && followCreature)) {
if (hasFollowPath) {
isUpdatingPath = true;
}
if (newPos.z != oldPos.z || !canSee(followCreature->getPosition())) {
onCreatureDisappear(followCreature, false);
}
}
if (creature == attackedCreature || (creature == this && attackedCreature)) {
if (newPos.z != oldPos.z || !canSee(attackedCreature->getPosition())) {
onCreatureDisappear(attackedCreature, false);
} else {
if (hasExtraSwing()) {
//our target is moving lets see if we can get in hit
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
if (newTile->getZone() != oldTile->getZone()) {
onAttackedCreatureChangeZone(attackedCreature->getZone());
}
}
}
}
void Creature::onDeath()
{
bool lastHitUnjustified = false;
bool mostDamageUnjustified = false;
Creature* lastHitCreature = g_game.getCreatureByID(lastHitCreatureId);
Creature* lastHitCreatureMaster;
if (lastHitCreature) {
lastHitUnjustified = lastHitCreature->onKilledCreature(this);
lastHitCreatureMaster = lastHitCreature->getMaster();
} else {
lastHitCreatureMaster = nullptr;
}
Creature* mostDamageCreature = nullptr;
const int64_t timeNow = OTSYS_TIME();
const uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
int32_t mostDamage = 0;
std::map<Creature*, uint64_t> experienceMap;
for (const auto& it : damageMap) {
if (Creature* attacker = g_game.getCreatureByID(it.first)) {
CountBlock_t cb = it.second;
if ((cb.total > mostDamage && (timeNow - cb.ticks <= inFightTicks))) {
mostDamage = cb.total;
mostDamageCreature = attacker;
}
if (attacker != this) {
uint64_t gainExp = getGainedExperience(attacker);
if (Player* player = attacker->getPlayer()) {
Party* party = player->getParty();
if (party && party->getLeader() && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
attacker = party->getLeader();
}
}
auto tmpIt = experienceMap.find(attacker);
if (tmpIt == experienceMap.end()) {
experienceMap[attacker] = gainExp;
} else {
tmpIt->second += gainExp;
}
}
}
}
for (const auto& it : experienceMap) {
it.first->onGainExperience(it.second, this);
}
if (mostDamageCreature) {
if (mostDamageCreature != lastHitCreature && mostDamageCreature != lastHitCreatureMaster) {
Creature* mostDamageCreatureMaster = mostDamageCreature->getMaster();
if (lastHitCreature != mostDamageCreatureMaster && (lastHitCreatureMaster == nullptr || mostDamageCreatureMaster != lastHitCreatureMaster)) {
mostDamageUnjustified = mostDamageCreature->onKilledCreature(this, false);
}
}
}
bool droppedCorpse = dropCorpse(lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
death(lastHitCreature);
if (master) {
master->removeSummon(this);
}
if (droppedCorpse) {
g_game.removeCreature(this, false);
}
}
bool Creature::dropCorpse(Creature* lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (!lootDrop && getMonster()) {
if (master) {
//scripting event - onDeath
const CreatureEventList& deathEvents = getCreatureEvents(CREATURE_EVENT_DEATH);
for (CreatureEvent* deathEvent : deathEvents) {
deathEvent->executeOnDeath(this, nullptr, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
}
g_game.addMagicEffect(getPosition(), CONST_ME_POFF);
} else {
Item* splash;
switch (getRace()) {
case RACE_VENOM:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_GREEN);
break;
case RACE_BLOOD:
splash = Item::CreateItem(ITEM_FULLSPLASH, FLUID_BLOOD);
break;
default:
splash = nullptr;
break;
}
Tile* tile = getTile();
if (splash) {
g_game.internalAddItem(tile, splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(splash);
}
Item* corpse = getCorpse(lastHitCreature, mostDamageCreature);
if (corpse) {
g_game.internalAddItem(tile, corpse, INDEX_WHEREEVER, FLAG_NOLIMIT);
g_game.startDecay(corpse);
}
//scripting event - onDeath
for (CreatureEvent* deathEvent : getCreatureEvents(CREATURE_EVENT_DEATH)) {
deathEvent->executeOnDeath(this, corpse, lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
if (corpse) {
dropLoot(corpse->getContainer(), lastHitCreature);
}
}
return true;
}
bool Creature::hasBeenAttacked(uint32_t attackerId)
{
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
return false;
}
return (OTSYS_TIME() - it->second.ticks) <= g_config.getNumber(ConfigManager::PZ_LOCKED);
}
Item* Creature::getCorpse(Creature*, Creature*)
{
return Item::CreateItem(getLookCorpse());
}
void Creature::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
int32_t oldHealth = health;
if (healthChange > 0) {
health += std::min<int32_t>(healthChange, getMaxHealth() - health);
} else {
health = std::max<int32_t>(0, health + healthChange);
}
if (sendHealthChange && oldHealth != health) {
g_game.addCreatureHealth(this);
}
}
void Creature::changeMana(int32_t manaChange)
{
if (manaChange > 0) {
mana += std::min<int32_t>(manaChange, getMaxMana() - mana);
} else {
mana = std::max<int32_t>(0, mana + manaChange);
}
}
void Creature::gainHealth(Creature* healer, int32_t healthGain)
{
changeHealth(healthGain);
if (healer) {
healer->onTargetCreatureGainHealth(this, healthGain);
}
}
void Creature::drainHealth(Creature* attacker, int32_t damage)
{
changeHealth(-damage, false);
if (attacker) {
attacker->onAttackedCreatureDrainHealth(this, damage);
}
}
void Creature::drainMana(Creature* attacker, int32_t manaLoss)
{
onAttacked();
changeMana(-manaLoss);
if (attacker) {
addDamagePoints(attacker, manaLoss);
}
}
BlockType_t Creature::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false */, bool checkArmor /* = false */, bool /* field = false */)
{
BlockType_t blockType = BLOCK_NONE;
if (isImmune(combatType)) {
damage = 0;
blockType = BLOCK_IMMUNITY;
} else if (checkDefense || checkArmor) {
bool hasDefense = false;
if (blockCount > 0) {
--blockCount;
hasDefense = true;
}
if (checkDefense && hasDefense) {
int32_t defense = getDefense();
damage -= uniform_random(defense / 2, defense);
if (damage <= 0) {
damage = 0;
blockType = BLOCK_DEFENSE;
checkArmor = false;
}
}
if (checkArmor) {
int32_t armorValue = getArmor();
if (armorValue > 1) {
double armorFormula = armorValue * 0.475;
int32_t armorReduction = static_cast<int32_t>(std::ceil(armorFormula));
damage -= uniform_random(
armorReduction,
armorReduction + static_cast<int32_t>(std::floor(armorFormula))
);
} else if (armorValue == 1) {
--damage;
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
if (hasDefense && blockType != BLOCK_NONE) {
onBlockHit();
}
}
if (attacker) {
attacker->onAttackedCreature(this);
attacker->onAttackedCreatureBlockHit(blockType);
}
onAttacked();
return blockType;
}
bool Creature::setAttackedCreature(Creature* creature)
{
if (creature) {
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
attackedCreature = nullptr;
return false;
}
attackedCreature = creature;
onAttackedCreature(attackedCreature);
attackedCreature->onAttacked();
} else {
attackedCreature = nullptr;
}
for (Creature* summon : summons) {
summon->setAttackedCreature(creature);
}
return true;
}
void Creature::getPathSearchParams(const Creature*, FindPathParams& fpp) const
{
fpp.fullPathSearch = !hasFollowPath;
fpp.clearSight = true;
fpp.maxSearchDist = 12;
fpp.minTargetDist = 1;
fpp.maxTargetDist = 1;
}
void Creature::goToFollowCreature()
{
if (followCreature) {
FindPathParams fpp;
getPathSearchParams(followCreature, fpp);
Monster* monster = getMonster();
if (monster && !monster->getMaster() && (monster->isFleeing() || fpp.maxTargetDist > 1)) {
Direction dir = DIRECTION_NONE;
if (monster->isFleeing()) {
monster->getDistanceStep(followCreature->getPosition(), dir, true);
} else { //maxTargetDist > 1
if (!monster->getDistanceStep(followCreature->getPosition(), dir)) {
// if we can't get anything then let the A* calculate
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
return;
}
}
if (dir != DIRECTION_NONE) {
listWalkDir.clear();
listWalkDir.push_front(dir);
hasFollowPath = true;
startAutoWalk(listWalkDir);
}
} else {
listWalkDir.clear();
if (getPathTo(followCreature->getPosition(), listWalkDir, fpp)) {
hasFollowPath = true;
startAutoWalk(listWalkDir);
} else {
hasFollowPath = false;
}
}
}
onFollowCreatureComplete(followCreature);
}
bool Creature::setFollowCreature(Creature* creature)
{
if (creature) {
if (followCreature == creature) {
return true;
}
const Position& creaturePos = creature->getPosition();
if (creaturePos.z != getPosition().z || !canSee(creaturePos)) {
followCreature = nullptr;
return false;
}
if (!listWalkDir.empty()) {
listWalkDir.clear();
onWalkAborted();
}
hasFollowPath = false;
forceUpdateFollowPath = false;
followCreature = creature;
isUpdatingPath = true;
} else {
isUpdatingPath = false;
followCreature = nullptr;
}
onFollowCreature(creature);
return true;
}
double Creature::getDamageRatio(Creature* attacker) const
{
uint32_t totalDamage = 0;
uint32_t attackerDamage = 0;
for (const auto& it : damageMap) {
const CountBlock_t& cb = it.second;
totalDamage += cb.total;
if (it.first == attacker->getID()) {
attackerDamage += cb.total;
}
}
if (totalDamage == 0) {
return 0;
}
return (static_cast<double>(attackerDamage) / totalDamage);
}
uint64_t Creature::getGainedExperience(Creature* attacker) const
{
return std::floor(getDamageRatio(attacker) * getLostExperience());
}
void Creature::addDamagePoints(Creature* attacker, int32_t damagePoints)
{
if (damagePoints <= 0) {
return;
}
uint32_t attackerId = attacker->id;
auto it = damageMap.find(attackerId);
if (it == damageMap.end()) {
CountBlock_t cb;
cb.ticks = OTSYS_TIME();
cb.total = damagePoints;
damageMap[attackerId] = cb;
} else {
it->second.total += damagePoints;
it->second.ticks = OTSYS_TIME();
}
lastHitCreatureId = attackerId;
}
void Creature::onAddCondition(ConditionType_t type)
{
if (type == CONDITION_PARALYZE && hasCondition(CONDITION_HASTE)) {
removeCondition(CONDITION_HASTE);
} else if (type == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
removeCondition(CONDITION_PARALYZE);
}
}
void Creature::onAddCombatCondition(ConditionType_t)
{
//
}
void Creature::onEndCondition(ConditionType_t)
{
//
}
void Creature::onTickCondition(ConditionType_t type, bool& bRemove)
{
const MagicField* field = getTile()->getFieldItem();
if (!field) {
return;
}
switch (type) {
case CONDITION_FIRE:
bRemove = (field->getCombatType() != COMBAT_FIREDAMAGE);
break;
case CONDITION_ENERGY:
bRemove = (field->getCombatType() != COMBAT_ENERGYDAMAGE);
break;
case CONDITION_POISON:
bRemove = (field->getCombatType() != COMBAT_EARTHDAMAGE);
break;
case CONDITION_FREEZING:
bRemove = (field->getCombatType() != COMBAT_ICEDAMAGE);
break;
case CONDITION_DAZZLED:
bRemove = (field->getCombatType() != COMBAT_HOLYDAMAGE);
break;
case CONDITION_CURSED:
bRemove = (field->getCombatType() != COMBAT_DEATHDAMAGE);
break;
case CONDITION_DROWN:
bRemove = (field->getCombatType() != COMBAT_DROWNDAMAGE);
break;
case CONDITION_BLEEDING:
bRemove = (field->getCombatType() != COMBAT_PHYSICALDAMAGE);
break;
default:
break;
}
}
void Creature::onCombatRemoveCondition(Condition* condition)
{
removeCondition(condition);
}
void Creature::onAttacked()
{
//
}
void Creature::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
target->addDamagePoints(this, points);
}
bool Creature::onKilledCreature(Creature* target, bool)
{
if (master) {
master->onKilledCreature(target);
}
//scripting event - onKill
const CreatureEventList& killEvents = getCreatureEvents(CREATURE_EVENT_KILL);
for (CreatureEvent* killEvent : killEvents) {
killEvent->executeOnKill(this, target);
}
return false;
}
void Creature::onGainExperience(uint64_t gainExp, Creature* target)
{
if (gainExp == 0 || !master) {
return;
}
gainExp /= 2;
master->onGainExperience(gainExp, target);
SpectatorVec list;
g_game.map.getSpectators(list, position, false, true);
if (list.empty()) {
return;
}
TextMessage message(MESSAGE_EXPERIENCE_OTHERS, ucfirst(getNameDescription()) + " gained " + std::to_string(gainExp) + (gainExp != 1 ? " experience points." : " experience point."));
message.position = position;
message.primary.color = TEXTCOLOR_WHITE_EXP;
message.primary.value = gainExp;
for (Creature* spectator : list) {
spectator->getPlayer()->sendTextMessage(message);
}
}
void Creature::addSummon(Creature* creature)
{
creature->setDropLoot(false);
creature->setLossSkill(false);
creature->setMaster(this);
creature->incrementReferenceCounter();
summons.push_back(creature);
}
void Creature::removeSummon(Creature* creature)
{
auto cit = std::find(summons.begin(), summons.end(), creature);
if (cit != summons.end()) {
creature->setDropLoot(false);
creature->setLossSkill(true);
creature->setMaster(nullptr);
creature->decrementReferenceCounter();
summons.erase(cit);
}
}
bool Creature::addCondition(Condition* condition, bool force/* = false*/)
{
if (condition == nullptr) {
return false;
}
if (!force && condition->getType() == CONDITION_HASTE && hasCondition(CONDITION_PARALYZE)) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceAddCondition, &g_game, getID(), condition)));
return false;
}
}
Condition* prevCond = getCondition(condition->getType(), condition->getId(), condition->getSubId());
if (prevCond) {
prevCond->addCondition(this, condition);
delete condition;
return true;
}
if (condition->startCondition(this)) {
conditions.push_back(condition);
onAddCondition(condition->getType());
return true;
}
delete condition;
return false;
}
bool Creature::addCombatCondition(Condition* condition)
{
//Caution: condition variable could be deleted after the call to addCondition
ConditionType_t type = condition->getType();
if (!addCondition(condition)) {
return false;
}
onAddCombatCondition(type);
return true;
}
void Creature::removeCondition(ConditionType_t type, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCondition(ConditionType_t type, ConditionId_t conditionId, bool force/* = false*/)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->getType() != type || condition->getId() != conditionId) {
++it;
continue;
}
if (!force && type == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), type)));
return;
}
}
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
}
}
void Creature::removeCombatCondition(ConditionType_t type)
{
std::vector<Condition*> removeConditions;
for (Condition* condition : conditions) {
if (condition->getType() == type) {
removeConditions.push_back(condition);
}
}
for (Condition* condition : removeConditions) {
onCombatRemoveCondition(condition);
}
}
void Creature::removeCondition(Condition* condition, bool force/* = false*/)
{
auto it = std::find(conditions.begin(), conditions.end(), condition);
if (it == conditions.end()) {
return;
}
if (!force && condition->getType() == CONDITION_PARALYZE) {
int64_t walkDelay = getWalkDelay();
if (walkDelay > 0) {
g_scheduler.addEvent(createSchedulerTask(walkDelay, std::bind(&Game::forceRemoveCondition, &g_game, getID(), condition->getType())));
return;
}
}
conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
}
Condition* Creature::getCondition(ConditionType_t type) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type) {
return condition;
}
}
return nullptr;
}
Condition* Creature::getCondition(ConditionType_t type, ConditionId_t conditionId, uint32_t subId/* = 0*/) const
{
for (Condition* condition : conditions) {
if (condition->getType() == type && condition->getId() == conditionId && condition->getSubId() == subId) {
return condition;
}
}
return nullptr;
}
void Creature::executeConditions(uint32_t interval)
{
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (!condition->executeCondition(this, interval)) {
ConditionType_t type = condition->getType();
it = conditions.erase(it);
condition->endCondition(this);
delete condition;
onEndCondition(type);
} else {
++it;
}
}
}
bool Creature::hasCondition(ConditionType_t type, uint32_t subId/* = 0*/) const
{
if (isSuppress(type)) {
return false;
}
int64_t timeNow = OTSYS_TIME();
for (Condition* condition : conditions) {
if (condition->getType() != type || condition->getSubId() != subId) {
continue;
}
if (condition->getEndTime() >= timeNow) {
return true;
}
}
return false;
}
bool Creature::isImmune(CombatType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getDamageImmunities());
}
bool Creature::isImmune(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionImmunities());
}
bool Creature::isSuppress(ConditionType_t type) const
{
return hasBitSet(static_cast<uint32_t>(type), getConditionSuppressions());
}
int64_t Creature::getStepDuration(Direction dir) const
{
int64_t stepDuration = getStepDuration();
if ((dir & DIRECTION_DIAGONAL_MASK) != 0) {
stepDuration *= 3;
}
return stepDuration;
}
int64_t Creature::getStepDuration() const
{
if (isRemoved()) {
return 0;
}
uint32_t calculatedStepSpeed;
uint32_t groundSpeed;
int32_t stepSpeed = getStepSpeed();
if (stepSpeed > -Creature::speedB) {
calculatedStepSpeed = floor((Creature::speedA * log((stepSpeed / 2) + Creature::speedB) + Creature::speedC) + 0.5);
if (calculatedStepSpeed <= 0) {
calculatedStepSpeed = 1;
}
} else {
calculatedStepSpeed = 1;
}
Item* ground = tile->getGround();
if (ground) {
groundSpeed = Item::items[ground->getID()].speed;
if (groundSpeed == 0) {
groundSpeed = 150;
}
} else {
groundSpeed = 150;
}
double duration = std::floor(1000 * groundSpeed / calculatedStepSpeed);
int64_t stepDuration = std::ceil(duration / 50) * 50;
const Monster* monster = getMonster();
if (monster && monster->isTargetNearby() && !monster->isFleeing() && !monster->getMaster()) {
stepDuration *= 2;
}
return stepDuration;
}
int64_t Creature::getEventStepTicks(bool onlyDelay) const
{
int64_t ret = getWalkDelay();
if (ret <= 0) {
int64_t stepDuration = getStepDuration();
if (onlyDelay && stepDuration > 0) {
ret = 1;
} else {
ret = stepDuration * lastStepCost;
}
}
return ret;
}
void Creature::getCreatureLight(LightInfo& light) const
{
light = internalLight;
}
void Creature::setNormalCreatureLight()
{
internalLight.level = 0;
internalLight.color = 0;
}
bool Creature::registerCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (hasEventRegistered(type)) {
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent == event) {
return false;
}
}
} else {
scriptEventsBitField |= static_cast<uint32_t>(1) << type;
}
eventsList.push_back(event);
return true;
}
bool Creature::unregisterCreatureEvent(const std::string& name)
{
CreatureEvent* event = g_creatureEvents->getEventByName(name);
if (!event) {
return false;
}
CreatureEventType_t type = event->getEventType();
if (!hasEventRegistered(type)) {
return false;
}
bool resetTypeBit = true;
auto it = eventsList.begin(), end = eventsList.end();
while (it != end) {
CreatureEvent* curEvent = *it;
if (curEvent == event) {
it = eventsList.erase(it);
continue;
}
if (curEvent->getEventType() == type) {
resetTypeBit = false;
}
++it;
}
if (resetTypeBit) {
scriptEventsBitField &= ~(static_cast<uint32_t>(1) << type);
}
return true;
}
CreatureEventList Creature::getCreatureEvents(CreatureEventType_t type)
{
CreatureEventList tmpEventList;
if (!hasEventRegistered(type)) {
return tmpEventList;
}
for (CreatureEvent* creatureEvent : eventsList) {
if (creatureEvent->getEventType() == type) {
tmpEventList.push_back(creatureEvent);
}
}
return tmpEventList;
}
bool FrozenPathingConditionCall::isInRange(const Position& startPos, const Position& testPos,
const FindPathParams& fpp) const
{
if (fpp.fullPathSearch) {
if (testPos.x > targetPos.x + fpp.maxTargetDist) {
return false;
}
if (testPos.x < targetPos.x - fpp.maxTargetDist) {
return false;
}
if (testPos.y > targetPos.y + fpp.maxTargetDist) {
return false;
}
if (testPos.y < targetPos.y - fpp.maxTargetDist) {
return false;
}
} else {
int_fast32_t dx = Position::getOffsetX(startPos, targetPos);
int32_t dxMax = (dx >= 0 ? fpp.maxTargetDist : 0);
if (testPos.x > targetPos.x + dxMax) {
return false;
}
int32_t dxMin = (dx <= 0 ? fpp.maxTargetDist : 0);
if (testPos.x < targetPos.x - dxMin) {
return false;
}
int_fast32_t dy = Position::getOffsetY(startPos, targetPos);
int32_t dyMax = (dy >= 0 ? fpp.maxTargetDist : 0);
if (testPos.y > targetPos.y + dyMax) {
return false;
}
int32_t dyMin = (dy <= 0 ? fpp.maxTargetDist : 0);
if (testPos.y < targetPos.y - dyMin) {
return false;
}
}
return true;
}
bool FrozenPathingConditionCall::operator()(const Position& startPos, const Position& testPos,
const FindPathParams& fpp, int32_t& bestMatchDist) const
{
if (!isInRange(startPos, testPos, fpp)) {
return false;
}
if (fpp.clearSight && !g_game.isSightClear(testPos, targetPos, true)) {
return false;
}
int32_t testDist = std::max<int32_t>(Position::getDistanceX(targetPos, testPos), Position::getDistanceY(targetPos, testPos));
if (fpp.maxTargetDist == 1) {
if (testDist < fpp.minTargetDist || testDist > fpp.maxTargetDist) {
return false;
}
return true;
} else if (testDist <= fpp.maxTargetDist) {
if (testDist < fpp.minTargetDist) {
return false;
}
if (testDist == fpp.maxTargetDist) {
bestMatchDist = 0;
return true;
} else if (testDist > bestMatchDist) {
//not quite what we want, but the best so far
bestMatchDist = testDist;
return true;
}
}
return false;
}
bool Creature::isInvisible() const
{
return std::find_if(conditions.begin(), conditions.end(), [] (const Condition* condition) {
return condition->getType() == CONDITION_INVISIBLE;
}) != conditions.end();
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, const FindPathParams& fpp) const
{
return g_game.map.getPathMatching(*this, dirList, FrozenPathingConditionCall(targetPos), fpp);
}
bool Creature::getPathTo(const Position& targetPos, std::forward_list<Direction>& dirList, int32_t minTargetDist, int32_t maxTargetDist, bool fullPathSearch /*= true*/, bool clearSight /*= true*/, int32_t maxSearchDist /*= 0*/) const
{
FindPathParams fpp;
fpp.fullPathSearch = fullPathSearch;
fpp.maxSearchDist = maxSearchDist;
fpp.clearSight = clearSight;
fpp.minTargetDist = minTargetDist;
fpp.maxTargetDist = maxTargetDist;
return getPathTo(targetPos, dirList, fpp);
}
| 1 | 12,817 | You introduced a bug here. | otland-forgottenserver | cpp |
@@ -261,6 +261,14 @@ namespace Microsoft.DotNet.Build.Tasks.VersionTools
"true",
StringComparison.OrdinalIgnoreCase);
+ string oldValue = step.GetMetadata("ReplacementSubstituteOld");
+ string newValue = step.GetMetadata("ReplacementSubstituteNew");
+
+ if (!string.IsNullOrEmpty(oldValue) && !string.IsNullOrEmpty(newValue))
+ {
+ updater.ReplacementTransform = v => v.Replace(oldValue, newValue);
+ }
+
return updater;
}
| 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.DotNet.VersionTools;
using Microsoft.DotNet.VersionTools.Automation;
using Microsoft.DotNet.VersionTools.Automation.GitHubApi;
using Microsoft.DotNet.VersionTools.BuildManifest;
using Microsoft.DotNet.VersionTools.BuildManifest.Model;
using Microsoft.DotNet.VersionTools.Dependencies;
using Microsoft.DotNet.VersionTools.Dependencies.BuildManifest;
using Microsoft.DotNet.VersionTools.Dependencies.BuildOutput;
using Microsoft.DotNet.VersionTools.Dependencies.BuildOutput.OrchestratedBuild;
using Microsoft.DotNet.VersionTools.Dependencies.Submodule;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using System.Xml.Linq;
namespace Microsoft.DotNet.Build.Tasks.VersionTools
{
public abstract class BaseDependenciesTask : BuildTask
{
internal const string RawUrlMetadataName = "RawUrl";
internal const string RawVersionsBaseUrlMetadataName = "RawVersionsBaseUrl";
internal const string VersionsRepoDirMetadataName = "VersionsRepoDir";
internal const string BuildInfoPathMetadataName = "BuildInfoPath";
internal const string CurrentRefMetadataName = "CurrentRef";
internal const string PackageIdMetadataName = "PackageId";
internal const string VersionMetadataName = "Version";
internal const string DependencyTypeMetadataName = "DependencyType";
[Required]
public ITaskItem[] DependencyInfo { get; set; }
public ITaskItem[] ProjectJsonFiles { get; set; }
public ITaskItem[] UpdateStep { get; set; }
public string BuildInfoCacheDir { get; set; }
/// <summary>
/// GitHub personal authentication token (PAT). If no PAT is provided, API calls are
/// performed anonymously. This works for operations that don't need any permissions, like
/// fetching the latest dotnet/versions commit hash. It is always preferable to supply a PAT
/// because the anonymous user rate limit is small and per-IP.
/// </summary>
public string GitHubAuthToken { get; set; }
public string GitHubUser { get; set; }
/// <summary>
/// A potentially authenticated GitHub client. Only valid during TraceListenedExecute.
/// </summary>
protected GitHubClient GitHubClient { get; private set; }
protected Dictionary<IDependencyInfo, ITaskItem> DependencyInfoConfigItems { get; } =
new Dictionary<IDependencyInfo, ITaskItem>();
public override bool Execute()
{
GitHubAuth auth = null;
if (!string.IsNullOrEmpty(GitHubAuthToken))
{
auth = new GitHubAuth(GitHubAuthToken, GitHubUser);
}
using (GitHubClient = new GitHubClient(auth))
{
Trace.Listeners.MsBuildListenedInvoke(Log, TraceListenedExecute);
}
return !Log.HasLoggedErrors;
}
protected abstract void TraceListenedExecute();
protected Regex CreateXmlUpdateRegex(string elementName, string contentGroupName) =>
new Regex($@"<{elementName}>(?<{contentGroupName}>.*)</{elementName}>");
protected IEnumerable<IDependencyUpdater> CreateUpdaters()
{
if (ProjectJsonFiles != null && ProjectJsonFiles.Any())
{
yield return new ProjectJsonUpdater(ProjectJsonFiles.Select(item => item.ItemSpec));
}
foreach (ITaskItem step in UpdateStep ?? Enumerable.Empty<ITaskItem>())
{
string type = step.GetMetadata("UpdaterType");
switch (type)
{
case "Xml":
yield return CreateXmlUpdater(step);
break;
case "File":
yield return ConfigureFileUpdater(
new FilePackageUpdater
{
PackageId = GetRequiredMetadata(step, "PackageId"),
Path = GetRequiredMetadata(step, "Path"),
},
step);
break;
case "Tool versions":
yield return new ToolVersionsUpdater
{
Path = GetRequiredMetadata(step, "Path"),
};
break;
case "Submodule from package":
yield return new IndicatorPackageSubmoduleUpdater(
GetRequiredMetadata(step, "IndicatorPackage"))
{
PackageDownloadBaseUrl = GetRequiredMetadata(step, "PackageDownloadBaseUrl"),
Path = GetRequiredMetadata(step, "Path")
};
break;
case "Submodule from latest":
yield return new LatestCommitSubmoduleUpdater(
GetRequiredMetadata(step, "Repository"),
GetRequiredMetadata(step, "Ref"))
{
Path = GetRequiredMetadata(step, "Path")
};
break;
case "Submodule from orchestrated build":
yield return new OrchestratedBuildSubmoduleUpdater
{
Path = GetRequiredMetadata(step, "Path"),
BuildName = GetRequiredMetadata(step, "BuildName"),
GitUrl = GetRequiredMetadata(step, "GitUrl")
};
break;
case "Build attribute from orchestrated build":
yield return CreateOrchestratedBuildUpdater(
step,
OrchestratedBuildUpdateHelpers.BuildAttribute(
GetRequiredMetadata(step, "BuildName"),
GetRequiredMetadata(step, "AttributeName")));
break;
case "Orchestrated blob feed attribute":
yield return CreateOrchestratedBuildUpdater(
step,
OrchestratedBuildUpdateHelpers.OrchestratedFeedAttribute(
GetRequiredMetadata(step, "AttributeName")));
break;
case "Orchestrated blob feed package version":
yield return CreateOrchestratedBuildUpdater(
step,
OrchestratedBuildUpdateHelpers.OrchestratedFeedPackageVersion(
GetRequiredMetadata(step, "PackageId")));
break;
default:
throw new NotSupportedException(
$"Unsupported updater '{step.ItemSpec}': UpdaterType '{type}'.");
}
}
}
protected IEnumerable<IDependencyInfo> CreateLocalDependencyInfos()
{
return CreateDependencyInfos(false, null);
}
protected IEnumerable<IDependencyInfo> CreateDependencyInfos(
bool remote,
string versionsCommit)
{
foreach (ITaskItem info in DependencyInfo ?? Enumerable.Empty<ITaskItem>())
{
IDependencyInfo dependencyInfo;
string type = info.GetMetadata("DependencyType");
switch (type)
{
case "Build":
SetVersionsCommitOverride(info, versionsCommit);
dependencyInfo = CreateBuildInfoDependency(info, BuildInfoCacheDir);
break;
case "Submodule":
dependencyInfo = SubmoduleDependencyInfo.Create(
GetRequiredMetadata(info, "Repository"),
GetRequiredMetadata(info, "Ref"),
GetRequiredMetadata(info, "Path"),
remote);
break;
case "Orchestrated build":
SetVersionsCommitOverride(info, versionsCommit);
dependencyInfo = OrchestratedBuildDependencyInfo.CreateAsync(
info.ItemSpec,
new GitHubProject(
GetRequiredMetadata(info, "VersionsRepo"),
GetRequiredMetadata(info, "VersionsRepoOwner")),
GetRequiredMetadata(info, CurrentRefMetadataName),
GetRequiredMetadata(info, "BasePath"),
new BuildManifestClient(GitHubClient)).Result;
break;
case "Orchestrated build file":
dependencyInfo = new OrchestratedBuildDependencyInfo(
info.ItemSpec,
OrchestratedBuildModel.Parse(
XElement.Parse(
File.ReadAllText(
GetRequiredMetadata(info, "Path")))));
break;
default:
throw new NotSupportedException(
$"Unsupported DependencyInfo '{info.ItemSpec}': DependencyType '{type}'.");
}
DependencyInfoConfigItems[dependencyInfo] = info;
yield return dependencyInfo;
}
}
private FileRegexUpdater CreateXmlUpdater(ITaskItem step)
{
string buildInfoName = step.GetMetadata("BuildInfoName");
string packageId = step.GetMetadata("PackageId");
FileRegexUpdater updater;
if (!string.IsNullOrEmpty(buildInfoName))
{
updater = new FileRegexReleaseUpdater
{
BuildInfoName = buildInfoName
};
}
else
{
updater = new FileRegexPackageUpdater
{
PackageId = packageId
};
}
ConfigureFileRegexUpdater(updater, step);
return updater;
}
private FileUpdater ConfigureFileUpdater(FileUpdater updater, ITaskItem step)
{
updater.SkipIfNoReplacementFound = string.Equals(
step.GetMetadata(nameof(updater.SkipIfNoReplacementFound)),
"true",
StringComparison.OrdinalIgnoreCase);
return updater;
}
private FileRegexUpdater ConfigureFileRegexUpdater(FileRegexUpdater updater, ITaskItem step)
{
updater.Path = step.GetMetadata("Path");
string elementName = step.GetMetadata("ElementName");
string manualRegex = step.GetMetadata("Regex");
if (!string.IsNullOrEmpty(elementName))
{
updater.Regex = CreateXmlUpdateRegex(elementName, nameof(elementName));
updater.VersionGroupName = nameof(elementName);
}
else if (!string.IsNullOrEmpty(manualRegex))
{
updater.Regex = new Regex(manualRegex);
updater.VersionGroupName = GetRequiredMetadata(step, "VersionGroupName");
}
else
{
throw new ArgumentException(
$"On '{step.ItemSpec}', did not find 'ElementName' or 'Regex' metadata.");
}
updater.SkipIfNoReplacementFound = string.Equals(
step.GetMetadata(nameof(updater.SkipIfNoReplacementFound)),
"true",
StringComparison.OrdinalIgnoreCase);
return updater;
}
private IDependencyUpdater CreateOrchestratedBuildUpdater(
ITaskItem step,
Func<OrchestratedBuildDependencyInfo[], DependencyReplacement> updater)
{
string path = step.GetMetadata("SingleLineFile");
if (!string.IsNullOrEmpty(path))
{
return ConfigureFileUpdater(
new FileOrchestratedBuildCustomUpdater
{
GetDesiredValue = updater,
Path = path
},
step);
}
return ConfigureFileRegexUpdater(
new FileRegexOrchestratedBuildCustomUpdater { GetDesiredValue = updater },
step);
}
private static BuildDependencyInfo CreateBuildInfoDependency(ITaskItem item, string cacheDir)
{
BuildInfo info = CreateBuildInfo(item, cacheDir);
bool updateStaticDependencies = item
.GetMetadata("UpdateStableVersions")
.Equals("true", StringComparison.OrdinalIgnoreCase);
string[] disabledPackages = item
.GetMetadata("DisabledPackages")
.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries);
return new BuildDependencyInfo(
info,
updateStaticDependencies,
disabledPackages);
}
private static BuildInfo CreateBuildInfo(ITaskItem item, string cacheDir)
{
string rawUrl = item.GetMetadata(RawUrlMetadataName);
if (!string.IsNullOrEmpty(rawUrl))
{
return BuildInfo.Get(item.ItemSpec, rawUrl);
}
string rawVersionsBaseUrl = item.GetMetadata(RawVersionsBaseUrlMetadataName);
string buildInfoPath = item.GetMetadata(BuildInfoPathMetadataName);
string currentRef = item.GetMetadata(CurrentRefMetadataName);
// Optional: override base url with a local directory.
string versionsRepoDir = item.GetMetadata(VersionsRepoDirMetadataName);
if (!string.IsNullOrEmpty(versionsRepoDir) &&
!string.IsNullOrEmpty(buildInfoPath))
{
return BuildInfo.LocalFileGetAsync(
item.ItemSpec,
versionsRepoDir,
buildInfoPath,
// Don't fetch latest release file: it may not be present in build from source.
fetchLatestReleaseFile: false).Result;
}
if (!string.IsNullOrEmpty(rawVersionsBaseUrl) &&
!string.IsNullOrEmpty(buildInfoPath) &&
!string.IsNullOrEmpty(currentRef))
{
return BuildInfo.CachedGet(
item.ItemSpec,
rawVersionsBaseUrl,
currentRef,
buildInfoPath,
cacheDir);
}
string packageId = item.GetMetadata(PackageIdMetadataName);
string version = item.GetMetadata(VersionMetadataName);
if (!string.IsNullOrEmpty(packageId) &&
!string.IsNullOrEmpty(version))
{
return new BuildInfo
{
Name = item.ItemSpec,
LatestPackages = new Dictionary<string, string>
{
[packageId] = version
}
};
}
throw new Exception($"Unable to create build info with '{item}'.");
}
private static string GetRequiredMetadata(ITaskItem item, string name)
{
string metadata = item.GetMetadata(name);
if (string.IsNullOrEmpty(metadata))
{
throw new ArgumentException(
$"On '{item.ItemSpec}', did not find required '{name}' metadata.");
}
return metadata;
}
private static void SetVersionsCommitOverride(ITaskItem item, string versionsCommit)
{
if (versionsCommit != null)
{
ReplaceExistingMetadata(item, CurrentRefMetadataName, versionsCommit);
}
}
private static void ReplaceExistingMetadata(ITaskItem item, string name, string value)
{
if (!string.IsNullOrEmpty(item.GetMetadata(name)))
{
item.SetMetadata(name, value);
}
}
}
}
| 1 | 15,084 | Should we log a warning/error if only one is specified and not the other? | dotnet-buildtools | .cs |
@@ -463,4 +463,15 @@ func (c *fakeClient) ReportApplicationLiveStateEvents(ctx context.Context, req *
return &pipedservice.ReportApplicationLiveStateEventsResponse{}, nil
}
+func (c *fakeClient) GetLatestEvent(ctx context.Context, req *pipedservice.GetLatestEventRequest, opts ...grpc.CallOption) (*pipedservice.GetLatestEventResponse, error) {
+ c.logger.Info("fake client received GetLatestEvent rpc", zap.Any("request", req))
+ return &pipedservice.GetLatestEventResponse{
+ Event: &model.Event{
+ Id: "dev",
+ Name: "dev",
+ ProjectId: "dev",
+ },
+ }, nil
+}
+
var _ pipedservice.PipedServiceClient = (*fakeClient)(nil)
| 1 |
// Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipedclientfake
import (
"context"
"fmt"
"sync"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice"
"github.com/pipe-cd/pipe/pkg/model"
)
type fakeClient struct {
applications map[string]*model.Application
deployments map[string]*model.Deployment
mu sync.RWMutex
logger *zap.Logger
}
// NewClient returns a new fakeClient.
func NewClient(logger *zap.Logger) *fakeClient {
var (
projectID = "local-project"
envID = "dev"
pipedID = "local-piped"
apps = make(map[string]*model.Application, 0)
k8sAppNames = map[string]bool{
"analysis-by-http": false,
"analysis-by-log": false,
"analysis-by-metrics": false,
"analysis-with-baseline": false,
"bluegreen": false,
"canary": true,
"helm-local-chart": false,
"helm-remote-chart": false,
"helm-remote-git-chart": false,
"kustomize-local-base": false,
"kustomize-remote-base": false,
"mesh-envoy-bluegreen": false,
"mesh-envoy-canary": false,
"mesh-istio-bluegreen": false,
"mesh-istio-canary": false,
"multi-steps-canary": false,
"simple": false,
"wait-approval": false,
}
)
// Register applications for debug repository.
for name, enable := range k8sAppNames {
app := &model.Application{
Id: projectID + "/" + envID + "/" + name,
Name: name,
EnvId: envID,
PipedId: pipedID,
ProjectId: projectID,
Kind: model.ApplicationKind_KUBERNETES,
CloudProvider: "kubernetes-default",
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: "debug",
Remote: "[email protected]:pipe-cd/debug.git",
Branch: "master",
},
Path: "kubernetes/" + name,
},
Disabled: !enable,
}
apps[app.Id] = app
}
return &fakeClient{
applications: apps,
deployments: map[string]*model.Deployment{},
logger: logger.Named("fake-piped-client"),
}
}
// Close closes the connection to server.
func (c *fakeClient) Close() error {
c.logger.Info("fakeClient client is closing")
return nil
}
// Ping is periodically sent to report its realtime status/stats to control-plane.
// The received stats will be pushed to the metrics collector.
func (c *fakeClient) Ping(ctx context.Context, req *pipedservice.PingRequest, opts ...grpc.CallOption) (*pipedservice.PingResponse, error) {
c.logger.Info("fake client received Ping rpc", zap.Any("request", req))
return &pipedservice.PingResponse{}, nil
}
// ReportPipedMeta is sent by piped while starting up to report its metadata
// such as configured cloud providers.
func (c *fakeClient) ReportPipedMeta(ctx context.Context, req *pipedservice.ReportPipedMetaRequest, opts ...grpc.CallOption) (*pipedservice.ReportPipedMetaResponse, error) {
c.logger.Info("fake client received ReportPipedMeta rpc", zap.Any("request", req))
return &pipedservice.ReportPipedMetaResponse{}, nil
}
// GetEnvironment finds and returns the environment for the specified ID.
func (c *fakeClient) GetEnvironment(ctx context.Context, req *pipedservice.GetEnvironmentRequest, opts ...grpc.CallOption) (*pipedservice.GetEnvironmentResponse, error) {
c.logger.Info("fake client received GetEnvironment rpc", zap.Any("request", req))
return &pipedservice.GetEnvironmentResponse{
Environment: &model.Environment{
Id: "dev",
Name: "dev",
},
}, nil
}
// ListApplications returns a list of registered applications
// that should be managed by the requested piped.
// Disabled applications should not be included in the response.
// Piped uses this RPC to fetch and sync the application configuration into its local database.
func (c *fakeClient) ListApplications(ctx context.Context, req *pipedservice.ListApplicationsRequest, opts ...grpc.CallOption) (*pipedservice.ListApplicationsResponse, error) {
c.logger.Info("fake client received ListApplications rpc", zap.Any("request", req))
apps := make([]*model.Application, 0, len(c.applications))
for _, app := range c.applications {
if app.Disabled {
continue
}
apps = append(apps, app)
}
return &pipedservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
// ReportApplicationSyncState is used to update the sync status of an application.
func (c *fakeClient) ReportApplicationSyncState(ctx context.Context, req *pipedservice.ReportApplicationSyncStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationSyncStateResponse, error) {
c.logger.Info("fake client received ReportApplicationSyncState rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
app.SyncState = req.State
return &pipedservice.ReportApplicationSyncStateResponse{}, nil
}
// ReportApplicationDeployingStatus is used to report whether the specified application is deploying or not.
func (c *fakeClient) ReportApplicationDeployingStatus(_ context.Context, req *pipedservice.ReportApplicationDeployingStatusRequest, _ ...grpc.CallOption) (*pipedservice.ReportApplicationDeployingStatusResponse, error) {
c.logger.Info("fake client received ReportApplicationDeployingStatus rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
app.Deploying = req.Deploying
return &pipedservice.ReportApplicationDeployingStatusResponse{}, nil
}
// ReportApplicationMostRecentDeployment is used to update the basic information about
// the most recent deployment of a specific application.
func (c *fakeClient) ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error) {
c.logger.Info("fake client received ReportApplicationMostRecentDeployment rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
switch req.Status {
case model.DeploymentStatus_DEPLOYMENT_SUCCESS:
app.MostRecentlySuccessfulDeployment = req.Deployment
case model.DeploymentStatus_DEPLOYMENT_PENDING:
app.MostRecentlyTriggeredDeployment = req.Deployment
}
return &pipedservice.ReportApplicationMostRecentDeploymentResponse{}, nil
}
// GetApplicationMostRecentDeployment returns the most recent deployment of the given application.
func (c *fakeClient) GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error) {
c.logger.Info("fake client received GetApplicationMostRecentDeployment rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
app, ok := c.applications[req.ApplicationId]
if !ok {
return nil, status.Error(codes.NotFound, "application was not found")
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_SUCCESS && app.MostRecentlySuccessfulDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlySuccessfulDeployment}, nil
}
if req.Status == model.DeploymentStatus_DEPLOYMENT_PENDING && app.MostRecentlyTriggeredDeployment != nil {
return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlyTriggeredDeployment}, nil
}
return nil, status.Error(codes.NotFound, "")
}
// ListNotCompletedDeployments returns a list of not completed deployments
// which are managed by this piped.
// DeploymentController component uses this RPC to spawns/syncs its local deployment executors.
func (c *fakeClient) ListNotCompletedDeployments(ctx context.Context, req *pipedservice.ListNotCompletedDeploymentsRequest, opts ...grpc.CallOption) (*pipedservice.ListNotCompletedDeploymentsResponse, error) {
c.logger.Info("fake client received ListNotCompletedDeployments rpc", zap.Any("request", req))
c.mu.RLock()
defer c.mu.RUnlock()
deployments := make([]*model.Deployment, 0, len(c.deployments))
for _, d := range c.deployments {
if model.IsCompletedDeployment(d.Status) {
continue
}
deployments = append(deployments, d.Clone())
}
return &pipedservice.ListNotCompletedDeploymentsResponse{
Deployments: deployments,
}, nil
}
// CreateDeployment creates/triggers a new deployment for an application
// that is managed by this piped.
// This will be used by DeploymentTrigger component.
func (c *fakeClient) CreateDeployment(ctx context.Context, req *pipedservice.CreateDeploymentRequest, opts ...grpc.CallOption) (*pipedservice.CreateDeploymentResponse, error) {
c.logger.Info("fake client received CreateDeployment rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.deployments[req.Deployment.Id]; ok {
return nil, status.Error(codes.AlreadyExists, "")
}
c.deployments[req.Deployment.Id] = req.Deployment
return &pipedservice.CreateDeploymentResponse{}, nil
}
// ReportDeploymentPlanned used by piped to update the status
// of a specific deployment to PLANNED.
func (c *fakeClient) ReportDeploymentPlanned(ctx context.Context, req *pipedservice.ReportDeploymentPlannedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentPlannedResponse, error) {
c.logger.Info("fake client received ReportDeploymentPlanned rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
s := model.DeploymentStatus_DEPLOYMENT_PLANNED
if !model.CanUpdateDeploymentStatus(d.Status, s) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), s.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
if req.Summary != "" {
d.Summary = req.Summary
}
d.Status = s
d.StatusReason = req.StatusReason
d.RunningCommitHash = req.RunningCommitHash
d.Version = req.Version
if len(req.Stages) > 0 {
d.Stages = req.Stages
}
return &pipedservice.ReportDeploymentPlannedResponse{}, nil
}
// ReportDeploymentStatusChanged is used to update the status
// of a specific deployment to RUNNING or ROLLING_BACK.
func (c *fakeClient) ReportDeploymentStatusChanged(ctx context.Context, req *pipedservice.ReportDeploymentStatusChangedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentStatusChangedResponse, error) {
c.logger.Info("fake client received ReportDeploymentStatusChanged rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
if !model.CanUpdateDeploymentStatus(d.Status, req.Status) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
d.Status = req.Status
d.StatusReason = req.StatusReason
return &pipedservice.ReportDeploymentStatusChangedResponse{}, nil
}
// ReportDeploymentCompleted used by piped to update the status
// of a specific deployment to SUCCESS | FAILURE | CANCELLED.
func (c *fakeClient) ReportDeploymentCompleted(ctx context.Context, req *pipedservice.ReportDeploymentCompletedRequest, opts ...grpc.CallOption) (*pipedservice.ReportDeploymentCompletedResponse, error) {
c.logger.Info("fake client received ReportDeploymentCompleted rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
if !model.IsCompletedDeployment(req.Status) {
msg := fmt.Sprintf("invalid status, expected a completed one but got %s", req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
if !model.CanUpdateDeploymentStatus(d.Status, req.Status) {
msg := fmt.Sprintf("invalid status, cur = %s, req = %s", d.Status.String(), req.Status.String())
return nil, status.Error(codes.FailedPrecondition, msg)
}
d.Status = req.Status
d.StatusReason = req.StatusReason
d.CompletedAt = req.CompletedAt
for _, stage := range d.Stages {
if status, ok := req.StageStatuses[stage.Id]; ok {
stage.Status = status
}
}
return &pipedservice.ReportDeploymentCompletedResponse{}, nil
}
// SaveDeploymentMetadata used by piped to persist the metadata of a specific deployment.
func (c *fakeClient) SaveDeploymentMetadata(ctx context.Context, req *pipedservice.SaveDeploymentMetadataRequest, opts ...grpc.CallOption) (*pipedservice.SaveDeploymentMetadataResponse, error) {
c.logger.Info("fake client received SaveDeploymentMetadata rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
d.Metadata = req.Metadata
return &pipedservice.SaveDeploymentMetadataResponse{}, nil
}
// SaveStageMetadata used by piped to persist the metadata
// of a specific stage of a deployment.
func (c *fakeClient) SaveStageMetadata(ctx context.Context, req *pipedservice.SaveStageMetadataRequest, opts ...grpc.CallOption) (*pipedservice.SaveStageMetadataResponse, error) {
c.logger.Info("fake client received SaveStageMetadata rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
for _, s := range d.Stages {
if s.Id != req.StageId {
continue
}
s.Metadata = req.Metadata
return &pipedservice.SaveStageMetadataResponse{}, nil
}
return nil, status.Error(codes.NotFound, "stage was not found")
}
// ReportStageLogs is sent by piped to save the log of a pipeline stage.
func (c *fakeClient) ReportStageLogs(ctx context.Context, req *pipedservice.ReportStageLogsRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageLogsResponse, error) {
c.logger.Info("fake client received ReportStageLogs rpc", zap.Any("request", req))
return &pipedservice.ReportStageLogsResponse{}, nil
}
// ReportStageLogsFromLastCheckpoint is used to save the full logs from the most recently saved point.
func (c *fakeClient) ReportStageLogsFromLastCheckpoint(ctx context.Context, req *pipedservice.ReportStageLogsFromLastCheckpointRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageLogsFromLastCheckpointResponse, error) {
c.logger.Info("fake client received ReportStageLogsFromLastCheckpoint rpc", zap.Any("request", req))
return &pipedservice.ReportStageLogsFromLastCheckpointResponse{}, nil
}
// ReportStageStatusChanged used by piped to update the status
// of a specific stage of a deployment.
func (c *fakeClient) ReportStageStatusChanged(ctx context.Context, req *pipedservice.ReportStageStatusChangedRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageStatusChangedResponse, error) {
c.logger.Info("fake client received ReportStageStatusChanged rpc", zap.Any("request", req))
c.mu.Lock()
defer c.mu.Unlock()
d, ok := c.deployments[req.DeploymentId]
if !ok {
return nil, status.Error(codes.NotFound, "deployment was not found")
}
for _, s := range d.Stages {
if s.Id != req.StageId {
continue
}
s.Status = req.Status
s.RetriedCount = req.RetriedCount
s.Visible = req.Visible
s.CompletedAt = req.CompletedAt
return &pipedservice.ReportStageStatusChangedResponse{}, nil
}
return nil, status.Error(codes.NotFound, "stage was not found")
}
// ListUnhandledCommands is periodically called by piped to obtain the commands
// that should be handled.
// Whenever a user makes an interaction from the WebUI (cancel/approve/retry/sync)
// a new command with a unique identifier will be generated and saved into the datastore.
// Piped uses this RPC to list all still-not-handled commands to handle them,
// then report back the result to server.
// On the other side, the web will periodically check the command status and feed the result back to the user.
// In the future, we may need a solution to remove all old-handled commands from datastore for space.
func (c *fakeClient) ListUnhandledCommands(ctx context.Context, req *pipedservice.ListUnhandledCommandsRequest, opts ...grpc.CallOption) (*pipedservice.ListUnhandledCommandsResponse, error) {
c.logger.Info("fake client received ListUnhandledCommands rpc", zap.Any("request", req))
return &pipedservice.ListUnhandledCommandsResponse{}, nil
}
// ReportCommandHandled is called by piped to mark a specific command as handled.
// The request payload will contain the handle status as well as any additional result data.
// The handle result should be updated to both datastore and cache (for reading from web).
func (c *fakeClient) ReportCommandHandled(ctx context.Context, req *pipedservice.ReportCommandHandledRequest, opts ...grpc.CallOption) (*pipedservice.ReportCommandHandledResponse, error) {
c.logger.Info("fake client received ReportCommandHandled rpc", zap.Any("request", req))
return &pipedservice.ReportCommandHandledResponse{}, nil
}
// ReportApplicationLiveState is periodically sent to correct full state of an application.
// For kubernetes application, this contains a full tree of its kubernetes resources.
// The tree data should be written into filestore immediately and then the state in cache should be refreshed too.
func (c *fakeClient) ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateResponse, error) {
c.logger.Info("fake client received ReportApplicationLiveState rpc", zap.Any("request", req))
return &pipedservice.ReportApplicationLiveStateResponse{}, nil
}
// ReportApplicationLiveStateEvents is sent by piped to submit one or multiple events
// about the changes of application state.
// Control plane uses the received events to update the state of application-resource-tree.
// We want to start by a simple solution at this initial stage of development,
// so the API server just handles it as below:
// - loads the related application-resource-tree from filestore
// - checks and builds new state for the application-resource-tree
// - updates the new state into filestore and cache (cache data is for reading while handling web requests)
// In the future, we may want to redesign the behavior of this RPC by using a pubsub/queue pattern.
// After receiving the events, all of them will be published into a queue immediately,
// and then another Handler service will pick them up in order and apply them to build the new state.
// That way we can control the traffic to the datastore in a better way.
func (c *fakeClient) ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest, opts ...grpc.CallOption) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) {
c.logger.Info("fake client received ReportApplicationLiveStateEvents rpc", zap.Any("request", req))
return &pipedservice.ReportApplicationLiveStateEventsResponse{}, nil
}
var _ pipedservice.PipedServiceClient = (*fakeClient)(nil)
| 1 | 13,259 | `ctx` is unused in GetLatestEvent | pipe-cd-pipe | go |
@@ -128,6 +128,14 @@ void nano_daemon::daemon::run (boost::filesystem::path const & data_path, nano::
logger.always_log (boost::format ("Open file descriptors limit is %1%") % file_descriptor_limit);
}
+ // a 0-valued port means for the node 'let the OS decide'; however, for the daemon start up, if
+ // the user hasn't specified a port in the config, we must use the default peering port for the network
+ //
+ if (!config.node.peering_port)
+ {
+ config.node.peering_port = network_params.network.default_node_port;
+ }
+
auto node (std::make_shared<nano::node> (io_ctx, data_path, config.node, opencl_work, flags));
if (!node->init_error ())
{
| 1 |
#include <nano/boost/process/child.hpp>
#include <nano/lib/signal_manager.hpp>
#include <nano/lib/threading.hpp>
#include <nano/lib/tlsconfig.hpp>
#include <nano/lib/utility.hpp>
#include <nano/nano_node/daemon.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/ipc/ipc_server.hpp>
#include <nano/node/json_handler.hpp>
#include <nano/node/node.hpp>
#include <nano/node/openclwork.hpp>
#include <nano/rpc/rpc.hpp>
#include <boost/format.hpp>
#include <csignal>
#include <iostream>
namespace
{
void nano_abort_signal_handler (int signum)
{
// remove `signum` from signal handling when under Windows
#ifdef _WIN32
std::signal (signum, SIG_DFL);
#endif
// create some debugging log files
nano::dump_crash_stacktrace ();
nano::create_load_memory_address_files ();
// re-raise signal to call the default handler and exit
raise (signum);
}
void install_abort_signal_handler ()
{
// We catch signal SIGSEGV and SIGABRT not via the signal manager because we want these signal handlers
// to be executed in the stack of the code that caused the signal, so we can dump the stacktrace.
#ifdef _WIN32
std::signal (SIGSEGV, nano_abort_signal_handler);
std::signal (SIGABRT, nano_abort_signal_handler);
#else
struct sigaction sa = {};
sa.sa_handler = nano_abort_signal_handler;
sigemptyset (&sa.sa_mask);
sa.sa_flags = SA_RESETHAND;
sigaction (SIGSEGV, &sa, NULL);
sigaction (SIGABRT, &sa, NULL);
#endif
}
volatile sig_atomic_t sig_int_or_term = 0;
constexpr std::size_t OPEN_FILE_DESCRIPTORS_LIMIT = 16384;
}
static void load_and_set_bandwidth_params (std::shared_ptr<nano::node> const & node, boost::filesystem::path const & data_path, nano::node_flags const & flags)
{
nano::daemon_config config{ data_path, node->network_params };
auto error = nano::read_node_config_toml (data_path, config, flags.config_overrides);
if (!error)
{
error = nano::flags_config_conflicts (flags, config.node);
if (!error)
{
node->set_bandwidth_params (config.node.bandwidth_limit, config.node.bandwidth_limit_burst_ratio);
}
}
}
void nano_daemon::daemon::run (boost::filesystem::path const & data_path, nano::node_flags const & flags)
{
install_abort_signal_handler ();
boost::filesystem::create_directories (data_path);
boost::system::error_code error_chmod;
nano::set_secure_perm_directory (data_path, error_chmod);
std::unique_ptr<nano::thread_runner> runner;
nano::network_params network_params{ nano::network_constants::active_network };
nano::daemon_config config{ data_path, network_params };
auto error = nano::read_node_config_toml (data_path, config, flags.config_overrides);
nano::set_use_memory_pools (config.node.use_memory_pools);
if (!error)
{
error = nano::flags_config_conflicts (flags, config.node);
}
if (!error)
{
config.node.logging.init (data_path);
nano::logger_mt logger{ config.node.logging.min_time_between_log_output };
auto tls_config (std::make_shared<nano::tls_config> ());
error = nano::read_tls_config_toml (data_path, *tls_config, logger);
if (error)
{
std::cerr << error.get_message () << std::endl;
std::exit (1);
}
else
{
config.node.websocket_config.tls_config = tls_config;
}
boost::asio::io_context io_ctx;
auto opencl (nano::opencl_work::create (config.opencl_enable, config.opencl, logger, config.node.network_params.work));
nano::work_pool opencl_work (config.node.network_params.network, config.node.work_threads, config.node.pow_sleep_interval, opencl ? [&opencl] (nano::work_version const version_a, nano::root const & root_a, uint64_t difficulty_a, std::atomic<int> & ticket_a) {
return opencl->generate_work (version_a, root_a, difficulty_a, ticket_a);
}
: std::function<boost::optional<uint64_t> (nano::work_version const, nano::root const &, uint64_t, std::atomic<int> &)> (nullptr));
try
{
// This avoids a blank prompt during any node initialization delays
auto initialization_text = "Starting up Nano node...";
std::cout << initialization_text << std::endl;
logger.always_log (initialization_text);
nano::set_file_descriptor_limit (OPEN_FILE_DESCRIPTORS_LIMIT);
auto const file_descriptor_limit = nano::get_file_descriptor_limit ();
if (file_descriptor_limit < OPEN_FILE_DESCRIPTORS_LIMIT)
{
logger.always_log (boost::format ("WARNING: open file descriptors limit is %1%, lower than the %2% recommended. Node was unable to change it.") % file_descriptor_limit % OPEN_FILE_DESCRIPTORS_LIMIT);
}
else
{
logger.always_log (boost::format ("Open file descriptors limit is %1%") % file_descriptor_limit);
}
auto node (std::make_shared<nano::node> (io_ctx, data_path, config.node, opencl_work, flags));
if (!node->init_error ())
{
auto network_label = node->network_params.network.get_current_network_as_string ();
std::time_t dateTime = std::time (nullptr);
std::cout << "Network: " << network_label << ", version: " << NANO_VERSION_STRING << "\n"
<< "Path: " << node->application_path.string () << "\n"
<< "Build Info: " << BUILD_INFO << "\n"
<< "Database backend: " << node->store.vendor_get () << "\n"
<< "Start time: " << std::put_time (std::gmtime (&dateTime), "%c UTC") << std::endl;
auto voting (node->wallets.reps ().voting);
if (voting > 1)
{
std::cout << "Voting with more than one representative can limit performance: " << voting << " representatives are configured" << std::endl;
}
node->start ();
nano::ipc::ipc_server ipc_server (*node, config.rpc);
std::unique_ptr<boost::process::child> rpc_process;
std::unique_ptr<boost::process::child> nano_pow_server_process;
/*if (config.pow_server.enable)
{
if (!boost::filesystem::exists (config.pow_server.pow_server_path))
{
std::cerr << std::string ("nano_pow_server is configured to start as a child process, however the file cannot be found at: ") + config.pow_server.pow_server_path << std::endl;
std::exit (1);
}
nano_pow_server_process = std::make_unique<boost::process::child> (config.pow_server.pow_server_path, "--config_path", data_path / "config-nano-pow-server.toml");
}*/
std::unique_ptr<nano::rpc> rpc;
std::unique_ptr<nano::rpc_handler_interface> rpc_handler;
if (config.rpc_enable)
{
if (!config.rpc.child_process.enable)
{
// Launch rpc in-process
nano::rpc_config rpc_config{ config.node.network_params.network };
auto error = nano::read_rpc_config_toml (data_path, rpc_config, flags.rpc_config_overrides);
if (error)
{
std::cout << error.get_message () << std::endl;
std::exit (1);
}
rpc_config.tls_config = tls_config;
rpc_handler = std::make_unique<nano::inprocess_rpc_handler> (*node, ipc_server, config.rpc, [&ipc_server, &workers = node->workers, &io_ctx] () {
ipc_server.stop ();
workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::seconds (3), [&io_ctx] () {
io_ctx.stop ();
});
});
rpc = nano::get_rpc (io_ctx, rpc_config, *rpc_handler);
rpc->start ();
}
else
{
// Spawn a child rpc process
if (!boost::filesystem::exists (config.rpc.child_process.rpc_path))
{
throw std::runtime_error (std::string ("RPC is configured to spawn a new process however the file cannot be found at: ") + config.rpc.child_process.rpc_path);
}
auto network = node->network_params.network.get_current_network_as_string ();
rpc_process = std::make_unique<boost::process::child> (config.rpc.child_process.rpc_path, "--daemon", "--data_path", data_path, "--network", network);
}
}
debug_assert (!nano::signal_handler_impl);
nano::signal_handler_impl = [&io_ctx] () {
io_ctx.stop ();
sig_int_or_term = 1;
};
nano::signal_manager sigman;
// keep trapping Ctrl-C to avoid a second Ctrl-C interrupting tasks started by the first
sigman.register_signal_handler (SIGINT, &nano::signal_handler, true);
// sigterm is less likely to come in bunches so only trap it once
sigman.register_signal_handler (SIGTERM, &nano::signal_handler, false);
#ifndef _WIN32
// on sighup we should reload the bandwidth parameters
std::function<void (int)> sighup_signal_handler ([&node, &data_path, &flags] (int signum) {
debug_assert (signum == SIGHUP);
load_and_set_bandwidth_params (node, data_path, flags);
});
sigman.register_signal_handler (SIGHUP, sighup_signal_handler, true);
#endif
runner = std::make_unique<nano::thread_runner> (io_ctx, node->config.io_threads);
runner->join ();
if (sig_int_or_term == 1)
{
ipc_server.stop ();
node->stop ();
if (rpc)
{
rpc->stop ();
}
}
if (rpc_process)
{
rpc_process->wait ();
}
}
else
{
std::cerr << "Error initializing node\n";
}
}
catch (std::runtime_error const & e)
{
std::cerr << "Error while running node (" << e.what () << ")\n";
}
}
else
{
std::cerr << "Error deserializing config: " << error.get_message () << std::endl;
}
}
| 1 | 17,078 | This looks good to me. Personally, I would have the following for maximum flexibility: * convert peering_port from uint64_t to int. * Set the default value to "-1" to specify the value is not set * if the value is -1 then do `config.node.peering_port = network_params.network.default_node_port` That would allow for 0 to mean, select the port number yourself and be more consistent. But I am happy with the change as is, as well. | nanocurrency-nano-node | cpp |
@@ -10,7 +10,6 @@ __all__ = [
class ImageTransform(object):
"""Preprocess an image.
-
1. rescale the image to expected size
2. normalize the image
3. flip the image (if needed)
| 1 |
import mmcv
import numpy as np
import torch
__all__ = [
'ImageTransform', 'BboxTransform', 'MaskTransform', 'SegMapTransform',
'Numpy2Tensor'
]
class ImageTransform(object):
"""Preprocess an image.
1. rescale the image to expected size
2. normalize the image
3. flip the image (if needed)
4. pad the image (if needed)
5. transpose to (c, h, w)
"""
def __init__(self,
mean=(0, 0, 0),
std=(1, 1, 1),
to_rgb=True,
size_divisor=None):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
self.size_divisor = size_divisor
def __call__(self, img, scale, flip=False, keep_ratio=True):
if keep_ratio:
img, scale_factor = mmcv.imrescale(img, scale, return_scale=True)
else:
img, w_scale, h_scale = mmcv.imresize(
img, scale, return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
img_shape = img.shape
img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb)
if flip:
img = mmcv.imflip(img)
if self.size_divisor is not None:
img = mmcv.impad_to_multiple(img, self.size_divisor)
pad_shape = img.shape
else:
pad_shape = img_shape
img = img.transpose(2, 0, 1)
return img, img_shape, pad_shape, scale_factor
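# Illustrative usage sketch for ImageTransform above, not part of the original
# module: the mean/std values and the target scale are assumptions chosen for
# the example, not values required by the class.
def _example_image_transform(img):
    transform = ImageTransform(
        mean=(123.675, 116.28, 103.53),
        std=(58.395, 57.12, 57.375),
        to_rgb=True,
        size_divisor=32)
    # Returns the (c, h, w) image plus the shapes and scale_factor consumed by
    # BboxTransform and MaskTransform below.
    return transform(img, scale=(1333, 800), flip=False, keep_ratio=True)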
def bbox_flip(bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(ndarray): shape (..., 4*k)
img_shape(tuple): (height, width)
"""
assert bboxes.shape[-1] % 4 == 0
w = img_shape[1]
flipped = bboxes.copy()
flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
return flipped
class BboxTransform(object):
"""Preprocess gt bboxes.
1. rescale bboxes according to image size
2. flip bboxes (if needed)
3. pad the first dimension to `max_num_gts`
"""
def __init__(self, max_num_gts=None):
self.max_num_gts = max_num_gts
def __call__(self, bboxes, img_shape, scale_factor, flip=False):
gt_bboxes = bboxes * scale_factor
if flip:
gt_bboxes = bbox_flip(gt_bboxes, img_shape)
gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
if self.max_num_gts is None:
return gt_bboxes
else:
num_gts = gt_bboxes.shape[0]
padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
padded_bboxes[:num_gts, :] = gt_bboxes
return padded_bboxes
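# Illustrative usage sketch for bbox_flip/BboxTransform above, not part of the
# original module: the ground-truth box is made up; img_shape and scale_factor
# are expected to come from ImageTransform.
def _example_bbox_transform(img_shape, scale_factor):
    gt_bboxes = np.array([[10, 20, 110, 220]], dtype=np.float32)
    return BboxTransform(max_num_gts=None)(gt_bboxes, img_shape, scale_factor, flip=True)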
class MaskTransform(object):
"""Preprocess masks.
1. resize masks to expected size and stack to a single array
2. flip the masks (if needed)
3. pad the masks (if needed)
"""
def __call__(self, masks, pad_shape, scale_factor, flip=False):
# aspect ratio unchanged
if isinstance(scale_factor, float):
masks = [
mmcv.imrescale(mask, scale_factor, interpolation='nearest')
for mask in masks
]
# aspect ratio changed
else:
w_ratio, h_ratio = scale_factor[:2]
if masks:
h, w = masks[0].shape[:2]
new_h = int(np.round(h * h_ratio))
new_w = int(np.round(w * w_ratio))
new_size = (new_w, new_h)
masks = [
mmcv.imresize(mask, new_size, interpolation='nearest')
for mask in masks
]
if flip:
masks = [mask[:, ::-1] for mask in masks]
padded_masks = [
mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
]
padded_masks = np.stack(padded_masks, axis=0)
return padded_masks
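# Illustrative usage sketch for MaskTransform above, not part of the original
# module: the mask sizes, pad_shape and scale_factor are hypothetical.
def _example_mask_transform(pad_shape, scale_factor):
    masks = [np.zeros((480, 640), dtype=np.uint8) for _ in range(2)]
    return MaskTransform()(masks, pad_shape, scale_factor, flip=False)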
class SegMapTransform(object):
"""Preprocess semantic segmentation maps.
1. rescale the segmentation map to expected size
3. flip the image (if needed)
4. pad the image (if needed)
"""
def __init__(self, size_divisor=None):
self.size_divisor = size_divisor
def __call__(self, img, scale, flip=False, keep_ratio=True):
if keep_ratio:
img = mmcv.imrescale(img, scale, interpolation='nearest')
else:
img = mmcv.imresize(img, scale, interpolation='nearest')
if flip:
img = mmcv.imflip(img)
if self.size_divisor is not None:
img = mmcv.impad_to_multiple(img, self.size_divisor)
return img
class Numpy2Tensor(object):
def __init__(self):
pass
def __call__(self, *args):
if len(args) == 1:
return torch.from_numpy(args[0])
else:
return tuple([torch.from_numpy(np.array(array)) for array in args])
| 1 | 17,826 | The blank line between the summary and detailed description is better to be kept. | open-mmlab-mmdetection | py |
@@ -167,6 +167,11 @@ func (a *API) Setup() {
a.setMetadataEndpointsKey()
a.writeShapeNames()
a.resolveReferences()
+
+ if !a.NoRemoveUnusedShapes {
+ a.removeUnusedShapes()
+ }
+
a.fixStutterNames()
a.renameExportable()
a.applyShapeNameAliases()
| 1 |
// +build codegen
package api
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
)
// APIs provides a set of API models loaded by API package name.
type APIs map[string]*API
// LoadAPIs loads the API model files from disk returning the map of API
// package. Returns error if multiple API model resolve to the same package
// name.
func LoadAPIs(modelPaths []string, baseImport string) (APIs, error) {
apis := APIs{}
for _, modelPath := range modelPaths {
a, err := loadAPI(modelPath, baseImport)
if err != nil {
return nil, fmt.Errorf("failed to load API, %v, %v", modelPath, err)
}
importPath := a.ImportPath()
if _, ok := apis[importPath]; ok {
return nil, fmt.Errorf(
"package names must be unique attempted to load %v twice. Second model file: %v",
importPath, modelPath)
}
apis[importPath] = a
}
return apis, nil
}
func loadAPI(modelPath, baseImport string) (*API, error) {
a := &API{
BaseImportPath: baseImport,
BaseCrosslinkURL: "https://docs.aws.amazon.com",
}
modelFile := filepath.Base(modelPath)
modelDir := filepath.Dir(modelPath)
err := attachModelFiles(modelDir,
modelLoader{modelFile, a.Attach, true},
modelLoader{"docs-2.json", a.AttachDocs, false},
modelLoader{"paginators-1.json", a.AttachPaginators, false},
modelLoader{"waiters-2.json", a.AttachWaiters, false},
modelLoader{"examples-1.json", a.AttachExamples, false},
modelLoader{"smoke.json", a.AttachSmokeTests, false},
)
if err != nil {
return nil, err
}
a.Setup()
return a, nil
}
type modelLoader struct {
Filename string
Loader func(string)
Required bool
}
func attachModelFiles(modelPath string, modelFiles ...modelLoader) error {
for _, m := range modelFiles {
filepath := filepath.Join(modelPath, m.Filename)
_, err := os.Stat(filepath)
if os.IsNotExist(err) && !m.Required {
continue
} else if err != nil {
return fmt.Errorf("failed to load model file %v, %v", m.Filename, err)
}
m.Loader(filepath)
}
return nil
}
// ExpandModelGlobPath returns a slice of model paths expanded from the glob
// pattern passed in. Returns the path of the model file to be loaded. Includes
// all versions of a service model.
//
// e.g:
// models/apis/*/*/api-2.json
//
// Or with specific model file:
// models/apis/service/version/api-2.json
func ExpandModelGlobPath(globs ...string) ([]string, error) {
modelPaths := []string{}
for _, g := range globs {
filepaths, err := filepath.Glob(g)
if err != nil {
return nil, err
}
for _, p := range filepaths {
modelPaths = append(modelPaths, p)
}
}
return modelPaths, nil
}
// TrimModelServiceVersions sorts the model paths by service version, then
// returns the most recent version of each service model, and the older model versions excluded.
//
// Uses the third from last path element to determine unique service. Only one
// service version will be included.
//
// models/apis/service/version/api-2.json
func TrimModelServiceVersions(modelPaths []string) (include, exclude []string) {
sort.Strings(modelPaths)
// Remove old API versions from list
m := map[string]struct{}{}
for i := len(modelPaths) - 1; i >= 0; i-- {
// service name is the third-from-last path component (models/apis/<service>/<version>/api-2.json)
parts := strings.Split(modelPaths[i], string(filepath.Separator))
svc := parts[len(parts)-3]
if _, ok := m[svc]; ok {
// Removed unused service version
exclude = append(exclude, modelPaths[i])
continue
}
include = append(include, modelPaths[i])
m[svc] = struct{}{}
}
return include, exclude
}
// Attach opens a file by name, and unmarshal its JSON data.
// Will proceed to setup the API if not already done so.
func (a *API) Attach(filename string) {
a.path = filepath.Dir(filename)
f, err := os.Open(filename)
defer f.Close()
if err != nil {
panic(err)
}
if err := json.NewDecoder(f).Decode(a); err != nil {
panic(fmt.Errorf("failed to decode %s, err: %v", filename, err))
}
}
// AttachString will unmarshal a raw JSON string, and setup the
// API if not already done so.
func (a *API) AttachString(str string) {
json.Unmarshal([]byte(str), a)
if !a.initialized {
a.Setup()
}
}
// Setup initializes the API.
func (a *API) Setup() {
a.setServiceAliaseName()
a.setMetadataEndpointsKey()
a.writeShapeNames()
a.resolveReferences()
a.fixStutterNames()
a.renameExportable()
a.applyShapeNameAliases()
a.createInputOutputShapes()
a.renameAPIPayloadShapes()
a.renameCollidingFields()
a.updateTopLevelShapeReferences()
a.suppressHTTP2EventStreams()
a.setupEventStreams()
a.findEndpointDiscoveryOp()
a.customizationPasses()
if !a.NoRemoveUnusedShapes {
a.removeUnusedShapes()
}
if !a.NoValidataShapeMethods {
a.addShapeValidations()
}
a.initialized = true
}
| 1 | 9,579 | Curiously, are these indents intended or should they be aligned? | aws-aws-sdk-go | go |
@@ -1130,7 +1130,9 @@ def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraD
else:
tableID = None
- roleText=getSpeechTextForProperties(reason=reason,role=role)
+ roleText=attrs.get('roleText')
+ if not roleText:
+ roleText=getSpeechTextForProperties(reason=reason,role=role)
stateText=getSpeechTextForProperties(reason=reason,states=states,_role=role)
keyboardShortcutText=getSpeechTextForProperties(reason=reason,keyboardShortcut=keyboardShortcut) if config.conf["presentation"]["reportKeyboardShortcuts"] else ""
ariaCurrentText=getSpeechTextForProperties(reason=reason,current=ariaCurrent)
| 1 |
# -*- coding: UTF-8 -*-
#speech.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V.
"""High-level functions to speak information.
"""
import itertools
import weakref
import unicodedata
import time
import colors
import globalVars
from logHandler import log
import api
import controlTypes
import config
import tones
import synthDriverHandler
from synthDriverHandler import *
import re
import textInfos
import queueHandler
import speechDictHandler
import characterProcessing
import languageHandler
speechMode_off=0
speechMode_beeps=1
speechMode_talk=2
#: How speech should be handled; one of speechMode_off, speechMode_beeps or speechMode_talk.
speechMode=speechMode_talk
speechMode_beeps_ms=15
beenCanceled=True
isPaused=False
curWordChars=[]
#Set containing locale codes for languages supporting conjunct characters
LANGS_WITH_CONJUNCT_CHARS = {'hi', 'as', 'bn', 'gu', 'kn', 'kok', 'ml', 'mni', 'mr', 'pa', 'te', 'ur', 'ta'}
#: The string used to separate distinct chunks of text when multiple chunks should be spoken without pauses.
# #555: Use two spaces so that numbers from adjacent chunks aren't treated as a single number
# for languages such as French and German which use space as a thousands separator.
CHUNK_SEPARATOR = " "
oldTreeLevel=None
oldTableID=None
oldRowNumber=None
oldRowSpan=None
oldColumnNumber=None
oldColumnSpan=None
def initialize():
"""Loads and sets the synth driver configured in nvda.ini."""
synthDriverHandler.initialize()
setSynth(config.conf["speech"]["synth"])
def terminate():
setSynth(None)
speechViewerObj=None
#: If a chunk of text contains only these characters, it will be considered blank.
BLANK_CHUNK_CHARS = frozenset((" ", "\n", "\r", "\0", u"\xa0"))
def isBlank(text):
"""Determine whether text should be reported as blank.
@param text: The text in question.
@type text: str
@return: C{True} if the text is blank, C{False} if not.
@rtype: bool
"""
return not text or set(text) <= BLANK_CHUNK_CHARS
RE_CONVERT_WHITESPACE = re.compile("[\0\r\n]")
def processText(locale,text,symbolLevel):
text = speechDictHandler.processText(text)
text = characterProcessing.processSpeechSymbols(locale, text, symbolLevel)
text = RE_CONVERT_WHITESPACE.sub(u" ", text)
return text.strip()
def getLastSpeechIndex():
"""Gets the last index passed by the synthesizer. Indexing is used so that its possible to find out when a certain peace of text has been spoken yet. Usually the character position of the text is passed to speak functions as the index.
@returns: the last index encountered
@rtype: int
"""
return getSynth().lastIndex
def cancelSpeech():
"""Interupts the synthesizer from currently speaking"""
global beenCanceled, isPaused, _speakSpellingGenerator
# Import only for this function to avoid circular import.
import sayAllHandler
sayAllHandler.stop()
speakWithoutPauses._pendingSpeechSequence=[]
speakWithoutPauses.lastSentIndex=None
if _speakSpellingGenerator:
_speakSpellingGenerator.close()
if beenCanceled:
return
elif speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
return
getSynth().cancel()
beenCanceled=True
isPaused=False
def pauseSpeech(switch):
global isPaused, beenCanceled
getSynth().pause(switch)
isPaused=switch
beenCanceled=False
def speakMessage(text,index=None):
"""Speaks a given message.
@param text: the message to speak
@type text: string
@param index: the index to mark this current text with, it's best to use the character position of the text if you know it
@type index: int
"""
speakText(text,index=index,reason=controlTypes.REASON_MESSAGE)
def getCurrentLanguage():
synth=getSynth()
language=None
if synth:
try:
language=synth.language if config.conf['speech']['trustVoiceLanguage'] else None
except NotImplementedError:
pass
if language:
language=languageHandler.normalizeLanguage(language)
if not language:
language=languageHandler.getLanguage()
return language
def spellTextInfo(info,useCharacterDescriptions=False):
"""Spells the text from the given TextInfo, honouring any LangChangeCommand objects it finds if autoLanguageSwitching is enabled."""
if not config.conf['speech']['autoLanguageSwitching']:
speakSpelling(info.text,useCharacterDescriptions=useCharacterDescriptions)
return
curLanguage=None
for field in info.getTextWithFields({}):
if isinstance(field,basestring):
speakSpelling(field,curLanguage,useCharacterDescriptions=useCharacterDescriptions)
elif isinstance(field,textInfos.FieldCommand) and field.command=="formatChange":
curLanguage=field.field.get('language')
_speakSpellingGenerator=None
def speakSpelling(text,locale=None,useCharacterDescriptions=False):
global beenCanceled, _speakSpellingGenerator
import speechViewer
if speechViewer.isActive:
speechViewer.appendText(text)
if speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
return
if isPaused:
cancelSpeech()
beenCanceled=False
defaultLanguage=getCurrentLanguage()
if not locale or (not config.conf['speech']['autoDialectSwitching'] and locale.split('_')[0]==defaultLanguage.split('_')[0]):
locale=defaultLanguage
if not text:
# Translators: This is spoken when NVDA moves to an empty line.
return getSynth().speak((_("blank"),))
if not text.isspace():
text=text.rstrip()
if _speakSpellingGenerator and _speakSpellingGenerator.gi_frame:
_speakSpellingGenerator.send((text,locale,useCharacterDescriptions))
else:
_speakSpellingGenerator=_speakSpellingGen(text,locale,useCharacterDescriptions)
try:
# Speak the first character before this function returns.
next(_speakSpellingGenerator)
except StopIteration:
return
queueHandler.registerGeneratorObject(_speakSpellingGenerator)
def getCharDescListFromText(text,locale):
"""This method prepares a list, which contains character and its description for all characters the text is made up of, by checking the presence of character descriptions in characterDescriptions.dic of that locale for all possible combination of consecutive characters in the text.
This is done to take care of conjunct characters present in several languages such as Hindi, Urdu, etc.
"""
charDescList = []
charDesc=None
i = len(text)
while i:
subText = text[:i]
charDesc = characterProcessing.getCharacterDescription(locale,subText)
if charDesc or i==1:
if not charDesc:
# #5375: We're down to a single character (i == 1) and we don't have a description.
# Try converting to lower case.
# This provides for upper case English characters (which only have lower case descriptions).
charDesc = characterProcessing.getCharacterDescription(locale,subText.lower())
charDescList.append((subText,charDesc))
text = text[i:]
i = len(text)
else:
i = i - 1
return charDescList
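# Illustrative sketch, not part of the original module: the same longest-prefix
# grouping performed by getCharDescListFromText, demonstrated against a plain
# dict instead of characterProcessing so the conjunct-character handling can be
# seen in isolation. The descriptions mapping passed in is hypothetical.
def _exampleCharDescGrouping(text, descriptions):
    grouped = []
    while text:
        for i in xrange(len(text), 0, -1):
            prefix = text[:i]
            if prefix in descriptions or i == 1:
                grouped.append((prefix, descriptions.get(prefix)))
                text = text[i:]
                break
    return grouped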
def _speakSpellingGen(text,locale,useCharacterDescriptions):
synth=getSynth()
synthConfig=config.conf["speech"][synth.name]
buf=[(text,locale,useCharacterDescriptions)]
for text,locale,useCharacterDescriptions in buf:
textLength=len(text)
count = 0
localeHasConjuncts = True if locale.split('_',1)[0] in LANGS_WITH_CONJUNCT_CHARS else False
charDescList = getCharDescListFromText(text,locale) if localeHasConjuncts else text
for item in charDescList:
if localeHasConjuncts:
# item is a tuple containing character and its description
char = item[0]
charDesc = item[1]
else:
# item is just a character.
char = item
if useCharacterDescriptions:
charDesc=characterProcessing.getCharacterDescription(locale,char.lower())
uppercase=char.isupper()
if useCharacterDescriptions and charDesc:
#Consider changing to multiple synth speech calls
char=charDesc[0] if textLength>1 else u"\u3001".join(charDesc)
else:
char=characterProcessing.processSpeechSymbol(locale,char)
if uppercase and synthConfig["sayCapForCapitals"]:
# Translators: cap will be spoken before the given letter when it is capitalized.
char=_("cap %s")%char
if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
oldPitch=synthConfig["pitch"]
synth.pitch=max(0,min(oldPitch+synthConfig["capPitchChange"],100))
count = len(char)
index=count+1
log.io("Speaking character %r"%char)
speechSequence=[LangChangeCommand(locale)] if config.conf['speech']['autoLanguageSwitching'] else []
if len(char) == 1 and synthConfig["useSpellingFunctionality"]:
speechSequence.append(CharacterModeCommand(True))
if index is not None:
speechSequence.append(IndexCommand(index))
speechSequence.append(char)
synth.speak(speechSequence)
if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
synth.pitch=oldPitch
while textLength>1 and (isPaused or getLastSpeechIndex()!=index):
for x in xrange(2):
args=yield
if args: buf.append(args)
if uppercase and synthConfig["beepForCapitals"]:
tones.beep(2000,50)
args=yield
if args: buf.append(args)
def speakObjectProperties(obj,reason=controlTypes.REASON_QUERY,index=None,**allowedProperties):
#Fetch the values for all wanted properties
newPropertyValues={}
positionInfo=None
for name,value in allowedProperties.iteritems():
if name=="includeTableCellCoords":
# This is verbosity info.
newPropertyValues[name]=value
elif name.startswith('positionInfo_') and value:
if positionInfo is None:
positionInfo=obj.positionInfo
elif value:
try:
newPropertyValues[name]=getattr(obj,name)
except NotImplementedError:
pass
if positionInfo:
if allowedProperties.get('positionInfo_level',False) and 'level' in positionInfo:
newPropertyValues['positionInfo_level']=positionInfo['level']
if allowedProperties.get('positionInfo_indexInGroup',False) and 'indexInGroup' in positionInfo:
newPropertyValues['positionInfo_indexInGroup']=positionInfo['indexInGroup']
if allowedProperties.get('positionInfo_similarItemsInGroup',False) and 'similarItemsInGroup' in positionInfo:
newPropertyValues['positionInfo_similarItemsInGroup']=positionInfo['similarItemsInGroup']
#Fetch the cached properties and update them with the new ones
oldCachedPropertyValues=getattr(obj,'_speakObjectPropertiesCache',{}).copy()
cachedPropertyValues=oldCachedPropertyValues.copy()
cachedPropertyValues.update(newPropertyValues)
obj._speakObjectPropertiesCache=cachedPropertyValues
#If we should only cache we can stop here
if reason==controlTypes.REASON_ONLYCACHE:
return
#If only speaking changes, then filter out all values that haven't changed
if reason==controlTypes.REASON_CHANGE:
for name in set(newPropertyValues)&set(oldCachedPropertyValues):
if newPropertyValues[name]==oldCachedPropertyValues[name]:
del newPropertyValues[name]
elif name=="states": #states need specific handling
oldStates=oldCachedPropertyValues[name]
newStates=newPropertyValues[name]
newPropertyValues['states']=newStates-oldStates
newPropertyValues['negativeStates']=oldStates-newStates
#properties such as states need to know the role to speak properly, give it as a _ name
newPropertyValues['_role']=newPropertyValues.get('role',obj.role)
# The real states are needed also, as the states entry might be filtered.
newPropertyValues['_states']=obj.states
if "rowNumber" in newPropertyValues or "columnNumber" in newPropertyValues:
# We're reporting table cell info, so pass the table ID.
try:
newPropertyValues["_tableID"]=obj.tableID
except NotImplementedError:
pass
newPropertyValues['current']=obj.isCurrent
if allowedProperties.get('placeholder', False):
newPropertyValues['placeholder']=obj.placeholder
#Get the speech text for the properties we want to speak, and then speak it
text=getSpeechTextForProperties(reason,**newPropertyValues)
if text:
speakText(text,index=index)
def _speakPlaceholderIfEmpty(info, obj, reason):
""" attempt to speak placeholder attribute if the textInfo 'info' is empty
@return: True if info was considered empty, and we attempted to speak the placeholder value.
False if info was not considered empty.
"""
textEmpty = obj._isTextEmpty
if textEmpty:
speakObjectProperties(obj,reason=reason,placeholder=True)
return True
return False
def speakObject(obj,reason=controlTypes.REASON_QUERY,index=None):
from NVDAObjects import NVDAObjectTextInfo
role=obj.role
# Choose when we should report the content of this object's textInfo, rather than just the object's value
import browseMode
shouldReportTextContent=not (
# focusEntered should never present text content
(reason==controlTypes.REASON_FOCUSENTERED) or
# The rootNVDAObject of a browseMode document in browse mode (not passThrough) should never present text content
(isinstance(obj.treeInterceptor,browseMode.BrowseModeDocumentTreeInterceptor) and not obj.treeInterceptor.passThrough and obj==obj.treeInterceptor.rootNVDAObject) or
# objects that do not report as having navigableText should not report their text content either
not obj._hasNavigableText
)
allowProperties={'name':True,'role':True,'roleText':True,'states':True,'value':True,'description':True,'keyboardShortcut':True,'positionInfo_level':True,'positionInfo_indexInGroup':True,'positionInfo_similarItemsInGroup':True,"cellCoordsText":True,"rowNumber":True,"columnNumber":True,"includeTableCellCoords":True,"columnCount":True,"rowCount":True,"rowHeaderText":True,"columnHeaderText":True,"rowSpan":True,"columnSpan":True}
if reason==controlTypes.REASON_FOCUSENTERED:
allowProperties["value"]=False
allowProperties["keyboardShortcut"]=False
allowProperties["positionInfo_level"]=False
# Aside from excluding some properties, focus entered should be spoken like focus.
reason=controlTypes.REASON_FOCUS
if not config.conf["presentation"]["reportObjectDescriptions"]:
allowProperties["description"]=False
if not config.conf["presentation"]["reportKeyboardShortcuts"]:
allowProperties["keyboardShortcut"]=False
if not config.conf["presentation"]["reportObjectPositionInformation"]:
allowProperties["positionInfo_level"]=False
allowProperties["positionInfo_indexInGroup"]=False
allowProperties["positionInfo_similarItemsInGroup"]=False
if reason!=controlTypes.REASON_QUERY:
allowProperties["rowCount"]=False
allowProperties["columnCount"]=False
formatConf=config.conf["documentFormatting"]
if not formatConf["reportTableCellCoords"]:
allowProperties["cellCoordsText"]=False
# rowNumber and columnNumber might be needed even if we're not reporting coordinates.
allowProperties["includeTableCellCoords"]=False
if not formatConf["reportTableHeaders"]:
allowProperties["rowHeaderText"]=False
allowProperties["columnHeaderText"]=False
if (not formatConf["reportTables"]
or (not formatConf["reportTableCellCoords"] and not formatConf["reportTableHeaders"])):
# We definitely aren't reporting any table info at all.
allowProperties["rowNumber"]=False
allowProperties["columnNumber"]=False
allowProperties["rowSpan"]=False
allowProperties["columnSpan"]=False
if shouldReportTextContent:
allowProperties['value']=False
speakObjectProperties(obj,reason=reason,index=index,**allowProperties)
if reason==controlTypes.REASON_ONLYCACHE:
return
if shouldReportTextContent:
try:
info=obj.makeTextInfo(textInfos.POSITION_SELECTION)
if not info.isCollapsed:
# if there is selected text, then there is a value and we do not report placeholder
# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
speakSelectionMessage(_("selected %s"),info.text)
else:
info.expand(textInfos.UNIT_LINE)
_speakPlaceholderIfEmpty(info, obj, reason)
speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET)
except:
newInfo=obj.makeTextInfo(textInfos.POSITION_ALL)
if not _speakPlaceholderIfEmpty(newInfo, obj, reason):
speakTextInfo(newInfo,unit=textInfos.UNIT_PARAGRAPH,reason=controlTypes.REASON_CARET)
elif role==controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.speechProvider:
try:
speak(mathPres.speechProvider.getSpeechForMathMl(obj.mathMl))
except (NotImplementedError, LookupError):
pass
def speakText(text,index=None,reason=controlTypes.REASON_MESSAGE,symbolLevel=None):
"""Speaks some text.
@param text: The text to speak.
@type text: str
@param index: The index to mark this text with, which can be used later to determine whether this piece of text has been spoken.
@type index: int
@param reason: The reason for this speech; one of the controlTypes.REASON_* constants.
@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
"""
speechSequence=[]
if index is not None:
speechSequence.append(IndexCommand(index))
if text is not None:
if isBlank(text):
# Translators: This is spoken when the line is considered blank.
text=_("blank")
speechSequence.append(text)
speak(speechSequence,symbolLevel=symbolLevel)
RE_INDENTATION_SPLIT = re.compile(r"^([^\S\r\n\f\v]*)(.*)$", re.UNICODE | re.DOTALL)
def splitTextIndentation(text):
"""Splits indentation from the rest of the text.
@param text: The text to split.
@type text: basestring
@return: Tuple of indentation and content.
@rtype: (basestring, basestring)
"""
return RE_INDENTATION_SPLIT.match(text).groups()
RE_INDENTATION_CONVERT = re.compile(r"(?P<char>\s)(?P=char)*", re.UNICODE)
IDT_BASE_FREQUENCY = 220 #One octave below middle A.
IDT_TONE_DURATION = 80 #Milliseconds
IDT_MAX_SPACES = 72
def getIndentationSpeech(indentation, formatConfig):
"""Retrieves the phrase to be spoken for a given string of indentation.
@param indentation: The string of indentation.
@type indentation: unicode
@param formatConfig: The configuration to use.
@type formatConfig: dict
@return: The phrase to be spoken.
@rtype: unicode
"""
speechIndentConfig = formatConfig["reportLineIndentation"]
toneIndentConfig = formatConfig["reportLineIndentationWithTones"] and speechMode == speechMode_talk
if not indentation:
if toneIndentConfig:
tones.beep(IDT_BASE_FREQUENCY, IDT_TONE_DURATION)
# Translators: This is spoken when the given line has no indentation.
return (_("no indent") if speechIndentConfig else "")
#The non-breaking space is semantically a space, so we replace it here.
indentation = indentation.replace(u"\xa0", u" ")
res = []
locale=languageHandler.getLanguage()
quarterTones = 0
for m in RE_INDENTATION_CONVERT.finditer(indentation):
raw = m.group()
symbol = characterProcessing.processSpeechSymbol(locale, raw[0])
count = len(raw)
if symbol == raw[0]:
# There is no replacement for this character, so do nothing.
res.append(raw)
elif count == 1:
res.append(symbol)
else:
res.append(u"{count} {symbol}".format(count=count, symbol=symbol))
quarterTones += (count*4 if raw[0]== "\t" else count)
speak = speechIndentConfig
if toneIndentConfig:
if quarterTones <= IDT_MAX_SPACES:
#Remove me during speech refactor.
pitch = IDT_BASE_FREQUENCY*2**(quarterTones/24.0) #24 quarter tones per octave.
tones.beep(pitch, IDT_TONE_DURATION)
else:
			#we have more than 72 spaces (18 tabs), and must speak it since we don't want to hurt the users' ears.
speak = True
return (" ".join(res) if speak else "")
def speak(speechSequence,symbolLevel=None):
"""Speaks a sequence of text and speech commands
@param speechSequence: the sequence of text and L{SpeechCommand} objects to speak
@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
"""
if not speechSequence: #Pointless - nothing to speak
return
import speechViewer
if speechViewer.isActive:
for item in speechSequence:
if isinstance(item,basestring):
speechViewer.appendText(item)
global beenCanceled, curWordChars
curWordChars=[]
if speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
return
if isPaused:
cancelSpeech()
beenCanceled=False
#Filter out redundant LangChangeCommand objects
#And also fill in default values
autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
autoDialectSwitching=config.conf['speech']['autoDialectSwitching']
curLanguage=defaultLanguage=getCurrentLanguage()
prevLanguage=None
defaultLanguageRoot=defaultLanguage.split('_')[0]
oldSpeechSequence=speechSequence
speechSequence=[]
for item in oldSpeechSequence:
if isinstance(item,LangChangeCommand):
if not autoLanguageSwitching: continue
curLanguage=item.lang
if not curLanguage or (not autoDialectSwitching and curLanguage.split('_')[0]==defaultLanguageRoot):
curLanguage=defaultLanguage
elif isinstance(item,basestring):
if not item: continue
if autoLanguageSwitching and curLanguage!=prevLanguage:
speechSequence.append(LangChangeCommand(curLanguage))
prevLanguage=curLanguage
speechSequence.append(item)
else:
speechSequence.append(item)
if not speechSequence:
# After normalisation, the sequence is empty.
# There's nothing to speak.
return
log.io("Speaking %r" % speechSequence)
if symbolLevel is None:
symbolLevel=config.conf["speech"]["symbolLevel"]
curLanguage=defaultLanguage
inCharacterMode=False
for index in xrange(len(speechSequence)):
item=speechSequence[index]
if isinstance(item,CharacterModeCommand):
inCharacterMode=item.state
if autoLanguageSwitching and isinstance(item,LangChangeCommand):
curLanguage=item.lang
if isinstance(item,basestring):
speechSequence[index]=processText(curLanguage,item,symbolLevel)
if not inCharacterMode:
speechSequence[index]+=CHUNK_SEPARATOR
getSynth().speak(speechSequence)
def speakSelectionMessage(message,text):
if len(text) < 512:
speakMessage(message % text)
else:
# Translators: This is spoken when the user has selected a large portion of text. Example output "1000 characters"
speakMessage(message % _("%d characters") % len(text))
def speakSelectionChange(oldInfo,newInfo,speakSelected=True,speakUnselected=True,generalize=False):
"""Speaks a change in selection, either selected or unselected text.
@param oldInfo: a TextInfo instance representing what the selection was before
@type oldInfo: L{textInfos.TextInfo}
@param newInfo: a TextInfo instance representing what the selection is now
@type newInfo: L{textInfos.TextInfo}
@param generalize: if True, then this function knows that the text may have changed between the creation of the oldInfo and newInfo objects, meaning that changes need to be spoken more generally, rather than speaking the specific text, as the bounds may be all wrong.
@type generalize: boolean
"""
selectedTextList=[]
unselectedTextList=[]
if newInfo.isCollapsed and oldInfo.isCollapsed:
return
startToStart=newInfo.compareEndPoints(oldInfo,"startToStart")
startToEnd=newInfo.compareEndPoints(oldInfo,"startToEnd")
endToStart=newInfo.compareEndPoints(oldInfo,"endToStart")
endToEnd=newInfo.compareEndPoints(oldInfo,"endToEnd")
if speakSelected and oldInfo.isCollapsed:
selectedTextList.append(newInfo.text)
elif speakUnselected and newInfo.isCollapsed:
unselectedTextList.append(oldInfo.text)
else:
if startToEnd>0 or endToStart<0:
if speakSelected and not newInfo.isCollapsed:
selectedTextList.append(newInfo.text)
if speakUnselected and not oldInfo.isCollapsed:
unselectedTextList.append(oldInfo.text)
else:
if speakSelected and startToStart<0 and not newInfo.isCollapsed:
tempInfo=newInfo.copy()
tempInfo.setEndPoint(oldInfo,"endToStart")
selectedTextList.append(tempInfo.text)
if speakSelected and endToEnd>0 and not newInfo.isCollapsed:
tempInfo=newInfo.copy()
tempInfo.setEndPoint(oldInfo,"startToEnd")
selectedTextList.append(tempInfo.text)
if startToStart>0 and not oldInfo.isCollapsed:
tempInfo=oldInfo.copy()
tempInfo.setEndPoint(newInfo,"endToStart")
unselectedTextList.append(tempInfo.text)
if endToEnd<0 and not oldInfo.isCollapsed:
tempInfo=oldInfo.copy()
tempInfo.setEndPoint(newInfo,"startToEnd")
unselectedTextList.append(tempInfo.text)
locale=getCurrentLanguage()
if speakSelected:
if not generalize:
for text in selectedTextList:
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken while the user is in the process of selecting something, For example: "hello selected"
speakSelectionMessage(_("%s selected"),text)
elif len(selectedTextList)>0:
text=newInfo.text
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
speakSelectionMessage(_("selected %s"),text)
if speakUnselected:
if not generalize:
for text in unselectedTextList:
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate what has been unselected. for example 'hello unselected'
speakSelectionMessage(_("%s unselected"),text)
elif len(unselectedTextList)>0:
if not newInfo.isCollapsed:
text=newInfo.text
if len(text)==1:
text=characterProcessing.processSpeechSymbol(locale,text)
# Translators: This is spoken to indicate when the previous selection was removed and a new selection was made. for example 'hello world selected instead'
speakSelectionMessage(_("%s selected instead"),text)
else:
# Translators: Reported when selection is removed.
speakMessage(_("selection removed"))
#: The number of typed characters for which to suppress speech.
_suppressSpeakTypedCharactersNumber = 0
#: The time at which suppressed typed characters were sent.
_suppressSpeakTypedCharactersTime = None
def _suppressSpeakTypedCharacters(number):
"""Suppress speaking of typed characters.
This should be used when sending a string of characters to the system
and those characters should not be spoken individually as if the user were typing them.
@param number: The number of characters to suppress.
@type number: int
"""
global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
_suppressSpeakTypedCharactersNumber += number
_suppressSpeakTypedCharactersTime = time.time()
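# Usage sketch: before programmatically sending a string of, say, 3 characters to the
# system, suppress their echo so they are not spoken as if the user typed them:
#   _suppressSpeakTypedCharacters(3)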
#: The character to use when masking characters in protected fields.
PROTECTED_CHAR = "*"
#: The first character which is not a Unicode control character.
#: This is used to test whether a character should be spoken as a typed character;
#: i.e. it should have a visual or spatial representation.
FIRST_NONCONTROL_CHAR = u" "
def speakTypedCharacters(ch):
global curWordChars
typingIsProtected=api.isTypingProtected()
if typingIsProtected:
realChar=PROTECTED_CHAR
else:
realChar=ch
if unicodedata.category(ch)[0] in "LMN":
curWordChars.append(realChar)
elif ch=="\b":
# Backspace, so remove the last character from our buffer.
del curWordChars[-1:]
elif ch==u'\u007f':
# delete character produced in some apps with control+backspace
return
elif len(curWordChars)>0:
typedWord="".join(curWordChars)
curWordChars=[]
if log.isEnabledFor(log.IO):
log.io("typed word: %s"%typedWord)
if config.conf["keyboard"]["speakTypedWords"] and not typingIsProtected:
speakText(typedWord)
global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
if _suppressSpeakTypedCharactersNumber > 0:
# We primarily suppress based on character count and still have characters to suppress.
# However, we time out after a short while just in case.
suppress = time.time() - _suppressSpeakTypedCharactersTime <= 0.1
if suppress:
_suppressSpeakTypedCharactersNumber -= 1
else:
_suppressSpeakTypedCharactersNumber = 0
_suppressSpeakTypedCharactersTime = None
else:
suppress = False
if not suppress and config.conf["keyboard"]["speakTypedCharacters"] and ch >= FIRST_NONCONTROL_CHAR:
speakSpelling(realChar)
class SpeakTextInfoState(object):
"""Caches the state of speakTextInfo such as the current controlField stack, current formatfield and indentation."""
__slots__=[
'objRef',
'controlFieldStackCache',
'formatFieldAttributesCache',
'indentationCache',
]
def __init__(self,obj):
if isinstance(obj,SpeakTextInfoState):
oldState=obj
self.objRef=oldState.objRef
else:
self.objRef=weakref.ref(obj)
oldState=getattr(obj,'_speakTextInfoState',None)
self.controlFieldStackCache=list(oldState.controlFieldStackCache) if oldState else []
self.formatFieldAttributesCache=oldState.formatFieldAttributesCache if oldState else {}
self.indentationCache=oldState.indentationCache if oldState else ""
def updateObj(self):
obj=self.objRef()
if obj:
obj._speakTextInfoState=self.copy()
def copy(self):
return self.__class__(self)
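# Sketch of reusing an explicit cache object across calls (instead of the implicit
# per-object cache stored on obj._speakTextInfoState):
#   state = SpeakTextInfoState(obj)
#   speakTextInfo(info, useCache=state)
#   state.updateObj()  # optionally push the updated cache back onto the object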
def _speakTextInfo_addMath(speechSequence, info, field):
import mathPres
mathPres.ensureInit()
if not mathPres.speechProvider:
return
try:
speechSequence.extend(mathPres.speechProvider.getSpeechForMathMl(info.getMathMl(field)))
except (NotImplementedError, LookupError):
return
def speakTextInfo(info,useCache=True,formatConfig=None,unit=None,reason=controlTypes.REASON_QUERY,index=None,onlyInitialFields=False,suppressBlanks=False):
onlyCache=reason==controlTypes.REASON_ONLYCACHE
if isinstance(useCache,SpeakTextInfoState):
speakTextInfoState=useCache
elif useCache:
speakTextInfoState=SpeakTextInfoState(info.obj)
else:
speakTextInfoState=None
autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
extraDetail=unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD)
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
if extraDetail:
formatConfig=formatConfig.copy()
formatConfig['extraDetail']=True
reportIndentation=unit==textInfos.UNIT_LINE and ( formatConfig["reportLineIndentation"] or formatConfig["reportLineIndentationWithTones"])
speechSequence=[]
#Fetch the last controlFieldStack, or make a blank one
controlFieldStackCache=speakTextInfoState.controlFieldStackCache if speakTextInfoState else []
formatFieldAttributesCache=speakTextInfoState.formatFieldAttributesCache if speakTextInfoState else {}
textWithFields=info.getTextWithFields(formatConfig)
# We don't care about node bounds, especially when comparing fields.
# Remove them.
for command in textWithFields:
if not isinstance(command,textInfos.FieldCommand):
continue
field=command.field
if not field:
continue
try:
del field["_startOfNode"]
except KeyError:
pass
try:
del field["_endOfNode"]
except KeyError:
pass
#Make a new controlFieldStack and formatField from the textInfo's initialFields
newControlFieldStack=[]
newFormatField=textInfos.FormatField()
initialFields=[]
for field in textWithFields:
if isinstance(field,textInfos.FieldCommand) and field.command in ("controlStart","formatChange"):
initialFields.append(field.field)
else:
break
if len(initialFields)>0:
del textWithFields[0:len(initialFields)]
endFieldCount=0
for field in reversed(textWithFields):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd":
endFieldCount+=1
else:
break
if endFieldCount>0:
del textWithFields[0-endFieldCount:]
for field in initialFields:
if isinstance(field,textInfos.ControlField):
newControlFieldStack.append(field)
elif isinstance(field,textInfos.FormatField):
newFormatField.update(field)
else:
raise ValueError("unknown field: %s"%field)
#Calculate how many fields in the old and new controlFieldStacks are the same
commonFieldCount=0
for count in xrange(min(len(newControlFieldStack),len(controlFieldStackCache))):
		# #2199: When comparing controlFields try using uniqueID if it exists before resorting to comparing the entire dictionary
oldUniqueID=controlFieldStackCache[count].get('uniqueID')
newUniqueID=newControlFieldStack[count].get('uniqueID')
if ((oldUniqueID is not None or newUniqueID is not None) and newUniqueID==oldUniqueID) or (newControlFieldStack[count]==controlFieldStackCache[count]):
commonFieldCount+=1
else:
break
# #2591: Only if the reason is not focus, Speak the exit of any controlFields not in the new stack.
# We don't do this for focus because hearing "out of list", etc. isn't useful when tabbing or using quick navigation and makes navigation less efficient.
if reason!=controlTypes.REASON_FOCUS:
endingBlock=False
for count in reversed(xrange(commonFieldCount,len(controlFieldStackCache))):
text=info.getControlFieldSpeech(controlFieldStackCache[count],controlFieldStackCache[0:count],"end_removedFromControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
if not endingBlock and reason==controlTypes.REASON_SAYALL:
endingBlock=bool(int(controlFieldStackCache[count].get('isBlock',0)))
if endingBlock:
speechSequence.append(SpeakWithoutPausesBreakCommand())
# The TextInfo should be considered blank if we are only exiting fields (i.e. we aren't entering any new fields and there is no text).
isTextBlank=True
# Even when there's no speakable text, we still need to notify the synth of the index.
if index is not None:
speechSequence.append(IndexCommand(index))
#Get speech text for any fields that are in both controlFieldStacks, if extra detail is not requested
if not extraDetail:
for count in xrange(commonFieldCount):
field=newControlFieldStack[count]
text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_inControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
if field.get("role")==controlTypes.ROLE_MATH:
isTextBlank=False
_speakTextInfo_addMath(speechSequence,info,field)
#Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack
for count in xrange(commonFieldCount,len(newControlFieldStack)):
field=newControlFieldStack[count]
text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_addedToControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
if field.get("role")==controlTypes.ROLE_MATH:
isTextBlank=False
_speakTextInfo_addMath(speechSequence,info,field)
commonFieldCount+=1
#Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField.
text=info.getFormatFieldSpeech(newFormatField,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail,initialFormat=True)
if text:
speechSequence.append(text)
if autoLanguageSwitching:
language=newFormatField.get('language')
speechSequence.append(LangChangeCommand(language))
lastLanguage=language
if onlyInitialFields or (unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD) and len(textWithFields)>0 and len(textWithFields[0])==1 and all((isinstance(x,textInfos.FieldCommand) and x.command=="controlEnd") for x in itertools.islice(textWithFields,1,None) )):
if not onlyCache:
if onlyInitialFields or any(isinstance(x,basestring) for x in speechSequence):
speak(speechSequence)
if not onlyInitialFields:
speakSpelling(textWithFields[0],locale=language if autoLanguageSwitching else None)
if useCache:
speakTextInfoState.controlFieldStackCache=newControlFieldStack
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
return
#Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands
#But also keep newControlFieldStack up to date as we will need it for the ends
# Add any text to a separate list, as it must be handled differently.
#Also make sure that LangChangeCommand objects are added before any controlField or formatField speech
relativeSpeechSequence=[]
inTextChunk=False
allIndentation=""
indentationDone=False
for command in textWithFields:
if isinstance(command,basestring):
if reportIndentation and not indentationDone:
indentation,command=splitTextIndentation(command)
# Combine all indentation into one string for later processing.
allIndentation+=indentation
if command:
# There was content after the indentation, so there is no more indentation.
indentationDone=True
if command:
if inTextChunk:
relativeSpeechSequence[-1]+=command
else:
relativeSpeechSequence.append(command)
inTextChunk=True
elif isinstance(command,textInfos.FieldCommand):
newLanguage=None
if command.command=="controlStart":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldText=info.getControlFieldSpeech(command.field,newControlFieldStack,"start_relative",formatConfig,extraDetail,reason=reason)
newControlFieldStack.append(command.field)
elif command.command=="controlEnd":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldText=info.getControlFieldSpeech(newControlFieldStack[-1],newControlFieldStack[0:-1],"end_relative",formatConfig,extraDetail,reason=reason)
del newControlFieldStack[-1]
if commonFieldCount>len(newControlFieldStack):
commonFieldCount=len(newControlFieldStack)
elif command.command=="formatChange":
fieldText=info.getFormatFieldSpeech(command.field,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail)
if fieldText:
inTextChunk=False
if autoLanguageSwitching:
newLanguage=command.field.get('language')
if lastLanguage!=newLanguage:
# The language has changed, so this starts a new text chunk.
inTextChunk=False
if not inTextChunk:
if fieldText:
if autoLanguageSwitching and lastLanguage is not None:
# Fields must be spoken in the default language.
relativeSpeechSequence.append(LangChangeCommand(None))
lastLanguage=None
relativeSpeechSequence.append(fieldText)
if command.command=="controlStart" and command.field.get("role")==controlTypes.ROLE_MATH:
_speakTextInfo_addMath(relativeSpeechSequence,info,command.field)
if autoLanguageSwitching and newLanguage!=lastLanguage:
relativeSpeechSequence.append(LangChangeCommand(newLanguage))
lastLanguage=newLanguage
if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache:
indentationSpeech=getIndentationSpeech(allIndentation, formatConfig)
if autoLanguageSwitching and speechSequence[-1].lang is not None:
# Indentation must be spoken in the default language,
# but the initial format field specified a different language.
# Insert the indentation before the LangChangeCommand.
speechSequence.insert(-1, indentationSpeech)
else:
speechSequence.append(indentationSpeech)
if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation
# Don't add this text if it is blank.
relativeBlank=True
for x in relativeSpeechSequence:
if isinstance(x,basestring) and not isBlank(x):
relativeBlank=False
break
if not relativeBlank:
speechSequence.extend(relativeSpeechSequence)
isTextBlank=False
#Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested
if autoLanguageSwitching and lastLanguage is not None:
speechSequence.append(LangChangeCommand(None))
lastLanguage=None
if not extraDetail:
for count in reversed(xrange(min(len(newControlFieldStack),commonFieldCount))):
text=info.getControlFieldSpeech(newControlFieldStack[count],newControlFieldStack[0:count],"end_inControlFieldStack",formatConfig,extraDetail,reason=reason)
if text:
speechSequence.append(text)
isTextBlank=False
# If there is nothing that should cause the TextInfo to be considered non-blank, blank should be reported, unless we are doing a say all.
if not suppressBlanks and reason != controlTypes.REASON_SAYALL and isTextBlank:
# Translators: This is spoken when the line is considered blank.
speechSequence.append(_("blank"))
#Cache a copy of the new controlFieldStack for future use
if useCache:
speakTextInfoState.controlFieldStackCache=list(newControlFieldStack)
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
if not onlyCache and speechSequence:
if reason==controlTypes.REASON_SAYALL:
speakWithoutPauses(speechSequence)
else:
speak(speechSequence)
def getSpeechTextForProperties(reason=controlTypes.REASON_QUERY,**propertyValues):
global oldTreeLevel, oldTableID, oldRowNumber, oldRowSpan, oldColumnNumber, oldColumnSpan
textList=[]
name=propertyValues.get('name')
if name:
textList.append(name)
if 'role' in propertyValues:
role=propertyValues['role']
speakRole=True
elif '_role' in propertyValues:
speakRole=False
role=propertyValues['_role']
else:
speakRole=False
role=controlTypes.ROLE_UNKNOWN
value=propertyValues.get('value') if role not in controlTypes.silentValuesForRoles else None
cellCoordsText=propertyValues.get('cellCoordsText')
rowNumber=propertyValues.get('rowNumber')
columnNumber=propertyValues.get('columnNumber')
includeTableCellCoords=propertyValues.get('includeTableCellCoords',True)
if role==controlTypes.ROLE_CHARTELEMENT:
speakRole=False
roleText=propertyValues.get('roleText')
if speakRole and (roleText or reason not in (controlTypes.REASON_SAYALL,controlTypes.REASON_CARET,controlTypes.REASON_FOCUS) or not (name or value or cellCoordsText or rowNumber or columnNumber) or role not in controlTypes.silentRolesOnFocus) and (role!=controlTypes.ROLE_MATH or reason not in (controlTypes.REASON_CARET,controlTypes.REASON_SAYALL)):
textList.append(roleText if roleText else controlTypes.roleLabels[role])
if value:
textList.append(value)
states=propertyValues.get('states',set())
realStates=propertyValues.get('_states',states)
negativeStates=propertyValues.get('negativeStates',set())
if states or negativeStates:
textList.extend(controlTypes.processAndLabelStates(role, realStates, reason, states, negativeStates))
if 'description' in propertyValues:
textList.append(propertyValues['description'])
if 'keyboardShortcut' in propertyValues:
textList.append(propertyValues['keyboardShortcut'])
if includeTableCellCoords and cellCoordsText:
textList.append(cellCoordsText)
if cellCoordsText or rowNumber or columnNumber:
tableID = propertyValues.get("_tableID")
# Always treat the table as different if there is no tableID.
sameTable = (tableID and tableID == oldTableID)
# Don't update the oldTableID if no tableID was given.
if tableID and not sameTable:
oldTableID = tableID
rowSpan = propertyValues.get("rowSpan")
columnSpan = propertyValues.get("columnSpan")
if rowNumber and (not sameTable or rowNumber != oldRowNumber or rowSpan != oldRowSpan):
rowHeaderText = propertyValues.get("rowHeaderText")
if rowHeaderText:
textList.append(rowHeaderText)
if includeTableCellCoords and not cellCoordsText:
# Translators: Speaks current row number (example output: row 3).
textList.append(_("row %s")%rowNumber)
if rowSpan>1 and columnSpan<=1:
# Translators: Speaks the row span added to the current row number (example output: through 5).
textList.append(_("through %s")%(rowNumber+rowSpan-1))
oldRowNumber = rowNumber
oldRowSpan = rowSpan
if columnNumber and (not sameTable or columnNumber != oldColumnNumber or columnSpan != oldColumnSpan):
columnHeaderText = propertyValues.get("columnHeaderText")
if columnHeaderText:
textList.append(columnHeaderText)
if includeTableCellCoords and not cellCoordsText:
# Translators: Speaks current column number (example output: column 3).
textList.append(_("column %s")%columnNumber)
if columnSpan>1 and rowSpan<=1:
# Translators: Speaks the column span added to the current column number (example output: through 5).
textList.append(_("through %s")%(columnNumber+columnSpan-1))
oldColumnNumber = columnNumber
oldColumnSpan = columnSpan
if includeTableCellCoords and not cellCoordsText and rowSpan>1 and columnSpan>1:
# Translators: Speaks the row and column span added to the current row and column numbers
# (example output: through row 5 column 3).
textList.append(_("through row {row} column {column}").format(
row=rowNumber+rowSpan-1,
column=columnNumber+columnSpan-1
))
rowCount=propertyValues.get('rowCount',0)
columnCount=propertyValues.get('columnCount',0)
if rowCount and columnCount:
# Translators: Speaks number of columns and rows in a table (example output: with 3 rows and 2 columns).
textList.append(_("with {rowCount} rows and {columnCount} columns").format(rowCount=rowCount,columnCount=columnCount))
elif columnCount and not rowCount:
# Translators: Speaks number of columns (example output: with 4 columns).
textList.append(_("with %s columns")%columnCount)
elif rowCount and not columnCount:
# Translators: Speaks number of rows (example output: with 2 rows).
textList.append(_("with %s rows")%rowCount)
if rowCount or columnCount:
# The caller is entering a table, so ensure that it is treated as a new table, even if the previous table was the same.
oldTableID = None
ariaCurrent = propertyValues.get('current', False)
if ariaCurrent:
try:
textList.append(controlTypes.isCurrentLabels[ariaCurrent])
except KeyError:
log.debugWarning("Aria-current value not handled: %s"%ariaCurrent)
textList.append(controlTypes.isCurrentLabels[True])
placeholder = propertyValues.get('placeholder', None)
if placeholder:
textList.append(placeholder)
indexInGroup=propertyValues.get('positionInfo_indexInGroup',0)
similarItemsInGroup=propertyValues.get('positionInfo_similarItemsInGroup',0)
if 0<indexInGroup<=similarItemsInGroup:
# Translators: Spoken to indicate the position of an item in a group of items (such as a list).
# {number} is replaced with the number of the item in the group.
# {total} is replaced with the total number of items in the group.
textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup))
if 'positionInfo_level' in propertyValues:
level=propertyValues.get('positionInfo_level',None)
role=propertyValues.get('role',None)
if level is not None:
if role in (controlTypes.ROLE_TREEVIEWITEM,controlTypes.ROLE_LISTITEM) and level!=oldTreeLevel:
textList.insert(0,_("level %s")%level)
oldTreeLevel=level
else:
# Translators: Speaks the item level in treeviews (example output: level 2).
textList.append(_('level %s')%propertyValues['positionInfo_level'])
return CHUNK_SEPARATOR.join([x for x in textList if x])
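# Illustrative call (hypothetical values); the output is joined with CHUNK_SEPARATOR and
# what is included depends on the reason and on controlTypes.silentRolesOnFocus:
#   getSpeechTextForProperties(reason=controlTypes.REASON_FOCUS,
#       name=u"OK", role=controlTypes.ROLE_BUTTON, keyboardShortcut=u"Alt+O")
#   -> roughly u"OK  button  Alt+O"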
def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraDetail=False,reason=None):
if attrs.get('isHidden'):
return u""
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
presCat=attrs.getPresentationCategory(ancestorAttrs,formatConfig, reason=reason)
childControlCount=int(attrs.get('_childcontrolcount',"0"))
if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportName',False):
name=attrs.get('name',"")
else:
name=""
role=attrs.get('role',controlTypes.ROLE_UNKNOWN)
states=attrs.get('states',set())
keyboardShortcut=attrs.get('keyboardShortcut', "")
ariaCurrent=attrs.get('current', None)
placeholderValue=attrs.get('placeholder', None)
value=attrs.get('value',"")
if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportDescription',False):
description=attrs.get('description',"")
else:
description=""
level=attrs.get('level',None)
if presCat != attrs.PRESCAT_LAYOUT:
tableID = attrs.get("table-id")
else:
tableID = None
roleText=getSpeechTextForProperties(reason=reason,role=role)
stateText=getSpeechTextForProperties(reason=reason,states=states,_role=role)
keyboardShortcutText=getSpeechTextForProperties(reason=reason,keyboardShortcut=keyboardShortcut) if config.conf["presentation"]["reportKeyboardShortcuts"] else ""
ariaCurrentText=getSpeechTextForProperties(reason=reason,current=ariaCurrent)
placeholderText=getSpeechTextForProperties(reason=reason,placeholder=placeholderValue)
nameText=getSpeechTextForProperties(reason=reason,name=name)
valueText=getSpeechTextForProperties(reason=reason,value=value)
descriptionText=(getSpeechTextForProperties(reason=reason,description=description)
if config.conf["presentation"]["reportObjectDescriptions"] else "")
levelText=getSpeechTextForProperties(reason=reason,positionInfo_level=level)
# Determine under what circumstances this node should be spoken.
# speakEntry: Speak when the user enters the control.
# speakWithinForLine: When moving by line, speak when the user is already within the control.
# speakExitForLine: When moving by line, speak when the user exits the control.
# speakExitForOther: When moving by word or character, speak when the user exits the control.
speakEntry=speakWithinForLine=speakExitForLine=speakExitForOther=False
if presCat == attrs.PRESCAT_SINGLELINE:
speakEntry=True
speakWithinForLine=True
speakExitForOther=True
elif presCat in (attrs.PRESCAT_MARKER, attrs.PRESCAT_CELL):
speakEntry=True
elif presCat == attrs.PRESCAT_CONTAINER:
speakEntry=True
speakExitForLine=True
speakExitForOther=True
# Determine the order of speech.
# speakContentFirst: Speak the content before the control field info.
speakContentFirst = reason == controlTypes.REASON_FOCUS and presCat != attrs.PRESCAT_CONTAINER and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX) and not tableID and controlTypes.STATE_EDITABLE not in states
# speakStatesFirst: Speak the states before the role.
speakStatesFirst=role==controlTypes.ROLE_LINK
# Determine what text to speak.
# Special cases
if speakEntry and childControlCount and fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states:
# List.
# Translators: Speaks number of items in a list (example output: list with 5 items).
return roleText+" "+_("with %s items")%childControlCount
elif fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_TABLE and tableID:
# Table.
return " ".join((nameText,roleText,stateText, getSpeechTextForProperties(_tableID=tableID, rowCount=attrs.get("table-rowcount"), columnCount=attrs.get("table-columncount")),levelText))
elif nameText and reason==controlTypes.REASON_FOCUS and fieldType == "start_addedToControlFieldStack" and role==controlTypes.ROLE_GROUPING:
# #3321: Report the name of groupings (such as fieldsets) for quicknav and focus jumps
return " ".join((nameText,roleText))
elif fieldType in ("start_addedToControlFieldStack","start_relative") and role in (controlTypes.ROLE_TABLECELL,controlTypes.ROLE_TABLECOLUMNHEADER,controlTypes.ROLE_TABLEROWHEADER) and tableID:
# Table cell.
reportTableHeaders = formatConfig["reportTableHeaders"]
reportTableCellCoords = formatConfig["reportTableCellCoords"]
getProps = {
'rowNumber': attrs.get("table-rownumber"),
'columnNumber': attrs.get("table-columnnumber"),
'rowSpan': attrs.get("table-rowsspanned"),
'columnSpan': attrs.get("table-columnsspanned"),
'includeTableCellCoords': reportTableCellCoords
}
if reportTableHeaders:
getProps['rowHeaderText'] = attrs.get("table-rowheadertext")
getProps['columnHeaderText'] = attrs.get("table-columnheadertext")
return (getSpeechTextForProperties(_tableID=tableID, **getProps)
+ (" %s" % stateText if stateText else "")
+ (" %s" % ariaCurrentText if ariaCurrent else ""))
# General cases
elif (
(speakEntry and ((speakContentFirst and fieldType in ("end_relative","end_inControlFieldStack")) or (not speakContentFirst and fieldType in ("start_addedToControlFieldStack","start_relative"))))
or (speakWithinForLine and not speakContentFirst and not extraDetail and fieldType=="start_inControlFieldStack")
):
out = []
content = attrs.get("content")
if content and speakContentFirst:
out.append(content)
if placeholderValue:
if valueText:
log.error("valueText exists when expected none: valueText:'%s' placeholderText:'%s'"%(valueText,placeholderText))
valueText = placeholderText
out.extend(x for x in (nameText,(stateText if speakStatesFirst else roleText),(roleText if speakStatesFirst else stateText),ariaCurrentText,valueText,descriptionText,levelText,keyboardShortcutText) if x)
if content and not speakContentFirst:
out.append(content)
return CHUNK_SEPARATOR.join(out)
elif fieldType in ("end_removedFromControlFieldStack","end_relative") and roleText and ((not extraDetail and speakExitForLine) or (extraDetail and speakExitForOther)):
# Translators: Indicates end of something (example output: at the end of a list, speaks out of list).
return _("out of %s")%roleText
# Special cases
elif not speakEntry and fieldType in ("start_addedToControlFieldStack","start_relative"):
out = []
if not extraDetail and controlTypes.STATE_CLICKABLE in states:
# Clickable.
out.append(getSpeechTextForProperties(states=set([controlTypes.STATE_CLICKABLE])))
if ariaCurrent:
out.append(ariaCurrentText)
return CHUNK_SEPARATOR.join(out)
else:
return ""
def getFormatFieldSpeech(attrs,attrsCache=None,formatConfig=None,reason=None,unit=None,extraDetail=False , initialFormat=False, separator=CHUNK_SEPARATOR):
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
textList=[]
if formatConfig["reportTables"]:
tableInfo=attrs.get("table-info")
oldTableInfo=attrsCache.get("table-info") if attrsCache is not None else None
text=getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=extraDetail)
if text:
textList.append(text)
if formatConfig["reportPage"]:
pageNumber=attrs.get("page-number")
oldPageNumber=attrsCache.get("page-number") if attrsCache is not None else None
if pageNumber and pageNumber!=oldPageNumber:
# Translators: Indicates the page number in a document.
# %s will be replaced with the page number.
text=_("page %s")%pageNumber
textList.append(text)
sectionNumber=attrs.get("section-number")
oldSectionNumber=attrsCache.get("section-number") if attrsCache is not None else None
if sectionNumber and sectionNumber!=oldSectionNumber:
# Translators: Indicates the section number in a document.
# %s will be replaced with the section number.
text=_("section %s")%sectionNumber
textList.append(text)
textColumnCount=attrs.get("text-column-count")
oldTextColumnCount=attrsCache.get("text-column-count") if attrsCache is not None else None
textColumnNumber=attrs.get("text-column-number")
oldTextColumnNumber=attrsCache.get("text-column-number") if attrsCache is not None else None
		# Report when the column number or the column count has changed,
		# but not when the column count is 1 or less and there was no old column count.
		# This avoids verbose output when a document is first opened and contains only one column.
if (((textColumnNumber and textColumnNumber!=oldTextColumnNumber) or
(textColumnCount and textColumnCount!=oldTextColumnCount)) and not
(textColumnCount and int(textColumnCount) <=1 and oldTextColumnCount == None)) :
if textColumnNumber and textColumnCount:
# Translators: Indicates the text column number in a document.
# {0} will be replaced with the text column number.
# {1} will be replaced with the number of text columns.
text=_("column {0} of {1}").format(textColumnNumber,textColumnCount)
textList.append(text)
elif textColumnCount:
# Translators: Indicates the text column number in a document.
# %s will be replaced with the number of text columns.
text=_("%s columns")%(textColumnCount)
textList.append(text)
sectionBreakType=attrs.get("section-break")
if sectionBreakType:
if sectionBreakType == "0" : # Continuous section break.
text=_("continuous section break")
elif sectionBreakType == "1" : # New column section break.
text=_("new column section break")
elif sectionBreakType == "2" : # New page section break.
text=_("new page section break")
elif sectionBreakType == "3" : # Even pages section break.
text=_("even pages section break")
elif sectionBreakType == "4" : # Odd pages section break.
text=_("odd pages section break")
else:
text=""
textList.append(text)
columnBreakType=attrs.get("column-break")
if columnBreakType:
textList.append(_("column break"))
if formatConfig["reportHeadings"]:
headingLevel=attrs.get("heading-level")
oldHeadingLevel=attrsCache.get("heading-level") if attrsCache is not None else None
# headings should be spoken not only if they change, but also when beginning to speak lines or paragraphs
# Ensuring a similar experience to if a heading was a controlField
if headingLevel and (initialFormat and (reason==controlTypes.REASON_FOCUS or unit in (textInfos.UNIT_LINE,textInfos.UNIT_PARAGRAPH)) or headingLevel!=oldHeadingLevel):
# Translators: Speaks the heading level (example output: heading level 2).
text=_("heading level %d")%headingLevel
textList.append(text)
if formatConfig["reportStyle"]:
style=attrs.get("style")
oldStyle=attrsCache.get("style") if attrsCache is not None else None
if style!=oldStyle:
if style:
# Translators: Indicates the style of text.
# A style is a collection of formatting settings and depends on the application.
# %s will be replaced with the name of the style.
text=_("style %s")%style
else:
# Translators: Indicates that text has reverted to the default style.
# A style is a collection of formatting settings and depends on the application.
text=_("default style")
textList.append(text)
if formatConfig["reportBorderStyle"]:
borderStyle=attrs.get("border-style")
oldBorderStyle=attrsCache.get("border-style") if attrsCache is not None else None
if borderStyle!=oldBorderStyle:
if borderStyle:
text=borderStyle
else:
# Translators: Indicates that cell does not have border lines.
text=_("no border lines")
textList.append(text)
if formatConfig["reportFontName"]:
fontFamily=attrs.get("font-family")
oldFontFamily=attrsCache.get("font-family") if attrsCache is not None else None
if fontFamily and fontFamily!=oldFontFamily:
textList.append(fontFamily)
fontName=attrs.get("font-name")
oldFontName=attrsCache.get("font-name") if attrsCache is not None else None
if fontName and fontName!=oldFontName:
textList.append(fontName)
if formatConfig["reportFontSize"]:
fontSize=attrs.get("font-size")
oldFontSize=attrsCache.get("font-size") if attrsCache is not None else None
if fontSize and fontSize!=oldFontSize:
textList.append(fontSize)
if formatConfig["reportColor"]:
color=attrs.get("color")
oldColor=attrsCache.get("color") if attrsCache is not None else None
backgroundColor=attrs.get("background-color")
oldBackgroundColor=attrsCache.get("background-color") if attrsCache is not None else None
backgroundColor2=attrs.get("background-color2")
oldBackgroundColor2=attrsCache.get("background-color2") if attrsCache is not None else None
bgColorChanged=backgroundColor!=oldBackgroundColor or backgroundColor2!=oldBackgroundColor2
bgColorText=backgroundColor.name if isinstance(backgroundColor,colors.RGB) else unicode(backgroundColor)
if backgroundColor2:
bg2Name=backgroundColor2.name if isinstance(backgroundColor2,colors.RGB) else unicode(backgroundColor2)
# Translators: Reported when there are two background colors.
# This occurs when, for example, a gradient pattern is applied to a spreadsheet cell.
# {color1} will be replaced with the first background color.
# {color2} will be replaced with the second background color.
bgColorText=_("{color1} to {color2}").format(color1=bgColorText,color2=bg2Name)
if color and backgroundColor and color!=oldColor and bgColorChanged:
# Translators: Reported when both the text and background colors change.
# {color} will be replaced with the text color.
# {backgroundColor} will be replaced with the background color.
textList.append(_("{color} on {backgroundColor}").format(
color=color.name if isinstance(color,colors.RGB) else unicode(color),
backgroundColor=bgColorText))
elif color and color!=oldColor:
# Translators: Reported when the text color changes (but not the background color).
# {color} will be replaced with the text color.
textList.append(_("{color}").format(color=color.name if isinstance(color,colors.RGB) else unicode(color)))
elif backgroundColor and bgColorChanged:
# Translators: Reported when the background color changes (but not the text color).
# {backgroundColor} will be replaced with the background color.
textList.append(_("{backgroundColor} background").format(backgroundColor=bgColorText))
backgroundPattern=attrs.get("background-pattern")
oldBackgroundPattern=attrsCache.get("background-pattern") if attrsCache is not None else None
if backgroundPattern and backgroundPattern!=oldBackgroundPattern:
textList.append(_("background pattern {pattern}").format(pattern=backgroundPattern))
if formatConfig["reportLineNumber"]:
lineNumber=attrs.get("line-number")
oldLineNumber=attrsCache.get("line-number") if attrsCache is not None else None
if lineNumber is not None and lineNumber!=oldLineNumber:
# Translators: Indicates the line number of the text.
# %s will be replaced with the line number.
text=_("line %s")%lineNumber
textList.append(text)
if formatConfig["reportRevisions"]:
# Insertion
revision=attrs.get("revision-insertion")
oldRevision=attrsCache.get("revision-insertion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been inserted
text=(_("inserted") if revision
# Translators: Reported when text is no longer marked as having been inserted.
else _("not inserted"))
textList.append(text)
revision=attrs.get("revision-deletion")
oldRevision=attrsCache.get("revision-deletion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been deleted
text=(_("deleted") if revision
# Translators: Reported when text is no longer marked as having been deleted.
else _("not deleted"))
textList.append(text)
revision=attrs.get("revision")
oldRevision=attrsCache.get("revision") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is revised.
text=(_("revised %s"%revision) if revision
# Translators: Reported when text is not revised.
else _("no revised %s")%oldRevision)
textList.append(text)
if formatConfig["reportEmphasis"]:
# marked text
marked=attrs.get("marked")
oldMarked=attrsCache.get("marked") if attrsCache is not None else None
if (marked or oldMarked is not None) and marked!=oldMarked:
# Translators: Reported when text is marked
text=(_("marked") if marked
# Translators: Reported when text is no longer marked
else _("not marked"))
textList.append(text)
# strong text
strong=attrs.get("strong")
oldStrong=attrsCache.get("strong") if attrsCache is not None else None
if (strong or oldStrong is not None) and strong!=oldStrong:
# Translators: Reported when text is marked as strong (e.g. bold)
text=(_("strong") if strong
# Translators: Reported when text is no longer marked as strong (e.g. bold)
else _("not strong"))
textList.append(text)
# emphasised text
emphasised=attrs.get("emphasised")
oldEmphasised=attrsCache.get("emphasised") if attrsCache is not None else None
if (emphasised or oldEmphasised is not None) and emphasised!=oldEmphasised:
# Translators: Reported when text is marked as emphasised
text=(_("emphasised") if emphasised
# Translators: Reported when text is no longer marked as emphasised
else _("not emphasised"))
textList.append(text)
if formatConfig["reportFontAttributes"]:
bold=attrs.get("bold")
oldBold=attrsCache.get("bold") if attrsCache is not None else None
if (bold or oldBold is not None) and bold!=oldBold:
# Translators: Reported when text is bolded.
text=(_("bold") if bold
# Translators: Reported when text is not bolded.
else _("no bold"))
textList.append(text)
italic=attrs.get("italic")
oldItalic=attrsCache.get("italic") if attrsCache is not None else None
if (italic or oldItalic is not None) and italic!=oldItalic:
# Translators: Reported when text is italicized.
text=(_("italic") if italic
# Translators: Reported when text is not italicized.
else _("no italic"))
textList.append(text)
strikethrough=attrs.get("strikethrough")
oldStrikethrough=attrsCache.get("strikethrough") if attrsCache is not None else None
if (strikethrough or oldStrikethrough is not None) and strikethrough!=oldStrikethrough:
if strikethrough:
# Translators: Reported when text is formatted with double strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=(_("double strikethrough") if strikethrough=="double"
# Translators: Reported when text is formatted with strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
else _("strikethrough"))
else:
# Translators: Reported when text is formatted without strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=_("no strikethrough")
textList.append(text)
underline=attrs.get("underline")
oldUnderline=attrsCache.get("underline") if attrsCache is not None else None
if (underline or oldUnderline is not None) and underline!=oldUnderline:
# Translators: Reported when text is underlined.
text=(_("underlined") if underline
# Translators: Reported when text is not underlined.
else _("not underlined"))
textList.append(text)
textPosition=attrs.get("text-position")
oldTextPosition=attrsCache.get("text-position") if attrsCache is not None else None
if (textPosition or oldTextPosition is not None) and textPosition!=oldTextPosition:
textPosition=textPosition.lower() if textPosition else textPosition
if textPosition=="super":
# Translators: Reported for superscript text.
text=_("superscript")
elif textPosition=="sub":
# Translators: Reported for subscript text.
text=_("subscript")
else:
# Translators: Reported for text which is at the baseline position;
# i.e. not superscript or subscript.
text=_("baseline")
textList.append(text)
if formatConfig["reportAlignment"]:
textAlign=attrs.get("text-align")
oldTextAlign=attrsCache.get("text-align") if attrsCache is not None else None
if (textAlign or oldTextAlign is not None) and textAlign!=oldTextAlign:
textAlign=textAlign.lower() if textAlign else textAlign
if textAlign=="left":
# Translators: Reported when text is left-aligned.
text=_("align left")
elif textAlign=="center":
# Translators: Reported when text is centered.
text=_("align center")
elif textAlign=="right":
# Translators: Reported when text is right-aligned.
text=_("align right")
elif textAlign=="justify":
# Translators: Reported when text is justified.
# See http://en.wikipedia.org/wiki/Typographic_alignment#Justified
text=_("align justify")
elif textAlign=="distribute":
# Translators: Reported when text is justified with character spacing (Japanese etc)
# See http://kohei.us/2010/01/21/distributed-text-justification/
text=_("align distributed")
else:
# Translators: Reported when text has reverted to default alignment.
text=_("align default")
textList.append(text)
verticalAlign=attrs.get("vertical-align")
oldverticalAlign=attrsCache.get("vertical-align") if attrsCache is not None else None
if (verticalAlign or oldverticalAlign is not None) and verticalAlign!=oldverticalAlign:
verticalAlign=verticalAlign.lower() if verticalAlign else verticalAlign
if verticalAlign=="top":
# Translators: Reported when text is vertically top-aligned.
text=_("vertical align top")
elif verticalAlign in("center","middle"):
# Translators: Reported when text is vertically middle aligned.
text=_("vertical align middle")
elif verticalAlign=="bottom":
# Translators: Reported when text is vertically bottom-aligned.
text=_("vertical align bottom")
elif verticalAlign=="baseline":
# Translators: Reported when text is vertically aligned on the baseline.
text=_("vertical align baseline")
elif verticalAlign=="justify":
# Translators: Reported when text is vertically justified.
text=_("vertical align justified")
elif verticalAlign=="distributed":
# Translators: Reported when text is vertically justified but with character spacing (For some Asian content).
text=_("vertical align distributed")
else:
# Translators: Reported when text has reverted to default vertical alignment.
text=_("vertical align default")
textList.append(text)
if formatConfig["reportParagraphIndentation"]:
indentLabels={
'left-indent':(
# Translators: the label for paragraph format left indent
_("left indent"),
# Translators: the message when there is no paragraph format left indent
_("no left indent"),
),
'right-indent':(
# Translators: the label for paragraph format right indent
_("right indent"),
# Translators: the message when there is no paragraph format right indent
_("no right indent"),
),
'hanging-indent':(
# Translators: the label for paragraph format hanging indent
_("hanging indent"),
# Translators: the message when there is no paragraph format hanging indent
_("no hanging indent"),
),
'first-line-indent':(
# Translators: the label for paragraph format first line indent
_("first line indent"),
# Translators: the message when there is no paragraph format first line indent
_("no first line indent"),
),
}
for attr,(label,noVal) in indentLabels.iteritems():
newVal=attrs.get(attr)
oldVal=attrsCache.get(attr) if attrsCache else None
if (newVal or oldVal is not None) and newVal!=oldVal:
if newVal:
textList.append(u"%s %s"%(label,newVal))
else:
textList.append(noVal)
if formatConfig["reportLineSpacing"]:
lineSpacing=attrs.get("line-spacing")
oldLineSpacing=attrsCache.get("line-spacing") if attrsCache is not None else None
if (lineSpacing or oldLineSpacing is not None) and lineSpacing!=oldLineSpacing:
# Translators: a type of line spacing (E.g. single line spacing)
textList.append(_("line spacing %s")%lineSpacing)
if formatConfig["reportLinks"]:
link=attrs.get("link")
oldLink=attrsCache.get("link") if attrsCache is not None else None
if (link or oldLink is not None) and link!=oldLink:
text=_("link") if link else _("out of %s")%_("link")
textList.append(text)
if formatConfig["reportComments"]:
comment=attrs.get("comment")
oldComment=attrsCache.get("comment") if attrsCache is not None else None
if (comment or oldComment is not None) and comment!=oldComment:
if comment:
# Translators: Reported when text contains a comment.
text=_("has comment")
textList.append(text)
elif extraDetail:
# Translators: Reported when text no longer contains a comment.
text=_("out of comment")
textList.append(text)
if formatConfig["reportSpellingErrors"]:
invalidSpelling=attrs.get("invalid-spelling")
oldInvalidSpelling=attrsCache.get("invalid-spelling") if attrsCache is not None else None
if (invalidSpelling or oldInvalidSpelling is not None) and invalidSpelling!=oldInvalidSpelling:
if invalidSpelling:
# Translators: Reported when text contains a spelling error.
text=_("spelling error")
elif extraDetail:
# Translators: Reported when moving out of text containing a spelling error.
text=_("out of spelling error")
else:
text=""
if text:
textList.append(text)
invalidGrammar=attrs.get("invalid-grammar")
oldInvalidGrammar=attrsCache.get("invalid-grammar") if attrsCache is not None else None
if (invalidGrammar or oldInvalidGrammar is not None) and invalidGrammar!=oldInvalidGrammar:
if invalidGrammar:
# Translators: Reported when text contains a grammar error.
text=_("grammar error")
elif extraDetail:
# Translators: Reported when moving out of text containing a grammar error.
text=_("out of grammar error")
else:
text=""
if text:
textList.append(text)
if unit in (textInfos.UNIT_LINE,textInfos.UNIT_SENTENCE,textInfos.UNIT_PARAGRAPH,textInfos.UNIT_READINGCHUNK):
linePrefix=attrs.get("line-prefix")
if linePrefix:
textList.append(linePrefix)
if attrsCache is not None:
attrsCache.clear()
attrsCache.update(attrs)
return separator.join(textList)
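# Illustrative call (hypothetical attributes); which parts are spoken depends on the
# enabled documentFormatting options:
#   attrs = textInfos.FormatField(**{"font-name": u"Arial", "font-size": u"12pt", "bold": True})
#   getFormatFieldSpeech(attrs, attrsCache={}, formatConfig=config.conf["documentFormatting"])
#   -> e.g. u"Arial  12pt  bold" with font name, size and attribute reporting enabled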
def getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=False):
if tableInfo is None and oldTableInfo is None:
return ""
if tableInfo is None and oldTableInfo is not None:
# Translators: Indicates end of a table.
return _("out of table")
if not oldTableInfo or tableInfo.get("table-id")!=oldTableInfo.get("table-id"):
newTable=True
else:
newTable=False
textList=[]
if newTable:
columnCount=tableInfo.get("column-count",0)
rowCount=tableInfo.get("row-count",0)
# Translators: reports number of columns and rows in a table (example output: table with 3 columns and 5 rows).
text=_("table with {columnCount} columns and {rowCount} rows").format(columnCount=columnCount,rowCount=rowCount)
textList.append(text)
oldColumnNumber=oldTableInfo.get("column-number",0) if oldTableInfo else 0
columnNumber=tableInfo.get("column-number",0)
if columnNumber!=oldColumnNumber:
textList.append(_("column %s")%columnNumber)
oldRowNumber=oldTableInfo.get("row-number",0) if oldTableInfo else 0
rowNumber=tableInfo.get("row-number",0)
if rowNumber!=oldRowNumber:
textList.append(_("row %s")%rowNumber)
return " ".join(textList)
re_last_pause=re.compile(ur"^(.*(?<=[^\s.!?])[.!?][\"'”’)]?(?:\s+|$))(.*$)",re.DOTALL|re.UNICODE)
def speakWithoutPauses(speechSequence,detectBreaks=True):
"""
Speaks the speech sequences given over multiple calls, only sending to the synth at acceptable phrase or sentence boundaries, or when given None for the speech sequence.
"""
lastStartIndex=0
#Break on all explicit break commands
if detectBreaks and speechSequence:
sequenceLen=len(speechSequence)
for index in xrange(sequenceLen):
if isinstance(speechSequence[index],SpeakWithoutPausesBreakCommand):
if index>0 and lastStartIndex<index:
speakWithoutPauses(speechSequence[lastStartIndex:index],detectBreaks=False)
speakWithoutPauses(None)
lastStartIndex=index+1
if lastStartIndex<sequenceLen:
speakWithoutPauses(speechSequence[lastStartIndex:],detectBreaks=False)
return
finalSpeechSequence=[] #To be spoken now
pendingSpeechSequence=[] #To be saved off for speaking later
if speechSequence is None: #Requesting flush
if speakWithoutPauses._pendingSpeechSequence:
#Place the last incomplete phrase in to finalSpeechSequence to be spoken now
finalSpeechSequence=speakWithoutPauses._pendingSpeechSequence
speakWithoutPauses._pendingSpeechSequence=[]
else: #Handling normal speech
#Scan the given speech and place all completed phrases in finalSpeechSequence to be spoken,
#And place the final incomplete phrase in pendingSpeechSequence
for index in xrange(len(speechSequence)-1,-1,-1):
item=speechSequence[index]
if isinstance(item,basestring):
m=re_last_pause.match(item)
if m:
before,after=m.groups()
if after:
pendingSpeechSequence.append(after)
if before:
finalSpeechSequence.extend(speakWithoutPauses._pendingSpeechSequence)
speakWithoutPauses._pendingSpeechSequence=[]
finalSpeechSequence.extend(speechSequence[0:index])
finalSpeechSequence.append(before)
# Apply the last language change to the pending sequence.
# This will need to be done for any other speech change commands introduced in future.
for changeIndex in xrange(index-1,-1,-1):
change=speechSequence[changeIndex]
if not isinstance(change,LangChangeCommand):
continue
pendingSpeechSequence.append(change)
break
break
else:
pendingSpeechSequence.append(item)
else:
pendingSpeechSequence.append(item)
if pendingSpeechSequence:
pendingSpeechSequence.reverse()
speakWithoutPauses._pendingSpeechSequence.extend(pendingSpeechSequence)
#Scan the final speech sequence backwards
for item in reversed(finalSpeechSequence):
if isinstance(item,IndexCommand):
speakWithoutPauses.lastSentIndex=item.index
break
if finalSpeechSequence:
speak(finalSpeechSequence)
speakWithoutPauses.lastSentIndex=None
speakWithoutPauses._pendingSpeechSequence=[]
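# Rough illustration of the buffering used during say all (hypothetical strings; the
# actual split point is decided by re_last_pause):
#   speakWithoutPauses([u"First sentence. Second sen"])  # speaks "First sentence. "
#   speakWithoutPauses([u"tence ends here. And more"])   # speaks "Second sen" + "tence ends here. "
#   speakWithoutPauses(None)                             # flush: speaks "And more"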
class SpeechCommand(object):
"""
	The base class for objects that can be inserted between strings of text for particular speech functions that convey things such as indexing or voice parameter changes.
"""
class IndexCommand(SpeechCommand):
"""Represents an index within some speech."""
def __init__(self,index):
"""
@param index: the value of this index
@type index: integer
"""
if not isinstance(index,int): raise ValueError("index must be int, not %s"%type(index))
self.index=index
def __repr__(self):
return "IndexCommand(%r)" % self.index
class CharacterModeCommand(SpeechCommand):
"""Turns character mode on and off for speech synths."""
def __init__(self,state):
"""
@param state: if true character mode is on, if false it is turned off.
@type state: boolean
"""
if not isinstance(state,bool): raise ValueError("state must be boolean, not %s"%type(state))
self.state=state
def __repr__(self):
return "CharacterModeCommand(%r)" % self.state
class LangChangeCommand(SpeechCommand):
"""A command to switch the language within speech."""
def __init__(self,lang):
"""
@param lang: the language to switch to; if None, the NVDA locale will be used.
@type lang: string
"""
self.lang=lang # if lang else languageHandler.getLanguage()
def __repr__(self):
return "LangChangeCommand (%r)"%self.lang
class SpeakWithoutPausesBreakCommand(SpeechCommand):
"""Forces speakWithoutPauses to flush its buffer and therefore break the sentence at this point.
This should only be used with the L{speakWithoutPauses} function.
This will be removed during processing.
"""
class BreakCommand(SpeechCommand):
"""Insert a break between words.
"""
def __init__(self, time=0):
"""
@param time: The duration of the pause to be inserted in milliseconds.
@type time: int
"""
self.time = time
def __repr__(self):
return "BreakCommand(time=%d)" % self.time
class PitchCommand(SpeechCommand):
"""Change the pitch of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current pitch setting;
e.g. 0.5 is half, 1 returns to the current pitch setting.
@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "PitchCommand(multiplier=%g)" % self.multiplier
class VolumeCommand(SpeechCommand):
"""Change the volume of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current volume setting;
e.g. 0.5 is half, 1 returns to the current volume setting.
@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "VolumeCommand(multiplier=%g)" % self.multiplier
class RateCommand(SpeechCommand):
"""Change the rate of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current rate setting;
e.g. 0.5 is half, 1 returns to the current rate setting.
@type multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "RateCommand(multiplier=%g)" % self.multiplier
class PhonemeCommand(SpeechCommand):
"""Insert a specific pronunciation.
This command accepts Unicode International Phonetic Alphabet (IPA) characters.
Note that this is not well supported by synthesizers.
"""
def __init__(self, ipa, text=None):
"""
@param ipa: Unicode IPA characters.
@type ipa: unicode
@param text: Text to speak if the synthesizer does not support
some or all of the specified IPA characters,
C{None} to ignore this command instead.
@type text: unicode
"""
self.ipa = ipa
self.text = text
def __repr__(self):
out = "PhonemeCommand(%r" % self.ipa
if self.text:
out += ", text=%r" % self.text
return out + ")"
| 1 | 22,381 | What if we changed this to this? roleText=attrs.get('roleText', lambda:getSpeechTextForProperties(reason=reason,role=role)) That will prevent the function from ever needing called in the roletext case, and removes that if. | nvaccess-nvda | py |
@@ -4801,7 +4801,15 @@ master_signal_handler_C(byte *xsp)
return;
}
#endif
- dcontext_t *dcontext = get_thread_private_dcontext();
+ /* We avoid using safe_read_tls_magic during detach. This thread may already have
+ * lost its TLS. A safe read may result in a race affecting asynchronous non-alarm
+ * signals (xref i#3535) between delivering the SIGSEGV and restoring the app's signal
+ * handlers. We no longer need the thread's private dcontext at this point.
+ * Note that there is still a small race window if the signal gets delivered after the
+ * detach has finished, i.e. doing_detach is false. This is an issue in particular if
+ * the app has started re-attaching.
+ */
+ dcontext_t *dcontext = doing_detach ? NULL : get_thread_private_dcontext();
#ifdef MACOS
# ifdef X64 | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* signal.c - dynamorio signal handler
*/
#include <errno.h>
#undef errno
#include "signal_private.h" /* pulls in globals.h for us, in right order */
/* We want to build on older toolchains so we have our own copy of signal
* data structures
*/
#include "include/siginfo.h"
#ifdef LINUX
# include "include/sigcontext.h"
# include "include/signalfd.h"
# include "../globals.h" /* after our sigcontext.h, to preclude bits/sigcontext.h */
#elif defined(MACOS)
# include "../globals.h" /* this defines _XOPEN_SOURCE for Mac */
# include <signal.h> /* after globals.h, for _XOPEN_SOURCE from os_exports.h */
#endif
#ifdef LINUX
# include <linux/sched.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <ucontext.h>
#include "os_private.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../perfctr.h"
#include "arch.h"
#include "../monitor.h" /* for trace_abort */
#include "../link.h" /* for linking interrupted fragment_t */
#include "instr.h" /* to find target of SIGSEGV */
#include "decode.h" /* to find target of SIGSEGV */
#include "decode_fast.h" /* to handle self-mod code */
#include "../synch.h"
#include "../nudge.h"
#include "disassemble.h"
#include "ksynch.h"
#include "tls.h" /* tls_reinstate_selector */
#include "../translate.h"
#ifdef LINUX
# include "include/syscall.h"
#else
# include <sys/syscall.h>
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#ifdef VMX86_SERVER
# include <errno.h>
#endif
/* Define the Linux names, which the code is already using */
#ifndef SA_NOMASK
# define SA_NOMASK SA_NODEFER
#endif
#ifndef SA_ONESHOT
# define SA_ONESHOT SA_RESETHAND
#endif
#ifndef SS_AUTODISARM
# define SS_AUTODISARM (1U << 31)
#endif
#ifndef SS_FLAG_BITS
# define SS_FLAG_BITS SS_AUTODISARM
#endif
/**** data structures ***************************************************/
/* The signal numbers are slightly different between operating systems.
* To support differing default actions, we have separate arrays, rather
* than indirecting to a single all-signals array.
*/
extern int default_action[];
/* We know that many signals are always asynchronous.
* Others, however, may be synchronous or may not -- e.g., another process
* could send us a SIGSEGV, and there is no way we can tell whether it
* was generated by a real memory fault or not. Thus we have to assume
* that we must not delay any SIGSEGV deliveries.
*/
extern bool can_always_delay[];
static inline bool
sig_is_alarm_signal(int sig)
{
return (sig == SIGALRM || sig == SIGVTALRM || sig == SIGPROF);
}
/* we do not use SIGSTKSZ b/c for things like code modification
* we end up calling many core routines and so want more space
* (though currently non-debug stack size == SIGSTKSZ (8KB))
*/
#define SIGSTACK_SIZE (DYNAMO_OPTION(signal_stack_size))
/* this flag not defined in our headers */
#define SA_RESTORER 0x04000000
/* if no app sigaction, it's RT, since that's our handler */
#ifdef LINUX
# define IS_RT_FOR_APP(info, sig) \
IF_X64_ELSE(true, \
((info)->app_sigaction[(sig)] == NULL \
? true \
: (TEST(SA_SIGINFO, (info)->app_sigaction[(sig)]->flags))))
#elif defined(MACOS)
# define IS_RT_FOR_APP(info, sig) (true)
#endif
/* kernel sets size and sp to 0 for SS_DISABLE
* when asked, will hand back SS_ONSTACK only if current xsp is inside the
* alt stack; otherwise, if an alt stack is registered, it will give flags of 0
* We do not support the "legacy stack switching" that uses the restorer field
* as seen in kernel sources.
*/
#define APP_HAS_SIGSTACK(info) \
((info)->app_sigstack.ss_sp != NULL && (info)->app_sigstack.ss_flags != SS_DISABLE)
/* Under normal circumstances the app_sigaction is lazily initialized when the
* app registers a signal handler, but during detach there are points where we
* are still intercepting signals after app_sigaction has been set to
* zeros. To be extra defensive, we do a NULL check.
*/
#define USE_APP_SIGSTACK(info, sig) \
(APP_HAS_SIGSTACK(info) && (info)->app_sigaction[sig] != NULL && \
TEST(SA_ONSTACK, (info)->app_sigaction[sig]->flags))
/* If we only intercept a few signals, we leave whether un-intercepted signals
* are blocked unchanged and stored in the kernel. If we intercept all (not
* quite yet: PR 297033, hence the need for this macro) we emulate the mask for
* all.
*/
#define EMULATE_SIGMASK(info, sig) \
(DYNAMO_OPTION(intercept_all_signals) || (info)->we_intercept[(sig)])
/* i#27: custom data to pass to the child of a clone */
/* PR i#149/403015: clone record now passed via a new dstack */
typedef struct _clone_record_t {
byte *dstack; /* dstack for new thread - allocated by parent thread */
#ifdef MACOS
/* XXX i#1403: once we have lower-level, earlier thread interception we can
* likely switch to something closer to what we do on Linux.
* This is used for bsdthread_create, where app_thread_xsp is NULL;
* for vfork, app_thread_xsp is non-NULL and this is unused.
*/
void *thread_arg;
#endif
reg_t app_thread_xsp; /* app xsp preserved for new thread to use */
app_pc continuation_pc;
thread_id_t caller_id;
int clone_sysnum;
uint clone_flags;
thread_sig_info_t info;
thread_sig_info_t *parent_info;
void *pcprofile_info;
#ifdef AARCHXX
/* To ensure we have the right value as of the point of the clone, we
* store it here (we'll have races if we try to get it during new thread
* init).
*/
reg_t app_stolen_value;
# ifndef AARCH64
dr_isa_mode_t isa_mode;
# endif
/* To ensure we have the right app lib tls base in child thread,
* we store it here if necessary (clone w/o CLONE_SETTLS or vfork).
*/
void *app_lib_tls_base;
#endif
/* we leave some padding at base of stack for dynamorio_clone
* to store values
*/
reg_t for_dynamorio_clone[4];
} __attribute__((__aligned__(ABI_STACK_ALIGNMENT))) clone_record_t;
/* i#350: set up signal handler for safe_read/faults during init */
static thread_sig_info_t init_info;
static kernel_sigset_t init_sigmask;
#ifdef DEBUG
static bool removed_sig_handler;
#endif
os_cxt_ptr_t osc_empty;
/**** function prototypes ***********************************************/
/* in x86.asm */
void
master_signal_handler(int sig, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt);
static void
set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
kernel_sigaction_t *act);
static void
intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig);
static void
signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info);
static void
signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info,
bool other_thread);
static bool
execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame,
sigcontext_t *sc_orig,
fragment_t *f _IF_CLIENT(byte *access_address));
static bool
execute_handler_from_dispatch(dcontext_t *dcontext, int sig);
/* Execute default action from code cache and may terminate the process.
* If it returns, the return value decides if the caller should restore
* the untranslated context.
*/
static bool
execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig, bool forged);
static void
execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame);
static bool
handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt);
static bool
handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt,
sigframe_rt_t *frame);
static bool
handle_nudge_signal(dcontext_t *dcontext, kernel_siginfo_t *siginfo,
kernel_ucontext_t *ucxt);
static void
init_itimer(dcontext_t *dcontext, bool first);
static bool
set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info, bool enable);
static bool
alarm_signal_has_DR_only_itimer(dcontext_t *dcontext, int signal);
#ifdef DEBUG
static void
dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set);
#endif
static bool
is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, kernel_siginfo_t *info);
int
sigaction_syscall(int sig, kernel_sigaction_t *act, kernel_sigaction_t *oact)
{
#if !defined(VMX86_SERVER) && defined(LINUX)
/* PR 305020: must have SA_RESTORER for x64 */
/* i#2812: must have SA_RESTORER to handle vsyscall32 being disabled */
if (act != NULL && !TEST(SA_RESTORER, act->flags)) {
act->flags |= SA_RESTORER;
act->restorer = (void (*)(void))dynamorio_sigreturn;
}
#endif
return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction), 4, sig, act,
oact, sizeof(kernel_sigset_t));
}
static inline bool
signal_is_interceptable(int sig)
{
return (sig != SIGKILL && sig != SIGSTOP);
}
static inline int
sigaltstack_syscall(const stack_t *newstack, stack_t *oldstack)
{
return dynamorio_syscall(SYS_sigaltstack, 2, newstack, oldstack);
}
static inline int
getitimer_syscall(int which, struct itimerval *val)
{
return dynamorio_syscall(SYS_getitimer, 2, which, val);
}
static inline int
setitimer_syscall(int which, struct itimerval *val, struct itimerval *old)
{
return dynamorio_syscall(SYS_setitimer, 3, which, val, old);
}
static inline int
sigprocmask_syscall(int how, kernel_sigset_t *set, kernel_sigset_t *oset,
size_t sigsetsize)
{
return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask), 4, how,
set, oset, sigsetsize);
}
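/* Blocks every signal except those passed as the trailing arguments, saving the
 * previous mask in oset when it is non-NULL.
 */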
void
block_all_signals_except(kernel_sigset_t *oset, int num_signals,
... /* list of signals */)
{
kernel_sigset_t set;
kernel_sigfillset(&set);
va_list ap;
va_start(ap, num_signals);
for (int i = 0; i < num_signals; ++i) {
kernel_sigdelset(&set, va_arg(ap, int));
}
va_end(ap);
sigprocmask_syscall(SIG_SETMASK, &set, oset, sizeof(set));
}
static void
unblock_all_signals(kernel_sigset_t *oset)
{
kernel_sigset_t set;
kernel_sigemptyset(&set);
sigprocmask_syscall(SIG_SETMASK, &set, oset, sizeof(set));
}
/* exported for stackdump.c */
bool
set_default_signal_action(int sig)
{
kernel_sigset_t set;
kernel_sigaction_t act;
int rc;
memset(&act, 0, sizeof(act));
act.handler = (handler_t)SIG_DFL;
/* arm the signal */
rc = sigaction_syscall(sig, &act, NULL);
DODEBUG({ removed_sig_handler = true; });
/* If we're in our handler now, we have to unblock */
kernel_sigemptyset(&set);
kernel_sigaddset(&set, sig);
sigprocmask_syscall(SIG_UNBLOCK, &set, NULL, sizeof(set));
return (rc == 0);
}
static bool
set_ignore_signal_action(int sig)
{
kernel_sigaction_t act;
int rc;
memset(&act, 0, sizeof(act));
act.handler = (handler_t)SIG_IGN;
/* arm the signal */
rc = sigaction_syscall(sig, &act, NULL);
return (rc == 0);
}
/* We assume that signal handlers will be shared most of the time
* (pthreads shares them)
* Rather than start out with the handler table in local memory and then
* having to transfer to global, we just always use global
*/
static void
handler_free(dcontext_t *dcontext, void *p, size_t size)
{
global_heap_free(p, size HEAPACCT(ACCT_OTHER));
}
static void *
handler_alloc(dcontext_t *dcontext, size_t size)
{
return global_heap_alloc(size HEAPACCT(ACCT_OTHER));
}
/**** top-level routines ***********************************************/
static bool
os_itimers_thread_shared(void)
{
static bool itimers_shared;
static bool cached = false;
if (!cached) {
file_t f = os_open("/proc/version", OS_OPEN_READ);
if (f != INVALID_FILE) {
char buf[128];
int major, minor, rel;
os_read(f, buf, BUFFER_SIZE_ELEMENTS(buf));
NULL_TERMINATE_BUFFER(buf);
if (sscanf(buf, "%*s %*s %d.%d.%d", &major, &minor, &rel) == 3) {
/* Linux NPTL in kernel 2.6.12+ has POSIX-style itimers shared
* among threads.
*/
LOG(GLOBAL, LOG_ASYNCH, 1, "kernel version = %d.%d.%d\n", major, minor,
rel);
itimers_shared = ((major == 2 && minor >= 6 && rel >= 12) ||
(major >= 3 /* linux-3.0 or above */));
cached = true;
}
os_close(f);
}
if (!cached) {
/* assume not shared */
itimers_shared = false;
cached = true;
}
LOG(GLOBAL, LOG_ASYNCH, 1, "itimers are %s\n",
itimers_shared ? "thread-shared" : "thread-private");
}
return itimers_shared;
}
static void
unset_initial_crash_handlers(dcontext_t *dcontext)
{
ASSERT(init_info.app_sigaction != NULL);
signal_info_exit_sigaction(GLOBAL_DCONTEXT, &init_info, false /*!other_thread*/);
/* Undo the unblock-all */
sigprocmask_syscall(SIG_SETMASK, &init_sigmask, NULL, sizeof(init_sigmask));
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "initial app signal mask:\n");
dump_sigset(dcontext, &init_sigmask);
});
}
void
d_r_signal_init(void)
{
kernel_sigset_t set;
IF_LINUX(IF_X86_64(ASSERT(ALIGNED(offsetof(sigpending_t, xstate), AVX_ALIGNMENT))));
IF_MACOS(ASSERT(sizeof(kernel_sigset_t) == sizeof(__darwin_sigset_t)));
os_itimers_thread_shared();
/* Set up a handler for safe_read (or other fault detection) during
* DR init before thread is initialized.
*
* XXX: could set up a clone_record_t and pass to the initial
* signal_thread_inherit() but that would require further code changes.
* Could also call signal_thread_inherit to init this, but we don't want
* to intercept timer signals, etc. before we're ready to handle them,
* so we do a partial init.
*/
signal_info_init_sigaction(GLOBAL_DCONTEXT, &init_info);
intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGSEGV);
intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGBUS);
kernel_sigemptyset(&set);
kernel_sigaddset(&set, SIGSEGV);
kernel_sigaddset(&set, SIGBUS);
sigprocmask_syscall(SIG_UNBLOCK, &set, &init_sigmask, sizeof(set));
IF_LINUX(signalfd_init());
signal_arch_init();
}
void
d_r_signal_exit()
{
IF_LINUX(signalfd_exit());
#ifdef DEBUG
if (d_r_stats->loglevel > 0 && (d_r_stats->logmask & (LOG_ASYNCH | LOG_STATS)) != 0) {
LOG(GLOBAL, LOG_ASYNCH | LOG_STATS, 1, "Total signals delivered: %d\n",
GLOBAL_STAT(num_signals));
}
#endif
}
#ifdef HAVE_SIGALTSTACK
/* Separated out to run from the dstack (i#2016: see below). */
static void
set_our_alt_stack(void *arg)
{
thread_sig_info_t *info = (thread_sig_info_t *)arg;
DEBUG_DECLARE(int rc =)
sigaltstack_syscall(&info->sigstack, &info->app_sigstack);
ASSERT(rc == 0);
}
#endif
void
signal_thread_init(dcontext_t *dcontext, void *os_data)
{
thread_sig_info_t *info =
HEAP_TYPE_ALLOC(dcontext, thread_sig_info_t, ACCT_OTHER, PROTECTED);
size_t pend_unit_size = sizeof(sigpending_t) +
/* include alignment for xsave on xstate */
signal_frame_extra_size(true)
/* sigpending_t has xstate inside it already */
IF_LINUX(IF_X86(-sizeof(kernel_xstate_t)));
IF_LINUX(IF_X86(ASSERT(!YMM_ENABLED() || ALIGNED(pend_unit_size, AVX_ALIGNMENT))));
/* all fields want to be initialized to 0 */
memset(info, 0, sizeof(thread_sig_info_t));
dcontext->signal_field = (void *)info;
/* our special heap to avoid reentrancy problems
* composed entirely of sigpending_t units
* Note that it's fine to have the special heap do page-at-a-time
* committing, which does not use locks (unless triggers reset!),
* but if we need a new unit that will grab a lock: we try to
* avoid that by limiting the # of pending alarm signals (PR 596768).
*/
info->sigheap = special_heap_init_aligned(
pend_unit_size, IF_X86_ELSE(AVX_ALIGNMENT, 0),
false /* cannot have any locking */, false /* -x */, true /* persistent */,
pend_unit_size * DYNAMO_OPTION(max_pending_signals));
#ifdef HAVE_SIGALTSTACK
/* set up alternate stack
* i#552 we may terminate the process without freeing the stack, so we
* stack_alloc it to exempt from the memory leak check.
*/
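/* stack_alloc returns the high end of the allocation (stacks grow down), so we
 * subtract the size to get the base address expected in ss_sp.
 */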
info->sigstack.ss_sp = (char *)stack_alloc(SIGSTACK_SIZE, NULL) - SIGSTACK_SIZE;
info->sigstack.ss_size = SIGSTACK_SIZE;
/* kernel will set xsp to sp+size to grow down from there, we don't have to */
info->sigstack.ss_flags = 0;
/* i#2016: for late takeover, this app thread may already be on its own alt
* stack. Not setting SA_ONSTACK for SUSPEND_SIGNAL is not sufficient to avoid
* this, as our SUSPEND_SIGNAL can interrupt the app inside its own signal
* handler. Thus, we simply swap to another stack temporarily to avoid the
* kernel complaining. The dstack is set up but it has the clone record and
* initial mcxt, so we use the new alt stack.
*/
call_switch_stack((void *)info, (byte *)info->sigstack.ss_sp + info->sigstack.ss_size,
set_our_alt_stack, NULL, true /*return*/);
LOG(THREAD, LOG_ASYNCH, 1, "signal stack is " PFX " - " PFX "\n",
info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size);
/* app_sigstack dealt with below, based on parentage */
#endif
kernel_sigemptyset(&info->app_sigblocked);
ASSIGN_INIT_LOCK_FREE(info->child_lock, child_lock);
/* signal_thread_inherit() finishes per-thread init and is invoked
* by os_thread_init_finalize(): we need it after synch_thread_init() and
* other post-os_thread_init() setup b/c we can't yet record pending signals,
* but we need it before we give up thread_initexit_lock so we can handle
* our own suspend signals (i#2779).
*/
}
bool
is_thread_signal_info_initialized(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
return info->fully_initialized;
}
/* i#27: create custom data to pass to the child of a clone
* since we can't rely on being able to find the caller, or that
* its syscall data is still valid, once in the child.
*
* i#149/ PR 403015: The clone record is passed to the new thread via the dstack
* created for it. Unlike before, where the child thread would create its own
* dstack, now the parent thread creates the dstack. Also, switches app stack
* to dstack.
*
* XXX i#1403: for Mac we want to eventually do lower-level earlier interception
* of threads, but for now we're later and higher-level, intercepting the user
* thread function on the new thread's stack. We ignore app_thread_xsp.
*/
void *
#ifdef MACOS
create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp, app_pc thread_func,
void *thread_arg)
#else
create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp)
#endif
{
clone_record_t *record;
byte *dstack = stack_alloc(DYNAMORIO_STACK_SIZE, NULL);
LOG(THREAD, LOG_ASYNCH, 1, "create_clone_record: dstack for new thread is " PFX "\n",
dstack);
#ifdef MACOS
if (app_thread_xsp == NULL) {
record = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, clone_record_t, ACCT_THREAD_MGT,
true /*prot*/);
record->app_thread_xsp = 0;
record->continuation_pc = thread_func;
record->thread_arg = thread_arg;
record->clone_flags = CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD;
} else {
#endif
/* Note, the stack grows to low memory addr, so dstack points to the high
* end of the allocated stack region. So, we must subtract to get space for
* the clone record.
*/
record = (clone_record_t *)(dstack - sizeof(clone_record_t));
ASSERT(ALIGNED(record, get_ABI_stack_alignment()));
record->app_thread_xsp = *app_thread_xsp;
/* asynch_target is set in d_r_dispatch() prior to calling pre_system_call(). */
record->continuation_pc = dcontext->asynch_target;
record->clone_flags = dcontext->sys_param0;
#ifdef MACOS
}
#endif
LOG(THREAD, LOG_ASYNCH, 1, "allocated clone record: " PFX "\n", record);
record->dstack = dstack;
record->caller_id = dcontext->owning_thread;
record->clone_sysnum = dcontext->sys_num;
record->info = *((thread_sig_info_t *)dcontext->signal_field);
/* Sigstack is not inherited so clear it now to avoid having to figure out
* where it got its value in signal_thread_inherit (i#3116).
*/
memset(&record->info.app_sigstack, 0, sizeof(record->info.app_sigstack));
record->info.app_sigstack.ss_flags = SS_DISABLE;
record->parent_info = (thread_sig_info_t *)dcontext->signal_field;
record->pcprofile_info = dcontext->pcprofile_field;
#ifdef AARCHXX
record->app_stolen_value = get_stolen_reg_val(get_mcontext(dcontext));
# ifndef AARCH64
record->isa_mode = dr_get_isa_mode(dcontext);
# endif
/* If the child thread shares the same TLS with parent by not setting
* CLONE_SETTLS or vfork, we put the TLS base here and clear the
* thread register in new_thread_setup, so that DR can distinguish
* this case from normal pthread thread creation.
*/
record->app_lib_tls_base = (!TEST(CLONE_SETTLS, record->clone_flags))
? os_get_app_tls_base(dcontext, TLS_REG_LIB)
: NULL;
#endif
LOG(THREAD, LOG_ASYNCH, 1, "create_clone_record: thread " TIDFMT ", pc " PFX "\n",
record->caller_id, record->continuation_pc);
#ifdef MACOS
if (app_thread_xsp != NULL) {
#endif
/* Set the thread stack to point to the dstack, below the clone record.
* Note: it's glibc who sets up the arg to the thread start function;
* the kernel just does a fork + stack swap, so we can get away w/ our
* own stack swap if we restore before the glibc asm code takes over.
* We restore this parameter to the app value in
* restore_clone_param_from_clone_record().
*/
/* i#754: set stack to be XSTATE aligned for saving YMM registers */
ASSERT(ALIGNED(XSTATE_ALIGNMENT, REGPARM_END_ALIGN));
*app_thread_xsp = ALIGN_BACKWARD(record, XSTATE_ALIGNMENT);
#ifdef MACOS
}
#endif
return (void *)record;
}
/* This is to support dr_create_client_thread() */
void
set_clone_record_fields(void *record, reg_t app_thread_xsp, app_pc continuation_pc,
uint clone_sysnum, uint clone_flags)
{
clone_record_t *rec = (clone_record_t *)record;
ASSERT(rec != NULL);
rec->app_thread_xsp = app_thread_xsp;
rec->continuation_pc = continuation_pc;
rec->clone_sysnum = clone_sysnum;
rec->clone_flags = clone_flags;
}
/* i#149/PR 403015: The clone record is passed to the new thread by placing it
* at the bottom of the dstack, i.e., the high memory. So the new thread gets
* it from the base of the dstack. The dstack is then set as the app stack.
*
* CAUTION: don't use a lot of stack in this routine as it gets invoked on the
* dstack from new_thread_setup - this is because this routine assumes
* no more than a page of dstack has been used so far since the clone
* system call was done.
*/
void *
get_clone_record(reg_t xsp)
{
clone_record_t *record;
byte *dstack_base;
/* xsp should be in a dstack, i.e., dynamorio heap. */
ASSERT(is_dynamo_address((app_pc)xsp));
/* The (size of the clone record +
* stack used by new_thread_start (only for setting up priv_mcontext_t) +
* stack used by new_thread_setup before calling get_clone_record())
* is less than a page. This is verified by the assert below. If it does
* exceed a page, it won't happen at random during runtime, but in a
* predictable way during development, which will be caught by the assert.
* The current usage is about 800 bytes for clone_record +
* sizeof(priv_mcontext_t) + few words in new_thread_setup before
* get_clone_record() is called.
*/
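/* Since less than a page of the dstack has been used, rounding xsp up to the next
 * page boundary recovers the dstack's high end, where the clone record was placed.
 */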
dstack_base = (byte *)ALIGN_FORWARD(xsp, PAGE_SIZE);
record = (clone_record_t *)(dstack_base - sizeof(clone_record_t));
/* dstack_base and the dstack in the clone record should be the same. */
ASSERT(dstack_base == record->dstack);
#ifdef MACOS
ASSERT(record->app_thread_xsp != 0); /* else it's not in dstack */
#endif
return (void *)record;
}
/* i#149/PR 403015: App xsp is passed to the new thread via the clone record. */
reg_t
get_clone_record_app_xsp(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *)record)->app_thread_xsp;
}
#ifdef MACOS
void *
get_clone_record_thread_arg(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *)record)->thread_arg;
}
#endif
byte *
get_clone_record_dstack(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *)record)->dstack;
}
#ifdef AARCHXX
reg_t
get_clone_record_stolen_value(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *)record)->app_stolen_value;
}
# ifndef AARCH64
uint /* dr_isa_mode_t but we have a header ordering problem */
get_clone_record_isa_mode(void *record)
{
ASSERT(record != NULL);
return ((clone_record_t *)record)->isa_mode;
}
# endif
void
set_thread_register_from_clone_record(void *record)
{
/* If record->app_lib_tls_base is not NULL, it means the parent
* thread did not set up TLS for the child, and we need to clear the
* thread register.
*/
if (((clone_record_t *)record)->app_lib_tls_base != NULL)
write_thread_register(NULL);
}
void
set_app_lib_tls_base_from_clone_record(dcontext_t *dcontext, void *record)
{
if (((clone_record_t *)record)->app_lib_tls_base != NULL) {
/* child and parent share the same TLS */
os_set_app_tls_base(dcontext, TLS_REG_LIB,
((clone_record_t *)record)->app_lib_tls_base);
}
}
#endif
void
restore_clone_param_from_clone_record(dcontext_t *dcontext, void *record)
{
#ifdef LINUX
ASSERT(record != NULL);
clone_record_t *crec = (clone_record_t *)record;
if (crec->clone_sysnum == SYS_clone && TEST(CLONE_VM, crec->clone_flags)) {
/* Restore the original stack parameter to the syscall, which we clobbered
* in create_clone_record(). Some apps examine it post-syscall (i#3171).
*/
set_syscall_param(dcontext, SYSCALL_PARAM_CLONE_STACK,
get_mcontext(dcontext)->xsp);
}
#endif
}
/* Initializes info's app_sigaction, restorer_valid, and we_intercept fields */
static void
signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info)
{
info->app_sigaction = (kernel_sigaction_t **)handler_alloc(
dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(&info->restorer_valid, -1, SIGARRAY_SIZE * sizeof(info->restorer_valid[0]));
info->we_intercept = (bool *)handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool));
memset(info->we_intercept, 0, SIGARRAY_SIZE * sizeof(bool));
}
/* Cleans up info's app_sigaction and we_intercept entries */
static void
signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info,
bool other_thread)
{
int i;
kernel_sigaction_t act;
memset(&act, 0, sizeof(act));
act.handler = (handler_t)SIG_DFL;
kernel_sigemptyset(&act.mask); /* does mask matter for SIG_DFL? */
for (i = 1; i <= MAX_SIGNUM; i++) {
if (sig_is_alarm_signal(i) && doing_detach &&
alarm_signal_has_DR_only_itimer(dcontext, i)) {
/* We ignore alarms *during* detach in signal_remove_alarm_handlers(),
* but to avoid crashing on an alarm arriving post-detach we set it to
* SIG_IGN if we have an itimer and the app does not (a slight
* transparency violation to gain robustness: i#2270).
*/
set_ignore_signal_action(i);
} else if (!other_thread) {
if (info->app_sigaction[i] != NULL) {
/* Restore to old handler, but not if exiting whole process:
* else may get itimer during cleanup, so we set to SIG_IGN. We
* do this during detach in signal_remove_alarm_handlers() (and
* post-detach above).
*/
if (dynamo_exited && !doing_detach) {
info->app_sigaction[i]->handler = (handler_t)SIG_IGN;
}
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring " PFX " as handler for %d\n",
info->app_sigaction[i]->handler, i);
sigaction_syscall(i, info->app_sigaction[i], NULL);
} else if (info->we_intercept[i]) {
/* restore to default */
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i);
sigaction_syscall(i, &act, NULL);
}
}
if (info->app_sigaction[i] != NULL) {
handler_free(dcontext, info->app_sigaction[i], sizeof(kernel_sigaction_t));
}
}
handler_free(dcontext, info->app_sigaction,
SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
info->app_sigaction = NULL;
handler_free(dcontext, info->we_intercept, SIGARRAY_SIZE * sizeof(bool));
info->we_intercept = NULL;
}
/* Called to finalize per-thread initialization.
* Inherited and shared fields are set up here.
* The clone_record contains the continuation pc, which is stored in dcontext->next_tag.
*/
void
signal_thread_inherit(dcontext_t *dcontext, void *clone_record)
{
clone_record_t *record = (clone_record_t *)clone_record;
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
if (record != NULL) {
LOG(THREAD, LOG_ASYNCH, 1, "continuation pc is " PFX "\n",
record->continuation_pc);
dcontext->next_tag = record->continuation_pc;
LOG(THREAD, LOG_ASYNCH, 1,
"parent tid is " TIDFMT ", parent sysnum is %d(%s), clone flags=" PIFX "\n",
record->caller_id, record->clone_sysnum,
#ifdef SYS_vfork
(record->clone_sysnum == SYS_vfork)
? "vfork"
:
#endif
(IF_LINUX(record->clone_sysnum == SYS_clone ? "clone" :) IF_MACOS(
record->clone_sysnum == SYS_bsdthread_create ? "bsdthread_create"
:) "unexpected"),
record->clone_flags);
#ifdef SYS_vfork
if (record->clone_sysnum == SYS_vfork) {
/* The above clone_flags argument is bogus.
* SYS_vfork doesn't have a free register to keep the hardcoded value;
* see /usr/src/linux/arch/i386/kernel/process.c */
/* CHECK: is this the only place real clone flags are needed? */
record->clone_flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
}
#endif
/* handlers are either inherited or shared */
if (TEST(CLONE_SIGHAND, record->clone_flags)) {
/* need to share table of handlers! */
LOG(THREAD, LOG_ASYNCH, 2, "sharing signal handlers with parent\n");
info->shared_app_sigaction = true;
info->shared_refcount = record->info.shared_refcount;
info->shared_lock = record->info.shared_lock;
info->app_sigaction = record->info.app_sigaction;
info->we_intercept = record->info.we_intercept;
d_r_mutex_lock(info->shared_lock);
(*info->shared_refcount)++;
#ifdef DEBUG
for (i = 1; i <= MAX_SIGNUM; i++) {
if (info->app_sigaction[i] != NULL) {
LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is " PFX "\n", i,
info->app_sigaction[i]->handler);
}
}
#endif
d_r_mutex_unlock(info->shared_lock);
} else {
/* copy handlers */
LOG(THREAD, LOG_ASYNCH, 2, "inheriting signal handlers from parent\n");
info->app_sigaction = (kernel_sigaction_t **)handler_alloc(
dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *));
for (i = 1; i <= MAX_SIGNUM; i++) {
info->restorer_valid[i] = -1; /* clear cache */
if (record->info.app_sigaction[i] != NULL) {
info->app_sigaction[i] = (kernel_sigaction_t *)handler_alloc(
dcontext, sizeof(kernel_sigaction_t));
memcpy(info->app_sigaction[i], record->info.app_sigaction[i],
sizeof(kernel_sigaction_t));
LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is " PFX "\n", i,
info->app_sigaction[i]->handler);
}
}
info->we_intercept =
(bool *)handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool));
memcpy(info->we_intercept, record->info.we_intercept,
SIGARRAY_SIZE * sizeof(bool));
d_r_mutex_lock(&record->info.child_lock);
record->info.num_unstarted_children--;
d_r_mutex_unlock(&record->info.child_lock);
/* this should be safe since parent should wait for us */
d_r_mutex_lock(&record->parent_info->child_lock);
record->parent_info->num_unstarted_children--;
d_r_mutex_unlock(&record->parent_info->child_lock);
}
/* itimers are either private or shared */
if (TEST(CLONE_THREAD, record->clone_flags) && os_itimers_thread_shared()) {
ASSERT(record->info.shared_itimer);
LOG(THREAD, LOG_ASYNCH, 2, "sharing itimers with parent\n");
info->shared_itimer = true;
info->shared_itimer_refcount = record->info.shared_itimer_refcount;
info->shared_itimer_underDR = record->info.shared_itimer_underDR;
info->itimer = record->info.itimer;
atomic_add_exchange_int((volatile int *)info->shared_itimer_refcount, 1);
/* shared_itimer_underDR will be incremented in start_itimer() */
} else {
info->shared_itimer = false;
init_itimer(dcontext, false /*!first thread*/);
}
/* rest of state is never shared.
* app_sigstack should already be in place, when we set up our sigstack
* we asked for old sigstack.
* FIXME: are current pending or blocked inherited?
*/
#ifdef MACOS
if (record->app_thread_xsp != 0) {
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, record, clone_record_t, ACCT_THREAD_MGT,
true /*prot*/);
}
#endif
} else {
/* Initialize in isolation */
if (APP_HAS_SIGSTACK(info)) {
/* parent was NOT under our control, so the real sigstack we see is
* a real sigstack that was present before we took control
*/
LOG(THREAD, LOG_ASYNCH, 1, "app already has signal stack " PFX " - " PFX "\n",
info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
}
signal_info_init_sigaction(dcontext, info);
info->shared_itimer = false; /* we'll set to true if a child is created */
init_itimer(dcontext, true /*first*/);
/* We split init vs start for the signal handlers and mask. We do not
* install ours until we start running the app, to avoid races like
* i#2335. We'll set them up when os_process_under_dynamorio_*() invokes
* signal_reinstate_handlers(). All we do now is mark which signals we
* want to intercept.
*/
if (DYNAMO_OPTION(intercept_all_signals)) {
/* PR 304708: to support client signal handlers without
* the complexity of per-thread and per-signal callbacks
* we always intercept all signals. We also check here
* for handlers the app registered before our init.
*/
for (i = 1; i <= MAX_SIGNUM; i++) {
/* cannot intercept KILL or STOP */
if (signal_is_interceptable(i) &&
/* FIXME PR 297033: we don't support intercepting DEFAULT_STOP /
* DEFAULT_CONTINUE signals. Once add support, update
* dr_register_signal_event() comments.
*/
default_action[i] != DEFAULT_STOP &&
default_action[i] != DEFAULT_CONTINUE)
info->we_intercept[i] = true;
}
} else {
/* we intercept the following signals ourselves: */
info->we_intercept[SIGSEGV] = true;
/* PR 313665: look for DR crashes on unaligned memory or mmap bounds */
info->we_intercept[SIGBUS] = true;
/* PR 212090: the signal we use to suspend threads */
info->we_intercept[SUSPEND_SIGNAL] = true;
#ifdef PAPI
/* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */
info->we_intercept[SIGPROF] = true;
#endif
/* vtalarm only used with pc profiling. it interferes w/ PAPI
* so arm this signal only if necessary
*/
if (INTERNAL_OPTION(profile_pcs)) {
info->we_intercept[SIGVTALRM] = true;
}
#ifdef CLIENT_INTERFACE
info->we_intercept[SIGALRM] = true;
#endif
#ifdef SIDELINE
info->we_intercept[SIGCHLD] = true;
#endif
/* i#61/PR 211530: the signal we use for nudges */
info->we_intercept[NUDGESIG_SIGNUM] = true;
}
/* should be 1st thread */
if (d_r_get_num_threads() > 1)
ASSERT_NOT_REACHED();
}
/* only when SIGVTALRM handler is in place should we start itimer (PR 537743) */
if (INTERNAL_OPTION(profile_pcs)) {
/* even if the parent thread exits, we can use a pointer to its
* pcprofile_info b/c when shared it's process-shared and is not freed
* until the entire process exits
*/
pcprofile_thread_init(dcontext, info->shared_itimer,
(record == NULL) ? NULL : record->pcprofile_info);
}
info->pre_syscall_app_sigprocmask_valid = false;
/* Assumed to be async safe. */
info->fully_initialized = true;
}
/* When taking over existing app threads, we assume they're using pthreads and
* expect to share signal handlers, memory, thread group id, etc.
* Invokes dynamo_thread_init() with the appropriate os_data.
*/
dcontext_t *
init_thread_with_shared_siginfo(priv_mcontext_t *mc, dcontext_t *takeover_dc)
{
clone_record_t crec = {
0,
};
thread_sig_info_t *parent_siginfo = (thread_sig_info_t *)takeover_dc->signal_field;
/* Create a fake clone record with the given siginfo. All threads in the
* same thread group must share signal handlers since Linux 2.5.35, but we
* have to guess at the other flags.
* FIXME i#764: If we take over non-pthreads threads, we'll need some way to
* tell if they're sharing signal handlers or not.
*/
crec.caller_id = takeover_dc->owning_thread;
#ifdef LINUX
crec.clone_sysnum = SYS_clone;
#else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: NYI on Mac */
#endif
crec.clone_flags = PTHREAD_CLONE_FLAGS;
crec.parent_info = parent_siginfo;
crec.info = *parent_siginfo;
crec.pcprofile_info = takeover_dc->pcprofile_field;
IF_DEBUG(int r =)
dynamo_thread_init(NULL, mc, &crec _IF_CLIENT_INTERFACE(false));
ASSERT(r == SUCCESS);
return get_thread_private_dcontext();
}
static void
free_pending_signal(thread_sig_info_t *info, int sig)
{
sigpending_t *temp = info->sigpending[sig];
info->sigpending[sig] = temp->next;
special_heap_free(info->sigheap, temp);
info->num_pending--;
}
/* This is split from os_fork_init() so the new logfiles are available
* (xref i#189/PR 452168). It had to be after dynamo_other_thread_exit()
* called in dynamorio_fork_init() after os_fork_init() else we clean
* up data structs used in signal_thread_exit().
*/
void
signal_fork_init(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
/* Child of fork is a single thread in a new process so should
* start over w/ no sharing (xref i#190/PR 452178)
*/
if (info->shared_app_sigaction) {
info->shared_app_sigaction = false;
if (info->shared_lock != NULL) {
DELETE_LOCK(*info->shared_lock);
global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
}
if (info->shared_refcount != NULL)
global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_lock = NULL;
info->shared_refcount = NULL;
}
if (info->shared_itimer) {
/* itimers are not inherited across fork */
info->shared_itimer = false;
for (i = 0; i < NUM_ITIMERS; i++)
DELETE_RECURSIVE_LOCK((*info->itimer)[i].lock);
if (os_itimers_thread_shared())
global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
else
heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
info->itimer = NULL; /* reset by init_itimer */
ASSERT(info->shared_itimer_refcount != NULL);
global_heap_free(info->shared_itimer_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_itimer_refcount = NULL;
ASSERT(info->shared_itimer_underDR != NULL);
global_heap_free(info->shared_itimer_underDR, sizeof(int) HEAPACCT(ACCT_OTHER));
info->shared_itimer_underDR = NULL;
init_itimer(dcontext, true /*first*/);
}
info->num_unstarted_children = 0;
for (i = 1; i <= MAX_SIGNUM; i++) {
/* "A child created via fork(2) initially has an empty pending signal set" */
dcontext->signals_pending = 0;
while (info->sigpending[i] != NULL) {
free_pending_signal(info, i);
}
info->num_pending = 0;
}
if (INTERNAL_OPTION(profile_pcs)) {
pcprofile_fork_init(dcontext);
}
info->pre_syscall_app_sigprocmask_valid = false;
/* Assumed to be async safe. */
info->fully_initialized = true;
}
#ifdef DEBUG
static bool
sigsegv_handler_is_ours(void)
{
int rc;
kernel_sigaction_t oldact;
rc = sigaction_syscall(SIGSEGV, NULL, &oldact);
return (rc == 0 && oldact.handler == (handler_t)master_signal_handler);
}
#endif /* DEBUG */
#if defined(X86) && defined(LINUX)
static byte *
get_xstate_buffer(dcontext_t *dcontext)
{
/* See thread_sig_info_t.xstate_buf comments for why this is in TLS. */
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
if (info->xstate_buf == NULL) {
info->xstate_alloc =
heap_alloc(dcontext, signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER));
info->xstate_buf = (byte *)ALIGN_FORWARD(info->xstate_alloc, XSTATE_ALIGNMENT);
ASSERT(info->xstate_alloc + signal_frame_extra_size(true) >=
info->xstate_buf + signal_frame_extra_size(false));
}
return info->xstate_buf;
}
#endif
void
signal_thread_exit(dcontext_t *dcontext, bool other_thread)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
/* i#1012: DR's signal handler should always be installed before this point.
*/
ASSERT(sigsegv_handler_is_ours() || removed_sig_handler);
while (info->num_unstarted_children > 0) {
/* must wait for children to start and copy our state
* before we destroy it!
*/
os_thread_yield();
}
/* stop_itimer() was already called by os_thread_not_under_dynamo() called
* from dynamo_thread_exit_common(). We need to leave the app itimers in place
* in case we're detaching.
*/
#if defined(X86) && defined(LINUX)
if (info->xstate_alloc != NULL) {
heap_free(dcontext, info->xstate_alloc,
signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER));
}
#endif
/* FIXME: w/ shared handlers, if parent (the owner here) dies,
* can children keep living w/ a copy of the handlers?
*/
if (info->shared_app_sigaction) {
d_r_mutex_lock(info->shared_lock);
(*info->shared_refcount)--;
d_r_mutex_unlock(info->shared_lock);
}
if (!info->shared_app_sigaction || *info->shared_refcount == 0) {
LOG(THREAD, LOG_ASYNCH, 2, "signal handler cleanup:\n");
signal_info_exit_sigaction(dcontext, info, other_thread);
if (info->shared_lock != NULL) {
DELETE_LOCK(*info->shared_lock);
global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
}
if (info->shared_refcount != NULL)
global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER));
}
if (info->shared_itimer) {
atomic_add_exchange_int((volatile int *)info->shared_itimer_refcount, -1);
}
if (!info->shared_itimer || *info->shared_itimer_refcount == 0) {
if (INTERNAL_OPTION(profile_pcs)) {
/* no cleanup needed for non-final thread in group */
pcprofile_thread_exit(dcontext);
}
for (i = 0; i < NUM_ITIMERS; i++)
DELETE_RECURSIVE_LOCK((*info->itimer)[i].lock);
if (os_itimers_thread_shared())
global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
else
heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
if (info->shared_itimer_refcount != NULL) {
global_heap_free(info->shared_itimer_refcount,
sizeof(int) HEAPACCT(ACCT_OTHER));
ASSERT(info->shared_itimer_underDR != NULL);
global_heap_free(info->shared_itimer_underDR,
sizeof(int) HEAPACCT(ACCT_OTHER));
}
}
for (i = 1; i <= MAX_SIGNUM; i++) {
/* pending queue is per-thread and not shared */
while (info->sigpending[i] != NULL) {
sigpending_t *temp = info->sigpending[i];
info->sigpending[i] = temp->next;
special_heap_free(info->sigheap, temp);
}
info->num_pending = 0;
}
/* If no detach flag is set, we assume that this thread is on its way to exit.
* In order to prevent receiving signals while a thread is on its way to exit
* without a valid dcontext, signals at this stage are blocked. The exceptions
* are the suspend signal and any signal that a terminating SYS_kill may need.
* (i#2921). In this case, we do not want to restore the signal mask. For detach,
* we do need to restore the app's mask.
*/
if (!other_thread && doing_detach)
signal_swap_mask(dcontext, true /*to_app*/);
#ifdef HAVE_SIGALTSTACK
/* Remove our sigstack and restore the app sigstack if it had one. */
if (!other_thread) {
LOG(THREAD, LOG_ASYNCH, 2, "removing our signal stack " PFX " - " PFX "\n",
info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size);
if (APP_HAS_SIGSTACK(info)) {
LOG(THREAD, LOG_ASYNCH, 2, "restoring app signal stack " PFX " - " PFX "\n",
info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
} else {
ASSERT(TEST(SS_DISABLE, info->app_sigstack.ss_flags));
}
if (info->sigstack.ss_sp != NULL) {
/* i#552: to raise client exit event, we may call dynamo_process_exit
* on sigstack in signal handler.
* In that case we set sigstack (ss_sp) NULL to avoid stack swap.
*/
# ifdef MACOS
if (info->app_sigstack.ss_sp == NULL) {
/* Kernel fails w/ ENOMEM (even for SS_DISABLE) if ss_size is too small */
info->sigstack.ss_flags = SS_DISABLE;
i = sigaltstack_syscall(&info->sigstack, NULL);
/* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
ASSERT(i == 0 || i == -EINVAL);
} else {
i = sigaltstack_syscall(&info->app_sigstack, NULL);
/* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
ASSERT(i == 0 || i == -EINVAL);
}
# else
i = sigaltstack_syscall(&info->app_sigstack, NULL);
ASSERT(i == 0);
# endif
}
}
#endif
IF_LINUX(signalfd_thread_exit(dcontext, info));
special_heap_exit(info->sigheap);
DELETE_LOCK(info->child_lock);
#ifdef DEBUG
/* for non-debug we do fast exit path and don't free local heap */
# ifdef HAVE_SIGALTSTACK
if (info->sigstack.ss_sp != NULL) {
/* i#552: to raise client exit event, we may call dynamo_process_exit
* on sigstack in signal handler.
* In that case we set sigstack (ss_sp) NULL to avoid stack free.
*/
stack_free(info->sigstack.ss_sp + info->sigstack.ss_size, info->sigstack.ss_size);
}
# endif
HEAP_TYPE_FREE(dcontext, info, thread_sig_info_t, ACCT_OTHER, PROTECTED);
#endif
#ifdef PAPI
/* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */
set_itimer_callback(
dcontext, ITIMER_PROF, 500,
(void (*func)(dcontext_t *, priv_mcontext_t *))perfctr_update_gui());
#endif
}
void
set_handler_sigact(kernel_sigaction_t *act, int sig, handler_t handler)
{
act->handler = handler;
#ifdef MACOS
/* This is the real target */
act->tramp = (tramp_t)handler;
#endif
act->flags = SA_SIGINFO; /* send 3 args to handler */
#ifdef HAVE_SIGALTSTACK
act->flags |= SA_ONSTACK; /* use our sigstack */
#endif
/* We want the kernel to help us auto-restart syscalls, esp. when our signals
* interrupt native code such as during attach or in client or DR code (i#2659).
*/
act->flags |= SA_RESTART;
#if !defined(VMX86_SERVER) && defined(LINUX)
/* PR 305020: must have SA_RESTORER for x64 */
/* i#2812: must have SA_RESTORER to handle vsyscall32 being disabled */
act->flags |= SA_RESTORER;
act->restorer = (void (*)(void))dynamorio_sigreturn;
#endif
/* We block most signals within our handler */
kernel_sigfillset(&act->mask);
/* i#184/PR 450670: we let our suspend signal interrupt our own handler
* We never send more than one before resuming, so no danger to stack usage
* from our own: but app could pile them up.
*/
kernel_sigdelset(&act->mask, SUSPEND_SIGNAL);
/* i#193/PR 287309: we need to NOT suppress further SIGSEGV, for decode faults,
* for try/except, and for !HAVE_MEMINFO probes.
* Just like SUSPEND_SIGNAL, if app sends repeated SEGV, could run out of
* alt stack: seems too corner-case to be worth increasing stack size.
*/
kernel_sigdelset(&act->mask, SIGSEGV);
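/* SA_NODEFER keeps the kernel from blocking the signal being delivered while its
 * own handler runs, so a nested SUSPEND_SIGNAL or SIGSEGV can still be delivered.
 */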
if (sig == SUSPEND_SIGNAL || sig == SIGSEGV)
act->flags |= SA_NODEFER;
/* Sigset is a 1 or 2 elt array of longs on X64/X86. Treat as 2 elt of
* uint32. */
IF_DEBUG(uint32 *mask_sig = (uint32 *)&act->mask.sig[0]);
LOG(THREAD_GET, LOG_ASYNCH, 3, "mask for our handler is " PFX " " PFX "\n",
mask_sig[0], mask_sig[1]);
}
static void
set_our_handler_sigact(kernel_sigaction_t *act, int sig)
{
set_handler_sigact(act, sig, (handler_t)master_signal_handler);
}
static void
set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
kernel_sigaction_t *act)
{
int rc;
kernel_sigaction_t oldact;
ASSERT(sig <= MAX_SIGNUM);
/* arm the signal */
rc = sigaction_syscall(sig, act, &oldact);
ASSERT(rc ==
0
/* Workaround for PR 223720, which was fixed in ESX4.0 but
* is present in ESX3.5 and earlier: vmkernel treats
* 63 and 64 as invalid signal numbers.
*/
IF_VMX86(|| (sig >= 63 && rc == -EINVAL)));
if (rc != 0) /* be defensive: app will probably still work */
return;
if (oldact.handler != (handler_t)SIG_DFL &&
oldact.handler != (handler_t)master_signal_handler) {
/* save the app's action for sig */
if (info->shared_app_sigaction) {
/* app_sigaction structure is shared */
d_r_mutex_lock(info->shared_lock);
}
if (info->app_sigaction[sig] != NULL) {
/* go ahead and toss the old one, it's up to the app to store
* and then restore later if it wants to
*/
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
}
info->app_sigaction[sig] =
(kernel_sigaction_t *)handler_alloc(dcontext, sizeof(kernel_sigaction_t));
memcpy(info->app_sigaction[sig], &oldact, sizeof(kernel_sigaction_t));
/* clear cache */
info->restorer_valid[sig] = -1;
if (info->shared_app_sigaction)
d_r_mutex_unlock(info->shared_lock);
#ifdef DEBUG
if (oldact.handler == (handler_t)SIG_IGN) {
LOG(THREAD, LOG_ASYNCH, 2,
"app already installed SIG_IGN as sigaction for signal %d\n", sig);
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"app already installed " PFX " as sigaction flags=0x%x for signal %d\n",
oldact.handler, oldact.flags, sig);
}
#endif
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"prior handler is " PFX " vs master " PFX " with flags=0x%x for signal %d\n",
oldact.handler, master_signal_handler, oldact.flags, sig);
if (info->app_sigaction[sig] != NULL) {
if (info->shared_app_sigaction)
d_r_mutex_lock(info->shared_lock);
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
if (info->shared_app_sigaction)
d_r_mutex_unlock(info->shared_lock);
}
}
LOG(THREAD, LOG_ASYNCH, 3, "\twe intercept signal %d\n", sig);
}
/* Set up master_signal_handler as the handler for signal "sig",
* for the current thread. Since we deal with kernel data structures
* in our interception of system calls, we use them here as well,
* to avoid having to translate to/from libc data structures.
*/
static void
intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
ASSERT(sig <= MAX_SIGNUM);
set_our_handler_sigact(&act, sig);
set_handler_and_record_app(dcontext, info, sig, &act);
}
static void
intercept_signal_ignore_initially(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
ASSERT(sig <= MAX_SIGNUM);
memset(&act, 0, sizeof(act));
act.handler = (handler_t)SIG_IGN;
set_handler_and_record_app(dcontext, info, sig, &act);
}
static void
intercept_signal_no_longer_ignore(dcontext_t *dcontext, thread_sig_info_t *info, int sig)
{
kernel_sigaction_t act;
int rc;
ASSERT(sig <= MAX_SIGNUM);
set_our_handler_sigact(&act, sig);
rc = sigaction_syscall(sig, &act, NULL);
ASSERT(rc == 0);
}
/* i#1921: For proper single-threaded native execution with re-takeover we need
* to propagate signals. For now we only support going completely native in
* this thread but without a full detach, so we abandon our signal handlers w/o
* freeing memory up front.
* We also use this for the start/stop interface where we are going fully native
* for all threads.
*/
void
signal_remove_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
kernel_sigaction_t act;
memset(&act, 0, sizeof(act));
act.handler = (handler_t)SIG_DFL;
kernel_sigemptyset(&act.mask);
for (i = 1; i <= MAX_SIGNUM; i++) {
if (info->app_sigaction[i] != NULL) {
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring " PFX " as handler for %d\n",
info->app_sigaction[i]->handler, i);
sigaction_syscall(i, info->app_sigaction[i], NULL);
} else if (info->we_intercept[i]) {
/* restore to default */
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i);
sigaction_syscall(i, &act, NULL);
}
}
DODEBUG({ removed_sig_handler = true; });
}
void
signal_remove_alarm_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
if (!info->we_intercept[i])
continue;
if (sig_is_alarm_signal(i)) {
set_ignore_signal_action(i);
}
}
}
/* For attaching mid-run, we assume regular POSIX with handlers global to just one
* thread group in the process.
* We also use this routine for the initial setup of our handlers, which we
* split from signal_thread_inherit() to support start/stop.
*/
void
signal_reinstate_handlers(dcontext_t *dcontext, bool ignore_alarm)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
bool skip = false;
if (!info->we_intercept[i]) {
skip = true;
if (signal_is_interceptable(i)) {
/* We do have to intercept everything the app does.
* If the app removes its handler, we'll never remove ours, which we
* can live with.
*/
kernel_sigaction_t oldact;
int rc = sigaction_syscall(i, NULL, &oldact);
ASSERT(rc == 0);
if (rc == 0 && oldact.handler != (handler_t)SIG_DFL &&
oldact.handler != (handler_t)master_signal_handler) {
skip = false;
}
}
}
if (skip)
continue;
if (sig_is_alarm_signal(i) && ignore_alarm) {
LOG(THREAD, LOG_ASYNCH, 2, "\tignoring %d initially\n", i);
intercept_signal_ignore_initially(dcontext, info, i);
} else {
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
intercept_signal(dcontext, info, i);
}
}
DODEBUG({ removed_sig_handler = false; });
}
void
signal_reinstate_alarm_handlers(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
for (i = 1; i <= MAX_SIGNUM; i++) {
if (!info->we_intercept[i] || !sig_is_alarm_signal(i))
continue;
LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
intercept_signal_no_longer_ignore(dcontext, info, i);
}
}
/**** system call handlers ***********************************************/
/* FIXME: invalid pointer passed to kernel will currently show up
* probably as a segfault in our handlers below...need to make them
* look like kernel, and pass error code back to os.c
*/
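/* Pre-syscall handling for SYS_clone: records the signal-handler and itimer
 * sharing implied by the clone flags so the child's thread_sig_info_t can be set
 * up properly in signal_thread_inherit().
 */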
void
handle_clone(dcontext_t *dcontext, uint flags)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
if ((flags & CLONE_VM) == 0) {
/* separate process not sharing memory */
if ((flags & CLONE_SIGHAND) != 0) {
            /* FIXME: how to deal with this?
* "man clone" says: "Since Linux 2.6.0-test6, flags must also
* include CLONE_VM if CLONE_SIGHAND is specified"
*/
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: !CLONE_VM but CLONE_SIGHAND!\n");
ASSERT_NOT_IMPLEMENTED(false);
}
return;
}
pre_second_thread();
if ((flags & CLONE_SIGHAND) != 0) {
/* need to share table of handlers! */
LOG(THREAD, LOG_ASYNCH, 2, "handle_clone: CLONE_SIGHAND set!\n");
if (!info->shared_app_sigaction) {
/* this is the start of a chain of sharing
* no synch needed here, child not created yet
*/
info->shared_app_sigaction = true;
info->shared_refcount =
(int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
*info->shared_refcount = 1;
info->shared_lock =
(mutex_t *)global_heap_alloc(sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
ASSIGN_INIT_LOCK_FREE(*info->shared_lock, shared_lock);
} /* else, some ancestor is already owner */
} else {
/* child will inherit copy of current table -> cannot modify it
* until child is scheduled! FIXME: any other way?
*/
d_r_mutex_lock(&info->child_lock);
info->num_unstarted_children++;
d_r_mutex_unlock(&info->child_lock);
}
if (TEST(CLONE_THREAD, flags) && os_itimers_thread_shared()) {
if (!info->shared_itimer) {
/* this is the start of a chain of sharing
* no synch needed here, child not created yet
*/
info->shared_itimer = true;
info->shared_itimer_refcount =
(int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
*info->shared_itimer_refcount = 1;
info->shared_itimer_underDR =
(int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
*info->shared_itimer_underDR = 1;
} /* else, some ancestor already created */
}
}
/* Returns false if should NOT issue syscall.
* In such a case, the result is in "result".
* If *result is non-zero, the syscall should fail.
* We could instead issue the syscall and expect it to fail, which would have a more
* accurate error code, but that risks missing a failure (e.g., RT on Android
 * which in some cases returns success on bogus params).
* It seems better to err on the side of the wrong error code or failing when
* we shouldn't, than to think it failed when it didn't, which is more complex
* to deal with.
*/
bool
handle_sigaction(dcontext_t *dcontext, int sig, const kernel_sigaction_t *act,
prev_sigaction_t *oact, size_t sigsetsize, OUT uint *result)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
kernel_sigaction_t *save;
kernel_sigaction_t local_act;
if (sigsetsize != sizeof(kernel_sigset_t)) {
*result = EINVAL;
return false;
}
if (act != NULL) {
/* Linux checks readability before checking the signal number. */
if (!d_r_safe_read(act, sizeof(local_act), &local_act)) {
*result = EFAULT;
return false;
}
}
/* i#1135: app may pass invalid signum to find MAX_SIGNUM */
if (sig <= 0 || sig > MAX_SIGNUM || (act != NULL && !signal_is_interceptable(sig))) {
*result = EINVAL;
return false;
}
if (act != NULL) {
/* app is installing a new action */
while (info->num_unstarted_children > 0) {
/* must wait for children to start and copy our state
* before we modify it!
*/
os_thread_yield();
}
info->sigaction_param = act;
}
if (info->shared_app_sigaction) {
/* app_sigaction structure is shared */
d_r_mutex_lock(info->shared_lock);
}
if (oact != NULL) {
/* Keep a copy of the prior one for post-syscall to hand to the app. */
info->use_kernel_prior_sigaction = false;
if (info->app_sigaction[sig] == NULL) {
if (info->we_intercept[sig]) {
/* need to pretend there is no handler */
memset(&info->prior_app_sigaction, 0, sizeof(info->prior_app_sigaction));
info->prior_app_sigaction.handler = (handler_t)SIG_DFL;
} else {
info->use_kernel_prior_sigaction = true;
}
} else {
memcpy(&info->prior_app_sigaction, info->app_sigaction[sig],
sizeof(info->prior_app_sigaction));
}
}
if (act != NULL) {
if (local_act.handler == (handler_t)SIG_IGN ||
local_act.handler == (handler_t)SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 2, "app installed %s as sigaction for signal %d\n",
(local_act.handler == (handler_t)SIG_IGN) ? "SIG_IGN" : "SIG_DFL", sig);
if (!info->we_intercept[sig]) {
/* let the SIG_IGN/SIG_DFL go through, we want to remove our
* handler. we delete the stored app_sigaction in post_
*/
if (info->shared_app_sigaction)
d_r_mutex_unlock(info->shared_lock);
return true;
}
} else {
LOG(THREAD, LOG_ASYNCH, 2,
"app installed " PFX " as sigaction for signal %d\n", local_act.handler,
sig);
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "signal mask for handler:\n");
dump_sigset(dcontext, (kernel_sigset_t *)&local_act.mask);
});
}
/* save app's entire sigaction struct */
save = (kernel_sigaction_t *)handler_alloc(dcontext, sizeof(kernel_sigaction_t));
memcpy(save, &local_act, sizeof(kernel_sigaction_t));
/* Remove the unblockable sigs */
kernel_sigdelset(&save->mask, SIGKILL);
kernel_sigdelset(&save->mask, SIGSTOP);
if (info->app_sigaction[sig] != NULL) {
/* go ahead and toss the old one, it's up to the app to store
* and then restore later if it wants to
*/
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
}
info->app_sigaction[sig] = save;
LOG(THREAD, LOG_ASYNCH, 3, "\tflags = " PFX ", %s = " PFX "\n", local_act.flags,
IF_MACOS_ELSE("tramp", "restorer"),
IF_MACOS_ELSE(local_act.tramp, local_act.restorer));
/* clear cache */
info->restorer_valid[sig] = -1;
}
if (info->shared_app_sigaction)
d_r_mutex_unlock(info->shared_lock);
if (info->we_intercept[sig]) {
/* cancel the syscall */
*result = handle_post_sigaction(dcontext, true, sig, act, oact, sigsetsize);
return false;
}
if (act != NULL) {
/* Now hand kernel our master handler instead of app's. */
set_our_handler_sigact(&info->our_sigaction, sig);
set_syscall_param(dcontext, 1, (reg_t)&info->our_sigaction);
/* FIXME PR 297033: we don't support intercepting DEFAULT_STOP /
* DEFAULT_CONTINUE signals b/c we can't generate the default
* action: if the app registers a handler, though, we should work
* properly if we never see SIG_DFL.
*/
}
return true;
}
/* os.c thinks it's passing us struct_sigaction, really it's kernel_sigaction_t,
 * which has fields in a different order.
* Only called on success.
* Returns the desired app return value (caller will negate if nec).
*/
uint
handle_post_sigaction(dcontext_t *dcontext, bool success, int sig,
const kernel_sigaction_t *act, prev_sigaction_t *oact,
size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
if (act != NULL) {
/* Restore app register value, in case we changed it. */
set_syscall_param(dcontext, 1, (reg_t)info->sigaction_param);
}
if (!success)
return 0; /* don't change return value */
ASSERT(sig <= MAX_SIGNUM && sig > 0);
if (oact != NULL) {
if (info->use_kernel_prior_sigaction) {
/* Real syscall succeeded with oact so it must be readable, barring races. */
ASSERT(oact->handler == (handler_t)SIG_IGN ||
oact->handler == (handler_t)SIG_DFL);
} else {
/* We may have skipped the syscall so we have to check writability */
#ifdef MACOS
/* On MacOS prev_sigaction_t is a different type (i#2105) */
bool fault = true;
TRY_EXCEPT(dcontext,
{
oact->handler = info->prior_app_sigaction.handler;
oact->mask = info->prior_app_sigaction.mask;
oact->flags = info->prior_app_sigaction.flags;
fault = false;
},
{
/* EXCEPT */
/* nothing: fault is already true */
});
if (fault)
return EFAULT;
#else
if (!safe_write_ex(oact, sizeof(*oact), &info->prior_app_sigaction, NULL)) {
/* We actually don't have to undo installing any passed action
* b/c the Linux kernel does that *before* checking oact perms.
*/
return EFAULT;
}
#endif
}
}
/* If installing IGN or DFL, delete ours.
* XXX: This is racy. We can't hold the lock across the syscall, though.
* What we should do is just drop support for -no_intercept_all_signals,
* which is off by default anyway and never turned off.
*/
if (act != NULL &&
/* De-ref here should work barring races: already racy and non-default so not
* bothering with safe_read.
*/
((act->handler == (handler_t)SIG_IGN || act->handler == (handler_t)SIG_DFL) &&
!info->we_intercept[sig]) &&
info->app_sigaction[sig] != NULL) {
if (info->shared_app_sigaction)
d_r_mutex_lock(info->shared_lock);
/* remove old stored app action */
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
if (info->shared_app_sigaction)
d_r_mutex_unlock(info->shared_lock);
}
return 0;
}
#ifdef LINUX
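/* Helpers for the legacy non-rt sigaction syscall: convert between the old
 * sigaction layout and kernel_sigaction_t, using TRY_EXCEPT in case the app
 * passed an unreadable or unwritable pointer.
 */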
static bool
convert_old_sigaction_to_kernel(dcontext_t *dcontext, kernel_sigaction_t *ks,
const old_sigaction_t *os)
{
bool res = false;
TRY_EXCEPT(dcontext,
{
ks->handler = os->handler;
ks->flags = os->flags;
ks->restorer = os->restorer;
kernel_sigemptyset(&ks->mask);
ks->mask.sig[0] = os->mask;
res = true;
},
{
/* EXCEPT */
/* nothing: res is already false */
});
return res;
}
static bool
convert_kernel_sigaction_to_old(dcontext_t *dcontext, old_sigaction_t *os,
const kernel_sigaction_t *ks)
{
bool res = false;
TRY_EXCEPT(dcontext,
{
os->handler = ks->handler;
os->flags = ks->flags;
os->restorer = ks->restorer;
os->mask = ks->mask.sig[0];
res = true;
},
{
/* EXCEPT */
/* nothing: res is already false */
});
return res;
}
/* Returns false (and "result") if should NOT issue syscall. */
bool
handle_old_sigaction(dcontext_t *dcontext, int sig, const old_sigaction_t *act,
old_sigaction_t *oact, OUT uint *result)
{
kernel_sigaction_t kact;
kernel_sigaction_t okact;
bool res;
if (act != NULL) {
if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) {
*result = EFAULT;
return false;
}
}
res = handle_sigaction(dcontext, sig, act == NULL ? NULL : &kact,
oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t), result);
if (!res)
*result = handle_post_old_sigaction(dcontext, true, sig, act, oact);
return res;
}
/* Returns the desired app return value (caller will negate if nec). */
uint
handle_post_old_sigaction(dcontext_t *dcontext, bool success, int sig,
const old_sigaction_t *act, old_sigaction_t *oact)
{
kernel_sigaction_t kact;
kernel_sigaction_t okact;
ptr_uint_t res;
if (act != NULL && success) {
if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) {
ASSERT(!success);
return EFAULT;
}
}
if (oact != NULL && success) {
if (!convert_old_sigaction_to_kernel(dcontext, &okact, oact)) {
ASSERT(!success);
return EFAULT;
}
}
res = handle_post_sigaction(dcontext, success, sig, act == NULL ? NULL : &kact,
oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t));
if (res == 0 && oact != NULL) {
if (!convert_kernel_sigaction_to_old(dcontext, oact, &okact)) {
return EFAULT;
}
}
return res;
}
#endif /* LINUX */
/* Returns false and sets *result if should NOT issue syscall.
* If *result is non-zero, the syscall should fail.
*/
bool
handle_sigaltstack(dcontext_t *dcontext, const stack_t *stack, stack_t *old_stack,
reg_t cur_xsp, OUT uint *result)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
stack_t local_stack;
if (old_stack != NULL) {
if (!safe_write_ex(old_stack, sizeof(*old_stack), &info->app_sigstack, NULL)) {
*result = EFAULT;
return false;
}
}
if (stack != NULL) {
/* Fail in the same way the kernel does. */
if (!d_r_safe_read(stack, sizeof(local_stack), &local_stack)) {
*result = EFAULT;
return false;
}
if (APP_HAS_SIGSTACK(info)) {
/* The app is not allowed to set a new altstack while on the current one. */
reg_t cur_sigstk = (reg_t)info->app_sigstack.ss_sp;
if (cur_xsp >= cur_sigstk &&
cur_xsp < cur_sigstk + info->app_sigstack.ss_size) {
*result = EPERM;
return false;
}
}
uint key_flag = local_stack.ss_flags & ~SS_FLAG_BITS;
if (key_flag != SS_DISABLE && key_flag != SS_ONSTACK && key_flag != 0) {
*result = EINVAL;
return false;
}
if (key_flag == SS_DISABLE) {
/* Zero the other params and don't even check them. */
local_stack.ss_sp = NULL;
local_stack.ss_size = 0;
} else {
if (local_stack.ss_size < MINSIGSTKSZ) {
*result = ENOMEM;
return false;
}
}
info->app_sigstack = local_stack;
LOG(THREAD, LOG_ASYNCH, 2, "Setting app signal stack to " PFX "-" PFX " %d=%s\n",
local_stack.ss_sp, local_stack.ss_sp + local_stack.ss_size - 1,
local_stack.ss_flags, (APP_HAS_SIGSTACK(info)) ? "enabled" : "disabled");
}
*result = 0;
return false; /* always cancel syscall */
}
/* Blocked signals:
* In general, we don't need to keep track of blocked signals.
* We only need to do so for those signals we intercept ourselves.
* Thus, info->app_sigblocked ONLY contains entries for signals
* we intercept ourselves.
* PR 304708: we now intercept all signals.
*/
static void
set_blocked(dcontext_t *dcontext, kernel_sigset_t *set, bool absolute)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
if (absolute) {
/* discard current blocked signals, re-set from new mask */
kernel_sigemptyset(&info->app_sigblocked);
} /* else, OR in the new set */
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
}
}
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
void
signal_set_mask(dcontext_t *dcontext, kernel_sigset_t *sigset)
{
set_blocked(dcontext, sigset, true /*absolute*/);
}
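/* Swaps the kernel-visible signal mask between the app's emulated mask (to_app)
 * and DR's fully-unblocked mask; also removes DR's init-time handlers and mask on
 * the very first transition to the app.
 */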
void
signal_swap_mask(dcontext_t *dcontext, bool to_app)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
if (to_app) {
if (init_info.app_sigaction != NULL) {
/* This is the first execution of the app.
* We need to remove our own init-time handler and mask.
*/
unset_initial_crash_handlers(dcontext);
return;
}
sigprocmask_syscall(SIG_SETMASK, &info->app_sigblocked, NULL,
sizeof(info->app_sigblocked));
} else {
unblock_all_signals(&info->app_sigblocked);
DOLOG(2, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 2, "thread %d's initial app signal mask:\n",
d_r_get_thread_id());
dump_sigset(dcontext, &info->app_sigblocked);
});
}
}
/* Scans over info->sigpending to see if there are any unblocked, pending
* signals, and sets dcontext->signals_pending if there are. Do this after
* modifying the set of signals blocked by the application.
*/
void
check_signals_pending(dcontext_t *dcontext, thread_sig_info_t *info)
{
int i;
if (dcontext->signals_pending != 0)
return;
for (i = 1; i <= MAX_SIGNUM; i++) {
if (info->sigpending[i] != NULL &&
!kernel_sigismember(&info->app_sigblocked, i) && !dcontext->signals_pending) {
/* We only update the application's set of blocked signals from
* syscall handlers, so we know we'll go back to d_r_dispatch and see
* this flag right away.
*/
LOG(THREAD, LOG_ASYNCH, 3, "\tsetting signals_pending flag\n");
dcontext->signals_pending = 1;
break;
}
}
}
/* Returns whether to execute the syscall */
bool
handle_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set,
kernel_sigset_t *oset, size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
kernel_sigset_t safe_set;
/* If we're intercepting all, we emulate the whole thing */
bool execute_syscall = !DYNAMO_OPTION(intercept_all_signals);
LOG(THREAD, LOG_ASYNCH, 2, "handle_sigprocmask\n");
if (oset != NULL)
info->pre_syscall_app_sigblocked = info->app_sigblocked;
if (app_set != NULL && d_r_safe_read(app_set, sizeof(safe_set), &safe_set)) {
if (execute_syscall) {
/* The syscall will execute, so remove from the set passed
* to it. We restore post-syscall.
* XXX i#1187: we could crash here touching app memory -- could
* use TRY, but the app could pass read-only memory and it
* would work natively! Better to swap in our own
* allocated data struct. There's a transparency issue w/
* races too if another thread looks at this memory. This
* won't happen by default b/c -intercept_all_signals is
* on by default so we don't try to solve all these
* issues.
*/
info->pre_syscall_app_sigprocmask = safe_set;
}
if (how == SIG_BLOCK) {
/* The set of blocked signals is the union of the current
* set and the set argument.
*/
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
} else if (how == SIG_UNBLOCK) {
/* The signals in set are removed from the current set of
* blocked signals.
*/
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigdelset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
} else if (how == SIG_SETMASK) {
/* The set of blocked signals is set to the argument set. */
kernel_sigemptyset(&info->app_sigblocked);
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
if (execute_syscall)
kernel_sigdelset(app_set, i);
}
}
}
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
/* make sure we deliver pending signals that are now unblocked
* FIXME: consider signal #S, which we intercept ourselves.
* If S arrives, then app blocks it prior to our delivering it,
* we then won't deliver it until app unblocks it...is this a
* problem? Could have arrived a little later and then we would
         * do the same thing, but this way the kernel may send one more than we
         * would get w/o dynamo? This goes away if we deliver signals
* prior to letting app do a syscall.
*/
check_signals_pending(dcontext, info);
}
if (!execute_syscall) {
handle_post_sigprocmask(dcontext, how, app_set, oset, sigsetsize);
return false; /* skip syscall */
} else
return true;
}
/* need to add in our signals that the app thinks are blocked */
void
handle_post_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set,
kernel_sigset_t *oset, size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
if (!DYNAMO_OPTION(intercept_all_signals)) {
/* Restore app memory */
safe_write_ex(app_set, sizeof(*app_set), &info->pre_syscall_app_sigprocmask,
NULL);
}
if (oset != NULL) {
if (DYNAMO_OPTION(intercept_all_signals))
safe_write_ex(oset, sizeof(*oset), &info->pre_syscall_app_sigblocked, NULL);
else {
/* the syscall wrote to oset already, so just add any additional */
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) &&
/* use the pre-syscall value: do not take into account changes
* from this syscall itself! (PR 523394)
*/
kernel_sigismember(&info->pre_syscall_app_sigblocked, i)) {
kernel_sigaddset(oset, i);
}
}
}
}
}
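/* Emulates sigsuspend's temporary mask for signals we intercept: saves the
 * current app mask, installs the emulated subset of *set, and removes those
 * signals from the set passed on to the kernel.
 */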
void
handle_sigsuspend(dcontext_t *dcontext, kernel_sigset_t *set, size_t sigsetsize)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
ASSERT(set != NULL);
LOG(THREAD, LOG_ASYNCH, 2, "handle_sigsuspend\n");
info->in_sigsuspend = true;
info->app_sigblocked_save = info->app_sigblocked;
kernel_sigemptyset(&info->app_sigblocked);
for (i = 1; i <= MAX_SIGNUM; i++) {
if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) {
kernel_sigaddset(&info->app_sigblocked, i);
kernel_sigdelset(set, i);
}
}
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "in sigsuspend, blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
/**** utility routines ***********************************************/
#ifdef DEBUG
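/* Logs each signal that is set in the given mask. */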
static void
dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set)
{
int sig;
for (sig = 1; sig <= MAX_SIGNUM; sig++) {
if (kernel_sigismember(set, sig))
LOG(THREAD, LOG_ASYNCH, 1, "\t%d = blocked\n", sig);
}
}
#endif /* DEBUG */
/* PR 205795: to avoid lock problems w/ in_fcache (it grabs a lock, we
* could have interrupted someone holding that), we first check
* whereami --- if whereami is DR_WHERE_FCACHE we still check the pc
* to distinguish generated routines, but at least we're certain
* it's not in DR where it could own a lock.
* We can't use is_on_dstack() here b/c we need to handle clean call
* arg crashes -- which is too bad since checking client dll and DR dll is
* not sufficient due to calls to ntdll, libc, or pc being in gencode.
*/
static bool
safe_is_in_fcache(dcontext_t *dcontext, app_pc pc, app_pc xsp)
{
if (dcontext->whereami != DR_WHERE_FCACHE ||
IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||) is_in_dynamo_dll(pc) ||
is_on_initstack(xsp))
return false;
/* Reasonably certain not in DR code, so no locks should be held */
return in_fcache(pc);
}
static bool
safe_is_in_coarse_stubs(dcontext_t *dcontext, app_pc pc, app_pc xsp)
{
if (dcontext->whereami != DR_WHERE_FCACHE ||
IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||) is_in_dynamo_dll(pc) ||
is_on_initstack(xsp))
return false;
/* Reasonably certain not in DR code, so no locks should be held */
return in_coarse_stubs(pc);
}
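/* Returns whether sp lies within DR's own signal alt stack for this thread
 * (always false without HAVE_SIGALTSTACK).
 */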
static bool
is_on_alt_stack(dcontext_t *dcontext, byte *sp)
{
#ifdef HAVE_SIGALTSTACK
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
return (sp >= (byte *)info->sigstack.ss_sp &&
/* deliberate equality check since stacks often init to top */
sp <= (byte *)(info->sigstack.ss_sp + info->sigstack.ss_size));
#else
return false;
#endif
}
/* The caller must initialize ucxt, including its fpstate pointer for x86 Linux. */
static void
sig_full_initialize(sig_full_cxt_t *sc_full, kernel_ucontext_t *ucxt)
{
sc_full->sc = SIGCXT_FROM_UCXT(ucxt);
#ifdef X86
sc_full->fp_simd_state = NULL; /* we have a ptr inside sigcontext_t */
#elif defined(ARM)
sc_full->fp_simd_state = &ucxt->coproc.uc_vfp;
#elif defined(AARCH64)
sc_full->fp_simd_state = &ucxt->uc_mcontext.__reserved;
#else
ASSERT_NOT_IMPLEMENTED(false);
#endif
}
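/* Copies the register state selected by flags from the sigcontext into the
 * priv_mcontext_t.
 */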
void
sigcontext_to_mcontext(priv_mcontext_t *mc, sig_full_cxt_t *sc_full,
dr_mcontext_flags_t flags)
{
sigcontext_t *sc = sc_full->sc;
ASSERT(mc != NULL && sc != NULL);
#ifdef X86
if (TEST(DR_MC_INTEGER, flags)) {
mc->xax = sc->SC_XAX;
mc->xbx = sc->SC_XBX;
mc->xcx = sc->SC_XCX;
mc->xdx = sc->SC_XDX;
mc->xsi = sc->SC_XSI;
mc->xdi = sc->SC_XDI;
mc->xbp = sc->SC_XBP;
# ifdef X64
mc->r8 = sc->SC_FIELD(r8);
mc->r9 = sc->SC_FIELD(r9);
mc->r10 = sc->SC_FIELD(r10);
mc->r11 = sc->SC_FIELD(r11);
mc->r12 = sc->SC_FIELD(r12);
mc->r13 = sc->SC_FIELD(r13);
mc->r14 = sc->SC_FIELD(r14);
mc->r15 = sc->SC_FIELD(r15);
# endif /* X64 */
}
if (TEST(DR_MC_CONTROL, flags)) {
mc->xsp = sc->SC_XSP;
mc->xflags = sc->SC_XFLAGS;
mc->pc = (app_pc)sc->SC_XIP;
}
#elif defined(AARCH64)
if (TEST(DR_MC_INTEGER, flags))
memcpy(&mc->r0, &sc->SC_FIELD(regs[0]), sizeof(mc->r0) * 31);
if (TEST(DR_MC_CONTROL, flags)) {
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
mc->sp = sc->SC_FIELD(sp);
mc->pc = (void *)sc->SC_FIELD(pc);
mc->nzcv = sc->SC_FIELD(pstate);
}
#elif defined(ARM)
if (TEST(DR_MC_INTEGER, flags)) {
mc->r0 = sc->SC_FIELD(arm_r0);
mc->r1 = sc->SC_FIELD(arm_r1);
mc->r2 = sc->SC_FIELD(arm_r2);
mc->r3 = sc->SC_FIELD(arm_r3);
mc->r4 = sc->SC_FIELD(arm_r4);
mc->r5 = sc->SC_FIELD(arm_r5);
mc->r6 = sc->SC_FIELD(arm_r6);
mc->r7 = sc->SC_FIELD(arm_r7);
mc->r8 = sc->SC_FIELD(arm_r8);
mc->r9 = sc->SC_FIELD(arm_r9);
mc->r10 = sc->SC_FIELD(arm_r10);
mc->r11 = sc->SC_FIELD(arm_fp);
mc->r12 = sc->SC_FIELD(arm_ip);
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
mc->r14 = sc->SC_FIELD(arm_lr);
}
if (TEST(DR_MC_CONTROL, flags)) {
mc->r13 = sc->SC_FIELD(arm_sp);
mc->r15 = sc->SC_FIELD(arm_pc);
mc->cpsr = sc->SC_FIELD(arm_cpsr);
}
# ifdef X64
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
if (TEST(DR_MC_MULTIMEDIA, flags))
sigcontext_to_mcontext_simd(mc, sc_full);
}
/* Note that unlike mcontext_to_context(), this routine does not fill in
* any state that is not present in the mcontext: in particular, it assumes
* the sigcontext already contains the native fpstate. If the caller
* is generating a synthetic sigcontext, the caller should call
* save_fpstate() before calling this routine.
*/
/* XXX: on ARM, sigreturn needs the T bit set in the sigcontext_t cpsr field in
* order to return to Thumb mode. But, our mcontext doesn't have the T bit (b/c
* usermode can't read it). Thus callers must either modify an mcontext
* obtained from sigcontext_to_mcontext() or must call set_pc_mode_in_cpsr() in
* order to create a proper sigcontext for sigreturn. All callers here do so.
* The only external non-Windows caller of thread_set_mcontext() is
* translate_from_synchall_to_dispatch() who first does a thread_get_mcontext()
* and tweaks that context, so cpsr should be there.
*/
void
mcontext_to_sigcontext(sig_full_cxt_t *sc_full, priv_mcontext_t *mc,
dr_mcontext_flags_t flags)
{
sigcontext_t *sc = sc_full->sc;
ASSERT(mc != NULL && sc != NULL);
#ifdef X86
if (TEST(DR_MC_INTEGER, flags)) {
sc->SC_XAX = mc->xax;
sc->SC_XBX = mc->xbx;
sc->SC_XCX = mc->xcx;
sc->SC_XDX = mc->xdx;
sc->SC_XSI = mc->xsi;
sc->SC_XDI = mc->xdi;
sc->SC_XBP = mc->xbp;
# ifdef X64
sc->SC_FIELD(r8) = mc->r8;
sc->SC_FIELD(r9) = mc->r9;
sc->SC_FIELD(r10) = mc->r10;
sc->SC_FIELD(r11) = mc->r11;
sc->SC_FIELD(r12) = mc->r12;
sc->SC_FIELD(r13) = mc->r13;
sc->SC_FIELD(r14) = mc->r14;
sc->SC_FIELD(r15) = mc->r15;
# endif /* X64 */
}
if (TEST(DR_MC_CONTROL, flags)) {
sc->SC_XSP = mc->xsp;
sc->SC_XFLAGS = mc->xflags;
sc->SC_XIP = (ptr_uint_t)mc->pc;
}
#elif defined(AARCH64)
if (TEST(DR_MC_INTEGER, flags)) {
memcpy(&sc->SC_FIELD(regs[0]), &mc->r0, sizeof(mc->r0) * 31);
}
if (TEST(DR_MC_CONTROL, flags)) {
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
sc->SC_FIELD(sp) = mc->sp;
sc->SC_FIELD(pc) = (ptr_uint_t)mc->pc;
sc->SC_FIELD(pstate) = mc->nzcv;
}
#elif defined(ARM)
if (TEST(DR_MC_INTEGER, flags)) {
sc->SC_FIELD(arm_r0) = mc->r0;
sc->SC_FIELD(arm_r1) = mc->r1;
sc->SC_FIELD(arm_r2) = mc->r2;
sc->SC_FIELD(arm_r3) = mc->r3;
sc->SC_FIELD(arm_r4) = mc->r4;
sc->SC_FIELD(arm_r5) = mc->r5;
sc->SC_FIELD(arm_r6) = mc->r6;
sc->SC_FIELD(arm_r7) = mc->r7;
sc->SC_FIELD(arm_r8) = mc->r8;
sc->SC_FIELD(arm_r9) = mc->r9;
sc->SC_FIELD(arm_r10) = mc->r10;
sc->SC_FIELD(arm_fp) = mc->r11;
sc->SC_FIELD(arm_ip) = mc->r12;
/* XXX i#2710: the link register should be under DR_MC_CONTROL */
sc->SC_FIELD(arm_lr) = mc->r14;
}
if (TEST(DR_MC_CONTROL, flags)) {
sc->SC_FIELD(arm_sp) = mc->r13;
sc->SC_FIELD(arm_pc) = mc->r15;
sc->SC_FIELD(arm_cpsr) = mc->cpsr;
}
# ifdef X64
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
if (TEST(DR_MC_MULTIMEDIA, flags))
mcontext_to_sigcontext_simd(sc_full, mc);
}
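/* Convenience wrappers that convert the full (DR_MC_ALL) state between a
 * ucontext and a priv_mcontext_t.
 */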
static void
ucontext_to_mcontext(priv_mcontext_t *mc, kernel_ucontext_t *uc)
{
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
sigcontext_to_mcontext(mc, &sc_full, DR_MC_ALL);
}
static void
mcontext_to_ucontext(kernel_ucontext_t *uc, priv_mcontext_t *mc)
{
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL);
}
#ifdef AARCHXX
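/* Accessors for the app's value of DR's stolen register within a sigcontext. */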
static void
set_sigcxt_stolen_reg(sigcontext_t *sc, reg_t val)
{
*(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0)) = val;
}
static reg_t
get_sigcxt_stolen_reg(sigcontext_t *sc)
{
return *(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0));
}
# ifndef AARCH64
static dr_isa_mode_t
get_pc_mode_from_cpsr(sigcontext_t *sc)
{
return TEST(EFLAGS_T, sc->SC_XFLAGS) ? DR_ISA_ARM_THUMB : DR_ISA_ARM_A32;
}
static void
set_pc_mode_in_cpsr(sigcontext_t *sc, dr_isa_mode_t isa_mode)
{
if (isa_mode == DR_ISA_ARM_THUMB)
sc->SC_XFLAGS |= EFLAGS_T;
else
sc->SC_XFLAGS &= ~EFLAGS_T;
}
# endif
#endif
/* Returns whether successful. If avoid_failure, tries to translate
* at least pc if not successful. Pass f if known.
*/
static bool
translate_sigcontext(dcontext_t *dcontext, kernel_ucontext_t *uc, bool avoid_failure,
fragment_t *f)
{
bool success = false;
priv_mcontext_t mcontext;
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
ucontext_to_mcontext(&mcontext, uc);
/* FIXME: if cannot find exact match, we're in trouble!
* probably ok to delay, since that indicates not a synchronous
* signal.
*/
    /* FIXME : in_fcache() (called by recreate_app_state) grabs fcache's
     * fcache_unit_areas.lock, so we could deadlock! Also on initexit_lock
* == PR 205795/1317
*/
/* For safe recreation we need to either be couldbelinking or hold the
* initexit lock (to keep someone from flushing current fragment), the
* initexit lock is easier
*/
d_r_mutex_lock(&thread_initexit_lock);
/* PR 214962: we assume we're going to relocate to this stored context,
* so we restore memory now
*/
if (translate_mcontext(dcontext->thread_record, &mcontext, true /*restore memory*/,
f)) {
mcontext_to_ucontext(uc, &mcontext);
success = true;
} else {
if (avoid_failure) {
ASSERT_NOT_REACHED(); /* is ok to break things, is UNIX :) */
/* FIXME : what to do? reg state might be wrong at least get pc */
if (safe_is_in_fcache(dcontext, (cache_pc)sc->SC_XIP, (app_pc)sc->SC_XSP)) {
sc->SC_XIP = (ptr_uint_t)recreate_app_pc(dcontext, mcontext.pc, f);
ASSERT(sc->SC_XIP != (ptr_uint_t)NULL);
} else {
/* FIXME : can't even get pc right, what do we do here? */
sc->SC_XIP = 0;
}
}
}
d_r_mutex_unlock(&thread_initexit_lock);
/* FIXME i#2095: restore the app's segment register value(s). */
LOG(THREAD, LOG_ASYNCH, 3,
"\ttranslate_sigcontext: just set frame's eip to " PFX "\n", sc->SC_XIP);
return success;
}
/* Takes an os-specific context */
void
thread_set_self_context(void *cxt)
{
#ifdef X86
if (!INTERNAL_OPTION(use_sigreturn_setcontext)) {
sigcontext_t *sc = (sigcontext_t *)cxt;
dr_jmp_buf_t buf;
buf.xbx = sc->SC_XBX;
buf.xcx = sc->SC_XCX;
buf.xdi = sc->SC_XDI;
buf.xsi = sc->SC_XSI;
buf.xbp = sc->SC_XBP;
/* XXX: this is not fully transparent: it assumes the target stack
* is valid and that we can clobber the slot beyond TOS.
* Using this instead of sigreturn is meant mainly as a diagnostic
* to help debug future issues with sigreturn (xref i#2080).
*/
buf.xsp = sc->SC_XSP - XSP_SZ; /* extra slot for retaddr */
buf.xip = sc->SC_XIP;
# ifdef X64
buf.r8 = sc->r8;
buf.r9 = sc->r9;
buf.r10 = sc->r10;
buf.r11 = sc->r11;
buf.r12 = sc->r12;
buf.r13 = sc->r13;
buf.r14 = sc->r14;
buf.r15 = sc->r15;
# endif
dr_longjmp(&buf, sc->SC_XAX);
return;
}
#endif
dcontext_t *dcontext = get_thread_private_dcontext();
/* Unlike Windows we can't say "only set this subset of the
     * full machine state", so we need to get the rest of the state.
*/
sigframe_rt_t frame;
#if defined(LINUX) || defined(DEBUG)
sigcontext_t *sc = (sigcontext_t *)cxt;
#endif
app_pc xsp_for_sigreturn;
#ifdef VMX86_SERVER
ASSERT_NOT_IMPLEMENTED(false); /* PR 405694: can't use regular sigreturn! */
#endif
memset(&frame, 0, sizeof(frame));
#ifdef LINUX
# ifdef X86
byte *xstate = get_xstate_buffer(dcontext);
frame.uc.uc_mcontext.fpstate = &((kernel_xstate_t *)xstate)->fpstate;
# endif /* X86 */
frame.uc.uc_mcontext = *sc;
#endif
save_fpstate(dcontext, &frame);
/* The kernel calls do_sigaltstack on sys_rt_sigreturn primarily to ensure
* the frame is ok, but the side effect is we can mess up our own altstack
     * settings if we're not careful. Passing an invalid (zero) ss_size appears to
     * be accepted by kernel 2.6.23.9 at least, so we leave frame.uc.uc_stack as all zeros.
*/
/* make sure sigreturn's mask setting doesn't change anything */
sigprocmask_syscall(SIG_SETMASK, NULL, (kernel_sigset_t *)&frame.uc.uc_sigmask,
sizeof(frame.uc.uc_sigmask));
LOG(THREAD_GET, LOG_ASYNCH, 2, "thread_set_self_context: pc=" PFX "\n", sc->SC_XIP);
LOG(THREAD_GET, LOG_ASYNCH, 3, "full sigcontext\n");
DOLOG(LOG_ASYNCH, 3,
{ dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(&frame)); });
/* set up xsp to point at &frame + sizeof(char*) */
xsp_for_sigreturn = ((app_pc)&frame) + sizeof(char *);
#ifdef X86
asm("mov %0, %%" ASM_XSP : : "m"(xsp_for_sigreturn));
# ifdef MACOS
ASSERT_NOT_IMPLEMENTED(false && "need to pass 2 params to SYS_sigreturn");
asm("jmp _dynamorio_sigreturn");
# else
/* i#2632: recent clang for 32-bit annoyingly won't do the right thing for
* "jmp dynamorio_sigreturn" and leaves relocs so we ensure it's PIC:
*/
void (*asm_jmp_tgt)() = dynamorio_sigreturn;
asm("mov %0, %%" ASM_XCX : : "m"(asm_jmp_tgt));
asm("jmp *%" ASM_XCX);
# endif /* MACOS/LINUX */
#elif defined(AARCH64)
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
#elif defined(ARM)
asm("ldr " ASM_XSP ", %0" : : "m"(xsp_for_sigreturn));
asm("b dynamorio_sigreturn");
#endif /* X86/ARM */
ASSERT_NOT_REACHED();
}
static void
thread_set_segment_registers(sigcontext_t *sc)
{
#ifdef X86
/* Fill in the segment registers */
__asm__ __volatile__("mov %%cs, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(cs))
:
: "eax");
# ifndef X64
__asm__ __volatile__("mov %%ss, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(ss))
:
: "eax");
__asm__ __volatile__("mov %%ds, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(ds))
:
: "eax");
__asm__ __volatile__("mov %%es, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(es))
:
: "eax");
# endif
__asm__ __volatile__("mov %%fs, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(fs))
:
: "eax");
__asm__ __volatile__("mov %%gs, %%ax; mov %%ax, %0"
: "=m"(sc->SC_FIELD(gs))
:
: "eax");
#endif
}
/* Takes a priv_mcontext_t */
void
thread_set_self_mcontext(priv_mcontext_t *mc)
{
kernel_ucontext_t ucxt;
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, &ucxt);
#if defined(LINUX) && defined(X86)
sc_full.sc->fpstate = NULL; /* for mcontext_to_sigcontext */
#endif
mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL);
thread_set_segment_registers(sc_full.sc);
/* sigreturn takes the mode from cpsr */
IF_ARM(
set_pc_mode_in_cpsr(sc_full.sc, dr_get_isa_mode(get_thread_private_dcontext())));
/* thread_set_self_context will fill in the real fp/simd state for x86 */
thread_set_self_context((void *)sc_full.sc);
ASSERT_NOT_REACHED();
}
#ifdef LINUX
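/* Returns whether the app's handler for sig has a usable restorer: either
 * SA_RESTORER is set or sa_restorer points at known sigreturn trampoline code
 * (the safe_read-based check is cached in restorer_valid[sig]).
 */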
static bool
sig_has_restorer(thread_sig_info_t *info, int sig)
{
# ifdef VMX86_SERVER
/* vmkernel ignores SA_RESTORER (PR 405694) */
return false;
# endif
if (info->app_sigaction[sig] == NULL)
return false;
if (TEST(SA_RESTORER, info->app_sigaction[sig]->flags))
return true;
if (info->app_sigaction[sig]->restorer == NULL)
return false;
/* we cache the result due to the safe_read cost */
if (info->restorer_valid[sig] == -1) {
/* With older kernels, don't seem to need flag: if sa_restorer !=
* NULL kernel will use it. But with newer kernels that's not
* true, and sometimes libc does pass non-NULL.
*/
# ifdef X86
/* Signal restorer code for Ubuntu 7.04:
* 0xffffe420 <__kernel_sigreturn+0>: pop %eax
* 0xffffe421 <__kernel_sigreturn+1>: mov $0x77,%eax
* 0xffffe426 <__kernel_sigreturn+6>: int $0x80
*
* 0xffffe440 <__kernel_rt_sigreturn+0>: mov $0xad,%eax
* 0xffffe445 <__kernel_rt_sigreturn+5>: int $0x80
*/
static const byte SIGRET_NONRT[8] = { 0x58, 0xb8, 0x77, 0x00,
0x00, 0x00, 0xcd, 0x80 };
static const byte SIGRET_RT[8] = { 0xb8, 0xad, 0x00, 0x00, 0x00, 0xcd, 0x80 };
# elif defined(ARM)
static const byte SIGRET_NONRT[8] = { 0x77, 0x70, 0xa0, 0xe3,
0x00, 0x00, 0x00, 0xef };
static const byte SIGRET_RT[8] = {
0xad, 0x70, 0xa0, 0xe3, 0x00, 0x00, 0x00, 0xef
};
# elif defined(AARCH64)
static const byte SIGRET_NONRT[8] = { 0 }; /* unused */
static const byte SIGRET_RT[8] =
/* FIXME i#1569: untested */
/* mov w8, #139 ; svc #0 */
{ 0x68, 0x11, 0x80, 0x52, 0x01, 0x00, 0x00, 0xd4 };
# endif
byte buf[MAX(sizeof(SIGRET_NONRT), sizeof(SIGRET_RT))] = { 0 };
if (d_r_safe_read(info->app_sigaction[sig]->restorer, sizeof(buf), buf) &&
((IS_RT_FOR_APP(info, sig) &&
memcmp(buf, SIGRET_RT, sizeof(SIGRET_RT)) == 0) ||
(!IS_RT_FOR_APP(info, sig) &&
memcmp(buf, SIGRET_NONRT, sizeof(SIGRET_NONRT)) == 0))) {
LOG(THREAD_GET, LOG_ASYNCH, 2,
"sig_has_restorer %d: " PFX " looks like restorer, using w/o flag\n", sig,
info->app_sigaction[sig]->restorer);
info->restorer_valid[sig] = 1;
} else
info->restorer_valid[sig] = 0;
}
return (info->restorer_valid[sig] == 1);
}
#endif
/* Returns the size of the frame for delivering to the app.
* For x64 this does NOT include kernel_fpstate_t.
*/
static uint
get_app_frame_size(thread_sig_info_t *info, int sig)
{
if (IS_RT_FOR_APP(info, sig))
return sizeof(sigframe_rt_t);
#ifdef LINUX
else
return sizeof(sigframe_plain_t);
#endif
}
static kernel_ucontext_t *
get_ucontext_from_rt_frame(sigframe_rt_t *frame)
{
#if defined(MACOS) && !defined(X64)
/* Padding makes it unsafe to access uc on frame from kernel */
return frame->puc;
#else
return &frame->uc;
#endif
}
sigcontext_t *
get_sigcontext_from_rt_frame(sigframe_rt_t *frame)
{
return SIGCXT_FROM_UCXT(get_ucontext_from_rt_frame(frame));
}
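/* Returns the sigcontext within an app frame that may be either rt or plain. */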
static sigcontext_t *
get_sigcontext_from_app_frame(thread_sig_info_t *info, int sig, void *frame)
{
sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */
bool rtframe = IS_RT_FOR_APP(info, sig);
if (rtframe)
sc = get_sigcontext_from_rt_frame((sigframe_rt_t *)frame);
#ifdef LINUX
else {
# ifdef X86
sc = (sigcontext_t *)&(((sigframe_plain_t *)frame)->sc);
# elif defined(ARM)
sc = SIGCXT_FROM_UCXT(&(((sigframe_plain_t *)frame)->uc));
# else
ASSERT_NOT_REACHED();
# endif
}
#endif
return sc;
}
static sigcontext_t *
get_sigcontext_from_pending(thread_sig_info_t *info, int sig)
{
ASSERT(info->sigpending[sig] != NULL);
return get_sigcontext_from_rt_frame(&info->sigpending[sig]->rt_frame);
}
/* Returns the address on the appropriate signal stack where we should copy
* the frame.
* If frame is NULL, assumes signal happened while in DR and has been delayed,
* and thus we need to provide fpstate regardless of whether the original
* had it. If frame is non-NULL, matches frame's amount of fpstate.
*/
static byte *
get_sigstack_frame_ptr(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
sigcontext_t *sc = (frame == NULL) ? get_sigcontext_from_pending(info, sig)
: get_sigcontext_from_rt_frame(frame);
byte *sp;
if (frame != NULL) {
/* signal happened while in cache, grab interrupted xsp */
sp = (byte *)sc->SC_XSP;
LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: using frame's xsp " PFX "\n",
sp);
} else {
/* signal happened while in DR, use stored xsp */
sp = (byte *)get_mcontext(dcontext)->xsp;
LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: using app xsp " PFX "\n", sp);
}
if (USE_APP_SIGSTACK(info, sig)) {
/* app has own signal stack which is enabled for this handler */
LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: app has own stack " PFX "\n",
info->app_sigstack.ss_sp);
LOG(THREAD, LOG_ASYNCH, 3, "\tcur sp=" PFX " vs app stack " PFX "-" PFX "\n", sp,
info->app_sigstack.ss_sp,
info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
if (sp > (byte *)info->app_sigstack.ss_sp &&
sp - (byte *)info->app_sigstack.ss_sp < info->app_sigstack.ss_size) {
/* we're currently in the alt stack, so use current xsp */
LOG(THREAD, LOG_ASYNCH, 3,
"\tinside alt stack, so using current xsp " PFX "\n", sp);
} else {
/* need to go to top, stack grows down */
sp = info->app_sigstack.ss_sp + info->app_sigstack.ss_size;
LOG(THREAD, LOG_ASYNCH, 3,
"\tnot inside alt stack, so using base xsp " PFX "\n", sp);
}
}
/* now get frame pointer: need to go down to first field of frame */
sp -= get_app_frame_size(info, sig);
#if defined(LINUX) && defined(X86)
if (frame == NULL) {
/* XXX i#641: we always include space for full xstate,
* even if we don't use it all, which does not match what the
* kernel does, but we're not tracking app actions to know whether
* we can skip lazy fpstate on the delay
*/
sp -= signal_frame_extra_size(true);
} else {
if (sc->fpstate != NULL) {
/* The kernel doesn't seem to lazily include avx, so we don't either,
* which simplifies all our frame copying: if YMM_ENABLED() and the
* fpstate pointer is non-NULL, then we assume there's space for
* full xstate
*/
sp -= signal_frame_extra_size(true);
DOCHECK(1, {
if (YMM_ENABLED()) {
ASSERT_CURIOSITY(sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1);
ASSERT(sc->fpstate->sw_reserved.extended_size <=
signal_frame_extra_size(true));
}
});
}
}
#endif /* LINUX && X86 */
/* PR 369907: don't forget the redzone */
sp -= REDZONE_SIZE;
/* Align to 16-bytes. The kernel does this for both 32 and 64-bit code
* these days, so we do as well.
*/
sp = (byte *)ALIGN_BACKWARD(sp, 16);
IF_X86(sp -= sizeof(reg_t)); /* Model retaddr. */
LOG(THREAD, LOG_ASYNCH, 3, "\tplacing frame at " PFX "\n", sp);
return sp;
}
#if defined(LINUX) && !defined(X64)
static void
convert_rt_mask_to_nonrt(sigframe_plain_t *f_plain, kernel_sigset_t *sigmask)
{
# ifdef X86
f_plain->sc.oldmask = sigmask->sig[0];
memcpy(&f_plain->extramask, &sigmask->sig[1], (_NSIG_WORDS - 1) * sizeof(uint));
# elif defined(ARM)
f_plain->uc.uc_mcontext.oldmask = sigmask->sig[0];
memcpy(&f_plain->uc.sigset_ex, &sigmask->sig[1], (_NSIG_WORDS - 1) * sizeof(uint));
# else
# error NYI
# endif
}
static void
convert_frame_to_nonrt(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old,
sigframe_plain_t *f_new)
{
# ifdef X86
sigcontext_t *sc_old = get_sigcontext_from_rt_frame(f_old);
f_new->pretcode = f_old->pretcode;
f_new->sig = f_old->sig;
memcpy(&f_new->sc, get_sigcontext_from_rt_frame(f_old), sizeof(sigcontext_t));
if (sc_old->fpstate != NULL) {
/* up to caller to include enough space for fpstate at end */
byte *new_fpstate =
(byte *)ALIGN_FORWARD(((byte *)f_new) + sizeof(*f_new), XSTATE_ALIGNMENT);
memcpy(new_fpstate, sc_old->fpstate, signal_frame_extra_size(false));
f_new->sc.fpstate = (kernel_fpstate_t *)new_fpstate;
}
convert_rt_mask_to_nonrt(f_new, &f_old->uc.uc_sigmask);
memcpy(&f_new->retcode, &f_old->retcode, RETCODE_SIZE);
/* now fill in our extra field */
f_new->sig_noclobber = f_new->sig;
# elif defined(ARM)
memcpy(&f_new->uc, &f_old->uc, sizeof(f_new->uc));
memcpy(f_new->retcode, f_old->retcode, sizeof(f_new->retcode));
/* now fill in our extra field */
f_new->sig_noclobber = f_old->info.si_signo;
# endif /* X86 */
LOG(THREAD, LOG_ASYNCH, 3, "\tconverted sig=%d rt frame to non-rt frame\n",
f_new->sig_noclobber);
}
#endif
/* Exported for call from master_signal_handler asm routine.
* For the rt signal frame f_old that was copied to f_new, updates
* the intra-frame absolute pointers to point to the new addresses
* in f_new.
* Only updates the pretcode to the stored app restorer if for_app.
*/
void
fixup_rtframe_pointers(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old,
sigframe_rt_t *f_new, bool for_app)
{
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
#if defined(X86) && defined(LINUX)
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
bool has_restorer = sig_has_restorer(info, sig);
# ifdef DEBUG
uint level = 3;
# if !defined(HAVE_MEMINFO)
/* avoid logging every single TRY probe fault */
if (!dynamo_initialized)
level = 5;
# endif
# endif
if (has_restorer && for_app)
f_new->pretcode = (char *)info->app_sigaction[sig]->restorer;
else {
# ifdef VMX86_SERVER
/* PR 404712: skip kernel's restorer code */
if (for_app)
f_new->pretcode = (char *)dynamorio_sigreturn;
# else
# ifdef X64
ASSERT(!for_app || doing_detach); /* detach uses a frame to go native */
# else
        /* only point at retcode if the old one did -- with a newer OS, pretcode
         * points at the vsyscall page and there is no restorer, yet the restorer
         * code is still left on the stack for gdb compatibility
*/
if (f_old->pretcode == f_old->retcode)
f_new->pretcode = f_new->retcode;
/* else, pointing at vsyscall, or we set it to dynamorio_sigreturn in
* master_signal_handler
*/
LOG(THREAD, LOG_ASYNCH, level, "\tleaving pretcode with old value\n");
# endif
# endif
}
# ifndef X64
f_new->pinfo = &(f_new->info);
f_new->puc = &(f_new->uc);
# endif
if (f_old->uc.uc_mcontext.fpstate != NULL) {
uint frame_size = get_app_frame_size(info, sig);
byte *frame_end = ((byte *)f_new) + frame_size;
byte *tgt = (byte *)ALIGN_FORWARD(frame_end, XSTATE_ALIGNMENT);
ASSERT(tgt - frame_end <= signal_frame_extra_size(true));
memcpy(tgt, f_old->uc.uc_mcontext.fpstate, sizeof(kernel_fpstate_t));
f_new->uc.uc_mcontext.fpstate = (kernel_fpstate_t *)tgt;
if (YMM_ENABLED()) {
kernel_xstate_t *xstate_new = (kernel_xstate_t *)tgt;
kernel_xstate_t *xstate_old =
(kernel_xstate_t *)f_old->uc.uc_mcontext.fpstate;
memcpy(&xstate_new->xstate_hdr, &xstate_old->xstate_hdr,
sizeof(xstate_new->xstate_hdr));
memcpy(&xstate_new->ymmh, &xstate_old->ymmh, sizeof(xstate_new->ymmh));
}
LOG(THREAD, LOG_ASYNCH, level + 1, "\tfpstate old=" PFX " new=" PFX "\n",
f_old->uc.uc_mcontext.fpstate, f_new->uc.uc_mcontext.fpstate);
} else {
/* if fpstate is not set up, we're delivering signal immediately,
* and we shouldn't need an fpstate since DR code won't modify it;
* only if we delayed will we need it, and when delaying we make
* room and set up the pointer in copy_frame_to_pending.
* xref i#641.
*/
LOG(THREAD, LOG_ASYNCH, level + 1, "\tno fpstate needed\n");
}
LOG(THREAD, LOG_ASYNCH, level, "\tretaddr = " PFX "\n", f_new->pretcode);
# ifdef RETURN_AFTER_CALL
info->signal_restorer_retaddr = (app_pc)f_new->pretcode;
# endif
/* 32-bit kernel copies to aligned buf first */
IF_X64(ASSERT(ALIGNED(f_new->uc.uc_mcontext.fpstate, 16)));
#elif defined(MACOS)
# ifndef X64
f_new->pinfo = &(f_new->info);
f_new->puc = &(f_new->uc);
# endif
f_new->puc->uc_mcontext =
(IF_X64_ELSE(_STRUCT_MCONTEXT64, _STRUCT_MCONTEXT32) *)&f_new->mc;
LOG(THREAD, LOG_ASYNCH, 3, "\tf_new=" PFX ", &handler=" PFX "\n", f_new,
&f_new->handler);
ASSERT(!for_app || ALIGNED(&f_new->handler, 16));
#endif /* X86 && LINUX */
}
/* Only operates on rt frames, so call before converting to plain.
* Must be called *after* translating the sigcontext.
*/
static void
fixup_siginfo(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
/* For some signals, si_addr is a PC which we must translate. */
if (sig != SIGILL && sig != SIGTRAP && sig != SIGFPE)
return; /* nothing to do */
sigcontext_t *sc = get_sigcontext_from_rt_frame(frame);
kernel_siginfo_t *siginfo = SIGINFO_FROM_RT_FRAME(frame);
LOG(THREAD, LOG_ASYNCH, 3, "%s: updating si_addr from " PFX " to " PFX "\n",
__FUNCTION__, siginfo->si_addr, sc->SC_XIP);
siginfo->si_addr = (void *)sc->SC_XIP;
#ifdef LINUX
siginfo->si_addr_lsb = sc->SC_XIP & 0x1;
#endif
}
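/* Copies an rt frame to dst, collapsing the 32-bit MacOS kernel's mid-frame
 * padding when the source came directly from the kernel (see below).
 */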
static void
memcpy_rt_frame(sigframe_rt_t *frame, byte *dst, bool from_pending)
{
#if defined(MACOS) && !defined(X64)
if (!from_pending) {
/* The kernel puts padding in the middle. We collapse that padding here
* and re-align when we copy to the app stack.
* We should not reference fields from mc onward in what the kernel put
* on the stack, as our sigframe_rt_t layout does not match the kernel's
* variable mid-struct padding.
*/
sigcontext_t *sc = SIGCXT_FROM_UCXT(frame->puc);
memcpy(dst, frame, offsetof(sigframe_rt_t, puc) + sizeof(frame->puc));
memcpy(&((sigframe_rt_t *)dst)->mc, sc,
sizeof(sigframe_rt_t) - offsetof(sigframe_rt_t, mc));
return;
}
#endif
memcpy(dst, frame, sizeof(sigframe_rt_t));
}
/* Copies frame to sp.
* PR 304708: we now leave in rt form right up until we copy to the
* app stack, so that we can deliver to a client at a safe spot
* in rt form, so this routine now converts to a plain frame if necessary.
* If no restorer, touches up pretcode
* (and if rt_frame, touches up pinfo and puc)
* Also touches up fpstate pointer
*/
static void
copy_frame_to_stack(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, byte *sp,
bool from_pending)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
bool rtframe = IS_RT_FOR_APP(info, sig);
uint frame_size = get_app_frame_size(info, sig);
#if defined(LINUX) && defined(X86_32)
bool has_restorer = sig_has_restorer(info, sig);
#endif
byte *flush_pc;
bool stack_unwritable = false;
uint size = frame_size;
#if defined(LINUX) && defined(X86)
sigcontext_t *sc = get_sigcontext_from_rt_frame(frame);
size += (sc->fpstate == NULL ? 0 : signal_frame_extra_size(true));
#endif /* LINUX && X86 */
LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_stack: rt=%d, src=" PFX ", sp=" PFX "\n",
rtframe, frame, sp);
fixup_siginfo(dcontext, sig, frame);
/* We avoid querying memory as it incurs global contended locks. */
flush_pc = is_executable_area_writable_overlap(sp, sp + size);
if (flush_pc != NULL) {
LOG(THREAD, LOG_ASYNCH, 2,
"\tcopy_frame_to_stack: part of stack is unwritable-by-us @" PFX "\n",
flush_pc);
flush_fragments_and_remove_region(dcontext, flush_pc, sp + size - flush_pc,
false /* don't own initexit_lock */,
false /* keep futures */);
}
TRY_EXCEPT(dcontext, /* try */
{
if (rtframe) {
ASSERT(frame_size == sizeof(*frame));
memcpy_rt_frame(frame, sp, from_pending);
}
IF_NOT_X64(
IF_LINUX(else convert_frame_to_nonrt(dcontext, sig, frame,
(sigframe_plain_t *)sp);));
},
/* except */ { stack_unwritable = true; });
if (stack_unwritable) {
/* Override the no-nested check in record_pending_signal(): it's ok b/c
* receive_pending_signal() calls to here at a consistent point,
* and we won't return there.
*/
info->nested_pending_ok = true;
/* Just throw away this signal and deliver SIGSEGV instead with the
* same sigcontext, like the kernel does.
*/
free_pending_signal(info, sig);
os_forge_exception(0, UNREADABLE_MEMORY_EXECUTION_EXCEPTION);
ASSERT_NOT_REACHED();
}
kernel_sigset_t *mask_to_restore = NULL;
if (info->pre_syscall_app_sigprocmask_valid) {
mask_to_restore = &info->pre_syscall_app_sigprocmask;
info->pre_syscall_app_sigprocmask_valid = false;
} else {
mask_to_restore = &info->app_sigblocked;
}
/* if !has_restorer we do NOT add the restorer code to the exec list here,
* to avoid removal problems (if handler never returns) and consistency problems
* (would have to mark as selfmod right now if on stack).
     * for PROGRAM_SHEPHERDING we recognize it as a pattern, and for consistency we
     * allow the entire region once it tries to execute -- not a performance worry since it should
* very rarely be on the stack: should either be libc restorer code or with recent
* OS in rx vsyscall page.
*/
/* fix up pretcode, pinfo, puc, fpstate */
if (rtframe) {
sigframe_rt_t *f_new = (sigframe_rt_t *)sp;
fixup_rtframe_pointers(dcontext, sig, frame, f_new, true /*for app*/);
#ifdef HAVE_SIGALTSTACK
/* Make sure the frame's sigstack reflects the app stack, both for transparency
* of the app examining it and for correctness if we detach mid-handler.
*/
LOG(THREAD, LOG_ASYNCH, 3, "updated uc_stack @" PFX " to " PFX "\n",
&f_new->uc.uc_stack, info->app_sigstack.ss_sp);
f_new->uc.uc_stack = info->app_sigstack;
#endif
/* Store the prior mask, for restoring in sigreturn. */
memcpy(&f_new->uc.uc_sigmask, mask_to_restore, sizeof(info->app_sigblocked));
} else {
#ifdef X64
ASSERT_NOT_REACHED();
#endif
#if defined(LINUX) && !defined(X64)
sigframe_plain_t *f_new = (sigframe_plain_t *)sp;
# ifdef X86
# ifndef VMX86_SERVER
sigframe_plain_t *f_old = (sigframe_plain_t *)frame;
# endif
if (has_restorer)
f_new->pretcode = (char *)info->app_sigaction[sig]->restorer;
else {
# ifdef VMX86_SERVER
/* PR 404712: skip kernel's restorer code */
f_new->pretcode = (char *)dynamorio_nonrt_sigreturn;
# else
/* see comments in rt case above */
if (f_old->pretcode == f_old->retcode)
f_new->pretcode = f_new->retcode;
else {
/* whether we set to dynamorio_sigreturn in master_signal_handler
* or it's still vsyscall page, we have to convert to non-rt
*/
f_new->pretcode = (char *)dynamorio_nonrt_sigreturn;
} /* else, pointing at vsyscall most likely */
LOG(THREAD, LOG_ASYNCH, 3, "\tleaving pretcode with old value\n");
# endif
}
/* convert_frame_to_nonrt*() should have updated fpstate pointer.
         * The inlined fpstate is no longer used on new kernels, and we avoid
         * using it on older kernels as well.
*/
ASSERT(f_new->sc.fpstate != &f_new->fpstate);
/* 32-bit kernel copies to aligned buf so no assert on fpstate alignment */
LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = " PFX "\n", f_new->pretcode);
/* There is no stored alt stack in a plain frame to update. */
# ifdef RETURN_AFTER_CALL
info->signal_restorer_retaddr = (app_pc)f_new->pretcode;
# endif
# endif /* X86 */
/* Store the prior mask, for restoring in sigreturn. */
convert_rt_mask_to_nonrt(f_new, mask_to_restore);
#endif /* LINUX && !X64 */
}
#ifdef MACOS
/* Update handler field, which is passed to the libc trampoline, to app */
ASSERT(info->app_sigaction[sig] != NULL);
((sigframe_rt_t *)sp)->handler = (app_pc)info->app_sigaction[sig]->handler;
#endif
}
/* Copies frame to pending slot.
* PR 304708: we now leave in rt form right up until we copy to the
* app stack, so that we can deliver to a client at a safe spot
* in rt form.
*/
static void
copy_frame_to_pending(dcontext_t *dcontext, int sig,
sigframe_rt_t *frame _IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
sigframe_rt_t *dst = &(info->sigpending[sig]->rt_frame);
memcpy_rt_frame(frame, (byte *)dst, false /*!already pending*/);
#if defined(LINUX) && defined(X86)
/* For lazy fpstate, it's possible there was no fpstate when the kernel
* sent us the frame, but in between then and now the app executed some
* fp or xmm/ymm instrs. Today we always add fpstate just in case.
* XXX i#641 optimization: track whether any fp/xmm/ymm
* instrs happened and avoid this.
*/
/* we'll fill in updated fpstate at delivery time, but we go ahead and
* copy now in case our own retrieval somehow misses some fields
*/
if (frame->uc.uc_mcontext.fpstate != NULL) {
memcpy(&info->sigpending[sig]->xstate, frame->uc.uc_mcontext.fpstate,
/* XXX: assuming full xstate if avx is enabled */
signal_frame_extra_size(false));
}
/* we must set the pointer now so that later save_fpstate, etc. work */
dst->uc.uc_mcontext.fpstate = (kernel_fpstate_t *)&info->sigpending[sig]->xstate;
#endif /* LINUX && X86 */
#ifdef CLIENT_INTERFACE
info->sigpending[sig]->access_address = access_address;
#endif
info->sigpending[sig]->use_sigcontext = false;
#ifdef MACOS
    /* We rely on puc to find sc so we have to fix it up */
fixup_rtframe_pointers(dcontext, sig, frame, dst, false /*!for app*/);
#endif
LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_pending from " PFX "\n", frame);
DOLOG(3, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 3, "sigcontext:\n");
dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(dst));
});
}
/**** real work ***********************************************/
/* transfer control from signal handler to fcache return routine */
static void
transfer_from_sig_handler_to_fcache_return(dcontext_t *dcontext, kernel_ucontext_t *uc,
sigcontext_t *sc_interrupted, int sig,
app_pc next_pc, linkstub_t *last_exit,
bool is_kernel_xfer)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
#ifdef CLIENT_INTERFACE
if (is_kernel_xfer) {
sig_full_cxt_t sc_interrupted_full = { sc_interrupted, NULL /*not provided*/ };
sig_full_cxt_t sc_full;
sig_full_initialize(&sc_full, uc);
sc->SC_XIP = (ptr_uint_t)next_pc;
if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_interrupted_full,
NULL, NULL, next_pc, sc->SC_XSP, sc_full, NULL, sig))
next_pc = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP);
}
#endif
dcontext->next_tag = canonicalize_pc_target(dcontext, next_pc);
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
/* Set our sigreturn context to point to fcache_return!
* Then we'll go back through kernel, appear in fcache_return,
* and go through d_r_dispatch & interp, without messing up dynamo stack.
* Note that even if this is a write in the shared cache, we
* still go to the private fcache_return for simplicity.
*/
sc->SC_XIP = (ptr_uint_t)fcache_return_routine(dcontext);
#ifdef AARCHXX
/* We do not have to set dr_reg_stolen in dcontext's mcontext here
* because dcontext's mcontext is stale and we used the mcontext
* created from recreate_app_state_internal with the original sigcontext.
*/
/* We restore dr_reg_stolen's app value in recreate_app_state_internal,
     * so now we need to set dr_reg_stolen to hold DR's TLS before sigreturn
* from DR's handler.
*/
ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t)*get_dr_tls_base_addr());
set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr());
# ifndef AARCH64
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
#endif
#if defined(X64) || defined(ARM)
/* x64 always uses shared gencode */
get_local_state_extended()->spill_space.IF_X86_ELSE(xax, r0) =
sc->IF_X86_ELSE(SC_XAX, SC_R0);
# ifdef AARCH64
/* X1 needs to be spilled because of br x1 in exit stubs. */
get_local_state_extended()->spill_space.r1 = sc->SC_R1;
# endif
#else
get_mcontext(dcontext)->IF_X86_ELSE(xax, r0) = sc->IF_X86_ELSE(SC_XAX, SC_R0);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "\tsaved xax " PFX "\n", sc->IF_X86_ELSE(SC_XAX, SC_R0));
sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t)last_exit;
LOG(THREAD, LOG_ASYNCH, 2, "\tset next_tag to " PFX ", resuming in fcache_return\n",
next_pc);
LOG(THREAD, LOG_ASYNCH, 3, "transfer_from_sig_handler_to_fcache_return\n");
DOLOG(3, LOG_ASYNCH, {
LOG(THREAD, LOG_ASYNCH, 3, "sigcontext @" PFX ":\n", sc);
dump_sigcontext(dcontext, sc);
});
}
#ifdef CLIENT_INTERFACE
static dr_signal_action_t
send_signal_to_client(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *raw_sc, byte *access_address, bool blocked,
fragment_t *fragment)
{
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame);
dr_siginfo_t si;
dr_signal_action_t action;
/* XXX #1615: we need a full ucontext to store pre-xl8 simd values.
* Right now we share the same simd values with post-xl8.
*/
sig_full_cxt_t raw_sc_full;
sig_full_initialize(&raw_sc_full, uc);
raw_sc_full.sc = raw_sc;
if (!dr_signal_hook_exists())
return DR_SIGNAL_DELIVER;
LOG(THREAD, LOG_ASYNCH, 2, "sending signal to client\n");
si.sig = sig;
si.drcontext = (void *)dcontext;
/* It's safe to allocate since we do not send signals that interrupt DR.
* With priv_mcontext_t x2 that's a little big for stack alloc.
*/
si.mcontext = heap_alloc(dcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER));
si.raw_mcontext = heap_alloc(dcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER));
dr_mcontext_init(si.mcontext);
dr_mcontext_init(si.raw_mcontext);
/* i#207: fragment tag and fcache start pc on fault. */
si.fault_fragment_info.tag = NULL;
si.fault_fragment_info.cache_start_pc = NULL;
/* i#182/PR 449996: we provide the pre-translation context */
if (raw_sc != NULL) {
fragment_t wrapper;
si.raw_mcontext_valid = true;
sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.raw_mcontext),
&raw_sc_full, si.raw_mcontext->flags);
/* i#207: fragment tag and fcache start pc on fault. */
/* FIXME: we should avoid the fragment_pclookup since it is expensive
* and since we already did the work of a lookup when translating
*/
if (fragment == NULL)
fragment = fragment_pclookup(dcontext, si.raw_mcontext->pc, &wrapper);
if (fragment != NULL && !hide_tag_from_client(fragment->tag)) {
si.fault_fragment_info.tag = fragment->tag;
si.fault_fragment_info.cache_start_pc = FCACHE_ENTRY_PC(fragment);
si.fault_fragment_info.is_trace = TEST(FRAG_IS_TRACE, fragment->flags);
si.fault_fragment_info.app_code_consistent =
!TESTANY(FRAG_WAS_DELETED | FRAG_SELFMOD_SANDBOXED, fragment->flags);
}
} else
si.raw_mcontext_valid = false;
/* The client has no way to calculate this when using
* instrumentation that deliberately faults (to shift a rare event
* out of the fastpath) so we provide it. When raw_mcontext is
* available the client can calculate it, but we provide it as a
* convenience anyway.
*/
si.access_address = access_address;
si.blocked = blocked;
ucontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.mcontext), uc);
/* We disallow the client calling dr_redirect_execution(), so we
* will not leak si
*/
action = instrument_signal(dcontext, &si);
if (action == DR_SIGNAL_DELIVER || action == DR_SIGNAL_REDIRECT) {
/* propagate client changes */
CLIENT_ASSERT(si.mcontext->flags == DR_MC_ALL,
"signal mcontext flags cannot be changed");
mcontext_to_ucontext(uc, dr_mcontext_as_priv_mcontext(si.mcontext));
} else if (action == DR_SIGNAL_SUPPRESS && raw_sc != NULL) {
/* propagate client changes */
CLIENT_ASSERT(si.raw_mcontext->flags == DR_MC_ALL,
"signal mcontext flags cannot be changed");
mcontext_to_sigcontext(&raw_sc_full,
dr_mcontext_as_priv_mcontext(si.raw_mcontext),
si.raw_mcontext->flags);
}
heap_free(dcontext, si.mcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER));
heap_free(dcontext, si.raw_mcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER));
return action;
}
/* Returns false if caller should exit */
static bool
handle_client_action_from_cache(dcontext_t *dcontext, int sig, dr_signal_action_t action,
sigframe_rt_t *our_frame, sigcontext_t *sc_orig,
sigcontext_t *sc_interrupted, bool blocked)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
/* in order to pass to the client, we come all the way here for signals
* the app has no handler for
*/
if (action == DR_SIGNAL_REDIRECT) {
/* send_signal_to_client copied mcontext into our
* master_signal_handler frame, so we set up for fcache_return w/
* our frame's state
*/
transfer_from_sig_handler_to_fcache_return(
dcontext, uc, sc_interrupted, sig, (app_pc)sc->SC_XIP,
(linkstub_t *)get_asynch_linkstub(), true);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
return false;
} else if (action == DR_SIGNAL_SUPPRESS ||
(!blocked && info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n",
(action == DR_SIGNAL_SUPPRESS) ? "client suppressing signal"
: "app signal handler is SIG_IGN");
/* restore original (untranslated) sc */
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
return false;
} else if (!blocked && /* no BYPASS for blocked */
(action == DR_SIGNAL_BYPASS ||
(info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL))) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n",
(action == DR_SIGNAL_BYPASS) ? "client forcing default"
: "app signal handler is SIG_DFL");
if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig, false)) {
/* if we haven't terminated, restore original (untranslated) sc
* on request.
*/
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xsp=" PFX ", xip=" PFX "\n",
__FUNCTION__, get_sigcontext_from_rt_frame(our_frame)->SC_XSP,
get_sigcontext_from_rt_frame(our_frame)->SC_XIP);
}
return false;
}
CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value");
return true;
}
#endif
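/* Reports a fatal fault at pc (dumping core per dumpcore_flag) and terminates
 * the process.  For static DR (i#2119) the surrounding app's handler may be
 * invoked first so it can produce its own report.
 */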
static void
abort_on_fault(dcontext_t *dcontext, uint dumpcore_flag, app_pc pc, byte *target, int sig,
sigframe_rt_t *frame, const char *prefix, const char *signame,
const char *where)
{
kernel_ucontext_t *ucxt = &frame->uc;
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
bool stack_overflow = (sig == SIGSEGV && is_stack_overflow(dcontext, target));
#if defined(STATIC_LIBRARY) && defined(LINUX)
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
uint orig_dumpcore_flag = dumpcore_flag;
if (init_info.app_sigaction != NULL)
info = &init_info; /* use init-time handler */
ASSERT(info->app_sigaction != NULL);
#endif
const char *fmt = "%s %s at PC " PFX "\n"
"Received SIG%s at%s pc " PFX " in thread " TIDFMT "\n"
"Base: " PFX "\n"
"Registers:"
#ifdef X86
"eax=" PFX " ebx=" PFX " ecx=" PFX " edx=" PFX "\n"
"\tesi=" PFX " edi=" PFX " esp=" PFX " ebp=" PFX "\n"
# ifdef X64
"\tr8 =" PFX " r9 =" PFX " r10=" PFX " r11=" PFX "\n"
"\tr12=" PFX " r13=" PFX " r14=" PFX " r15=" PFX "\n"
# endif /* X64 */
#elif defined(ARM)
# ifndef X64
" r0 =" PFX " r1 =" PFX " r2 =" PFX " r3 =" PFX "\n"
"\tr4 =" PFX " r5 =" PFX " r6 =" PFX " r7 =" PFX "\n"
"\tr8 =" PFX " r9 =" PFX " r10=" PFX " r11=" PFX "\n"
"\tr12=" PFX " r13=" PFX " r14=" PFX " r15=" PFX "\n"
# else
# error NYI on AArch64
# endif
#endif /* X86/ARM */
"\teflags=" PFX;
#if defined(STATIC_LIBRARY) && defined(LINUX)
/* i#2119: if we're invoking an app handler, disable a fatal coredump. */
if (INTERNAL_OPTION(invoke_app_on_crash) && info->app_sigaction[sig] != NULL &&
IS_RT_FOR_APP(info, sig) && TEST(dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) &&
!DYNAMO_OPTION(live_dump))
dumpcore_flag = 0;
#endif
report_dynamorio_problem(
dcontext, dumpcore_flag | (stack_overflow ? DUMPCORE_STACK_OVERFLOW : 0), pc,
(app_pc)sc->SC_FP, fmt, prefix, stack_overflow ? STACK_OVERFLOW_NAME : CRASH_NAME,
pc, signame, where, pc, d_r_get_thread_id(), get_dynamorio_dll_start(),
#ifdef X86
sc->SC_XAX, sc->SC_XBX, sc->SC_XCX, sc->SC_XDX, sc->SC_XSI, sc->SC_XDI,
sc->SC_XSP, sc->SC_XBP,
# ifdef X64
sc->SC_FIELD(r8), sc->SC_FIELD(r9), sc->SC_FIELD(r10), sc->SC_FIELD(r11),
sc->SC_FIELD(r12), sc->SC_FIELD(r13), sc->SC_FIELD(r14), sc->SC_FIELD(r15),
# endif /* X86 */
#elif defined(ARM)
# ifndef X64
sc->SC_FIELD(arm_r0), sc->SC_FIELD(arm_r1), sc->SC_FIELD(arm_r2),
sc->SC_FIELD(arm_r3), sc->SC_FIELD(arm_r4), sc->SC_FIELD(arm_r5),
sc->SC_FIELD(arm_r6), sc->SC_FIELD(arm_r7), sc->SC_FIELD(arm_r8),
sc->SC_FIELD(arm_r9), sc->SC_FIELD(arm_r10), sc->SC_FIELD(arm_fp),
sc->SC_FIELD(arm_ip), sc->SC_FIELD(arm_sp), sc->SC_FIELD(arm_lr),
sc->SC_FIELD(arm_pc),
# else
# error NYI on AArch64
# endif /* X64 */
#endif /* X86/ARM */
sc->SC_XFLAGS);
#if defined(STATIC_LIBRARY) && defined(LINUX)
/* i#2119: For static DR, the surrounding app's handler may well be
* safe to invoke even when DR state is messed up: it's worth a try, as it
* likely has useful reporting features for users of the app.
* We limit to Linux and RT for simplicity: it can be expanded later if static
* library use expands.
*/
if (INTERNAL_OPTION(invoke_app_on_crash) && info->app_sigaction[sig] != NULL &&
IS_RT_FOR_APP(info, sig)) {
SYSLOG(SYSLOG_WARNING, INVOKING_APP_HANDLER, 2, get_application_name(),
get_application_pid());
(*info->app_sigaction[sig]->handler)(sig, &frame->info, ucxt);
/* If the app handler didn't terminate, now get a fatal core. */
if (TEST(orig_dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) &&
!DYNAMO_OPTION(live_dump))
os_dump_core("post-app-handler attempt at core dump");
}
#endif
os_terminate(dcontext, TERMINATE_PROCESS);
ASSERT_NOT_REACHED();
}
static void
abort_on_DR_fault(dcontext_t *dcontext, app_pc pc, byte *target, int sig,
sigframe_rt_t *frame, const char *signame, const char *where)
{
abort_on_fault(dcontext, DUMPCORE_INTERNAL_EXCEPTION, pc, target, sig, frame,
exception_label_core, signame, where);
ASSERT_NOT_REACHED();
}
/* Returns whether unlinked or mangled syscall.
* Restored in receive_pending_signal.
*/
static bool
unlink_fragment_for_signal(dcontext_t *dcontext, fragment_t *f,
byte *pc /*interruption pc*/)
{
/* We only come here if we interrupted a fragment in the cache,
* or interrupted transition gencode (i#2019),
* which means that this thread's DR state is safe, and so it
* should be ok to acquire a lock. xref PR 596069.
*
* There is a race where if two threads hit a signal in the same
* shared fragment, the first could re-link after the second
* un-links but before the second exits, and the second could then
* execute the syscall, resulting in arbitrary delay prior to
* signal delivery. We don't want to allocate global memory,
* but we could use a static array of counters (since should
* be small # of interrupted shared fragments at any one time)
* used as refcounts so we only unlink when all are done.
* Not bothering to implement now: going to live w/ chance of
* long signal delays. xref PR 596069.
*/
bool changed = false;
bool waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
/* may not be linked if trace_relink or something */
if (TEST(FRAG_COARSE_GRAIN, f->flags)) {
/* XXX PR 213040: we don't support unlinking coarse, so we try
* not to come here, but for indirect branch and other spots
* where we don't yet support translation (since can't fault)
* we end up w/ no bound on delivery...
*/
} else if (TEST(FRAG_LINKED_OUTGOING, f->flags)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tunlinking outgoing for interrupted F%d\n", f->id);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock);
// Double-check flags to ensure some other thread didn't unlink
// while we waited for the change_linking_lock.
if (TEST(FRAG_LINKED_OUTGOING, f->flags)) {
unlink_fragment_outgoing(dcontext, f);
changed = true;
}
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
} else {
LOG(THREAD, LOG_ASYNCH, 3, "\toutgoing already unlinked for interrupted F%d\n",
f->id);
}
if (TEST(FRAG_HAS_SYSCALL, f->flags)) {
/* Syscalls are signal barriers!
* Make sure the next syscall (if any) in f is not executed!
* instead go back to d_r_dispatch right before the syscall
*/
/* syscall mangling does a bunch of decodes but only one write,
* changing the target of a short jmp, which is atomic
* since a one-byte write, so we don't need the change_linking_lock.
*/
if (mangle_syscall_code(dcontext, f, pc, false /*do not skip exit cti*/))
changed = true;
}
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return changed;
}
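/* Undoes unlink_fragment_for_signal(): re-links the interrupted fragment's
 * outgoing links, restores any mangled syscall, and clears info->interrupted.
 */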
static void
relink_interrupted_fragment(dcontext_t *dcontext, thread_sig_info_t *info)
{
if (info->interrupted == NULL)
return;
/* i#2066: if we were building a trace, it may already be re-linked */
if (!TEST(FRAG_LINKED_OUTGOING, info->interrupted->flags)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tre-linking outgoing for interrupted F%d\n",
info->interrupted->id);
SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, acquire,
change_linking_lock);
/* Double-check flags to ensure some other thread didn't link
* while we waited for the change_linking_lock.
*/
if (!TEST(FRAG_LINKED_OUTGOING, info->interrupted->flags)) {
link_fragment_outgoing(dcontext, info->interrupted, false);
}
SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, release,
change_linking_lock);
}
if (TEST(FRAG_HAS_SYSCALL, info->interrupted->flags)) {
/* restore syscall (they're a barrier to signals, so signal
* handler has cur frag exit before it does a syscall)
*/
if (info->interrupted_pc != NULL) {
mangle_syscall_code(dcontext, info->interrupted, info->interrupted_pc,
true /*skip exit cti*/);
}
}
info->interrupted = NULL;
info->interrupted_pc = NULL;
}
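/* Returns whether the interruption pc is at an inlined syscall inside f: either
 * right before the syscall instruction (but past the skip jmp) or right after it.
 */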
static bool
interrupted_inlined_syscall(dcontext_t *dcontext, fragment_t *f,
byte *pc /*interruption pc*/)
{
bool pre_or_post_syscall = false;
if (TEST(FRAG_HAS_SYSCALL, f->flags)) {
/* PR 596147: if the thread is currently in an inlined
* syscall when a signal comes in, we can't delay and bound the
* delivery time: we need to deliver now. Should decode
* backward and see if syscall. We assume our translation of
* the interruption state is fine to re-start: i.e., the syscall
* is complete if kernel has pc at post-syscall point, and
* kernel set EINTR in eax if necessary.
*/
/* Interrupted fcache, so ok to alloc memory for decode */
instr_t instr;
byte *nxt_pc;
instr_init(dcontext, &instr);
nxt_pc = decode(dcontext, pc, &instr);
if (nxt_pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr)) {
/* pre-syscall but post-jmp so can't skip syscall */
pre_or_post_syscall = true;
} else {
size_t syslen = syscall_instr_length(FRAG_ISA_MODE(f->flags));
instr_reset(dcontext, &instr);
nxt_pc = decode(dcontext, pc - syslen, &instr);
if (nxt_pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr)) {
#if defined(X86) && !defined(MACOS)
/* decoding backward so check for exit cti jmp prior
* to syscall to ensure no mismatch
*/
instr_reset(dcontext, &instr);
nxt_pc = decode(dcontext, pc - syslen - JMP_LONG_LENGTH, &instr);
if (nxt_pc != NULL && instr_valid(&instr) &&
instr_get_opcode(&instr) == OP_jmp) {
/* post-inlined-syscall */
pre_or_post_syscall = true;
}
#else
/* On Mac and ARM we have some TLS spills in between so we just
* trust that this is a syscall (esp on ARM w/ aligned instrs).
*/
pre_or_post_syscall = true;
#endif
}
}
instr_free(dcontext, &instr);
}
return pre_or_post_syscall;
}
/* i#1145: auto-restart syscalls interrupted by signals */
static bool
adjust_syscall_for_restart(dcontext_t *dcontext, thread_sig_info_t *info, int sig,
sigcontext_t *sc, fragment_t *f, reg_t orig_retval_reg)
{
byte *pc = (byte *)sc->SC_XIP;
int sys_inst_len;
if (sc->IF_X86_ELSE(SC_XAX, SC_R0) != -EINTR) {
/* The syscall succeeded, so no reason to interrupt.
* Some syscalls succeed on a signal coming in.
* E.g., SYS_wait4 on SIGCHLD, or reading from a slow device.
* XXX: Now that we pass SA_RESTART we should never get here?
*/
return false;
}
/* Don't restart if the app's handler says not to */
if (info->app_sigaction[sig] != NULL &&
!TEST(SA_RESTART, info->app_sigaction[sig]->flags)) {
return false;
}
/* XXX i#1145: some syscalls are never restarted when interrupted by a signal.
* We check those that are simple to distinguish below, but not all are. We have
* this under an option so it can be disabled if necessary.
*/
if (!DYNAMO_OPTION(restart_syscalls))
return false;
/* Now that we use SA_RESTART we rely on that and ignore our own
* inaccurate check sysnum_is_not_restartable(sysnum).
* SA_RESTART also means we can just be passed in the register value to restore.
*/
LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xax/r0 to %ld\n", __FUNCTION__,
orig_retval_reg);
#ifdef X86
sc->SC_XAX = orig_retval_reg;
#elif defined(AARCHXX)
sc->SC_R0 = orig_retval_reg;
#else
# error NYI
#endif
/* Now adjust the pc to point at the syscall instruction instead of after it,
* so when we resume we'll go back to the syscall.
* Adjusting solves transparency as well: natively the kernel adjusts
* the pc before setting up the signal frame.
* We don't pass in the post-syscall pc provided by the kernel because
* we want the app pc, not the raw pc.
*/
dr_isa_mode_t isa_mode;
if (is_after_syscall_address(dcontext, pc) || pc == vsyscall_sysenter_return_pc) {
isa_mode = dr_get_isa_mode(dcontext);
} else {
/* We're going to walk back in the fragment, not gencode */
ASSERT(f != NULL);
isa_mode = FRAG_ISA_MODE(f->flags);
}
sys_inst_len = syscall_instr_length(isa_mode);
if (pc == vsyscall_sysenter_return_pc) {
#ifdef X86
sc->SC_XIP = (ptr_uint_t)(vsyscall_syscall_end_pc - sys_inst_len);
/* To restart sysenter we must re-copy xsp into xbp, as xbp is
* clobbered by the kernel.
* XXX: The kernel points at the int 0x80 in vsyscall on a restart
* and so doesn't have to do this: should we do that too? If so we'll
* have to avoid interpreting our own hook which is right after the
* int 0x80.
*/
sc->SC_XBP = sc->SC_XSP;
#else
ASSERT_NOT_REACHED();
#endif
} else if (is_after_syscall_address(dcontext, pc)) {
/* We're at do_syscall: point at app syscall instr. We want an app
* address b/c this signal will be delayed and the delivery will use
* a direct app context: no translation from the cache.
* The caller sets info->sigpending[sig]->use_sigcontext for us.
*/
sc->SC_XIP = (ptr_uint_t)(dcontext->asynch_target - sys_inst_len);
DODEBUG({
instr_t instr;
dr_isa_mode_t old_mode;
dr_set_isa_mode(dcontext, isa_mode, &old_mode);
instr_init(dcontext, &instr);
ASSERT(decode(dcontext, (app_pc)sc->SC_XIP, &instr) != NULL &&
instr_is_syscall(&instr));
instr_free(dcontext, &instr);
dr_set_isa_mode(dcontext, old_mode, NULL);
});
} else {
ASSERT_NOT_REACHED(); /* Inlined syscalls no longer come here. */
}
LOG(THREAD, LOG_ASYNCH, 2, "%s: sigreturn pc is now " PFX "\n", __FUNCTION__,
sc->SC_XIP);
return true;
}
/* XXX: Better to get this code inside arch/ but we'd have to convert to an mcontext
* which seems overkill.
*/
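/* Tries to identify the fragment the interrupted gencode (clean call
 * save/restore, the IBL, or fcache_enter/do_syscall) was headed to, so the
 * caller can unlink it just as if the signal had arrived inside the fragment.
 */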
static fragment_t *
find_next_fragment_from_gencode(dcontext_t *dcontext, sigcontext_t *sc)
{
fragment_t *f = NULL;
fragment_t wrapper;
byte *pc = (byte *)sc->SC_XIP;
if (in_clean_call_save(dcontext, pc) || in_clean_call_restore(dcontext, pc)) {
#ifdef AARCHXX
f = fragment_pclookup(dcontext, (cache_pc)sc->SC_LR, &wrapper);
#elif defined(X86)
cache_pc retaddr = NULL;
/* Get the retaddr. We assume this is the adjustment used by
* insert_out_of_line_context_switch().
*/
byte *ra_slot =
dcontext->dstack - get_clean_call_switch_stack_size() - sizeof(retaddr);
/* The extra x86 slot is only there for save. */
if (in_clean_call_save(dcontext, pc))
ra_slot -= get_clean_call_temp_stack_size();
if (d_r_safe_read(ra_slot, sizeof(retaddr), &retaddr))
f = fragment_pclookup(dcontext, retaddr, &wrapper);
#else
# error Unsupported arch.
#endif
} else if (in_indirect_branch_lookup_code(dcontext, pc)) {
/* Try to find the target if the signal arrived in the IBL.
* We could try to be a lot more precise by hardcoding the IBL
* sequence here but that would make the code less maintainable.
* Instead we try the registers that hold the target app address.
*/
/* First check for the jmp* on the hit path: that is the only place
* in the ibl where the target tag is not sitting in a register.
*/
#if defined(X86) && defined(X64)
/* Optimization for the common case of targeting a prefix on x86_64:
* ff 61 08 jmp 0x08(%rcx)[8byte]
* The tag is in 0x0(%rcx) so we avoid a decode and pclookup.
*/
if (*pc == 0xff && *(pc + 1) == 0x61 && *(pc + 2) == 0x08) {
f = fragment_lookup(dcontext, *(app_pc *)sc->SC_XCX);
}
#endif
if (f == NULL) {
instr_t instr;
instr_init(dcontext, &instr);
decode_cti(dcontext, pc, &instr);
if (instr_is_ibl_hit_jump(&instr)) {
priv_mcontext_t mc;
sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ };
sigcontext_to_mcontext(&mc, &sc_full, DR_MC_INTEGER | DR_MC_CONTROL);
byte *target;
if (opnd_is_memory_reference(instr_get_target(&instr))) {
target = instr_compute_address_priv(&instr, &mc);
ASSERT(target != NULL);
if (target != NULL)
target = *(byte **)target;
} else {
ASSERT(opnd_is_reg(instr_get_target(&instr)));
target = (byte *)reg_get_value_priv(
opnd_get_reg(instr_get_target(&instr)), &mc);
}
ASSERT(target != NULL);
if (target != NULL)
f = fragment_pclookup(dcontext, target, &wrapper);
/* I tried to hit this case running client.cleancallsig in a loop
* and while I could on x86 and x86_64 I never did on ARM or
* AArch64. We can remove this once someone hits it and it works.
*/
IF_AARCHXX(ASSERT_NOT_TESTED());
}
instr_free(dcontext, &instr);
}
#ifdef AARCHXX
/* The target is in r2 the whole time, w/ or w/o Thumb LSB. */
if (f == NULL && sc->SC_R2 != 0)
f = fragment_lookup(dcontext, ENTRY_PC_TO_DECODE_PC(sc->SC_R2));
#elif defined(X86)
/* The target is initially in xcx but is then copied to xbx. */
if (f == NULL && sc->SC_XBX != 0)
f = fragment_lookup(dcontext, (app_pc)sc->SC_XBX);
if (f == NULL && sc->SC_XCX != 0)
f = fragment_lookup(dcontext, (app_pc)sc->SC_XCX);
#else
# error Unsupported arch.
#endif
} else {
/* If in fcache_enter or do_syscall*, we stored the next_tag in asynch_target
* in d_r_dispatch. But, we need to avoid using the asynch_target for the
* fragment we just exited if we're in fcache_return.
*/
if (dcontext->asynch_target != NULL && !in_fcache_return(dcontext, pc))
f = fragment_lookup(dcontext, dcontext->asynch_target);
}
return f;
}
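/* Decides whether sig can be delivered immediately (it interrupted the code
 * cache, or DR/client code at a safe spot) or must be queued on
 * info->sigpending and delivered once we are back in d_r_dispatch.
 */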
static void
record_pending_signal(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt,
sigframe_rt_t *frame, bool forged _IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
/* XXX #1615: we need a full ucontext to store pre-xl8 simd values */
sigcontext_t sc_orig;
byte *pc = (byte *)sc->SC_XIP;
byte *xsp = (byte *)sc->SC_XSP;
bool receive_now = false;
bool blocked = false;
bool handled = false;
bool at_auto_restart_syscall = false;
int syslen = 0;
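    /* Saved so adjust_syscall_for_restart() can put the pre-signal xax/r0 value
     * back when we emulate an auto-restart (i#1145).
     */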
reg_t orig_retval_reg = sc->IF_X86_ELSE(SC_XAX, SC_R0);
sigpending_t *pend;
fragment_t *f = NULL;
fragment_t wrapper;
/* We no longer block SUSPEND_SIGNAL (i#184/PR 450670) or SIGSEGV (i#193/PR 287309).
* But we can have re-entrancy issues in this routine if the app uses the same
* SUSPEND_SIGNAL, or the nested SIGSEGV needs to be sent to the app. The
* latter shouldn't happen unless the app sends SIGSEGV via SYS_kill().
*/
if (ostd->processing_signal > 0 ||
/* If we interrupted receive_pending_signal() we can't prepend a new
* pending or delete an old b/c we might mess up the state so we
* just drop this one: should only happen for alarm signal
*/
(info->accessing_sigpending && !info->nested_pending_ok &&
/* we do want to report a crash in receive_pending_signal() */
(can_always_delay[sig] ||
is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)))) {
LOG(THREAD, LOG_ASYNCH, 1, "nested signal %d\n", sig);
ASSERT(ostd->processing_signal == 0 || sig == SUSPEND_SIGNAL || sig == SIGSEGV);
ASSERT(can_always_delay[sig] ||
is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info));
/* To avoid re-entrant execution of special_heap_alloc() and of
* prepending to the pending list we just drop this signal.
* FIXME i#194/PR 453996: do better.
*/
STATS_INC(num_signals_dropped);
SYSLOG_INTERNAL_WARNING_ONCE("dropping nested signal");
return;
}
ostd->processing_signal++; /* no need for atomicity: thread-private */
/* First, check whether blocked, before we restore for sigsuspend (i#1340). */
if (kernel_sigismember(&info->app_sigblocked, sig))
blocked = true;
if (info->in_sigsuspend) {
/* sigsuspend ends when a signal is received, so restore the
* old blocked set
*/
info->app_sigblocked = info->app_sigblocked_save;
info->in_sigsuspend = false;
/* update the set to restore to post-signal-delivery */
#ifdef MACOS
ucxt->uc_sigmask = *(__darwin_sigset_t *)&info->app_sigblocked;
#else
ucxt->uc_sigmask = info->app_sigblocked;
#endif
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "after sigsuspend, blocked signals are now:\n");
dump_sigset(dcontext, &info->app_sigblocked);
}
#endif
}
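    /* syslen is used below when adjusting the interrupted pc across the syscall
     * instruction for auto-restart emulation (see the SA_RESTART comments below).
     */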
if (get_at_syscall(dcontext))
syslen = syscall_instr_length(dr_get_isa_mode(dcontext));
if (info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler ==
(handler_t)SIG_IGN
/* If a client registered a handler, put this in the queue.
* Races between registering, queueing, and delivering are fine.
*/
IF_CLIENT_INTERFACE(&&!dr_signal_hook_exists())) {
LOG(THREAD, LOG_ASYNCH, 3,
"record_pending_signal (%d at pc " PFX "): action is SIG_IGN!\n", sig, pc);
ostd->processing_signal--;
return;
} else if (blocked) {
/* signal is blocked by app, so just record it, don't receive now */
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d at pc " PFX "): signal is currently blocked\n", sig,
pc);
IF_LINUX(handled = notify_signalfd(dcontext, info, sig, frame));
} else if (safe_is_in_fcache(dcontext, pc, xsp)) {
LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from cache pc " PFX "\n",
sig, pc);
if (forged || can_always_delay[sig]) {
/* to make translation easier, want to delay if can until d_r_dispatch
* unlink cur frag, wait for d_r_dispatch
*/
/* check for coarse first to avoid cost of coarse pclookup */
if (get_fcache_coarse_info(pc) != NULL) {
/* PR 213040: we can't unlink coarse. If we fail to translate
* we'll switch back to delaying, below.
*/
if (sig_is_alarm_signal(sig) && info->sigpending[sig] != NULL &&
info->sigpending[sig]->next != NULL && info->skip_alarm_xl8 > 0) {
/* Translating coarse fragments is very expensive so we
* avoid doing it when we're having trouble keeping up w/
* the alarm frequency (PR 213040), but we make sure we try
* every once in a while to avoid unbounded signal delay
*/
info->skip_alarm_xl8--;
STATS_INC(num_signals_coarse_delayed);
} else {
if (sig_is_alarm_signal(sig))
info->skip_alarm_xl8 = SKIP_ALARM_XL8_MAX;
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2,
"signal interrupted coarse fragment so delivering now\n");
}
} else {
f = fragment_pclookup(dcontext, pc, &wrapper);
ASSERT(f != NULL);
ASSERT(!TEST(FRAG_COARSE_GRAIN, f->flags)); /* checked above */
LOG(THREAD, LOG_ASYNCH, 2, "\tdelaying until exit F%d\n", f->id);
if (interrupted_inlined_syscall(dcontext, f, pc)) {
/* PR 596147: if delayable signal arrives after syscall-skipping
* jmp, either at syscall or post-syscall, we deliver
* immediately, since we can't bound the delay
*/
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2,
"signal interrupted pre/post syscall itself so delivering now\n");
/* We don't set at_auto_restart_syscall because we just leave
* the SA_RESTART kernel-supplied resumption point: with no
* post-syscall handler to worry about we have no need to
* change anything.
*/
} else {
/* could get another signal but should be in same fragment */
ASSERT(info->interrupted == NULL || info->interrupted == f);
if (info->interrupted != f) {
/* Just in case there's a prior, avoid leaving it unlinked. */
relink_interrupted_fragment(dcontext, info);
if (unlink_fragment_for_signal(dcontext, f, pc)) {
info->interrupted = f;
info->interrupted_pc = pc;
} else {
/* either was unlinked for trace creation, or we got another
* signal before exiting cache to handle 1st
*/
ASSERT(info->interrupted == NULL || info->interrupted == f);
}
}
}
}
} else {
/* the signal interrupted code cache => run handler now! */
receive_now = true;
LOG(THREAD, LOG_ASYNCH, 2, "\tnot certain can delay so handling now\n");
}
} else if (in_generated_routine(dcontext, pc) ||
/* XXX: should also check fine stubs */
safe_is_in_coarse_stubs(dcontext, pc, xsp)) {
/* Assumption: dynamo errors have been caught already inside
* the master_signal_handler, thus any error in a generated routine
* is an asynch signal that can be delayed
*/
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from gen routine or stub " PFX "\n", sig, pc);
if (get_at_syscall(dcontext)) {
/* i#1206: the syscall was interrupted, so we can go back to d_r_dispatch
* and don't need to receive it now (which complicates post-syscall handling)
* w/o any extra delay.
*/
/* i#2659: we now use SA_RESTART to handle interrupting native
* auto-restart syscalls. That means we have to adjust do_syscall
* interruption to give us control so we can deliver the signal. Due to
* needing to run post-syscall handlers (we don't want to get into nested
* dcontexts like on Windows) it's simplest to go back to d_r_dispatch, which
* is most easily done by emulating the non-SA_RESTART behavior.
* XXX: This all seems backward: we should revisit this model and see if
* we can get rid of this emulation and the auto-restart emulation.
*/
/* The get_at_syscall() check above distinguishes from just having
* arrived at the syscall instr, but with SA_RESTART we can't distinguish
* not-yet-executed-syscall from syscall-was-interrupted-in-the-kernel.
* This matters for sigreturn (i#2995), whose asynch_target points somewhere
* other than right after the syscall, so we exclude it (it can't be
* interrupted so we know we haven't executed it yet).
*/
if (is_after_syscall_address(dcontext, pc + syslen) &&
!is_sigreturn_syscall_number(sc->SC_SYSNUM_REG)) {
LOG(THREAD, LOG_ASYNCH, 2,
"Adjusting interrupted auto-restart syscall from " PFX " to " PFX
"\n",
pc, pc + syslen);
at_auto_restart_syscall = true;
sc->SC_XIP += syslen;
sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR;
pc = (byte *)sc->SC_XIP;
}
}
/* This could come from another thread's SYS_kill (via our gen do_syscall) */
DOLOG(1, LOG_ASYNCH, {
if (!is_after_syscall_address(dcontext, pc) && !forged &&
!can_always_delay[sig]) {
LOG(THREAD, LOG_ASYNCH, 1,
"WARNING: signal %d in gen routine: may cause problems!\n", sig);
}
});
/* i#2019: for a signal arriving in gencode before entry to a fragment,
* we need to unlink the fragment just like for a signal arriving inside
* the fragment itself.
* Multiple signals should all have the same asynch_target so we should
* only need a single info->interrupted.
*/
if (info->interrupted == NULL && !get_at_syscall(dcontext)) {
f = find_next_fragment_from_gencode(dcontext, sc);
if (f != NULL && !TEST(FRAG_COARSE_GRAIN, f->flags)) {
if (unlink_fragment_for_signal(dcontext, f, FCACHE_ENTRY_PC(f))) {
info->interrupted = f;
info->interrupted_pc = FCACHE_ENTRY_PC(f);
}
}
}
} else if (get_at_syscall(dcontext) && pc == vsyscall_sysenter_return_pc - syslen &&
/* See i#2995 comment above: rule out sigreturn */
!is_sigreturn_syscall_number(sc->SC_SYSNUM_REG)) {
LOG(THREAD, LOG_ASYNCH, 2,
"record_pending_signal(%d) from restart-vsyscall " PFX "\n", sig, pc);
/* While the kernel points at int 0x80 for a restart, we leverage our
* existing sysenter restart mechanism.
*/
at_auto_restart_syscall = true;
sc->SC_XIP = (reg_t)vsyscall_sysenter_return_pc;
sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR;
pc = (byte *)sc->SC_XIP;
} else if (pc == vsyscall_sysenter_return_pc) {
LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from vsyscall " PFX "\n",
sig, pc);
/* i#1206: the syscall was interrupted but is not auto-restart, so we can go
* back to d_r_dispatch and don't need to receive it now (which complicates
* post-syscall handling)
*/
} else if (thread_synch_check_state(dcontext, THREAD_SYNCH_NO_LOCKS) &&
/* Avoid grabbing locks for xl8 while in a suspended state (i#3026). */
ksynch_get_value(&ostd->suspended) == 0) {
/* The signal interrupted DR or the client but it's at a safe spot so
* deliver it now.
*/
receive_now = true;
} else {
/* the signal interrupted DR itself => do not run handler now! */
LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from DR at pc " PFX "\n",
sig, pc);
if (!forged && !can_always_delay[sig] &&
!is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) {
/* i#195/PR 453964: don't re-execute if will just re-fault.
* Our checks for dstack, etc. in master_signal_handler should
* have accounted for everything
*/
ASSERT_NOT_REACHED();
abort_on_DR_fault(dcontext, pc, NULL, sig, frame,
(sig == SIGSEGV) ? "SEGV" : "other", " unknown");
}
}
LOG(THREAD, LOG_ASYNCH, 3, "\taction is not SIG_IGN\n");
#if defined(X86) && defined(LINUX)
LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = " PFX "\n",
frame->pretcode); /* pretcode has same offs for plain */
#endif
if (receive_now) {
/* we need to translate sc before we know whether client wants to
* suppress, so we need a backup copy
*/
bool xl8_success;
ASSERT(!at_auto_restart_syscall); /* only used for delayed delivery */
sc_orig = *sc;
ASSERT(!forged);
/* cache the fragment since pclookup is expensive for coarse (i#658) */
f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper);
xl8_success = translate_sigcontext(dcontext, ucxt, !can_always_delay[sig], f);
if (can_always_delay[sig] && !xl8_success) {
/* delay: we expect this for coarse fragments if alarm arrives
* in middle of ind branch region or sthg (PR 213040)
*/
LOG(THREAD, LOG_ASYNCH, 2,
"signal is in un-translatable spot in coarse fragment: delaying\n");
receive_now = false;
}
}
if (receive_now) {
/* N.B.: since we abandon the old context for synchronous signals,
* we do not need to mark this fragment as FRAG_CANNOT_DELETE
*/
#ifdef DEBUG
if (d_r_stats->loglevel >= 2 && (d_r_stats->logmask & LOG_ASYNCH) != 0 &&
safe_is_in_fcache(dcontext, pc, xsp)) {
ASSERT(f != NULL);
LOG(THREAD, LOG_ASYNCH, 2, "Got signal at pc " PFX " in this fragment:\n",
pc);
disassemble_fragment(dcontext, f, false);
}
#endif
LOG(THREAD, LOG_ASYNCH, 2, "Going to receive signal now\n");
/* If we end up executing the default action, we'll go native
* since we translated the context. If there's a handler,
* we'll copy the context to the app stack and then adjust the
* original on our stack so we take over.
*/
execute_handler_from_cache(dcontext, sig, frame, &sc_orig,
f _IF_CLIENT(access_address));
} else if (!handled) {
#ifdef CLIENT_INTERFACE
/* i#182/PR 449996: must let client act on blocked non-delayable signals to
* handle instrumentation faults. Make sure we're at a safe spot: i.e.,
* only raise for in-cache faults. Checking forged and no-delay
* to avoid the in-cache check for delayable signals => safer.
*/
if (blocked && !forged && !can_always_delay[sig] &&
safe_is_in_fcache(dcontext, pc, xsp)) {
dr_signal_action_t action;
/* cache the fragment since pclookup is expensive for coarse (i#658) */
f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper);
sc_orig = *sc;
translate_sigcontext(dcontext, ucxt, true /*shouldn't fail*/, f);
/* make a copy before send_signal_to_client() tweaks it */
sigcontext_t sc_interrupted = *sc;
action = send_signal_to_client(dcontext, sig, frame, &sc_orig, access_address,
true /*blocked*/, f);
/* For blocked signal early event we disallow BYPASS (xref i#182/PR 449996) */
CLIENT_ASSERT(action != DR_SIGNAL_BYPASS,
"cannot bypass a blocked signal event");
if (!handle_client_action_from_cache(dcontext, sig, action, frame, &sc_orig,
&sc_interrupted, true /*blocked*/)) {
ostd->processing_signal--;
return;
}
/* restore original (untranslated) sc */
*get_sigcontext_from_rt_frame(frame) = sc_orig;
}
#endif
/* i#196/PR 453847: avoid infinite loop of signals if try to re-execute */
if (blocked && !can_always_delay[sig] &&
!is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) {
ASSERT(default_action[sig] == DEFAULT_TERMINATE ||
default_action[sig] == DEFAULT_TERMINATE_CORE);
LOG(THREAD, LOG_ASYNCH, 1,
"blocked fatal signal %d cannot be delayed: terminating\n", sig);
sc_orig = *sc;
/* If forged we're likely couldbelinking, and we don't need to xl8. */
if (forged)
ASSERT(is_couldbelinking(dcontext));
else
translate_sigcontext(dcontext, ucxt, true /*shouldn't fail*/, NULL);
/* the process should be terminated */
execute_default_from_cache(dcontext, sig, frame, &sc_orig, forged);
ASSERT_NOT_REACHED();
}
/* Happened in DR, do not translate context. Record for later processing
* at a safe point with a clean app state.
*/
if (!blocked || sig >= OFFS_RT || (blocked && info->sigpending[sig] == NULL)) {
/* only have 1 pending for blocked non-rt signals */
/* to avoid accumulating signals if we're slow in presence of
* a high-rate itimer we only keep 2 alarm signals (PR 596768)
*/
if (sig_is_alarm_signal(sig)) {
if (info->sigpending[sig] != NULL &&
info->sigpending[sig]->next != NULL) {
ASSERT(info->sigpending[sig]->next->next == NULL);
/* keep the oldest, replace newer w/ brand-new one, for
* more spread-out alarms
*/
sigpending_t *temp = info->sigpending[sig];
info->sigpending[sig] = temp->next;
special_heap_free(info->sigheap, temp);
info->num_pending--;
LOG(THREAD, LOG_ASYNCH, 2, "3rd pending alarm %d => dropping 2nd\n",
sig);
STATS_INC(num_signals_dropped);
SYSLOG_INTERNAL_WARNING_ONCE("dropping 3rd pending alarm signal");
}
}
/* special heap alloc always uses sizeof(sigpending_t) blocks */
pend = special_heap_alloc(info->sigheap);
ASSERT(sig > 0 && sig <= MAX_SIGNUM);
info->num_pending++;
if (info->num_pending > DYNAMO_OPTION(max_pending_signals) &&
!info->multiple_pending_units)
info->multiple_pending_units = true;
if (info->num_pending >= DYNAMO_OPTION(max_pending_signals)) {
/* We're at the limit of our special heap: one more and it will try to
* allocate a new unit, which is unsafe as it acquires locks. We take
* several steps: we notify the user; we check for this on delivery as
* well and proactively allocate a new unit in a safer context.
* XXX: Perhaps we should drop some signals here?
*/
DO_ONCE({
char max_string[32];
snprintf(max_string, BUFFER_SIZE_ELEMENTS(max_string), "%d",
DYNAMO_OPTION(max_pending_signals));
NULL_TERMINATE_BUFFER(max_string);
SYSLOG(SYSLOG_WARNING, MAX_PENDING_SIGNALS, 3, get_application_name(),
get_application_pid(), max_string);
});
}
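            /* Prepend to the per-signal pending list; unblocked records whether
             * the app had the signal unblocked when it arrived.
             */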
pend->next = info->sigpending[sig];
info->sigpending[sig] = pend;
pend->unblocked = !blocked;
/* FIXME: note that for asynchronous signals we don't need to
* bother to record exact machine context, even entire frame,
* since don't want to pass dynamo pc context to app handler.
* only copy frame for synchronous signals? those only
* happen while in cache? but for asynch, we would have to
* construct our own frame...kind of a pain.
*/
copy_frame_to_pending(dcontext, sig, frame _IF_CLIENT(access_address));
/* i#1145: check whether we should auto-restart an interrupted syscall */
if (at_auto_restart_syscall) {
/* Adjust the pending frame to restart the syscall, if applicable */
sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame);
sigcontext_t *sc_pend = get_sigcontext_from_rt_frame(frame);
if (adjust_syscall_for_restart(dcontext, info, sig, sc_pend, f,
orig_retval_reg)) {
/* We're going to re-start this syscall after we go
* back to d_r_dispatch, run the post-syscall handler (for -EINTR),
* and deliver the signal. We've adjusted the sigcontext
* for re-start on the sigreturn, but we need to tell
* execute_handler_from_dispatch() to use our sigcontext
* and not the mcontext.
* A client will see a second set of pre + post handlers for
* the restart, which seems reasonable, given the signal in
* between.
*/
info->sigpending[sig]->use_sigcontext = true;
}
}
} else {
/* For clients, we document that we do not pass to them
* unless we're prepared to deliver to app. We would have
* to change our model to pass them non-final-translated
* contexts for delayable signals in order to give them
* signals as soon as they come in. Xref i#182/PR 449996.
*/
LOG(THREAD, LOG_ASYNCH, 3,
"\tnon-rt signal already in queue, ignoring this one!\n");
}
if (!blocked && !dcontext->signals_pending)
dcontext->signals_pending = 1;
}
ostd->processing_signal--;
}
/* Distinguish SYS_kill-generated from instruction-generated signals.
* If sent from another process we can't tell, but if sent from this
* thread the interruption point should be our own post-syscall.
* FIXME PR 368277: for other threads in same process we should set a flag
* and identify them as well.
* FIXME: for faults like SIGILL we could examine the interrupted pc
* to see whether it is capable of generating such a fault (see code
* used in handle_nudge_signal()).
*/
static bool
is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, kernel_siginfo_t *info)
{
#if !defined(VMX86_SERVER) && !defined(MACOS) /* does not use SI_KERNEL */
/* i#133: use si_code to distinguish user-sent signals.
* Even 2.2 Linux kernel supports <=0 meaning user-sent (except
* SIGIO) so we assume we can rely on it.
*/
if (info->si_code <= 0)
return true;
#endif
return (is_at_do_syscall(dcontext, pc, xsp) &&
(dcontext->sys_num == SYS_kill ||
#ifdef LINUX
dcontext->sys_num == SYS_tkill || dcontext->sys_num == SYS_tgkill ||
dcontext->sys_num == SYS_rt_sigqueueinfo
#elif defined(MACOS)
dcontext->sys_num == SYS___pthread_kill
#endif
));
}
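/* Computes the data address being accessed by the faulting instruction at
 * instr_cache_pc, setting *write to whether it was a write.  Returns NULL if no
 * target can be determined (e.g., for SYS_kill(SIGSEGV)).
 */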
static byte *
compute_memory_target(dcontext_t *dcontext, cache_pc instr_cache_pc,
kernel_ucontext_t *uc, kernel_siginfo_t *si, bool *write)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
byte *target = NULL;
instr_t instr;
priv_mcontext_t mc;
uint memopidx, memoppos, memopsize;
opnd_t memop;
bool found_target = false;
bool in_maps;
bool use_allmem = false;
uint prot;
IF_ARM(dr_isa_mode_t old_mode;)
LOG(THREAD, LOG_ALL, 2,
"computing memory target for " PFX " causing SIGSEGV, kernel claims it is " PFX
"\n",
instr_cache_pc, (byte *)si->si_addr);
/* ARM's sigcontext_t has a "fault_address" field but it also seems unreliable */
IF_ARM(LOG(THREAD, LOG_ALL, 2, "fault_address: " PFX "\n", sc->fault_address));
/* We used to do a memory query to check if instr_cache_pc is readable, but
* now we use TRY/EXCEPT because we don't have the instr length and the OS
* query is expensive. If decoding faults, the signal handler will longjmp
* out before it calls us recursively.
*/
instr_init(dcontext, &instr);
IF_ARM({
/* Be sure to use the interrupted mode and not the last-dispatch mode */
dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), &old_mode);
});
TRY_EXCEPT(dcontext, { decode(dcontext, instr_cache_pc, &instr); },
{
return NULL; /* instr_cache_pc was unreadable */
});
IF_ARM(dr_set_isa_mode(dcontext, old_mode, NULL));
if (!instr_valid(&instr)) {
LOG(THREAD, LOG_ALL, 2,
"WARNING: got SIGSEGV for invalid instr at cache pc " PFX "\n",
instr_cache_pc);
ASSERT_NOT_REACHED();
instr_free(dcontext, &instr);
return NULL;
}
ucontext_to_mcontext(&mc, uc);
ASSERT(write != NULL);
/* i#1009: If si_addr is plausibly one of the memory operands of the
* faulting instruction, assume the target was si_addr. If none of the
* memops match, fall back to checking page protections, which can be racy.
* For si_addr == NULL, we fall back to the protection check because it's
* too likely to be a valid memop and we can live with a race on a page that
* is typically unmapped.
*/
if (si->si_code == SEGV_ACCERR && si->si_addr != NULL) {
for (memopidx = 0; instr_compute_address_ex_priv(&instr, &mc, memopidx, &target,
write, &memoppos);
memopidx++) {
/* i#1045: check whether operand and si_addr overlap */
memop = *write ? instr_get_dst(&instr, memoppos)
: instr_get_src(&instr, memoppos);
memopsize = opnd_size_in_bytes(opnd_get_size(memop));
LOG(THREAD, LOG_ALL, 2, "memory operand %u has address " PFX " and size %u\n",
memopidx, target, memopsize);
if ((byte *)si->si_addr >= target &&
(byte *)si->si_addr < target + memopsize) {
target = (byte *)si->si_addr;
found_target = true;
break;
}
}
}
/* For fcache faults, use all_memory_areas, which is faster but acquires
* locks. If it's possible we're in DR, go to the OS to avoid deadlock.
*/
if (DYNAMO_OPTION(use_all_memory_areas)) {
use_allmem = safe_is_in_fcache(dcontext, instr_cache_pc, (byte *)sc->SC_XSP);
}
if (!found_target) {
if (si->si_addr != NULL) {
LOG(THREAD, LOG_ALL, 3, "%s: falling back to racy protection checks\n",
__FUNCTION__);
}
/* i#115/PR 394984: consider all memops */
for (memopidx = 0;
instr_compute_address_ex_priv(&instr, &mc, memopidx, &target, write, NULL);
memopidx++) {
if (use_allmem) {
in_maps = get_memory_info(target, NULL, NULL, &prot);
} else {
in_maps = get_memory_info_from_os(target, NULL, NULL, &prot);
}
if ((!in_maps || !TEST(MEMPROT_READ, prot)) ||
(*write && !TEST(MEMPROT_WRITE, prot))) {
found_target = true;
break;
}
}
}
if (!found_target) {
/* probably an NX fault: how tell whether kernel enforcing? */
in_maps = get_memory_info_from_os(instr_cache_pc, NULL, NULL, &prot);
if (!in_maps || !TEST(MEMPROT_EXEC, prot)) {
target = instr_cache_pc;
found_target = true;
}
}
/* we may still not find target, e.g. for SYS_kill(SIGSEGV) */
if (!found_target)
target = NULL;
DOLOG(2, LOG_ALL, {
LOG(THREAD, LOG_ALL, 2,
"For SIGSEGV at cache pc " PFX ", computed target %s " PFX "\n",
instr_cache_pc, *write ? "write" : "read", target);
d_r_loginst(dcontext, 2, &instr, "\tfaulting instr");
});
instr_free(dcontext, &instr);
return target;
}
/* If native_state is true, assumes the fault is not in the cache and thus
* does not need translation but rather should always be re-executed.
*/
static bool
check_for_modified_code(dcontext_t *dcontext, cache_pc instr_cache_pc,
kernel_ucontext_t *uc, byte *target, bool native_state)
{
/* special case: we expect a seg fault for executable regions
* that were writable and marked read-only by us.
* have to figure out the target address!
* unfortunately the OS doesn't tell us, nor whether it's a write.
* FIXME: if sent from SYS_kill(SIGSEGV), the pc will be post-syscall,
* and if that post-syscall instr is a write that could have faulted,
* how can we tell the difference?
*/
if (was_executable_area_writable(target)) {
/* translate instr_cache_pc to original app pc
* DO NOT use translate_sigcontext, don't want to change the
* signal frame or else we'll lose control when we try to
* return to signal pc!
*/
app_pc next_pc, translated_pc = NULL;
fragment_t *f = NULL;
fragment_t wrapper;
ASSERT((cache_pc)SIGCXT_FROM_UCXT(uc)->SC_XIP == instr_cache_pc);
if (!native_state) {
/* For safe recreation we need to either be couldbelinking or hold
* the initexit lock (to keep someone from flushing current
* fragment), the initexit lock is easier
*/
d_r_mutex_lock(&thread_initexit_lock);
/* cache the fragment since pclookup is expensive for coarse units (i#658) */
f = fragment_pclookup(dcontext, instr_cache_pc, &wrapper);
translated_pc = recreate_app_pc(dcontext, instr_cache_pc, f);
ASSERT(translated_pc != NULL);
d_r_mutex_unlock(&thread_initexit_lock);
}
next_pc =
handle_modified_code(dcontext, instr_cache_pc, translated_pc, target, f);
if (!native_state) {
/* going to exit from middle of fragment (at the write) so will mess up
* trace building
*/
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
}
if (next_pc == NULL) {
/* re-execute the write -- just have master_signal_handler return */
return true;
} else {
ASSERT(!native_state);
/* Do not resume execution in cache, go back to d_r_dispatch. */
transfer_from_sig_handler_to_fcache_return(
dcontext, uc, NULL, SIGSEGV, next_pc,
(linkstub_t *)get_selfmod_linkstub(), false);
/* now have master_signal_handler return */
return true;
}
}
return false;
}
#ifndef HAVE_SIGALTSTACK
/* The exact layout of this struct is relied on in master_signal_handler()
* in x86.asm.
*/
struct clone_and_swap_args {
byte *stack;
byte *tos;
};
/* Helper function for swapping handler to dstack */
bool
sig_should_swap_stack(struct clone_and_swap_args *args, kernel_ucontext_t *ucxt)
{
byte *cur_esp;
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return false;
GET_STACK_PTR(cur_esp);
if (!is_on_dstack(dcontext, cur_esp)) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
/* Pass back the proper args to clone_and_swap_stack: we want to
* copy to dstack from the tos at the signal interruption point.
*/
args->stack = dcontext->dstack;
/* leave room for fpstate */
args->stack -= signal_frame_extra_size(true);
args->stack = (byte *)ALIGN_BACKWARD(args->stack, XSTATE_ALIGNMENT);
args->tos = (byte *)sc->SC_XSP;
return true;
} else
return false;
}
#endif
/* Helper that takes over the current thread signaled via SUSPEND_SIGNAL. Kept
* separate mostly to keep the priv_mcontext_t allocation out of
* master_signal_handler_C.
* If it returns, it returns false, and the signal should be squashed.
*/
static bool
sig_take_over(kernel_ucontext_t *uc)
{
priv_mcontext_t mc;
ucontext_to_mcontext(&mc, uc);
/* We don't want our own blocked signals: we want the app's, stored in the frame. */
if (!os_thread_take_over(&mc, SIGMASK_FROM_UCXT(uc)))
return false;
ASSERT_NOT_REACHED(); /* shouldn't return */
return true; /* make compiler happy */
}
static bool
is_safe_read_ucxt(kernel_ucontext_t *ucxt)
{
app_pc pc = (app_pc)SIGCXT_FROM_UCXT(ucxt)->SC_XIP;
return is_safe_read_pc(pc);
}
/* the master signal handler
* WARNING: behavior varies with different versions of the kernel!
* sigaction support was only added with 2.2
*/
#ifndef X86_32
/* stub in x86.asm passes our xsp to us */
# ifdef MACOS
void
master_signal_handler_C(handler_t handler, int style, int sig, kernel_siginfo_t *info,
kernel_ucontext_t *ucxt, byte *xsp)
# else
void
master_signal_handler_C(int sig, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt,
byte *xsp)
# endif
#else
/* On ia32, adding a parameter disturbs the frame we're trying to capture, so we
* add an intermediate frame and read the normal params off the stack directly.
*/
void
master_signal_handler_C(byte *xsp)
#endif
{
sigframe_rt_t *frame = (sigframe_rt_t *)xsp;
#ifdef X86_32
/* Read the normal arguments from the frame. */
int sig = frame->sig;
kernel_siginfo_t *siginfo = frame->pinfo;
kernel_ucontext_t *ucxt = frame->puc;
#endif /* !X64 */
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
thread_record_t *tr;
#ifdef DEBUG
uint level = 2;
# if !defined(HAVE_MEMINFO)
/* avoid logging every single TRY probe fault */
if (!dynamo_initialized)
level = 5;
# endif
#endif
bool local;
#if defined(MACOS) && !defined(X64)
/* The kernel clears fs, so we have to re-instate our selector, if
* it was set in the first place.
*/
if (sc->__ss.__fs != 0)
tls_reinstate_selector(sc->__ss.__fs);
#endif
#ifdef X86
/* i#2089: For is_thread_tls_initialized() we need a safe_read path that does not
* do any logging or call get_thread_private_dcontext() as those will recurse.
* This path is global so there's no SELF_PROTECT_LOCAL and we also bypass
* the ENTERING_DR() for this short path.
*/
if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_magic) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t)safe_read_tls_magic_recover;
return;
} else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_self) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t)safe_read_tls_self_recover;
return;
} else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_app_self) {
sc->SC_RETURN_REG = 0;
sc->SC_XIP = (reg_t)safe_read_tls_app_self_recover;
return;
}
#endif
dcontext_t *dcontext = get_thread_private_dcontext();
#ifdef MACOS
# ifdef X64
ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64)) ||
(!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT64)));
# else
ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX32)) ||
(!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT)));
# endif
#endif
/* i#350: To support safe_read or TRY_EXCEPT without a dcontext, use the
* global dcontext
* when handling safe_read faults. This lets us pass the check for a
* dcontext below and causes us to use the global log.
*/
if (dcontext == NULL && (sig == SIGSEGV || sig == SIGBUS) &&
(is_safe_read_ucxt(ucxt) ||
(!dynamo_initialized && global_try_except.try_except_state != NULL))) {
dcontext = GLOBAL_DCONTEXT;
}
if (dynamo_exited && d_r_get_num_threads() > 1 && sig == SIGSEGV) {
/* PR 470957: this is almost certainly a race so just squelch it.
* We live w/ the risk that it was holding a lock our release-build
* exit code needs.
*/
exit_thread_syscall(1);
}
/* FIXME: ensure the path for recording a pending signal does not grab any DR locks
* that could have been interrupted
* e.g., synchronize_dynamic_options grabs the stats_lock!
*/
if (sig == SUSPEND_SIGNAL) {
if (proc_get_vendor() == VENDOR_AMD) {
/* i#3356: Work around an AMD processor bug where it does not clear the
* hidden gs base when the gs selector is written. Pre-4.7 Linux kernels
* leave the prior thread's base in place on a switch due to this.
* We can thus come here and get the wrong dcontext on attach; worse,
* we can get NULL here but the wrong one later during init. It's
* safest to just set a non-zero value (the kernel ignores zero) for all
* unknown threads here. There are no problems for non-attach takeover.
*/
if (dcontext == NULL || dcontext->owning_thread != get_sys_thread_id()) {
/* tls_thread_preinit() further rules out a temp-native dcontext
* and avoids clobbering it, to preserve the thread_lookup() case
* below (which we do not want to run first as we could swap to
* the incorrect dcontext midway through it).
*/
if (!tls_thread_preinit()) {
SYSLOG_INTERNAL_ERROR_ONCE("ERROR: Failed to work around AMD context "
"switch bug #3356: crashes or "
"hangs may ensue...");
}
dcontext = NULL;
}
}
}
if (dcontext == NULL &&
/* Check for a temporarily-native thread we're synch-ing with. */
(sig == SUSPEND_SIGNAL
#ifdef X86
|| (INTERNAL_OPTION(safe_read_tls_init) &&
/* Check for whether this is a thread with its invalid sentinel magic set.
* In this case, we assume that it is either a thread that is currently
* temporarily-native via API like DR_EMIT_GO_NATIVE, or a thread in the
* clone window. We know by inspection of our own code that it is safe to
              * call thread_lookup in either case: the thread is making a clone or was
              * just cloned, i.e., thread_lookup requires a lock that must not be held
              * by the calling thread (i#2921).
* XXX: what is ARM doing, any special case w/ dcontext == NULL?
*/
safe_read_tls_magic() == TLS_MAGIC_INVALID)
#endif
)) {
tr = thread_lookup(get_sys_thread_id());
if (tr != NULL)
dcontext = tr->dcontext;
}
if (dcontext == NULL ||
(dcontext != GLOBAL_DCONTEXT &&
(dcontext->signal_field == NULL ||
!((thread_sig_info_t *)dcontext->signal_field)->fully_initialized))) {
/* FIXME: || !intercept_asynch, or maybe !under_our_control */
/* FIXME i#26: this could be a signal arbitrarily sent to this thread.
* We could try to route it to another thread, using a global queue
* of pending signals. But what if it was targeted to this thread
* via SYS_{tgkill,tkill}? Can we tell the difference, even if
* we watch the kill syscalls: could come from another process?
*/
if (sig_is_alarm_signal(sig)) {
/* assuming an alarm during thread exit or init (xref PR 596127,
* i#359): suppressing is fine
*/
} else if (sig == SUSPEND_SIGNAL && dcontext == NULL) {
/* We sent SUSPEND_SIGNAL to a thread we don't control (no
* dcontext), which means we want to take over.
*/
ASSERT(!doing_detach);
if (!sig_take_over(ucxt))
return;
ASSERT_NOT_REACHED(); /* else, shouldn't return */
} else {
/* Using global dcontext because dcontext is NULL here. */
DOLOG(1, LOG_ASYNCH, { dump_sigcontext(GLOBAL_DCONTEXT, sc); });
SYSLOG_INTERNAL_ERROR("ERROR: master_signal_handler with no siginfo "
"(i#26?): tid=%d, sig=%d",
get_sys_thread_id(), sig);
}
/* see FIXME comments above.
* workaround for now: suppressing is better than dying.
*/
if (can_always_delay[sig])
return;
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_HANDLE_SIGNAL, 2, get_application_name(),
get_application_pid());
}
/* we may be entering dynamo from code cache! */
/* Note that this is unsafe if -single_thread_in_DR => we grab a lock =>
* hang if signal interrupts DR: but we don't really support that option
*/
ENTERING_DR();
if (dcontext == GLOBAL_DCONTEXT) {
local = false;
tr = thread_lookup(get_sys_thread_id());
} else {
tr = dcontext->thread_record;
local = local_heap_protected(dcontext);
if (local)
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
}
/* i#1921: For proper native execution with re-takeover we need to propagate
* signals to app handlers while native. For now we do not support re-takeover
* and we give up our handlers via signal_remove_handlers().
*/
ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext) ||
sig == SUSPEND_SIGNAL);
LOG(THREAD, LOG_ASYNCH, level,
"\nmaster_signal_handler: thread=%d, sig=%d, xsp=" PFX ", retaddr=" PFX "\n",
get_sys_thread_id(), sig, xsp, *((byte **)xsp));
LOG(THREAD, LOG_ASYNCH, level + 1,
"siginfo: sig = %d, pid = %d, status = %d, errno = %d, si_code = %d\n",
siginfo->si_signo, siginfo->si_pid, siginfo->si_status, siginfo->si_errno,
siginfo->si_code);
DOLOG(level + 1, LOG_ASYNCH, { dump_sigcontext(dcontext, sc); });
#if defined(X86_32) && !defined(VMX86_SERVER) && defined(LINUX)
/* FIXME case 6700: 2.6.9 (FC3) kernel sets up our frame with a pretcode
* of 0x440. This happens if our restorer is unspecified (though 2.6.9
* src code shows setting the restorer to a default value in that case...)
* or if we explicitly point at dynamorio_sigreturn. I couldn't figure
* out why it kept putting 0x440 there. So we fix the issue w/ this
* hardcoded return.
* This hack causes vmkernel to kill the process on sigreturn due to
* vmkernel's non-standard sigreturn semantics. PR 404712.
*/
*((byte **)xsp) = (byte *)dynamorio_sigreturn;
#endif
/* N.B.:
* ucontext_t is defined in two different places. The one we get
* included is /usr/include/sys/ucontext.h, which would have us
* doing this:
* void *pc = (void *) ucxt->uc_mcontext.gregs[EIP];
* However, EIP is not defined for us (used to be in older
* RedHat version) unless we define __USE_GNU, which we don't want to do
* for other reasons, so we'd have to also say:
* #define EIP 14
* Instead we go by the ucontext_t definition in
* /usr/include/asm/ucontext.h, which has it containing a sigcontext struct,
* defined in /usr/include/asm/sigcontext.h. This is the definition used
* by the kernel. The two definitions are field-for-field
* identical except that the sys one has an fpstate struct at the end --
* but the next field in the frame is an fpstate. The only mystery
* is why the rt frame is declared as ucontext instead of sigcontext.
* The kernel's version of ucontext must be the asm one!
* And the sys one grabs the next field of the frame.
* Also note that mcontext_t.fpregs == sigcontext.fpstate is NULL if
* floating point operations have not been used (lazy fp state saving).
* Also, sigset_t has different sizes according to kernel (8 bytes) vs.
* glibc (128 bytes?).
*/
switch (sig) {
case SIGBUS: /* PR 313665: look for DR crashes on unaligned memory or mmap bounds */
case SIGSEGV: {
/* Older kernels do NOT fill out the signal-specific fields of siginfo,
* except for SIGCHLD. Thus we cannot do this:
* void *pc = (void*) siginfo->si_addr;
* Thus we must use the third argument, which is a ucontext_t (see above)
*/
void *pc = (void *)sc->SC_XIP;
bool syscall_signal = false; /* signal came from syscall? */
bool is_write = false;
byte *target;
bool is_DR_exception = false;
#ifdef SIDELINE
if (dcontext == NULL) {
SYSLOG_INTERNAL_ERROR("seg fault in sideline thread -- NULL dcontext!");
ASSERT_NOT_REACHED();
}
#endif
if (is_safe_read_ucxt(ucxt) ||
(!dynamo_initialized && global_try_except.try_except_state != NULL) ||
dcontext->try_except.try_except_state != NULL) {
/* handle our own TRY/EXCEPT */
try_except_context_t *try_cxt;
#ifdef HAVE_MEMINFO
/* our probe produces many of these every run */
/* since we use for safe_*, making a _ONCE */
SYSLOG_INTERNAL_WARNING_ONCE("(1+x) Handling our fault in a TRY at " PFX, pc);
#endif
LOG(THREAD, LOG_ALL, level, "TRY fault at " PFX "\n", pc);
if (TEST(DUMPCORE_TRY_EXCEPT, DYNAMO_OPTION(dumpcore_mask)))
os_dump_core("try/except fault");
if (is_safe_read_ucxt(ucxt)) {
sc->SC_XIP = (reg_t)safe_read_resume_pc();
/* Break out to log the normal return from the signal handler.
*/
break;
}
try_cxt = (dcontext != NULL) ? dcontext->try_except.try_except_state
: global_try_except.try_except_state;
ASSERT(try_cxt != NULL);
/* The exception interception code did an ENTER so we must EXIT here */
EXITING_DR();
/* Since we have no sigreturn we have to restore the mask
* manually, just like siglongjmp(). i#226/PR 492568: we rely
* on the kernel storing the prior mask in ucxt, so we do not
* need to store it on every setjmp.
*/
/* Verify that there's no scenario where the mask gets changed prior
* to a fault inside a try. This relies on dr_setjmp_sigmask() filling
* in the mask, which we only bother to do in debug build.
*/
ASSERT(memcmp(&try_cxt->context.sigmask, &ucxt->uc_sigmask,
sizeof(ucxt->uc_sigmask)) == 0);
sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), NULL,
sizeof(ucxt->uc_sigmask));
DR_LONGJMP(&try_cxt->context, LONGJMP_EXCEPTION);
ASSERT_NOT_REACHED();
}
target = compute_memory_target(dcontext, pc, ucxt, siginfo, &is_write);
#ifdef CLIENT_INTERFACE
if (CLIENTS_EXIST() && is_in_client_lib(pc)) {
/* i#1354: client might write to a page we made read-only.
* If so, handle the fault and re-execute it, if it's safe to do so
* (we document these criteria under DR_MEMPROT_PRETEND_WRITE).
*/
if (is_write && !is_couldbelinking(dcontext) && OWN_NO_LOCKS(dcontext) &&
check_for_modified_code(dcontext, pc, ucxt, target, true /*native*/))
break;
abort_on_fault(dcontext, DUMPCORE_CLIENT_EXCEPTION, pc, target, sig, frame,
exception_label_client, (sig == SIGSEGV) ? "SEGV" : "BUS",
" client library");
ASSERT_NOT_REACHED();
}
#endif
/* For !HAVE_MEMINFO, we cannot compute the target until
* after the try/except check b/c compute_memory_target()
* calls get_memory_info_from_os() which does a probe: and the
* try/except could be from a probe itself. A try/except that
* triggers a stack overflow should recover on the longjmp, so
* this order should be fine.
*/
/* FIXME: share code with Windows callback.c */
/* FIXME PR 205795: in_fcache and is_dynamo_address do grab locks! */
if ((is_on_dstack(dcontext, (byte *)sc->SC_XSP)
/* PR 302951: clean call arg processing => pass to app/client.
* Rather than call the risky in_fcache we check whereami. */
IF_CLIENT_INTERFACE(&&(dcontext->whereami != DR_WHERE_FCACHE))) ||
is_on_alt_stack(dcontext, (byte *)sc->SC_XSP) ||
is_on_initstack((byte *)sc->SC_XSP)) {
/* Checks here need to cover everything that record_pending_signal()
* thinks is non-fcache, non-gencode: else that routine will kill
* process since can't delay or re-execute (i#195/PR 453964).
*/
is_DR_exception = true;
} else if (!safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP) &&
(in_generated_routine(dcontext, pc) ||
is_at_do_syscall(dcontext, pc, (byte *)sc->SC_XSP) ||
is_dynamo_address(pc))) {
#ifdef CLIENT_INTERFACE
if (!in_generated_routine(dcontext, pc) &&
!is_at_do_syscall(dcontext, pc, (byte *)sc->SC_XSP)) {
/* PR 451074: client needs a chance to handle exceptions in its
* own gencode. client_exception_event() won't return if client
* wants to re-execute faulting instr.
*/
sigcontext_t sc_interrupted = *get_sigcontext_from_rt_frame(frame);
dr_signal_action_t action = send_signal_to_client(
dcontext, sig, frame, sc, target, false /*!blocked*/, NULL);
if (action != DR_SIGNAL_DELIVER && /* for delivery, continue below */
!handle_client_action_from_cache(dcontext, sig, action, frame, sc,
&sc_interrupted,
false /*!blocked*/)) {
/* client handled fault */
break;
}
}
#endif
is_DR_exception = true;
}
if (is_DR_exception) {
/* kill(getpid(), SIGSEGV) looks just like a SIGSEGV in the store of eax
* to mcontext after the syscall instr in do_syscall -- try to distinguish:
*/
if (is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, siginfo)) {
LOG(THREAD, LOG_ALL, 2,
"assuming SIGSEGV at post-do-syscall is kill, not our write fault\n");
syscall_signal = true;
}
if (!syscall_signal) {
if (check_in_last_thread_vm_area(dcontext, target)) {
/* See comments in callback.c as well.
* FIXME: try to share code
*/
SYSLOG_INTERNAL_WARNING("(decode) exception in last area, "
"DR pc=" PFX ", app pc=" PFX,
pc, target);
STATS_INC(num_exceptions_decode);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 2,
"intercept_exception: "
"squashing old trace\n");
trace_abort(dcontext);
}
/* we do get faults when not building a bb: e.g.,
* ret_after_call_check does decoding (case 9396) */
if (dcontext->bb_build_info != NULL) {
/* must have been building a bb at the time */
bb_build_abort(dcontext, true /*clean vm area*/, true /*unlock*/);
}
/* Since we have no sigreturn we have to restore the mask manually */
unblock_all_signals(NULL);
/* Let's pass it back to the application - memory is unreadable */
if (TEST(DUMPCORE_FORGE_UNREAD_EXEC, DYNAMO_OPTION(dumpcore_mask)))
os_dump_core("Warning: Racy app execution (decode unreadable)");
os_forge_exception(target, UNREADABLE_MEMORY_EXECUTION_EXCEPTION);
ASSERT_NOT_REACHED();
} else {
abort_on_DR_fault(dcontext, pc, target, sig, frame,
(sig == SIGSEGV) ? "SEGV" : "BUS",
in_generated_routine(dcontext, pc) ? " generated"
: "");
}
}
}
/* if get here, pass the signal to the app */
ASSERT(pc != 0); /* shouldn't get here */
if (sig == SIGSEGV && !syscall_signal /*only for in-cache signals*/) {
/* special case: we expect a seg fault for executable regions
* that were writable and marked read-only by us.
*/
if (is_write &&
check_for_modified_code(dcontext, pc, ucxt, target, false /*!native*/)) {
/* it was our signal, so don't pass to app -- return now */
break;
}
}
/* pass it to the application (or client) */
LOG(THREAD, LOG_ALL, 1,
"** Received SIG%s at cache pc " PFX " in thread " TIDFMT "\n",
(sig == SIGSEGV) ? "SEGV" : "BUS", pc, d_r_get_thread_id());
ASSERT(syscall_signal || safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP));
/* we do not call trace_abort() here since we may need to
* translate from a temp private bb (i#376): but all paths
* that deliver the signal or redirect will call it
*/
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(target));
break;
}
/* PR 212090: the signal we use to suspend threads */
case SUSPEND_SIGNAL:
if (handle_suspend_signal(dcontext, ucxt, frame)) {
/* i#1921: see comment above */
ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext));
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
}
/* else, don't deliver to app */
break;
/* i#61/PR 211530: the signal we use for nudges */
case NUDGESIG_SIGNUM:
if (handle_nudge_signal(dcontext, siginfo, ucxt))
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
/* else, don't deliver to app */
break;
case SIGALRM:
case SIGVTALRM:
case SIGPROF:
if (handle_alarm(dcontext, sig, ucxt))
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
/* else, don't deliver to app */
break;
#ifdef SIDELINE
case SIGCHLD: {
int status = siginfo->si_status;
if (siginfo->si_pid == 0) {
            /* FIXME: with older versions of Linux the SIGCHLD fields of
             * siginfo are not filled in properly!
             * This is my attempt to handle that: pid seems to be 0.
*/
break;
}
if (status != 0) {
LOG(THREAD, LOG_ALL, 0, "*** Child thread died with error %d\n", status);
ASSERT_NOT_REACHED();
}
break;
}
#endif
default: {
record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL));
break;
}
} /* end switch */
LOG(THREAD, LOG_ASYNCH, level,
"\tmaster_signal_handler %d returning now to " PFX "\n\n", sig, sc->SC_XIP);
/* Ensure we didn't get the app's sigstack into our frame. On Mac, the kernel
* doesn't use the frame's uc_stack, so we limit this to Linux.
* The pointers may be different if a thread is on its way to exit, and the app's
* sigstack was already restored (i#3369).
*/
IF_LINUX(ASSERT(dcontext == NULL || dcontext == GLOBAL_DCONTEXT ||
dcontext->is_exiting ||
frame->uc.uc_stack.ss_sp ==
((thread_sig_info_t *)dcontext->signal_field)->sigstack.ss_sp));
/* restore protections */
if (local)
SELF_PROTECT_LOCAL(dcontext, READONLY);
EXITING_DR();
}
static bool
execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame,
sigcontext_t *sc_orig,
fragment_t *f _IF_CLIENT(byte *access_address))
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
/* we want to modify the sc in DR's frame */
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
kernel_sigset_t blocked;
/* Need to get xsp now before get new dcontext.
* This is the translated xsp, so we avoid PR 306410 (cleancall arg fault
* on dstack => handler run on dstack) that Windows hit.
*/
byte *xsp = get_sigstack_frame_ptr(dcontext, sig,
our_frame/* take xsp from (translated)
* interruption point */);
#ifdef CLIENT_INTERFACE
sigcontext_t sc_interrupted = *sc;
dr_signal_action_t action = send_signal_to_client(
dcontext, sig, our_frame, sc_orig, access_address, false /*not blocked*/, f);
if (!handle_client_action_from_cache(dcontext, sig, action, our_frame, sc_orig,
&sc_interrupted, false /*!blocked*/))
return false;
#else
if (info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n");
if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig, false)) {
/* if we haven't terminated, restore original (untranslated) sc
* on request.
* XXX i#1615: this doesn't restore SIMD regs, if client translated them!
*/
*get_sigcontext_from_rt_frame(our_frame) = *sc_orig;
}
return false;
}
ASSERT(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler != (handler_t)SIG_IGN &&
info->app_sigaction[sig]->handler != (handler_t)SIG_DFL);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_cache for signal %d\n", sig);
RSTATS_INC(num_signals);
/* now that we know it's not a client-involved fault, dump as app fault */
report_app_problem(dcontext, APPFAULT_FAULT, (byte *)sc->SC_XIP, (byte *)sc->SC_FP,
"\nSignal %d delivered to application handler.\n", sig);
LOG(THREAD, LOG_ASYNCH, 3, "\txsp is " PFX "\n", xsp);
/* copy frame to appropriate stack and convert to non-rt if necessary */
copy_frame_to_stack(dcontext, sig, our_frame, (void *)xsp, false /*!pending*/);
LOG(THREAD, LOG_ASYNCH, 3, "\tcopied frame from " PFX " to " PFX "\n", our_frame,
xsp);
sigcontext_t *app_sc = get_sigcontext_from_app_frame(info, sig, (void *)xsp);
/* Because of difficulties determining when/if a signal handler
* returns, we do what the kernel does: abandon all of our current
* state, copy what we might need to the handler frame if we come back,
* and then it's ok if the handler doesn't return.
* If it does, we start interpreting afresh when we see sigreturn().
* This routine assumes anything needed to return has been put in the
* frame (only needed for signals queued up while in dynamo), and goes
* ahead and trashes the current dcontext.
*/
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
/* add to set of blocked signals those in sigaction mask */
blocked = info->app_sigaction[sig]->mask;
/* SA_NOMASK says whether to block sig itself or not */
if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0)
kernel_sigaddset(&blocked, sig);
set_blocked(dcontext, &blocked, false /*relative: OR these in*/);
/* Doesn't matter what most app registers are, signal handler doesn't
* expect anything except the frame on the stack. We do need to set xsp,
* only because if app wants special signal stack we need to point xsp
* there. (If no special signal stack, this is a nop.)
*/
sc->SC_XSP = (ptr_uint_t)xsp;
/* Set up args to handler: int sig, kernel_siginfo_t *siginfo,
* kernel_ucontext_t *ucxt.
*/
#ifdef X86_64
sc->SC_XDI = sig;
sc->SC_XSI = (reg_t) & ((sigframe_rt_t *)xsp)->info;
sc->SC_XDX = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
#elif defined(AARCHXX)
sc->SC_R0 = sig;
if (IS_RT_FOR_APP(info, sig)) {
sc->SC_R1 = (reg_t) & ((sigframe_rt_t *)xsp)->info;
sc->SC_R2 = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
}
if (sig_has_restorer(info, sig))
sc->SC_LR = (reg_t)info->app_sigaction[sig]->restorer;
else
sc->SC_LR = (reg_t)dynamorio_sigreturn;
# ifndef AARCH64
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
#endif
/* Set our sigreturn context (NOT for the app: we already copied the
* translated context to the app stack) to point to fcache_return!
* Then we'll go back through kernel, appear in fcache_return,
* and go through d_r_dispatch & interp, without messing up DR stack.
*/
transfer_from_sig_handler_to_fcache_return(
dcontext, uc, app_sc, sig,
/* Make sure handler is next thing we execute */
(app_pc)SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]),
(linkstub_t *)get_asynch_linkstub(), true);
if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
/* clear handler now -- can't delete memory since sigreturn,
* others may look at sigaction struct, so we just set to default
*/
info->app_sigaction[sig]->handler = (handler_t)SIG_DFL;
}
LOG(THREAD, LOG_ASYNCH, 3, "\tset next_tag to handler " PFX ", xsp to " PFX "\n",
SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]), xsp);
return true;
}
static bool
execute_handler_from_dispatch(dcontext_t *dcontext, int sig)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
byte *xsp = get_sigstack_frame_ptr(dcontext, sig, NULL);
sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame);
priv_mcontext_t *mcontext = get_mcontext(dcontext);
sigcontext_t *sc;
kernel_ucontext_t *uc;
kernel_sigset_t blocked;
#ifdef CLIENT_INTERFACE
dr_signal_action_t action;
#else
if (info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) {
LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n");
execute_default_from_dispatch(dcontext, sig, frame);
return true;
}
ASSERT(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler != (handler_t)SIG_IGN &&
info->app_sigaction[sig]->handler != (handler_t)SIG_DFL);
#endif
LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_dispatch for signal %d\n", sig);
RSTATS_INC(num_signals);
/* modify the rtframe before copying to stack so we can pass final
* version to client, and propagate its mods
*/
uc = get_ucontext_from_rt_frame(frame);
sc = SIGCXT_FROM_UCXT(uc);
/* Because of difficulties determining when/if a signal handler
* returns, we do what the kernel does: abandon all of our current
* state, copy what we might need to the handler frame if we come back,
* and then it's ok if the handler doesn't return.
* If it does, we start interpreting afresh when we see sigreturn().
*/
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "original sigcontext " PFX ":\n", sc);
dump_sigcontext(dcontext, sc);
}
#endif
if (info->sigpending[sig]->use_sigcontext) {
LOG(THREAD, LOG_ASYNCH, 2,
"%s: using sigcontext, not mcontext (syscall restart)\n", __FUNCTION__);
} else {
/* copy currently-interrupted-context to frame's context, so we can
* abandon the currently-interrupted context.
*/
mcontext_to_ucontext(uc, mcontext);
}
/* Sigreturn needs the target ISA mode to be set in the T bit in cpsr.
* Since we came from d_r_dispatch, the post-signal target's mode is in dcontext.
*/
IF_ARM(set_pc_mode_in_cpsr(sc, dr_get_isa_mode(dcontext)));
/* mcontext does not contain fp or mmx or xmm state, which may have
* changed since the frame was created (while finishing up interrupted
* fragment prior to returning to d_r_dispatch). Since DR does not touch
* this state except for xmm on x64, we go ahead and copy the
* current state into the frame, and then touch up xmm for x64.
*/
/* FIXME: should this be done for all pending as soon as reach
* d_r_dispatch? what if get two asynch inside same frag prior to exiting
* cache? have issues with fpstate, but also prob with next_tag? FIXME
*/
/* FIXME: we should clear fpstate for app handler itself as that's
* how our own handler is executed.
*/
#if defined(LINUX) && defined(X86)
ASSERT(sc->fpstate != NULL); /* not doing i#641 yet */
save_fpstate(dcontext, frame);
#endif /* LINUX && X86 */
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "new sigcontext " PFX ":\n", sc);
dump_sigcontext(dcontext, sc);
LOG(THREAD, LOG_ASYNCH, 3, "\n");
}
#endif
/* FIXME: other state? debug regs?
* if no syscall allowed between master_ (when frame created) and
* receiving, then don't have to worry about debug regs, etc.
* check for syscall when record pending, if it exists, try to
* receive in pre_system_call or something? what if ignorable? FIXME!
*/
if (!info->sigpending[sig]->use_sigcontext) {
/* for the pc we want the app pc not the cache pc */
sc->SC_XIP = (ptr_uint_t)dcontext->next_tag;
LOG(THREAD, LOG_ASYNCH, 3, "\tset frame's eip to " PFX "\n", sc->SC_XIP);
}
#ifdef CLIENT_INTERFACE
sigcontext_t sc_interrupted = *sc;
action = send_signal_to_client(dcontext, sig, frame, NULL,
info->sigpending[sig]->access_address,
false /*not blocked*/, NULL);
/* in order to pass to the client, we come all the way here for signals
* the app has no handler for
*/
if (action == DR_SIGNAL_REDIRECT) {
/* send_signal_to_client copied mcontext into frame's sc */
priv_mcontext_t *mcontext = get_mcontext(dcontext);
ucontext_to_mcontext(mcontext, uc);
dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP);
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
mcontext->pc = dcontext->next_tag;
sig_full_cxt_t sc_interrupted_full = { &sc_interrupted, NULL /*not provided*/ };
if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT, sc_interrupted_full,
NULL, NULL, dcontext->next_tag, mcontext->xsp,
osc_empty, mcontext, sig))
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
return true; /* don't try another signal */
} else if (action == DR_SIGNAL_SUPPRESS ||
(info->app_sigaction[sig] != NULL &&
info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n",
(action == DR_SIGNAL_SUPPRESS) ? "client suppressing signal"
: "app signal handler is SIG_IGN");
return false;
} else if (action == DR_SIGNAL_BYPASS ||
(info->app_sigaction[sig] == NULL ||
info->app_sigaction[sig]->handler == (handler_t)SIG_DFL)) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n",
(action == DR_SIGNAL_BYPASS) ? "client forcing default"
: "app signal handler is SIG_DFL");
if (info->sigpending[sig]->use_sigcontext) {
/* after the default action we want to go to the sigcontext */
dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP);
ucontext_to_mcontext(get_mcontext(dcontext), uc);
IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
}
execute_default_from_dispatch(dcontext, sig, frame);
return true;
}
CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value");
#endif
/* now that we've made all our changes and given the client a
* chance to make changes, copy the frame to the appropriate stack
* location and convert to non-rt if necessary
*/
copy_frame_to_stack(dcontext, sig, frame, xsp, true /*pending*/);
/* now point at the app's frame */
sc = get_sigcontext_from_app_frame(info, sig, (void *)xsp);
ASSERT(info->app_sigaction[sig] != NULL);
/* add to set of blocked signals those in sigaction mask */
blocked = info->app_sigaction[sig]->mask;
/* SA_NOMASK says whether to block sig itself or not */
if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0)
kernel_sigaddset(&blocked, sig);
set_blocked(dcontext, &blocked, false /*relative: OR these in*/);
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
/* Doesn't matter what most app registers are, signal handler doesn't
* expect anything except the frame on the stack. We do need to set xsp.
*/
mcontext->xsp = (ptr_uint_t)xsp;
/* Set up args to handler: int sig, kernel_siginfo_t *siginfo,
* kernel_ucontext_t *ucxt.
*/
#ifdef X86_64
mcontext->xdi = sig;
mcontext->xsi = (reg_t) & ((sigframe_rt_t *)xsp)->info;
mcontext->xdx = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
#elif defined(AARCHXX)
mcontext->r0 = sig;
if (IS_RT_FOR_APP(info, sig)) {
mcontext->r1 = (reg_t) & ((sigframe_rt_t *)xsp)->info;
mcontext->r2 = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
}
if (sig_has_restorer(info, sig))
mcontext->lr = (reg_t)info->app_sigaction[sig]->restorer;
else
mcontext->lr = (reg_t)dynamorio_sigreturn;
#endif
#ifdef X86
/* Clear eflags DF (signal handler should match function entry ABI) */
mcontext->xflags &= ~EFLAGS_DF;
#endif
/* Make sure handler is next thing we execute */
dcontext->next_tag = canonicalize_pc_target(
dcontext, (app_pc)SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]));
if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
/* clear handler now -- can't delete memory since sigreturn,
* others may look at sigaction struct, so we just set to default
*/
info->app_sigaction[sig]->handler = (handler_t)SIG_DFL;
}
#ifdef CLIENT_INTERFACE
mcontext->pc = dcontext->next_tag;
sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ };
if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_full, NULL, NULL,
dcontext->next_tag, mcontext->xsp, osc_empty, mcontext,
sig))
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
#endif
LOG(THREAD, LOG_ASYNCH, 3, "\tset xsp to " PFX "\n", xsp);
return true;
}
/* The arg to SYS_kill, i.e., the signal number, should be in dcontext->sys_param0 */
/* This routine unblocks signals, but the caller must set the handler to default. */
static void
terminate_via_kill(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(dcontext == get_thread_private_dcontext());
    /* Ensure signal_thread_exit() will not re-block */
memset(&info->app_sigblocked, 0, sizeof(info->app_sigblocked));
/* FIXME PR 541760: there can be multiple thread groups and thus
* this may not exit all threads in the address space
*/
block_cleanup_and_terminate(
dcontext, SYS_kill,
/* Pass -pid in case main thread has exited
* in which case will get -ESRCH
*/
IF_VMX86(os_in_vmkernel_userworld() ? -(int)get_process_id() :) get_process_id(),
dcontext->sys_param0, true, 0, 0);
ASSERT_NOT_REACHED();
}
bool
is_currently_on_sigaltstack(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
byte *cur_esp;
GET_STACK_PTR(cur_esp);
return (cur_esp >= (byte *)info->sigstack.ss_sp &&
cur_esp < (byte *)info->sigstack.ss_sp + info->sigstack.ss_size);
}
static void
terminate_via_kill_from_anywhere(dcontext_t *dcontext, int sig)
{
dcontext->sys_param0 = sig; /* store arg to SYS_kill */
if (is_currently_on_sigaltstack(dcontext)) {
/* We can't clean up our sigstack properly when we're on it
* (i#1160) so we terminate on the dstack.
*/
call_switch_stack(dcontext, dcontext->dstack,
(void (*)(void *))terminate_via_kill, NULL /*!d_r_initstack */,
false /*no return */);
} else {
terminate_via_kill(dcontext);
}
ASSERT_NOT_REACHED();
}
/* xref os_request_fatal_coredump() */
void
os_terminate_via_signal(dcontext_t *dcontext, terminate_flags_t flags, int sig)
{
if (signal_is_interceptable(sig)) {
bool set_action = false;
#if defined(STATIC_LIBRARY) && defined(LINUX)
if (INTERNAL_OPTION(invoke_app_on_crash)) {
/* We come here for asserts. Faults already bypass this routine. */
dcontext_t *my_dc = get_thread_private_dcontext();
if (my_dc != NULL) {
thread_sig_info_t *info = (thread_sig_info_t *)my_dc->signal_field;
if (info != NULL && info->app_sigaction[sig] != NULL &&
IS_RT_FOR_APP(info, sig)) {
set_action = true;
sigaction_syscall(sig, info->app_sigaction[sig], NULL);
}
}
}
#endif
if (!set_action) {
DEBUG_DECLARE(bool res =)
set_default_signal_action(sig);
ASSERT(res);
}
}
if (TEST(TERMINATE_CLEANUP, flags)) {
/* we enter from several different places, so rewind until top-level kstat */
KSTOP_REWIND_UNTIL(thread_measured);
ASSERT(dcontext != NULL);
dcontext->sys_param0 = sig;
/* XXX: the comment in the else below implies some systems have SYS_kill
* of SIGSEGV w/ no handler on oneself actually return.
* cleanup_and_terminate won't return to us and will use global_do_syscall
* to invoke SYS_kill, which in debug will do an inf loop (good!) but
* in release will do SYS_exit_group -- oh well, the systems I'm testing
* on do an immediate exit.
*/
terminate_via_kill_from_anywhere(dcontext, sig);
} else {
/* general clean up is unsafe: just remove .1config file */
d_r_config_exit();
dynamorio_syscall(SYS_kill, 2, get_process_id(), sig);
/* We try both the SYS_kill and the immediate crash since on some platforms
* the SIGKILL is delayed and on others the *-1 is hanging(?): should investigate
*/
if (sig == SIGSEGV) /* make doubly-sure */
*((int *)PTR_UINT_MINUS_1) = 0;
while (true) {
/* in case signal delivery is delayed we wait...forever */
os_thread_yield();
}
}
ASSERT_NOT_REACHED();
}
static bool
execute_default_action(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig, bool from_dispatch, bool forged)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
sigcontext_t *sc = get_sigcontext_from_rt_frame(frame);
byte *pc = (byte *)sc->SC_XIP;
LOG(THREAD, LOG_ASYNCH, 3, "execute_default_action for signal %d\n", sig);
    /* Should only come here for signals we catch, or a signal with SA_ONESHOT
     * that didn't sigreturn.
*/
ASSERT(info->we_intercept[sig] ||
(info->app_sigaction[sig]->flags & SA_ONESHOT) != 0);
if (info->app_sigaction[sig] != NULL &&
(info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
if (!info->we_intercept[sig]) {
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
}
/* FIXME PR 205310: we can't always perfectly emulate the default
* behavior. To execute the default action, we have to un-register our
* handler, if we have one, for signals whose default action is not
* ignore or that will just be re-raised upon returning to the
* interrupted context -- FIXME: are any of the ignores repeated?
* SIGURG?
*
* If called from execute_handler_from_cache(), our master_signal_handler()
* is going to return directly to the translated context: which means we
* go native to re-execute the instr, which if it does in fact generate
* the signal again means we have a nice transparent core dump.
*
* If called from execute_handler_from_dispatch(), we need to generate
* the signal ourselves.
*/
if (default_action[sig] != DEFAULT_IGNORE) {
DEBUG_DECLARE(bool ok =)
set_default_signal_action(sig);
ASSERT(ok);
/* FIXME: to avoid races w/ shared handlers should set a flag to
* prevent another thread from re-enabling.
* Perhaps worse: what if this signal arrives for another thread
* in the meantime (and the default is not terminate)?
*/
if (info->shared_app_sigaction) {
LOG(THREAD, LOG_ASYNCH, 1,
"WARNING: having to install SIG_DFL for thread " TIDFMT ", but will be "
"shared!\n",
d_r_get_thread_id());
}
if (default_action[sig] == DEFAULT_TERMINATE ||
default_action[sig] == DEFAULT_TERMINATE_CORE) {
report_app_problem(dcontext, APPFAULT_CRASH, pc, (byte *)sc->SC_FP,
"\nSignal %d delivered to application as default "
"action.\n",
sig);
/* App may call sigaction to set handler SIG_DFL (unnecessary but legal),
* in which case DR will put a handler in info->app_sigaction[sig].
* We must clear it, otherwise, signal_thread_exit may cleanup the
* handler and set it to SIG_IGN instead.
*/
if (info->app_sigaction[sig] != NULL) {
ASSERT(info->we_intercept[sig]);
handler_free(dcontext, info->app_sigaction[sig],
sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
/* N.B.: we don't have to restore our handler because the
* default action is for the process (entire thread group for NPTL) to die!
*/
if (from_dispatch || can_always_delay[sig] || forged ||
is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) {
/* This must have come from SYS_kill rather than raised by
* a faulting instruction. Thus we can't go re-execute the
* instr in order to re-raise the signal (if from_dispatch,
* we delayed and can't re-execute anyway). Instead we
* re-generate via SYS_kill. An alternative, if we don't
* care about generating a core dump, is to use SYS_exit
* and pass the right exit code to indicate the signal
* number: that would avoid races w/ the sigaction.
*
* FIXME: should have app make the syscall to get a more
* transparent core dump!
*/
LOG(THREAD, LOG_ASYNCH, 1, "Terminating via kill\n");
if (!from_dispatch && !forged)
KSTOP_NOT_MATCHING_NOT_PROPAGATED(fcache_default);
KSTOP_NOT_MATCHING_NOT_PROPAGATED(dispatch_num_exits);
if (is_couldbelinking(dcontext)) /* won't be for SYS_kill (i#1159) */
enter_nolinking(dcontext, NULL, false);
/* we could be on sigstack so call this version: */
terminate_via_kill_from_anywhere(dcontext, sig);
ASSERT_NOT_REACHED();
} else {
/* We assume that re-executing the interrupted instr will
* re-raise the fault. We could easily be wrong:
* xref PR 363811 infinite loop due to memory we
* thought was unreadable and thus thought would raise
* a signal; xref PR 368277 to improve is_sys_kill(), and the
* "forged" parameter that puts us in the if() above.
* FIXME PR 205310: we should check whether we come out of
* the cache when we expected to terminate!
*
* An alternative is to abandon transparent core dumps and
* do the same explicit SYS_kill we do for from_dispatch.
* That would let us clean up DR as well.
* FIXME: currently we do not clean up DR for a synchronous
* signal death, but we do for asynch.
*/
/* i#552: cleanup and raise client exit event */
int instr_sz = 0;
thread_sig_info_t *info;
                /* We are on the sigstack now, so set the stored sigstack pointer to
                 * NULL to avoid it being freed during process exit cleanup.
*/
info = (thread_sig_info_t *)dcontext->signal_field;
info->sigstack.ss_sp = NULL;
/* We enter from several different places, so rewind until
* top-level kstat.
*/
KSTOP_REWIND_UNTIL(thread_measured);
/* We try to raise the same signal in app's context so a correct
* coredump can be generated. However, the client might change
* the code in a way that the corresponding app code won't
* raise the signal, so we first check if the app instr is the
* same as instr in the cache, and raise the signal (by return).
* Otherwise, we kill the process instead.
* XXX: if the PC is unreadable we'll just crash here...should check
* for readability safely.
*/
ASSERT(sc_orig != NULL);
instr_sz = decode_sizeof(dcontext, (byte *)sc_orig->SC_XIP,
NULL _IF_X86_64(NULL));
if (instr_sz != 0 &&
pc != NULL && /* avoid crash on xl8 failure (i#1699) */
instr_sz == decode_sizeof(dcontext, pc, NULL _IF_X86_64(NULL)) &&
memcmp(pc, (byte *)sc_orig->SC_XIP, instr_sz) == 0) {
                    /* The app instr matches the cache instr; clean up and raise
                     * the signal in the app context.
*/
LOG(THREAD, LOG_ASYNCH, 1, "Raising signal by re-executing\n");
dynamo_process_exit();
/* we cannot re-enter the cache, which is freed by now */
ASSERT(!from_dispatch);
return false;
} else {
/* mismatch, cleanup and terminate */
LOG(THREAD, LOG_ASYNCH, 1, "Terminating via kill\n");
dcontext->sys_param0 = sig;
terminate_via_kill(dcontext);
ASSERT_NOT_REACHED();
}
}
} else {
/* FIXME PR 297033: in order to intercept DEFAULT_STOP /
* DEFAULT_CONTINUE signals, we need to set sigcontext to point
* to some kind of regain-control routine, so that when our
* thread gets to run again we can reset our handler. So far
* we have no signals that fall here that we intercept.
*/
CLIENT_ASSERT(false, "STOP/CONT signals not supported");
}
#if defined(DEBUG) && defined(INTERNAL)
if (sig == SIGSEGV && !dynamo_exited) {
/* pc should be an app pc at this point (it was translated) --
* check for bad cases here
*/
if (safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP)) {
fragment_t wrapper;
fragment_t *f;
LOG(THREAD, LOG_ALL, 1,
"Received SIGSEGV at pc " PFX " in thread " TIDFMT "\n", pc,
d_r_get_thread_id());
f = fragment_pclookup(dcontext, pc, &wrapper);
if (f)
disassemble_fragment(dcontext, f, false);
ASSERT_NOT_REACHED();
} else if (in_generated_routine(dcontext, pc)) {
LOG(THREAD, LOG_ALL, 1,
"Received SIGSEGV at generated non-code-cache pc " PFX "\n", pc);
ASSERT_NOT_REACHED();
}
}
#endif
}
/* now continue at the interruption point and re-raise the signal */
return true;
}
static bool
execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
sigcontext_t *sc_orig, bool forged)
{
return execute_default_action(dcontext, sig, frame, sc_orig, false, forged);
}
static void
execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
execute_default_action(dcontext, sig, frame, NULL, true, false);
}
void
receive_pending_signal(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
sigpending_t *temp;
int sig;
LOG(THREAD, LOG_ASYNCH, 3, "receive_pending_signal\n");
if (info->interrupted != NULL) {
relink_interrupted_fragment(dcontext, info);
}
/* grab first pending signal
* XXX: start with real-time ones?
*/
    /* "Lock" the array to prevent a new signal that interrupts this bit of
     * code from prepending to or deleting from the array while we're accessing it.
*/
info->accessing_sigpending = true;
/* barrier to prevent compiler from moving the above write below the loop */
__asm__ __volatile__("" : : : "memory");
if (!info->multiple_pending_units &&
info->num_pending + 2 >= DYNAMO_OPTION(max_pending_signals)) {
/* We're close to the limit: proactively get a new unit while it's safe
* to acquire locks. We do that by pushing over the edge.
* We assume that filling up a 2nd unit is too pathological to plan for.
*/
info->multiple_pending_units = true;
SYSLOG_INTERNAL_WARNING("many pending signals: asking for 2nd special unit");
sigpending_t *temp1 = special_heap_alloc(info->sigheap);
sigpending_t *temp2 = special_heap_alloc(info->sigheap);
sigpending_t *temp3 = special_heap_alloc(info->sigheap);
special_heap_free(info->sigheap, temp1);
special_heap_free(info->sigheap, temp2);
special_heap_free(info->sigheap, temp3);
}
for (sig = 1; sig <= MAX_SIGNUM; sig++) {
if (info->sigpending[sig] != NULL) {
bool executing = true;
/* We do not re-check whether blocked if it was unblocked at
* receive time, to properly handle sigsuspend (i#1340).
*/
if (!info->sigpending[sig]->unblocked &&
kernel_sigismember(&info->app_sigblocked, sig)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal %d is blocked!\n", sig);
continue;
}
LOG(THREAD, LOG_ASYNCH, 3, "\treceiving signal %d\n", sig);
/* execute_handler_from_dispatch()'s call to copy_frame_to_stack() is
* allowed to remove the front entry from info->sigpending[sig] and
* jump to d_r_dispatch.
*/
executing = execute_handler_from_dispatch(dcontext, sig);
temp = info->sigpending[sig];
info->sigpending[sig] = temp->next;
special_heap_free(info->sigheap, temp);
info->num_pending--;
/* only one signal at a time! */
if (executing) {
/* Make negative so our fcache_enter check makes progress but
* our C code still considers there to be pending signals.
*/
dcontext->signals_pending = -1;
break;
}
}
}
/* barrier to prevent compiler from moving the below write above the loop */
__asm__ __volatile__("" : : : "memory");
info->accessing_sigpending = false;
/* we only clear this on a call to us where we find NO pending signals */
if (sig > MAX_SIGNUM) {
LOG(THREAD, LOG_ASYNCH, 3, "\tclearing signals_pending flag\n");
dcontext->signals_pending = 0;
}
}
/* Returns false if should NOT issue syscall. */
bool
#ifdef LINUX
handle_sigreturn(dcontext_t *dcontext, bool rt)
#else
handle_sigreturn(dcontext_t *dcontext, void *ucxt_param, int style)
#endif
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */
kernel_ucontext_t *ucxt = NULL;
int sig = 0;
app_pc next_pc;
/* xsp was put in mcontext prior to pre_system_call() */
reg_t xsp = get_mcontext(dcontext)->xsp;
#ifdef MACOS
bool rt = true;
#endif
LOG(THREAD, LOG_ASYNCH, 3, "%ssigreturn()\n", rt ? "rt_" : "");
LOG(THREAD, LOG_ASYNCH, 3, "\txsp is " PFX "\n", xsp);
#ifdef PROGRAM_SHEPHERDING
/* if (!sig_has_restorer, region was never added to exec list,
* allowed as pattern only and kicked off at first write via
* selfmod detection or otherwise if vsyscall, so no worries
* about having to remove it here
*/
#endif
/* The easiest way to set all the non-GPR state that DR does not separately
* preserve is to actually execute the sigreturn syscall, so we set up to do
* that. We do not want to change DR's signal state, however, so we set it
* back to DR's values after processing the state for the app.
*/
kernel_sigset_t our_mask;
sigprocmask_syscall(SIG_SETMASK, NULL, &our_mask, sizeof(our_mask));
/* get sigframe: it's the top thing on the stack, except the ret
* popped off pretcode.
* WARNING: handler for tcsh's window_change (SIGWINCH) clobbers its
* signal # arg, so don't use frame->sig! (kernel doesn't look at sig
* so app can get away with it)
*/
if (rt) {
#ifdef LINUX
sigframe_rt_t *frame = (sigframe_rt_t *)(xsp IF_X86(-sizeof(char *)));
/* use si_signo instead of sig, less likely to be clobbered by app */
sig = frame->info.si_signo;
# ifdef X86_32
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n", sig,
frame->sig);
if (frame->sig != sig)
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n");
# endif
sc = get_sigcontext_from_app_frame(info, sig, (void *)frame);
ucxt = &frame->uc;
#elif defined(MACOS)
/* The initial frame fields on the stack are messed up due to
* params to handler from tramp, so use params to syscall.
* XXX: we don't have signal # though: so we have to rely on app
* not clobbering the sig param field.
*/
sig = *(int *)xsp;
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d\n", sig);
ucxt = (kernel_ucontext_t *)ucxt_param;
if (ucxt == NULL) {
/* On Mac the kernel seems to store state on whether the process is
* on the altstack, so longjmp calls _sigunaltstack() which issues a
* sigreturn syscall telling the kernel about the altstack change,
* with a NULL context.
*/
            LOG(THREAD, LOG_ASYNCH, 3, "\tsigunaltstack sigreturn: no context\n");
return true;
}
sc = SIGCXT_FROM_UCXT(ucxt);
#endif
ASSERT(sig > 0 && sig <= MAX_SIGNUM && IS_RT_FOR_APP(info, sig));
/* Re-set sigstack from the value stored in the frame. Silently ignore failure,
* just like the kernel does.
*/
uint ignored;
/* The kernel checks for being on the stack *after* swapping stacks, so pass
* sc->SC_XSP as the current stack.
*/
handle_sigaltstack(dcontext, &ucxt->uc_stack, NULL, sc->SC_XSP, &ignored);
/* Restore DR's so sigreturn syscall won't change it. */
ucxt->uc_stack = info->sigstack;
/* FIXME: what if handler called sigaction and requested rt
* when itself was non-rt?
*/
/* Discard blocked signals, re-set from prev mask stored in frame. */
set_blocked(dcontext, SIGMASK_FROM_UCXT(ucxt), true /*absolute*/);
/* Restore DR's so sigreturn syscall won't change it. */
*SIGMASK_FROM_UCXT(ucxt) = our_mask;
}
#if defined(LINUX) && !defined(X64)
else {
/* FIXME: libc's restorer pops prior to calling sigreturn, I have
* no idea why, but kernel asks for xsp-8 not xsp-4...weird!
*/
kernel_sigset_t prevset;
sigframe_plain_t *frame = (sigframe_plain_t *)(xsp IF_X86(-8));
/* We don't trust frame->sig (app sometimes clobbers it), and for
* plain frame there's no other place that sig is stored,
         * so as a hack we added a new field (sig_noclobber) to our copy of the frame!
* FIXME: this means we won't support nonstandard use of SYS_sigreturn,
* e.g., as NtContinue, if frame didn't come from a real signal and so
* wasn't copied to stack by us.
*/
sig = frame->sig_noclobber;
LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n", sig,
IF_X86_ELSE(frame->sig, 0));
# ifdef X86_32
if (frame->sig != sig)
LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n");
# endif
ASSERT(sig > 0 && sig <= MAX_SIGNUM && !IS_RT_FOR_APP(info, sig));
sc = get_sigcontext_from_app_frame(info, sig, (void *)frame);
/* discard blocked signals, re-set from prev mask stored in frame */
prevset.sig[0] = frame->IF_X86_ELSE(sc.oldmask, uc.uc_mcontext.oldmask);
if (_NSIG_WORDS > 1) {
memcpy(&prevset.sig[1], &frame->IF_X86_ELSE(extramask, uc.sigset_ex),
sizeof(prevset.sig[1]));
}
# ifdef ARM
ucxt = &frame->uc; /* we leave ucxt NULL for x86: not needed there */
# endif
set_blocked(dcontext, &prevset, true /*absolute*/);
/* Restore DR's so sigreturn syscall won't change it. */
convert_rt_mask_to_nonrt(frame, &our_mask);
}
#endif /* LINUX */
/* Make sure we deliver pending signals that are now unblocked.
*/
check_signals_pending(dcontext, info);
/* if we were building a trace, kill it */
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n");
trace_abort(dcontext);
}
/* Defensively check for NULL.
* XXX i#3182: It did happen but it is not clear how.
*/
if (info->app_sigaction[sig] != NULL &&
TEST(SA_ONESHOT, info->app_sigaction[sig]->flags)) {
ASSERT(info->app_sigaction[sig]->handler == (handler_t)SIG_DFL);
if (!info->we_intercept[sig]) {
/* let kernel do default independent of us */
handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t));
info->app_sigaction[sig] = NULL;
}
}
ASSERT(!safe_is_in_fcache(dcontext, (app_pc)sc->SC_XIP, (byte *)sc->SC_XSP));
#ifdef CLIENT_INTERFACE
sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ };
get_mcontext(dcontext)->pc = dcontext->next_tag;
instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_RETURN, osc_empty, NULL,
get_mcontext(dcontext), (app_pc)sc->SC_XIP, sc->SC_XSP,
sc_full, NULL, sig);
#endif
#ifdef DEBUG
if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_ASYNCH) != 0) {
LOG(THREAD, LOG_ASYNCH, 3, "returning-to sigcontext " PFX ":\n", sc);
dump_sigcontext(dcontext, sc);
}
#endif
/* XXX i#1206: if we interrupted a non-ignorable syscall to run the app's
* handler, and we set up to restart the syscall, we'll come here with the
* translated syscall pc -- thus we can't distinguish from a signal interrupting
* the prior app instr. So we can't simply point at do_syscall and call
* set_at_syscall -- we have to re-interpret the syscall and re-run the
* pre-syscall handler. Hopefully all our pre-syscall handlers can handle that.
*/
/* set up for d_r_dispatch */
/* we have to use a different slot since next_tag ends up holding the do_syscall
* entry when entered from d_r_dispatch (we're called from
* pre_syscall, prior to entering cache)
*/
dcontext->asynch_target = canonicalize_pc_target(
dcontext, (app_pc)(sc->SC_XIP IF_ARM(| (TEST(EFLAGS_T, sc->SC_XFLAGS) ? 1 : 0))));
next_pc = dcontext->asynch_target;
#ifdef VMX86_SERVER
/* PR 404712: kernel only restores gp regs so we do it ourselves and avoid
* complexities of kernel's non-linux-like sigreturn semantics
*/
sig_full_cxt_t sc_full = { sc, NULL }; /* non-ARM so NULL ok */
sigcontext_to_mcontext(get_mcontext(dcontext), &sc_full, DR_MC_ALL);
#else
/* HACK to get eax put into mcontext AFTER do_syscall */
dcontext->next_tag = (app_pc)sc->IF_X86_ELSE(SC_XAX, SC_R0);
/* use special linkstub so we know why we came out of the cache */
sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t)get_asynch_linkstub();
/* set our sigreturn context to point to fcache_return */
/* We don't need PC_AS_JMP_TGT b/c the kernel uses EFLAGS_T for the mode */
sc->SC_XIP = (ptr_uint_t)fcache_return_routine(dcontext);
/* if we overlaid inner frame on nested signal, will end up with this
* error -- disable in release build since this is often app's fault (stack
* too small)
* FIXME: how make this transparent? what ends up happening is that we
* get a segfault when we start interpreting d_r_dispatch, we want to make it
* look like whatever would happen to the app...
*/
ASSERT((app_pc)sc->SC_XIP != next_pc);
# ifdef AARCHXX
set_stolen_reg_val(get_mcontext(dcontext), get_sigcxt_stolen_reg(sc));
set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr());
# ifdef AARCH64
/* On entry to the do_syscall gencode, we save X1 into TLS_REG1_SLOT.
     * Then the sigreturn redirects the flow to the fcache_return gencode,
     * which recovers the values of x0 and x1 from TLS slots 0 and 1.
*/
get_mcontext(dcontext)->r1 = sc->regs[1];
# else
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
# endif
#endif
LOG(THREAD, LOG_ASYNCH, 3, "set next tag to " PFX ", sc->SC_XIP to " PFX "\n",
next_pc, sc->SC_XIP);
return IF_VMX86_ELSE(false, true);
}
bool
is_signal_restorer_code(byte *pc, size_t *len)
{
/* is this a sigreturn pattern placed by kernel on the stack or vsyscall page?
* for non-rt frame:
* 0x58 popl %eax
* 0xb8 <sysnum> movl SYS_sigreturn, %eax
* 0xcd 0x80 int 0x80
* for rt frame:
* 0xb8 <sysnum> movl SYS_rt_sigreturn, %eax
* 0xcd 0x80 int 0x80
*/
/* optimized we only need two uint reads, but we have to do
* some little-endian byte-order reverses to get the right result
*/
#define reverse(x) \
((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \
(((x)&0xff000000) >> 24))
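    /* Illustrative note (added comment, not part of the detection logic):
     * reverse() swaps the byte order of a 32-bit value, e.g.,
     * reverse(0x12345678) == 0x78563412, which lets the little-endian
     * instruction bytes below be written as readable big-endian-style literals.
     */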
#ifdef MACOS
# define SYS_RT_SIGRET SYS_sigreturn
#else
# define SYS_RT_SIGRET SYS_rt_sigreturn
#endif
#ifndef X64
/* 58 b8 s4 s3 s2 s1 cd 80 */
static const uint non_rt_1w = reverse(0x58b80000 | (reverse(SYS_sigreturn) >> 16));
static const uint non_rt_2w = reverse((reverse(SYS_sigreturn) << 16) | 0xcd80);
#endif
/* b8 s4 s3 s2 s1 cd 80 XX */
static const uint rt_1w = reverse(0xb8000000 | (reverse(SYS_RT_SIGRET) >> 8));
static const uint rt_2w = reverse((reverse(SYS_RT_SIGRET) << 24) | 0x00cd8000);
/* test rt first as it's the most common
* only 7 bytes here so we ignore the last one (becomes msb since little-endian)
*/
if (*((uint *)pc) == rt_1w && (*((uint *)(pc + 4)) & 0x00ffffff) == rt_2w) {
if (len != NULL)
*len = 7;
return true;
}
#ifndef X64
if (*((uint *)pc) == non_rt_1w && *((uint *)(pc + 4)) == non_rt_2w) {
if (len != NULL)
*len = 8;
return true;
}
#endif
return false;
}
void
os_forge_exception(app_pc target_pc, dr_exception_type_t type)
{
/* PR 205136:
* We want to deliver now, and the caller expects us not to return.
* We have two alternatives:
* 1) Emulate stack frame, and call transfer_to_dispatch() for delivery. We
* may not know how to fill out every field of the frame (cr2, etc.). Plus,
* we have problems w/ default actions (PR 205310) but we have to solve
* those long-term anyway. We also have to create different frames based on
* whether app intercepts via rt or not.
* 2) Call SYS_tgkill from a special location that our handler can
* recognize and know it's a signal meant for the app and that the
* interrupted DR can be discarded. We'd then essentially repeat 1,
* but modifying the kernel-generated frame. We'd have to always
* intercept SIGILL.
* I'm going with #1 for now b/c the common case is simpler.
*/
dcontext_t *dcontext = get_thread_private_dcontext();
#if defined(LINUX) && defined(X86)
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
#endif
char frame_no_xstate[sizeof(sigframe_rt_t)];
sigframe_rt_t *frame = (sigframe_rt_t *)frame_no_xstate;
int sig;
dr_where_am_i_t cur_whereami = dcontext->whereami;
kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame);
sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
switch (type) {
case ILLEGAL_INSTRUCTION_EXCEPTION: sig = SIGILL; break;
case UNREADABLE_MEMORY_EXECUTION_EXCEPTION: sig = SIGSEGV; break;
case SINGLE_STEP_EXCEPTION: ASSERT_NOT_IMPLEMENTED(false); /* FIXME: i#2144 */
case IN_PAGE_ERROR_EXCEPTION: /* fall-through: Windows only */
default:
ASSERT_NOT_REACHED();
sig = SIGSEGV;
break;
}
LOG(GLOBAL, LOG_ASYNCH, 1, "os_forge_exception sig=%d\n", sig);
    /* Since we always delay delivery, we always want an rt frame. We'll convert
* to a plain frame on delivery.
*/
memset(frame, 0, sizeof(*frame));
frame->info.si_signo = sig;
/* Set si_code to match what would happen natively. We also need this to
* avoid the !is_sys_kill() check in record_pending_signal() to avoid an
* infinite loop (i#3171).
*/
frame->info.si_code = IF_LINUX_ELSE(SI_KERNEL, 0);
frame->info.si_addr = target_pc;
#ifdef X86_32
frame->sig = sig;
frame->pinfo = &frame->info;
frame->puc = (void *)&frame->uc;
#endif
#if defined(LINUX) && defined(X86)
/* We use a TLS buffer to avoid too much stack space here. */
sc->fpstate = (kernel_fpstate_t *)get_xstate_buffer(dcontext);
#endif
mcontext_to_ucontext(uc, get_mcontext(dcontext));
sc->SC_XIP = (reg_t)target_pc;
/* We'll fill in fpstate at delivery time.
* We fill in segment registers to their current values and assume they won't
* change and that these are the right values.
*
* FIXME i#2095: restore the app's segment register value(s).
*
* XXX: it seems to work w/o filling in the other state:
* I'm leaving cr2 and other fields all zero.
* If this gets problematic we could switch to approach #2.
*/
thread_set_segment_registers(sc);
#if defined(X86) && defined(LINUX)
if (sig_has_restorer(info, sig))
frame->pretcode = (char *)info->app_sigaction[sig]->restorer;
else
frame->pretcode = (char *)dynamorio_sigreturn;
#endif
/* We assume that we do not need to translate the context when forged.
* If we did, we'd move this below enter_nolinking() (and update
* record_pending_signal() to do the translation).
*/
record_pending_signal(dcontext, sig, &frame->uc, frame,
true /*forged*/
_IF_CLIENT(NULL));
/* For most callers this is not necessary and we only do it to match
* the Windows usage model: but for forging from our own handler,
* this is good b/c it resets us to the base of dstack.
*/
/* tell d_r_dispatch() why we're coming there */
dcontext->whereami = DR_WHERE_TRAMPOLINE;
KSTART(dispatch_num_exits);
set_last_exit(dcontext, (linkstub_t *)get_asynch_linkstub());
if (is_couldbelinking(dcontext))
enter_nolinking(dcontext, NULL, false);
transfer_to_dispatch(
dcontext, get_mcontext(dcontext),
cur_whereami != DR_WHERE_FCACHE && cur_whereami != DR_WHERE_SIGNAL_HANDLER
/*full_DR_state*/);
ASSERT_NOT_REACHED();
}
void
os_request_fatal_coredump(const char *msg)
{
/* To enable getting a coredump just make sure that rlimits are
* not preventing getting one, e.g. ulimit -c unlimited
*/
SYSLOG_INTERNAL_ERROR("Crashing the process deliberately for a core dump!");
os_terminate_via_signal(NULL, 0 /*no cleanup*/, SIGSEGV);
ASSERT_NOT_REACHED();
}
void
os_request_live_coredump(const char *msg)
{
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
vmk_request_live_coredump(msg);
return;
}
#endif
LOG(GLOBAL, LOG_ASYNCH, 1,
"LiveCoreDump unsupported (PR 365105). "
"Continuing execution without a core.\n");
return;
}
void
os_dump_core(const char *msg)
{
/* FIXME Case 3408: fork stack dump crashes on 2.6 kernel, so moving the getchar
* ahead to aid in debugging */
if (TEST(DUMPCORE_WAIT_FOR_DEBUGGER, dynamo_options.dumpcore_mask)) {
SYSLOG_INTERNAL_ERROR("looping so you can use gdb to attach to pid %s",
get_application_pid());
IF_CLIENT_INTERFACE(SYSLOG(SYSLOG_CRITICAL, WAITING_FOR_DEBUGGER, 2,
get_application_name(), get_application_pid()));
/* getchar() can hit our own vsyscall hook (from PR 212570); typically we
* want to attach and not continue anyway, so doing an infinite loop:
*/
while (true)
os_thread_yield();
}
if (DYNAMO_OPTION(live_dump)) {
os_request_live_coredump(msg);
}
if (TEST(DUMPCORE_INCLUDE_STACKDUMP, dynamo_options.dumpcore_mask)) {
/* fork, dump core, then use gdb to get a stack dump
* we can get into an infinite loop if there's a seg fault
* in the process of doing this -- so we have a do-once test,
* and if it failed we do the no-symbols dr callstack dump
*/
static bool tried_stackdump = false;
if (!tried_stackdump) {
tried_stackdump = true;
d_r_stackdump();
} else {
static bool tried_calldump = false;
if (!tried_calldump) {
tried_calldump = true;
dump_dr_callstack(STDERR);
}
}
}
if (!DYNAMO_OPTION(live_dump)) {
os_request_fatal_coredump(msg);
ASSERT_NOT_REACHED();
}
}
#ifdef RETURN_AFTER_CALL
bool
at_known_exception(dcontext_t *dcontext, app_pc target_pc, app_pc source_fragment)
{
/* There is a known exception in signal restorers and the Linux
     * dynamic symbol resolution.
* The latter we assume it is the only other recurring known exception,
* so the first time we pattern match to help make sure it is indeed
* _dl_runtime_resolve (since with LD_BIND_NOW it will never be called).
* After that we compare with the known value.
*/
static app_pc known_exception = 0;
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
LOG(THREAD, LOG_INTERP, 1, "RCT: testing for KNOWN exception " PFX " " PFX "\n",
target_pc, source_fragment);
    /* Check if this is a signal return.
     * FIXME: we should really get that from the frame itself.
     * Since we currently grab the restorer only when copying a frame,
     * this will work with nested signals only if they all have the same restorer
     * (I haven't seen restorers other than the one in libc).
     */
if (target_pc == info->signal_restorer_retaddr) {
LOG(THREAD, LOG_INTERP, 1,
"RCT: KNOWN exception this is a signal restorer --ok \n");
STATS_INC(ret_after_call_signal_restorer);
return true;
}
if (source_fragment == known_exception) {
LOG(THREAD, LOG_INTERP, 1,
"RCT: KNOWN exception again _dl_runtime_resolve --ok\n");
return true;
}
if (known_exception == 0) {
int ret_imm;
return at_dl_runtime_resolve_ret(dcontext, source_fragment, &ret_imm);
}
return false;
}
#endif /* RETURN_AFTER_CALL */
/***************************************************************************
* ITIMERS
*
* We support combining an app itimer with a DR itimer for each of the 3 types
* (PR 204556).
*/
static inline uint64
timeval_to_usec(struct timeval *t1)
{
return ((uint64)(t1->tv_sec)) * 1000000 + t1->tv_usec;
}
static inline void
usec_to_timeval(uint64 usec, struct timeval *t1)
{
t1->tv_sec = (long)usec / 1000000;
t1->tv_usec = (long)usec % 1000000;
}
static void
init_itimer(dcontext_t *dcontext, bool first)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int i;
ASSERT(info != NULL);
ASSERT(!info->shared_itimer); /* else inherit */
LOG(THREAD, LOG_ASYNCH, 2, "thread has private itimers%s\n",
os_itimers_thread_shared() ? " (for now)" : "");
if (os_itimers_thread_shared()) {
/* we have to allocate now even if no itimer is installed until later,
* so that all child threads point to the same data
*/
info->itimer = (thread_itimer_info_t(*)[NUM_ITIMERS])global_heap_alloc(
sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
} else {
/* for simplicity and parallel w/ shared we allocate proactively */
info->itimer = (thread_itimer_info_t(*)[NUM_ITIMERS])heap_alloc(
dcontext, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER));
}
memset(info->itimer, 0, sizeof(*info->itimer));
for (i = 0; i < NUM_ITIMERS; i++) {
ASSIGN_INIT_RECURSIVE_LOCK_FREE((*info->itimer)[i].lock, shared_itimer_lock);
}
if (first) {
/* see if app has set up an itimer before we were loaded */
struct itimerval prev;
int rc;
int which;
for (which = 0; which < NUM_ITIMERS; which++) {
rc = getitimer_syscall(which, &prev);
ASSERT(rc == SUCCESS);
(*info->itimer)[which].app.interval = timeval_to_usec(&prev.it_interval);
(*info->itimer)[which].app.value = timeval_to_usec(&prev.it_value);
}
}
}
/* Up to caller to hold lock for shared itimers */
static bool
set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info, bool enable)
{
struct itimerval val;
int rc;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
if (enable) {
LOG(THREAD, LOG_ASYNCH, 2,
"installing itimer %d interval=" INT64_FORMAT_STRING
", value=" INT64_FORMAT_STRING "\n",
which, (*info->itimer)[which].actual.interval,
(*info->itimer)[which].actual.value);
/* i#2907: we have no signal handlers until we start the app (i#2335)
* so we can't set up an itimer until then.
*/
ASSERT(dynamo_initialized);
ASSERT(!info->shared_itimer ||
self_owns_recursive_lock(&(*info->itimer)[which].lock));
usec_to_timeval((*info->itimer)[which].actual.interval, &val.it_interval);
usec_to_timeval((*info->itimer)[which].actual.value, &val.it_value);
} else {
LOG(THREAD, LOG_ASYNCH, 2, "disabling itimer %d\n", which);
memset(&val, 0, sizeof(val));
(*info->itimer)[which].actual.value = 0;
(*info->itimer)[which].actual.interval = 0;
}
rc = setitimer_syscall(which, &val, NULL);
return (rc == SUCCESS);
}
/* Caller should hold lock */
static bool
itimer_new_settings(dcontext_t *dcontext, int which, bool app_changed)
{
struct itimerval val;
bool res = true;
int rc;
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
ASSERT(!info->shared_itimer ||
self_owns_recursive_lock(&(*info->itimer)[which].lock));
    /* the general strategy is to set the actual value to the smaller,
     * update the larger on each signal, and when the larger becomes
     * smaller do a one-time swap for the remaining
     * (a worked numeric example follows this function)
     */
if ((*info->itimer)[which].dr.interval > 0 &&
((*info->itimer)[which].app.interval == 0 ||
(*info->itimer)[which].dr.interval < (*info->itimer)[which].app.interval))
(*info->itimer)[which].actual.interval = (*info->itimer)[which].dr.interval;
else
(*info->itimer)[which].actual.interval = (*info->itimer)[which].app.interval;
if ((*info->itimer)[which].actual.value > 0) {
if ((*info->itimer)[which].actual.interval == 0 &&
(*info->itimer)[which].dr.value == 0 &&
(*info->itimer)[which].app.value == 0) {
(*info->itimer)[which].actual.value = 0;
res = set_actual_itimer(dcontext, which, info, false /*disabled*/);
} else {
/* one of app or us has an in-flight timer which we should not interrupt.
* but, we already set the new requested value (for app or us), so we
* need to update the actual value so we subtract properly.
*/
rc = getitimer_syscall(which, &val);
ASSERT(rc == SUCCESS);
uint64 left = timeval_to_usec(&val.it_value);
if (!app_changed &&
(*info->itimer)[which].actual.value == (*info->itimer)[which].app.value)
(*info->itimer)[which].app.value = left;
if (app_changed &&
(*info->itimer)[which].actual.value == (*info->itimer)[which].dr.value)
(*info->itimer)[which].dr.value = left;
(*info->itimer)[which].actual.value = left;
}
} else {
if ((*info->itimer)[which].dr.value > 0 &&
((*info->itimer)[which].app.value == 0 ||
(*info->itimer)[which].dr.value < (*info->itimer)[which].app.value))
(*info->itimer)[which].actual.value = (*info->itimer)[which].dr.value;
else {
(*info->itimer)[which].actual.value = (*info->itimer)[which].app.value;
}
res = set_actual_itimer(dcontext, which, info, true /*enable*/);
}
return res;
}
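/* Worked example of the combining strategy above (hypothetical numbers,
 * for illustration only): suppose app.interval = 30ms and dr.interval = 10ms
 * for the same itimer type.  The actual kernel timer is armed with the
 * smaller value, 10ms.  On each alarm signal handle_alarm() subtracts
 * actual.value (10ms) from whichever side still has time remaining:
 *   1st fire: dr.value 10 -> 0 (DR callback runs), app.value 30 -> 20
 *   2nd fire: dr.value 10 -> 0 (DR callback runs), app.value 20 -> 10
 *   3rd fire: dr.value 10 -> 0 (DR callback runs), app.value 10 -> 0,
 *             so this alarm is also delivered to the app.
 * If only one of the two sides is active, actual simply mirrors that side.
 */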
bool
set_itimer_callback(dcontext_t *dcontext, int which, uint millisec,
void (*func)(dcontext_t *, priv_mcontext_t *),
void (*func_api)(dcontext_t *, dr_mcontext_t *))
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
bool rc;
if (which < 0 || which >= NUM_ITIMERS) {
CLIENT_ASSERT(false, "invalid itimer type");
return false;
}
if (func == NULL && func_api == NULL && millisec != 0) {
CLIENT_ASSERT(false, "invalid function");
return false;
}
ASSERT(info != NULL && info->itimer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
(*info->itimer)[which].dr.interval = ((uint64)millisec) * 1000;
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
(*info->itimer)[which].cb = func;
(*info->itimer)[which].cb_api = func_api;
if (!dynamo_initialized) {
/* i#2907: we have no signal handlers until we start the app (i#2335)
* so we can't set up an itimer until then. start_itimer() called
* from os_thread_under_dynamo() will enable it.
*/
LOG(THREAD, LOG_ASYNCH, 2, "delaying itimer until attach\n");
rc = true;
} else
rc = itimer_new_settings(dcontext, which, false /*us*/);
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
return rc;
}
uint
get_itimer_frequency(dcontext_t *dcontext, int which)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
uint ms = 0;
if (which < 0 || which >= NUM_ITIMERS) {
CLIENT_ASSERT(false, "invalid itimer type");
return 0;
}
ASSERT(info != NULL && info->itimer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
ms = (*info->itimer)[which].dr.interval / 1000;
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
return ms;
}
static int
signal_to_itimer_type(int sig)
{
if (sig == SIGALRM)
return ITIMER_REAL;
else if (sig == SIGVTALRM)
return ITIMER_VIRTUAL;
else if (sig == SIGPROF)
return ITIMER_PROF;
else
return -1;
}
static bool
alarm_signal_has_DR_only_itimer(dcontext_t *dcontext, int signal)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
int which = signal_to_itimer_type(signal);
if (which == -1)
return false;
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
bool DR_only =
((*info->itimer)[which].dr.value > 0 || (*info->itimer)[which].dr.interval > 0) &&
(*info->itimer)[which].app.value == 0 && (*info->itimer)[which].app.interval == 0;
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
return DR_only;
}
static bool
handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
int which = 0;
bool invoke_cb = false, pass_to_app = false, reset_timer_manually = false;
bool should_release_lock = false;
/* i#471: suppress alarms coming in after exit */
if (dynamo_exited)
return pass_to_app;
which = signal_to_itimer_type(sig);
ASSERT(which != -1);
LOG(THREAD, LOG_ASYNCH, 2, "received alarm %d @" PFX "\n", which,
SIGCXT_FROM_UCXT(ucxt)->SC_XIP);
/* This alarm could have interrupted an app thread making an itimer syscall,
* which is why we don't want to block on a lock here.
* It can't interrupt this same thread handling a prior alarm (b/c we block
* the signal in our handler). It could arrive in thread B while thread A
* is still handling a prior alarm if the alarm frequency is high and the
* processing is slow, which is why we split the locks to be per-itimer-type.
* We also avoid new thread setup code acquiring these itimer locks by using
* atomic increments instead for the refcounts. Xref i#2993.
*/
if (info->shared_itimer) {
#ifdef DEADLOCK_AVOIDANCE
/* i#2061: in debug build we can get an alarm while in deadlock handling
* code that holds innermost_lock. We just drop such alarms.
*/
if (OWN_MUTEX(&innermost_lock))
return pass_to_app;
#endif
if (self_owns_recursive_lock(&(*info->itimer)[which].lock)) {
/* What can we do? We just go ahead and hope conflicting writes work out.
* We don't re-acquire in case app was in middle of acquiring.
*/
} else {
#define ALARM_LOCK_MAX_TRIES 4
int i;
for (i = 0; i < ALARM_LOCK_MAX_TRIES; ++i) {
if (try_recursive_lock(&(*info->itimer)[which].lock)) {
should_release_lock = true;
break;
}
os_thread_yield();
}
if (!should_release_lock) {
/* Heuristic: if fail N times then assume interrupted lock routine
* while processing an app syscall (see above: we ruled out other
* scenarios). What can we do? Just continue and hope conflicting
* writes work out.
*/
}
}
}
if ((*info->itimer)[which].app.value > 0) {
/* Alarm could have been on its way when app value changed */
if ((*info->itimer)[which].app.value >= (*info->itimer)[which].actual.value) {
(*info->itimer)[which].app.value -= (*info->itimer)[which].actual.value;
LOG(THREAD, LOG_ASYNCH, 2, "\tapp value is now %d\n",
(*info->itimer)[which].app.value);
if ((*info->itimer)[which].app.value == 0) {
pass_to_app = true;
(*info->itimer)[which].app.value = (*info->itimer)[which].app.interval;
} else
reset_timer_manually = true;
}
}
if ((*info->itimer)[which].dr.value > 0) {
/* Alarm could have been on its way when DR value changed */
if ((*info->itimer)[which].dr.value >= (*info->itimer)[which].actual.value) {
(*info->itimer)[which].dr.value -= (*info->itimer)[which].actual.value;
LOG(THREAD, LOG_ASYNCH, 2, "\tdr value is now %d\n",
(*info->itimer)[which].dr.value);
if ((*info->itimer)[which].dr.value == 0) {
invoke_cb = true;
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
} else
reset_timer_manually = true;
}
}
/* for efficiency we let the kernel reset the value to interval if
* there's only one timer
*/
if (reset_timer_manually) {
(*info->itimer)[which].actual.value = 0;
itimer_new_settings(dcontext, which, true /*doesn't matter: actual.value==0*/);
} else
(*info->itimer)[which].actual.value = (*info->itimer)[which].actual.interval;
if (invoke_cb) {
/* invoke after setting new itimer value */
/* we save stack space by allocating superset dr_mcontext_t */
dr_mcontext_t dmc;
dr_mcontext_init(&dmc);
priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(&dmc);
ucontext_to_mcontext(mc, ucxt);
void (*cb)(dcontext_t *, priv_mcontext_t *) = (*info->itimer)[which].cb;
void (*cb_api)(dcontext_t *, dr_mcontext_t *) = (*info->itimer)[which].cb_api;
if (which == ITIMER_VIRTUAL && info->shared_itimer && should_release_lock) {
release_recursive_lock(&(*info->itimer)[which].lock);
should_release_lock = false;
}
if (cb != NULL) {
cb(dcontext, mc);
} else {
cb_api(dcontext, &dmc);
}
}
if (info->shared_itimer && should_release_lock)
release_recursive_lock(&(*info->itimer)[which].lock);
return pass_to_app;
}
/* Starts itimer if stopped, or increases refcount of existing itimer if already
* started. It is *not* safe to call this more than once for the same thread,
* since it will inflate the refcount and prevent cleanup.
*/
void
start_itimer(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
bool start = false;
if (info->shared_itimer) {
/* i#2993: We avoid acquiring the lock as an alarm signal can arrive during
* the lock routine (esp in debug build) and cause problems.
*/
int new_count =
atomic_add_exchange_int((volatile int *)info->shared_itimer_underDR, 1);
start = (new_count == 1);
} else
start = true;
if (start) {
/* Enable all DR itimers b/c at least one thread in this set of threads
* sharing itimers is under DR control
*/
int which;
LOG(THREAD, LOG_ASYNCH, 2, "starting DR itimers from thread " TIDFMT "\n",
d_r_get_thread_id());
for (which = 0; which < NUM_ITIMERS; which++) {
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
/* May have already been set up with the start delayed (i#2907). */
if ((*info->itimer)[which].dr.interval > 0) {
(*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval;
itimer_new_settings(dcontext, which, false /*!app*/);
}
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
}
}
}
/* Decrements the itimer refcount, and turns off the itimer once there are no
* more threads listening for it. It is not safe to call this more than once on
* the same thread.
*/
void
stop_itimer(dcontext_t *dcontext)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
bool stop = false;
if (info->shared_itimer) {
ASSERT(*info->shared_itimer_underDR > 0);
int new_count =
atomic_add_exchange_int((volatile int *)info->shared_itimer_underDR, -1);
stop = (new_count == 0);
} else
stop = true;
if (stop) {
/* Disable all DR itimers b/c this set of threads sharing this
* itimer is now completely native
*/
int which;
LOG(THREAD, LOG_ASYNCH, 2, "stopping DR itimers from thread " TIDFMT "\n",
d_r_get_thread_id());
for (which = 0; which < NUM_ITIMERS; which++) {
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
if ((*info->itimer)[which].dr.value > 0) {
(*info->itimer)[which].dr.value = 0;
if ((*info->itimer)[which].app.value > 0) {
(*info->itimer)[which].actual.interval =
(*info->itimer)[which].app.interval;
} else
set_actual_itimer(dcontext, which, info, false /*disable*/);
}
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
}
}
}
/* handle app itimer syscalls */
/* handle_pre_alarm also calls this function and passes NULL as prev_timer */
void
handle_pre_setitimer(dcontext_t *dcontext, int which, const struct itimerval *new_timer,
struct itimerval *prev_timer)
{
if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS)
return;
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
struct itimerval val;
if (d_r_safe_read(new_timer, sizeof(val), &val)) {
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
/* save a copy in case the syscall fails */
(*info->itimer)[which].app_saved = (*info->itimer)[which].app;
(*info->itimer)[which].app.interval = timeval_to_usec(&val.it_interval);
(*info->itimer)[which].app.value = timeval_to_usec(&val.it_value);
LOG(THREAD, LOG_ASYNCH, 2,
"app setitimer type=%d interval=" SZFMT " value=" SZFMT "\n", which,
(*info->itimer)[which].app.interval, (*info->itimer)[which].app.value);
itimer_new_settings(dcontext, which, true /*app*/);
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
}
}
void
handle_post_setitimer(dcontext_t *dcontext, bool success, int which,
const struct itimerval *new_timer, struct itimerval *prev_timer)
{
if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS) {
ASSERT(new_timer == NULL || !success);
return;
}
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
ASSERT(which >= 0 && which < NUM_ITIMERS);
if (!success && new_timer != NULL) {
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
/* restore saved pre-syscall settings */
(*info->itimer)[which].app = (*info->itimer)[which].app_saved;
itimer_new_settings(dcontext, which, true /*app*/);
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
}
if (success && prev_timer != NULL)
handle_post_getitimer(dcontext, success, which, prev_timer);
}
void
handle_post_getitimer(dcontext_t *dcontext, bool success, int which,
struct itimerval *cur_timer)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
ASSERT(info != NULL && info->itimer != NULL);
if (success) {
/* write succeeded for kernel but we're user and can have races */
struct timeval val;
DEBUG_DECLARE(bool ok;)
ASSERT(which >= 0 && which < NUM_ITIMERS);
ASSERT(cur_timer != NULL);
if (info->shared_itimer)
acquire_recursive_lock(&(*info->itimer)[which].lock);
usec_to_timeval((*info->itimer)[which].app.interval, &val);
IF_DEBUG(ok =)
safe_write_ex(&cur_timer->it_interval, sizeof(val), &val, NULL);
ASSERT(ok);
if (d_r_safe_read(&cur_timer->it_value, sizeof(val), &val)) {
            /* subtract the difference between last-asked-for value
             * and current value to reflect elapsed time
             * (a worked example follows this function)
             */
uint64 left = (*info->itimer)[which].app.value -
((*info->itimer)[which].actual.value - timeval_to_usec(&val));
usec_to_timeval(left, &val);
IF_DEBUG(ok =)
safe_write_ex(&cur_timer->it_value, sizeof(val), &val, NULL);
ASSERT(ok);
} else
ASSERT_NOT_REACHED();
if (info->shared_itimer)
release_recursive_lock(&(*info->itimer)[which].lock);
}
}
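/* Worked example of the getitimer adjustment above (hypothetical numbers,
 * for illustration only): say the app's virtual timer has app.value = 25ms
 * remaining, DR last armed the real timer with actual.value = 10ms, and the
 * kernel now reports 4ms left.  Then 10ms - 4ms = 6ms have elapsed since the
 * timer was armed, so the value written back to the app is 25ms - 6ms = 19ms.
 */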
/* handle app alarm syscall */
/* alarm uses the same itimer and could be defined in terms of setitimer */
void
handle_pre_alarm(dcontext_t *dcontext, unsigned int sec)
{
struct itimerval val;
val.it_interval.tv_usec = 0;
val.it_interval.tv_sec = 0;
val.it_value.tv_usec = 0;
val.it_value.tv_sec = sec;
handle_pre_setitimer(dcontext, ITIMER_REAL, &val, NULL);
}
void
handle_post_alarm(dcontext_t *dcontext, bool success, unsigned int sec)
{
/* alarm is always successful, so do nothing in post */
ASSERT(success);
return;
}
/***************************************************************************
* Internal DR communication
*/
typedef struct _sig_detach_info_t {
KSYNCH_TYPE *detached;
byte *sigframe_xsp;
#ifdef HAVE_SIGALTSTACK
stack_t *app_sigstack;
#endif
} sig_detach_info_t;
/* xsp is only set for X86 */
static void
notify_and_jmp_without_stack(KSYNCH_TYPE *notify_var, byte *continuation, byte *xsp)
{
if (ksynch_kernel_support()) {
/* Can't use dstack once we signal so in asm we do:
* futex/semaphore = 1;
* %xsp = xsp;
* dynamorio_condvar_wake_and_jmp(notify_var, continuation);
*/
#ifdef MACOS
ASSERT(sizeof(notify_var->sem) == 4);
#endif
#ifdef X86
# ifndef MACOS
/* i#2632: recent clang for 32-bit annoyingly won't do the right thing for
* "jmp dynamorio_condvar_wake_and_jmp" and leaves relocs so we ensure it's PIC.
* We do this first as it may end up clobbering a scratch reg like xax.
*/
void (*asm_jmp_tgt)() = dynamorio_condvar_wake_and_jmp;
asm("mov %0, %%" ASM_XDX : : "m"(asm_jmp_tgt));
# endif
asm("mov %0, %%" ASM_XAX : : "m"(notify_var));
asm("mov %0, %%" ASM_XCX : : "m"(continuation));
asm("mov %0, %%" ASM_XSP : : "m"(xsp));
# ifdef MACOS
asm("movl $1,4(%" ASM_XAX ")");
asm("jmp _dynamorio_condvar_wake_and_jmp");
# else
asm("movl $1,(%" ASM_XAX ")");
asm("jmp *%" ASM_XDX);
# endif
#elif defined(AARCHXX)
asm("ldr " ASM_R0 ", %0" : : "m"(notify_var));
asm("mov " ASM_R1 ", #1");
asm("str " ASM_R1 ",[" ASM_R0 "]");
asm("ldr " ASM_R1 ", %0" : : "m"(continuation));
asm("b dynamorio_condvar_wake_and_jmp");
#endif
} else {
ksynch_set_value(notify_var, 1);
#ifdef X86
asm("mov %0, %%" ASM_XSP : : "m"(xsp));
asm("mov %0, %%" ASM_XAX : : "m"(continuation));
asm("jmp *%" ASM_XAX);
#elif defined(AARCHXX)
asm("ldr " ASM_R0 ", %0" : : "m"(continuation));
asm(ASM_INDJMP " " ASM_R0);
#endif /* X86/ARM */
}
}
/* Go native from detach. This is executed on the app stack. */
static void
sig_detach_go_native(sig_detach_info_t *info)
{
byte *xsp = info->sigframe_xsp;
#ifdef HAVE_SIGALTSTACK
/* Restore the app signal stack, though sigreturn will overwrite this with the
* uc_stack in the frame's ucontext anyway (which we already set for the app).
*/
DEBUG_DECLARE(int rc =)
sigaltstack_syscall(info->app_sigstack, NULL);
ASSERT(rc == 0);
#endif
#ifdef X86
/* Skip pretcode */
xsp += sizeof(char *);
#endif
notify_and_jmp_without_stack(info->detached, (byte *)dynamorio_sigreturn, xsp);
ASSERT_NOT_REACHED();
}
/* Sets this (slave) thread to detach by directly returning from the signal. */
static void
sig_detach(dcontext_t *dcontext, sigframe_rt_t *frame, KSYNCH_TYPE *detached)
{
thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
byte *xsp;
sig_detach_info_t detach_info;
LOG(THREAD, LOG_ASYNCH, 1, "%s: detaching\n", __FUNCTION__);
/* Update the mask of the signal frame so that the later sigreturn will
* restore the app signal mask.
*/
memcpy(&frame->uc.uc_sigmask, &info->app_sigblocked, sizeof(info->app_sigblocked));
/* Copy the signal frame to the app stack.
* XXX: We live with the transparency risk of storing the signal frame on
* the app stack: we assume the app stack is writable where we need it to be,
* and that we're not clobbering any app data beyond TOS.
*/
xsp = get_sigstack_frame_ptr(dcontext, SUSPEND_SIGNAL, frame);
copy_frame_to_stack(dcontext, SUSPEND_SIGNAL, frame, xsp, false /*!pending*/);
#ifdef HAVE_SIGALTSTACK
/* Make sure the frame's sigstack reflects the app stack.
* copy_frame_to_stack() should have done this for us.
*/
ASSERT(((sigframe_rt_t *)xsp)->uc.uc_stack.ss_sp == info->app_sigstack.ss_sp);
#endif
/* Restore app segment registers. */
os_thread_not_under_dynamo(dcontext);
os_tls_thread_exit(dcontext->local_state);
#ifdef HAVE_SIGALTSTACK
/* We can't restore the app's sigstack here as that will invalidate the
* sigstack we're currently on.
*/
detach_info.app_sigstack = &info->app_sigstack;
#endif
detach_info.detached = detached;
detach_info.sigframe_xsp = xsp;
call_switch_stack(&detach_info, xsp, (void (*)(void *))sig_detach_go_native,
false /*free_initstack*/, false /*do not return*/);
ASSERT_NOT_REACHED();
}
/* Returns whether to pass on to app */
static bool
handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt, sigframe_rt_t *frame)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
kernel_sigset_t prevmask;
sig_full_cxt_t sc_full;
ASSERT(ostd != NULL);
if (ostd->terminate) {
/* PR 297902: exit this thread, without using the dstack */
/* For MacOS, we need a stack as 32-bit syscalls take args on the stack.
* We go ahead and use it for x86 too for simpler sysenter return.
* We don't have a lot of options: we're terminating, so we go ahead
* and use the app stack.
*/
byte *app_xsp;
if (IS_CLIENT_THREAD(dcontext))
app_xsp = (byte *)SIGCXT_FROM_UCXT(ucxt)->SC_XSP;
else
app_xsp = (byte *)get_mcontext(dcontext)->xsp;
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: exiting\n");
ASSERT(app_xsp != NULL);
notify_and_jmp_without_stack(&ostd->terminated, (byte *)dynamorio_sys_exit,
app_xsp);
ASSERT_NOT_REACHED();
return false;
}
if (!doing_detach && is_thread_currently_native(dcontext->thread_record) &&
!IS_CLIENT_THREAD(dcontext) IF_APP_EXPORTS(&&!dr_api_exit)) {
if (!sig_take_over(ucxt))
return false;
ASSERT_NOT_REACHED(); /* else, shouldn't return */
}
/* If suspend_count is 0, we are not trying to suspend this thread
* (os_thread_resume() may have already decremented suspend_count to 0, but
* os_thread_suspend() will not send a signal until this thread unsets
* ostd->suspended, so not having a lock around the suspend_count read is
* ok), so pass signal to app.
* If we are trying or have already suspended this thread, our own
* os_thread_suspend() will not send a 2nd suspend signal until we are
* completely resumed, so we can distinguish app uses of SUSPEND_SIGNAL. We
* can't have a race between the read and write of suspended_sigcxt b/c
* signals are blocked. It's fine to have a race and reorder the app's
* signal w/ DR's.
*/
if (ostd->suspend_count == 0)
return true; /* pass to app */
ASSERT(ostd->suspended_sigcxt == NULL);
/* XXX: we're not setting DR_WHERE_SIGNAL_HANDLER in enough places.
* It's trickier than other whereamis b/c we want to resume the
* prior whereami when we return from the handler, but there are
* complex control paths that do not always return.
* We try to at least do it for the ksynch_wait here.
*/
dr_where_am_i_t prior_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SIGNAL_HANDLER;
sig_full_initialize(&sc_full, ucxt);
ostd->suspended_sigcxt = &sc_full;
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: suspended now\n");
/* We cannot use mutexes here as we have interrupted DR at an
* arbitrary point! Thus we can't use the event_t routines.
* However, the existing synch and check above prevent any
* re-entrance here, and our cond vars target just a single thread,
* so we can get away w/o a mutex.
*/
/* Notify os_thread_suspend that it can now return, as this thread is
* officially suspended now and is ready for thread_{get,set}_mcontext.
*/
ASSERT(ksynch_get_value(&ostd->suspended) == 0);
ksynch_set_value(&ostd->suspended, 1);
ksynch_wake_all(&ostd->suspended);
/* We're sitting on our sigaltstack w/ all signals blocked. We're
* going to stay here but unblock all signals so we don't lose any
* delivered while we're waiting. We're at a safe enough point (now
* that we've set ostd->suspended: i#5779) to re-enter
* master_signal_handler(). We use a mutex in thread_{suspend,resume} to
* prevent our own re-suspension signal from arriving before we've
* re-blocked on the resume.
*/
sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), &prevmask,
sizeof(ucxt->uc_sigmask));
/* i#96/PR 295561: use futex(2) if available */
while (ksynch_get_value(&ostd->wakeup) == 0) {
/* Waits only if the wakeup flag is not set as 1. Return value
* doesn't matter because the flag will be re-checked.
*/
ksynch_wait(&ostd->wakeup, 0, 0);
if (ksynch_get_value(&ostd->wakeup) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: awake now\n");
/* re-block so our exit from master_signal_handler is not interrupted */
sigprocmask_syscall(SIG_SETMASK, &prevmask, NULL, sizeof(prevmask));
ostd->suspended_sigcxt = NULL;
/* Notify os_thread_resume that it can return now, which (assuming
* suspend_count is back to 0) means it's then safe to re-suspend.
*/
ksynch_set_value(&ostd->suspended, 0); /*reset prior to signalling os_thread_resume*/
ksynch_set_value(&ostd->resumed, 1);
ksynch_wake_all(&ostd->resumed);
dcontext->whereami = prior_whereami;
if (ostd->retakeover) {
ostd->retakeover = false;
sig_take_over(ucxt); /* shouldn't return for this case */
ASSERT_NOT_REACHED();
} else if (ostd->do_detach) {
ostd->do_detach = false;
sig_detach(dcontext, frame, &ostd->detached); /* no return */
ASSERT_NOT_REACHED();
}
return false; /* do not pass to app */
}
/* PR 206278: for try/except we need to save the signal mask */
void
dr_setjmp_sigmask(dr_jmp_buf_t *buf)
{
/* i#226/PR 492568: we rely on the kernel storing the prior mask in the
* signal frame, so we do not need to store it on every setjmp, which
* can be a performance hit.
*/
#ifdef DEBUG
sigprocmask_syscall(SIG_SETMASK, NULL, &buf->sigmask, sizeof(buf->sigmask));
#endif
}
/* i#61/PR 211530: nudge on Linux.
* Determines whether this is a nudge signal, and if so queues up a nudge,
* or is an app signal. Returns whether to pass the signal on to the app.
*/
static bool
handle_nudge_signal(dcontext_t *dcontext, kernel_siginfo_t *siginfo,
kernel_ucontext_t *ucxt)
{
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
nudge_arg_t *arg = (nudge_arg_t *)siginfo;
instr_t instr;
char buf[MAX_INSTR_LENGTH];
/* Distinguish a nudge from an app signal. An app using libc sigqueue()
* will never have its signal mistaken as libc does not expose the kernel_siginfo_t
* and always passes 0 for si_errno, so we're only worried beyond our
* si_code check about an app using a raw syscall that is deliberately
* trying to fool us.
* While there is a lot of padding space in kernel_siginfo_t, the kernel doesn't
* copy it through on SYS_rt_sigqueueinfo so we don't have room for any
* dedicated magic numbers. The client id could function as a magic
* number for client nudges, but I don't think we want to kill the app
* if an external nudger types the client id wrong.
*/
LOG(THREAD, LOG_ASYNCH, 2, "%s: sig=%d code=%d errno=%d\n", __FUNCTION__,
siginfo->si_signo, siginfo->si_code, siginfo->si_errno);
if (siginfo->si_signo !=
NUDGESIG_SIGNUM
/* PR 477454: remove the IF_NOT_VMX86 once we have nudge-arg support */
IF_NOT_VMX86(|| siginfo->si_code != SI_QUEUE || siginfo->si_errno == 0)) {
return true; /* pass to app */
}
#if defined(CLIENT_INTERFACE) && !defined(VMX86_SERVER)
DODEBUG({
if (TEST(NUDGE_GENERIC(client), arg->nudge_action_mask) &&
!is_valid_client_id(arg->client_id)) {
SYSLOG_INTERNAL_WARNING("received client nudge for invalid id=0x%x",
arg->client_id);
}
});
#endif
if (dynamo_exited || !dynamo_initialized || dcontext == NULL) {
/* Ignore the nudge: too early, or too late.
* Xref Windows handling of such cases in nudge.c: old case 5702, etc.
* We do this before the illegal-instr check b/c it's unsafe to decode
* if too early or too late.
*/
SYSLOG_INTERNAL_WARNING("too-early or too-late nudge: ignoring");
return false; /* do not pass to app */
}
/* As a further check, try to detect whether this was raised synchronously
* from a real illegal instr: though si_code for that should not be
* SI_QUEUE. It's possible a nudge happened to come at a bad instr before
* it faulted, or maybe the instr after a syscall or other wait spot is
* illegal, but we'll live with that risk.
*/
ASSERT(NUDGESIG_SIGNUM == SIGILL); /* else this check makes no sense */
instr_init(dcontext, &instr);
if (d_r_safe_read((byte *)sc->SC_XIP, sizeof(buf), buf) &&
(decode(dcontext, (byte *)buf, &instr) == NULL ||
/* check for ud2 (xref PR 523161) */
instr_is_undefined(&instr))) {
LOG(THREAD, LOG_ASYNCH, 2, "%s: real illegal instr @" PFX "\n", __FUNCTION__,
sc->SC_XIP);
DOLOG(2, LOG_ASYNCH,
{ disassemble_with_bytes(dcontext, (byte *)sc->SC_XIP, THREAD); });
instr_free(dcontext, &instr);
return true; /* pass to app */
}
instr_free(dcontext, &instr);
#ifdef VMX86_SERVER
/* Treat as a client nudge until we have PR 477454 */
if (siginfo->si_errno == 0) {
arg->version = NUDGE_ARG_CURRENT_VERSION;
arg->flags = 0;
arg->nudge_action_mask = NUDGE_GENERIC(client);
arg->client_id = 0;
arg->client_arg = 0;
}
#endif
LOG(THREAD, LOG_ASYNCH, 1,
"received nudge version=%u flags=0x%x mask=0x%x id=0x%08x "
"arg=0x" ZHEX64_FORMAT_STRING "\n",
arg->version, arg->flags, arg->nudge_action_mask, arg->client_id,
arg->client_arg);
SYSLOG_INTERNAL_INFO("received nudge mask=0x%x id=0x%08x arg=0x" ZHEX64_FORMAT_STRING,
arg->nudge_action_mask, arg->client_id, arg->client_arg);
/* We need to handle the nudge at a safe, nolinking spot */
if (safe_is_in_fcache(dcontext, (byte *)sc->SC_XIP, (byte *)sc->SC_XSP) &&
dcontext->interrupted_for_nudge == NULL) {
/* We unlink the interrupted fragment and skip any inlined syscalls to
* bound the nudge delivery time. If we already unlinked one we assume
* that's sufficient.
*/
fragment_t wrapper;
fragment_t *f = fragment_pclookup(dcontext, (byte *)sc->SC_XIP, &wrapper);
if (f != NULL) {
if (unlink_fragment_for_signal(dcontext, f, (byte *)sc->SC_XIP))
dcontext->interrupted_for_nudge = f;
}
}
/* No lock is needed since thread-private and this signal is blocked now */
nudge_add_pending(dcontext, arg);
return false; /* do not pass to app */
}
| 1 | 16,466 | This will still result in a safe_read_tls_magic on AMD in tls_thread_preinit(). | DynamoRIO-dynamorio | c |
@@ -561,5 +561,4 @@ public class TestJdbcCatalog {
String nsString = JdbcUtil.namespaceToString(ns);
Assert.assertEquals(ns, JdbcUtil.stringToNamespace(nsString));
}
-
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.jdbc;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.AssertHelpers;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SortOrder;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.hadoop.Util;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.transforms.Transform;
import org.apache.iceberg.transforms.Transforms;
import org.apache.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import static org.apache.iceberg.NullOrder.NULLS_FIRST;
import static org.apache.iceberg.SortDirection.ASC;
import static org.apache.iceberg.types.Types.NestedField.required;
public class TestJdbcCatalog {
static final Schema SCHEMA = new Schema(
required(1, "id", Types.IntegerType.get(), "unique ID"),
required(2, "data", Types.StringType.get())
);
static final PartitionSpec PARTITION_SPEC = PartitionSpec.builderFor(SCHEMA)
.bucket("data", 16)
.build();
static Configuration conf = new Configuration();
private static JdbcCatalog catalog;
private static String warehouseLocation;
@Rule
public TemporaryFolder temp = new TemporaryFolder();
File tableDir = null;
protected List<String> metadataVersionFiles(String location) {
return Stream.of(new File(location).listFiles())
.filter(file -> !file.isDirectory())
.map(File::getName)
.filter(fileName -> fileName.endsWith("metadata.json"))
.collect(Collectors.toList())
;
}
protected List<String> manifestFiles(String location) {
return Stream.of(new File(location).listFiles())
.filter(file -> !file.isDirectory())
.map(File::getName)
.filter(fileName -> fileName.endsWith(".avro"))
.collect(Collectors.toList())
;
}
@Before
public void setupTable() throws Exception {
this.tableDir = temp.newFolder();
tableDir.delete(); // created by table create
Map<String, String> properties = new HashMap<>();
properties.put(CatalogProperties.URI,
"jdbc:sqlite:file::memory:?ic" + UUID.randomUUID().toString().replace("-", ""));
properties.put(JdbcCatalog.PROPERTY_PREFIX + "username", "user");
properties.put(JdbcCatalog.PROPERTY_PREFIX + "password", "password");
warehouseLocation = this.tableDir.getAbsolutePath();
properties.put(CatalogProperties.WAREHOUSE_LOCATION, warehouseLocation);
catalog = new JdbcCatalog();
catalog.setConf(conf);
catalog.initialize("test_jdbc_catalog", properties);
}
@Test
public void testInitialize() {
Map<String, String> properties = new HashMap<>();
properties.put(CatalogProperties.WAREHOUSE_LOCATION, this.tableDir.getAbsolutePath());
properties.put(CatalogProperties.URI, "jdbc:sqlite:file::memory:?icebergDB");
JdbcCatalog jdbcCatalog = new JdbcCatalog();
jdbcCatalog.setConf(conf);
jdbcCatalog.initialize("test_jdbc_catalog", properties);
// second initialization should not fail even if tables are already created
jdbcCatalog.initialize("test_jdbc_catalog", properties);
jdbcCatalog.initialize("test_jdbc_catalog", properties);
}
@Test
public void testCreateTableBuilder() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
Table table = catalog.buildTable(tableIdent, SCHEMA)
.withPartitionSpec(PARTITION_SPEC)
.withProperties(null)
.withProperty("key1", "value1")
.withProperties(ImmutableMap.of("key2", "value2"))
.create();
Assert.assertEquals(SCHEMA.toString(), table.schema().toString());
Assert.assertEquals(1, table.spec().fields().size());
Assert.assertEquals("value1", table.properties().get("key1"));
Assert.assertEquals("value2", table.properties().get("key2"));
}
@Test
public void testCreateTableTxnBuilder() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
Transaction txn = catalog.buildTable(tableIdent, SCHEMA)
.withPartitionSpec(null)
.withProperty("key1", "testval1")
.createTransaction();
txn.commitTransaction();
Table table = catalog.loadTable(tableIdent);
Assert.assertEquals(SCHEMA.toString(), table.schema().toString());
Assert.assertTrue(table.spec().isUnpartitioned());
Assert.assertEquals("testval1", table.properties().get("key1"));
}
@Test
public void testReplaceTxnBuilder() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
final DataFile fileA = DataFiles.builder(PARTITION_SPEC)
.withPath("/path/to/data-a.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=0") // easy way to set partition data for now
.withRecordCount(2) // needs at least one record or else metrics will filter it out
.build();
Transaction createTxn = catalog.buildTable(tableIdent, SCHEMA)
.withPartitionSpec(PARTITION_SPEC)
.withProperty("key1", "value1")
.createOrReplaceTransaction();
createTxn.newAppend()
.appendFile(fileA)
.commit();
createTxn.commitTransaction();
Table table = catalog.loadTable(tableIdent);
Assert.assertNotNull(table.currentSnapshot());
Transaction replaceTxn = catalog.buildTable(tableIdent, SCHEMA)
.withProperty("key2", "value2")
.replaceTransaction();
replaceTxn.commitTransaction();
table = catalog.loadTable(tableIdent);
Assert.assertNull(table.currentSnapshot());
PartitionSpec v1Expected = PartitionSpec.builderFor(table.schema())
.alwaysNull("data", "data_bucket")
.withSpecId(1)
.build();
Assert.assertEquals("Table should have a spec with one void field",
v1Expected, table.spec());
Assert.assertEquals("value1", table.properties().get("key1"));
Assert.assertEquals("value2", table.properties().get("key2"));
}
@Test
public void testCreateTableDefaultSortOrder() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
Table table = catalog.createTable(tableIdent, SCHEMA, PARTITION_SPEC);
SortOrder sortOrder = table.sortOrder();
Assert.assertEquals("Order ID must match", 0, sortOrder.orderId());
Assert.assertTrue("Order must unsorted", sortOrder.isUnsorted());
}
@Test
public void testCreateTableCustomSortOrder() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
SortOrder order = SortOrder.builderFor(SCHEMA)
.asc("id", NULLS_FIRST)
.build();
Table table = catalog.buildTable(tableIdent, SCHEMA)
.withPartitionSpec(PARTITION_SPEC)
.withSortOrder(order)
.create();
SortOrder sortOrder = table.sortOrder();
Assert.assertEquals("Order ID must match", 1, sortOrder.orderId());
Assert.assertEquals("Order must have 1 field", 1, sortOrder.fields().size());
Assert.assertEquals("Direction must match ", ASC, sortOrder.fields().get(0).direction());
Assert.assertEquals("Null order must match ", NULLS_FIRST, sortOrder.fields().get(0).nullOrder());
Transform<?, ?> transform = Transforms.identity(Types.IntegerType.get());
Assert.assertEquals("Transform must match", transform, sortOrder.fields().get(0).transform());
}
@Test
public void testBasicCatalog() throws Exception {
TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
String metaLocation = catalog.defaultWarehouseLocation(testTable);
FileSystem fs = Util.getFs(new Path(metaLocation), conf);
Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
AssertHelpers.assertThrows("should throw exception", AlreadyExistsException.class,
"already exists", () ->
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned())
);
catalog.dropTable(testTable);
}
@Test
public void testCreateAndDropTableWithoutNamespace() throws Exception {
TableIdentifier testTable = TableIdentifier.of("tbl");
Table table = catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
Assert.assertEquals(table.schema().toString(), SCHEMA.toString());
Assert.assertEquals(catalog.name() + ".tbl", table.name());
String metaLocation = catalog.defaultWarehouseLocation(testTable);
FileSystem fs = Util.getFs(new Path(metaLocation), conf);
Assert.assertTrue(fs.isDirectory(new Path(metaLocation)));
catalog.dropTable(testTable, true);
}
@Test
public void testDefaultWarehouseLocation() throws Exception {
TableIdentifier testTable = TableIdentifier.of("tbl");
TableIdentifier testTable2 = TableIdentifier.of(Namespace.of("ns"), "tbl");
Assert.assertEquals(catalog.defaultWarehouseLocation(testTable),
warehouseLocation + "/" + testTable.name());
Assert.assertEquals(catalog.defaultWarehouseLocation(testTable2),
warehouseLocation + "/" + testTable2.namespace() + "/" + testTable2.name());
}
@Test
public void testConcurrentCommit() throws IOException {
TableIdentifier tableIdentifier = TableIdentifier.of("db", "table");
Table table = catalog.createTable(tableIdentifier, SCHEMA, PartitionSpec.unpartitioned());
// append file and commit!
String data = temp.newFile("data.parquet").getPath();
Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8);
DataFile dataFile = DataFiles.builder(PartitionSpec.unpartitioned())
.withPath(data)
.withFileSizeInBytes(10)
.withRecordCount(1)
.build();
table.newAppend().appendFile(dataFile).commit();
Assert.assertEquals(1, table.history().size());
catalog.dropTable(tableIdentifier);
data = temp.newFile("data2.parquet").getPath();
Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8);
DataFile dataFile2 = DataFiles.builder(PartitionSpec.unpartitioned())
.withPath(data)
.withFileSizeInBytes(10)
.withRecordCount(1)
.build();
AssertHelpers.assertThrows("Should fail", NoSuchTableException.class,
"Failed to load table", () -> table.newAppend().appendFile(dataFile2).commit()
);
}
@Test
public void testCommitHistory() throws IOException {
TableIdentifier testTable = TableIdentifier.of("db", "ns", "tbl");
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
Table table = catalog.loadTable(testTable);
String data = temp.newFile("data.parquet").getPath();
Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8);
DataFile dataFile = DataFiles.builder(PartitionSpec.unpartitioned())
.withPath(data)
.withFileSizeInBytes(10)
.withRecordCount(1)
.build();
table.newAppend().appendFile(dataFile).commit();
Assert.assertEquals(1, table.history().size());
data = temp.newFile("data2.parquet").getPath();
Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8);
dataFile = DataFiles.builder(PartitionSpec.unpartitioned())
.withPath(data)
.withFileSizeInBytes(10)
.withRecordCount(1)
.build();
table.newAppend().appendFile(dataFile).commit();
Assert.assertEquals(2, table.history().size());
data = temp.newFile("data3.parquet").getPath();
Files.write(Paths.get(data), new ArrayList<>(), StandardCharsets.UTF_8);
dataFile = DataFiles.builder(PartitionSpec.unpartitioned())
.withPath(data)
.withFileSizeInBytes(10)
.withRecordCount(1)
.build();
table.newAppend().appendFile(dataFile).commit();
Assert.assertEquals(3, table.history().size());
}
@Test
public void testDropTable() {
TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
TableIdentifier testTable2 = TableIdentifier.of("db", "ns1", "ns2", "tbl2");
catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
catalog.createTable(testTable2, SCHEMA, PartitionSpec.unpartitioned());
catalog.dropTable(testTable);
Assert.assertFalse(catalog.listTables(testTable.namespace()).contains(testTable));
catalog.dropTable(testTable2);
AssertHelpers.assertThrows("should throw exception", NoSuchNamespaceException.class,
"not exist", () -> catalog.listTables(testTable2.namespace())
);
Assert.assertFalse(catalog.dropTable(TableIdentifier.of("db", "tbl-not-exists")));
}
@Test
public void testRenameTable() {
TableIdentifier from = TableIdentifier.of("db", "tbl1");
TableIdentifier to = TableIdentifier.of("db", "tbl2-newtable");
catalog.createTable(from, SCHEMA, PartitionSpec.unpartitioned());
catalog.renameTable(from, to);
Assert.assertTrue(catalog.listTables(to.namespace()).contains(to));
Assert.assertFalse(catalog.listTables(to.namespace()).contains(from));
Assert.assertTrue(catalog.loadTable(to).name().endsWith(to.name()));
AssertHelpers.assertThrows("should throw exception", NoSuchTableException.class,
"Table does not exist", () ->
catalog.renameTable(TableIdentifier.of("db", "tbl-not-exists"), to)
);
// rename table to existing table name!
TableIdentifier from2 = TableIdentifier.of("db", "tbl2");
catalog.createTable(from2, SCHEMA, PartitionSpec.unpartitioned());
AssertHelpers.assertThrows("should throw exception", UncheckedSQLException.class,
"Failed to rename db.tbl2 to db.tbl2-newtable", () -> catalog.renameTable(from2, to)
);
}
@Test
public void testListTables() {
TableIdentifier tbl1 = TableIdentifier.of("db", "tbl1");
TableIdentifier tbl2 = TableIdentifier.of("db", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "tbl2", "subtbl2");
TableIdentifier tbl4 = TableIdentifier.of("db", "ns1", "tbl3");
TableIdentifier tbl5 = TableIdentifier.of("db", "metadata", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4, tbl5).forEach(t ->
catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned())
);
List<TableIdentifier> tbls1 = catalog.listTables(Namespace.of("db"));
Set<String> tblSet = Sets.newHashSet(tbls1.stream().map(TableIdentifier::name).iterator());
Assert.assertEquals(tblSet.size(), 2);
Assert.assertTrue(tblSet.contains("tbl1"));
Assert.assertTrue(tblSet.contains("tbl2"));
List<TableIdentifier> tbls2 = catalog.listTables(Namespace.of("db", "ns1"));
Assert.assertEquals(tbls2.size(), 1);
Assert.assertEquals("tbl3", tbls2.get(0).name());
AssertHelpers.assertThrows("should throw exception", NoSuchNamespaceException.class,
"does not exist", () -> catalog.listTables(Namespace.of("db", "ns1", "ns2")));
}
@Test
public void testCallingLocationProviderWhenNoCurrentMetadata() {
TableIdentifier tableIdent = TableIdentifier.of("ns1", "ns2", "table1");
Transaction create = catalog.newCreateTableTransaction(tableIdent, SCHEMA);
create.table().locationProvider(); // NPE triggered if not handled appropriately
create.commitTransaction();
Assert.assertEquals("1 table expected", 1, catalog.listTables(Namespace.of("ns1", "ns2")).size());
catalog.dropTable(tableIdent, true);
}
@Test
public void testExistingTableUpdate() {
TableIdentifier tableIdent = TableIdentifier.of("ns1", "ns2", "table1");
Transaction create = catalog.newCreateTableTransaction(tableIdent, SCHEMA);
create.table().locationProvider(); // NPE triggered if not handled appropriately
create.commitTransaction();
Table icebergTable = catalog.loadTable(tableIdent);
// add a column
icebergTable.updateSchema().addColumn("Coll3", Types.LongType.get()).commit();
icebergTable = catalog.loadTable(tableIdent);
    // Only 2 metadata version files should exist and no manifests should exist
Assert.assertEquals(2, metadataVersionFiles(icebergTable.location() + "/metadata/").size());
Assert.assertEquals(0, manifestFiles(icebergTable.location() + "/metadata/").size());
Assert.assertNotEquals(SCHEMA.asStruct(), icebergTable.schema().asStruct());
Assert.assertTrue(icebergTable.schema().asStruct().toString().contains("Coll3"));
}
@Test
public void testTableName() {
TableIdentifier tableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl");
catalog.buildTable(tableIdent, SCHEMA)
.withPartitionSpec(PARTITION_SPEC)
.create();
Table table = catalog.loadTable(tableIdent);
Assert.assertEquals("Name must match", catalog.name() + ".db.ns1.ns2.tbl", table.name());
TableIdentifier snapshotsTableIdent = TableIdentifier.of("db", "ns1", "ns2", "tbl", "snapshots");
Table snapshotsTable = catalog.loadTable(snapshotsTableIdent);
Assert.assertEquals(
"Name must match", catalog.name() + ".db.ns1.ns2.tbl.snapshots", snapshotsTable.name());
}
@Test
public void testListNamespace() {
TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
TableIdentifier tbl5 = TableIdentifier.of("db2", "metadata");
TableIdentifier tbl6 = TableIdentifier.of("tbl6");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4, tbl5, tbl6).forEach(t ->
catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned())
);
List<Namespace> nsp1 = catalog.listNamespaces(Namespace.of("db"));
Assert.assertEquals(nsp1.size(), 3);
Set<String> tblSet = Sets.newHashSet(nsp1.stream().map(Namespace::toString).iterator());
Assert.assertEquals(tblSet.size(), 3);
Assert.assertTrue(tblSet.contains("db.ns1"));
Assert.assertTrue(tblSet.contains("db.ns2"));
Assert.assertTrue(tblSet.contains("db.ns3"));
List<Namespace> nsp2 = catalog.listNamespaces(Namespace.of("db", "ns1"));
Assert.assertEquals(nsp2.size(), 1);
Assert.assertEquals("db.ns1.ns2", nsp2.get(0).toString());
List<Namespace> nsp3 = catalog.listNamespaces();
Set<String> tblSet2 = Sets.newHashSet(nsp3.stream().map(Namespace::toString).iterator());
System.out.println(tblSet2.toString());
Assert.assertEquals(tblSet2.size(), 3);
Assert.assertTrue(tblSet2.contains("db"));
Assert.assertTrue(tblSet2.contains("db2"));
Assert.assertTrue(tblSet2.contains(""));
List<Namespace> nsp4 = catalog.listNamespaces();
Set<String> tblSet3 = Sets.newHashSet(nsp4.stream().map(Namespace::toString).iterator());
Assert.assertEquals(tblSet3.size(), 3);
Assert.assertTrue(tblSet3.contains("db"));
Assert.assertTrue(tblSet3.contains("db2"));
Assert.assertTrue(tblSet3.contains(""));
AssertHelpers.assertThrows("Should fail to list namespace doesn't exist", NoSuchNamespaceException.class,
"Namespace does not exist", () -> catalog.listNamespaces(Namespace.of("db", "db2", "ns2")
));
}
@Test
public void testLoadNamespaceMeta() {
TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4).forEach(t ->
catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned())
);
Assert.assertTrue(catalog.loadNamespaceMetadata(Namespace.of("db")).containsKey("location"));
AssertHelpers.assertThrows("Should fail to load namespace doesn't exist",
NoSuchNamespaceException.class, "Namespace does not exist", () ->
catalog.loadNamespaceMetadata(Namespace.of("db", "db2", "ns2")));
}
@Test
public void testNamespaceExists() {
TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4).forEach(t ->
catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned())
);
Assert.assertTrue("Should true to namespace exist",
catalog.namespaceExists(Namespace.of("db", "ns1", "ns2")));
Assert.assertFalse("Should false to namespace doesn't exist",
catalog.namespaceExists(Namespace.of("db", "db2", "not_exist")));
}
@Test
public void testDropNamespace() {
AssertHelpers.assertThrows("Should fail to drop namespace doesn't exist", NoSuchNamespaceException.class,
"Namespace does not exist", () -> catalog.dropNamespace(Namespace.of("db", "ns1_not_exitss")));
TableIdentifier tbl0 = TableIdentifier.of("db", "ns1", "ns2", "tbl2");
TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "tbl1");
TableIdentifier tbl2 = TableIdentifier.of("db", "ns1", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
TableIdentifier tbl4 = TableIdentifier.of("db", "tbl");
Lists.newArrayList(tbl0, tbl1, tbl2, tbl3, tbl4).forEach(t ->
catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned())
);
AssertHelpers.assertThrows("Should fail to drop namespace has tables", NamespaceNotEmptyException.class,
"is not empty. 2 tables exist.", () -> catalog.dropNamespace(tbl1.namespace()));
AssertHelpers.assertThrows("Should fail to drop namespace has tables", NamespaceNotEmptyException.class,
"is not empty. 1 tables exist.", () -> catalog.dropNamespace(tbl2.namespace()));
AssertHelpers.assertThrows("Should fail to drop namespace has tables", NamespaceNotEmptyException.class,
"is not empty. 1 tables exist.", () -> catalog.dropNamespace(tbl4.namespace()));
}
@Test
public void testConversions() {
Namespace ns = Namespace.of("db", "db2", "ns2");
String nsString = JdbcUtil.namespaceToString(ns);
Assert.assertEquals(ns, JdbcUtil.stringToNamespace(nsString));
}
}
| 1 | 44,054 | Can you remove this change? | apache-iceberg | java |
@@ -48,7 +48,7 @@ func ValidateCertificateForACMEIssuer(crt *cmapi.CertificateSpec, issuer *cmapi.
el := field.ErrorList{}
if crt.IsCA {
- el = append(el, field.Invalid(specPath.Child("isCA"), crt.KeyAlgorithm, "ACME does not support CA certificates"))
+ el = append(el, field.Invalid(specPath.Child("isCA"), crt.IsCA, "ACME does not support CA certificates"))
}
if crt.Subject != nil && len(crt.Subject.Organizations) != 0 { | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"k8s.io/apimachinery/pkg/util/validation/field"
cmapi "github.com/jetstack/cert-manager/pkg/internal/apis/certmanager"
)
func ValidateCertificateForIssuer(crt *cmapi.Certificate, issuerObj cmapi.GenericIssuer) field.ErrorList {
el := field.ErrorList{}
path := field.NewPath("spec")
switch {
case issuerObj.GetSpec().ACME != nil:
el = append(el, ValidateCertificateForACMEIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
case issuerObj.GetSpec().CA != nil:
case issuerObj.GetSpec().Vault != nil:
el = append(el, ValidateCertificateForVaultIssuer(&crt.Spec, issuerObj.GetSpec(), path)...)
case issuerObj.GetSpec().SelfSigned != nil:
case issuerObj.GetSpec().Venafi != nil:
default:
el = append(el, field.Invalid(path, "", fmt.Sprintf("no issuer specified for Issuer '%s/%s'", issuerObj.GetObjectMeta().Namespace, issuerObj.GetObjectMeta().Name)))
}
return el
}
func ValidateCertificateForACMEIssuer(crt *cmapi.CertificateSpec, issuer *cmapi.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if crt.IsCA {
el = append(el, field.Invalid(specPath.Child("isCA"), crt.KeyAlgorithm, "ACME does not support CA certificates"))
}
if crt.Subject != nil && len(crt.Subject.Organizations) != 0 {
el = append(el, field.Invalid(specPath.Child("subject", "organizations"), crt.Subject.Organizations, "ACME does not support setting the organization name"))
}
if crt.Duration != nil {
el = append(el, field.Invalid(specPath.Child("duration"), crt.Duration, "ACME does not support certificate durations"))
}
if len(crt.IPAddresses) != 0 {
el = append(el, field.Invalid(specPath.Child("ipAddresses"), crt.IPAddresses, "ACME does not support certificate ip addresses"))
}
return el
}
func ValidateCertificateForVaultIssuer(crt *cmapi.CertificateSpec, issuer *cmapi.IssuerSpec, specPath *field.Path) field.ErrorList {
el := field.ErrorList{}
if crt.IsCA {
el = append(el, field.Invalid(specPath.Child("isCA"), crt.KeyAlgorithm, "Vault issuer does not currently support CA certificates"))
}
if crt.Subject != nil && len(crt.Subject.Organizations) != 0 {
el = append(el, field.Invalid(specPath.Child("subject", "organizations"), crt.Subject.Organizations, "Vault issuer does not currently support setting the organization name"))
}
return el
}
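The hunk at the top of this record changes the value reported for the `isCA` violation from `crt.KeyAlgorithm` to `crt.IsCA`, so the error echoes the field it is actually validating. A hedged sketch of the corrected pattern follows; the simplified spec struct is a stand-in for illustration, not the real `cmapi.CertificateSpec`. Note that `ValidateCertificateForVaultIssuer` above still passes `crt.KeyAlgorithm` for its own `isCA` error, where the same one-line fix would presumably apply.

```go
// Sketch only: certificateSpec is a stand-in for cmapi.CertificateSpec.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

type certificateSpec struct {
	IsCA bool
}

func validateIsCAForACME(crt *certificateSpec, specPath *field.Path) field.ErrorList {
	el := field.ErrorList{}
	if crt.IsCA {
		// Report the offending field's own value (IsCA), not an unrelated field.
		el = append(el, field.Invalid(specPath.Child("isCA"), crt.IsCA, "ACME does not support CA certificates"))
	}
	return el
}

func main() {
	el := validateIsCAForACME(&certificateSpec{IsCA: true}, field.NewPath("spec"))
	fmt.Println(el.ToAggregate())
}
```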
| 1 | 22,373 | Oh, was that a bug in the validation? | jetstack-cert-manager | go |
@@ -134,6 +134,10 @@ public class OnlineFeedViewActivity extends AppCompatActivity {
} else {
Log.d(TAG, "Activity was started with url " + feedUrl);
setLoadingLayout();
+ //Remove subscribeonandroid.com from feed URL in order to subscribe to the actual feed URL
+ if(feedUrl.contains("subscribeonandroid.com")){
+ feedUrl = feedUrl.replaceFirst("((www.)?(subscribeonandroid.com/))","");
+ }
if (savedInstanceState == null) {
startFeedDownload(feedUrl, null, null);
} else { | 1 | package de.danoeh.antennapod.activity;
import android.app.Dialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.graphics.LightingColorFilter;
import android.os.Build;
import android.os.Bundle;
import androidx.annotation.NonNull;
import androidx.annotation.UiThread;
import androidx.core.app.NavUtils;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.app.AppCompatActivity;
import android.text.TextUtils;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.ProgressBar;
import android.widget.Spinner;
import android.widget.TextView;
import com.bumptech.glide.Glide;
import com.bumptech.glide.request.RequestOptions;
import de.danoeh.antennapod.core.event.FeedListUpdateEvent;
import org.apache.commons.lang3.StringUtils;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.adapter.FeedItemlistDescriptionAdapter;
import de.danoeh.antennapod.core.dialog.DownloadRequestErrorDialogCreator;
import de.danoeh.antennapod.core.event.DownloadEvent;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedPreferences;
import de.danoeh.antennapod.core.glide.ApGlideSettings;
import de.danoeh.antennapod.core.glide.FastBlurTransformation;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.DownloadRequest;
import de.danoeh.antennapod.core.service.download.DownloadStatus;
import de.danoeh.antennapod.core.service.download.Downloader;
import de.danoeh.antennapod.core.service.download.HttpDownloader;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DownloadRequestException;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.syndication.handler.FeedHandler;
import de.danoeh.antennapod.core.syndication.handler.FeedHandlerResult;
import de.danoeh.antennapod.core.syndication.handler.UnsupportedFeedtypeException;
import de.danoeh.antennapod.core.util.DownloadError;
import de.danoeh.antennapod.core.util.FileNameGenerator;
import de.danoeh.antennapod.core.util.Optional;
import de.danoeh.antennapod.core.util.StorageUtils;
import de.danoeh.antennapod.core.util.URLChecker;
import de.danoeh.antennapod.core.util.syndication.FeedDiscoverer;
import de.danoeh.antennapod.core.util.syndication.HtmlToPlainText;
import de.danoeh.antennapod.dialog.AuthenticationDialog;
import io.reactivex.Observable;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
/**
* Downloads a feed from a feed URL and parses it. Subclasses can display the
* feed object that was parsed. This activity MUST be started with a given URL
* or an Exception will be thrown.
* <p/>
* If the feed cannot be downloaded or parsed, an error dialog will be displayed
* and the activity will finish as soon as the error dialog is closed.
*/
public class OnlineFeedViewActivity extends AppCompatActivity {
public static final String ARG_FEEDURL = "arg.feedurl";
// Optional argument: specify a title for the actionbar.
public static final String ARG_TITLE = "title";
private static final int RESULT_ERROR = 2;
private static final String TAG = "OnlineFeedViewActivity";
private volatile List<Feed> feeds;
private Feed feed;
private String selectedDownloadUrl;
private Downloader downloader;
private boolean isPaused;
private boolean didPressSubscribe = false;
private Dialog dialog;
private ListView listView;
private Button subscribeButton;
private ProgressBar progressBar;
private Disposable download;
private Disposable parser;
private Disposable updater;
@Override
protected void onCreate(Bundle savedInstanceState) {
setTheme(UserPreferences.getTranslucentTheme());
super.onCreate(savedInstanceState);
StorageUtils.checkStorageAvailability(this);
setContentView(R.layout.onlinefeedview_activity);
listView = findViewById(R.id.listview);
progressBar = findViewById(R.id.progressBar);
String feedUrl = null;
if (getIntent().hasExtra(ARG_FEEDURL)) {
feedUrl = getIntent().getStringExtra(ARG_FEEDURL);
} else if (TextUtils.equals(getIntent().getAction(), Intent.ACTION_SEND)
|| TextUtils.equals(getIntent().getAction(), Intent.ACTION_VIEW)) {
feedUrl = TextUtils.equals(getIntent().getAction(), Intent.ACTION_SEND)
? getIntent().getStringExtra(Intent.EXTRA_TEXT) : getIntent().getDataString();
}
if (feedUrl == null) {
Log.e(TAG, "feedUrl is null.");
new AlertDialog.Builder(OnlineFeedViewActivity.this).
setNeutralButton(android.R.string.ok,
(dialog, which) -> finish()).
setTitle(R.string.error_label).
setMessage(R.string.null_value_podcast_error).create().show();
} else {
Log.d(TAG, "Activity was started with url " + feedUrl);
setLoadingLayout();
if (savedInstanceState == null) {
startFeedDownload(feedUrl, null, null);
} else {
startFeedDownload(feedUrl, savedInstanceState.getString("username"), savedInstanceState.getString("password"));
}
}
}
/**
* Displays a progress indicator.
*/
private void setLoadingLayout() {
progressBar.setVisibility(View.VISIBLE);
findViewById(R.id.feedDisplay).setVisibility(View.GONE);
}
@Override
protected void onStart() {
super.onStart();
isPaused = false;
EventBus.getDefault().register(this);
}
@Override
protected void onStop() {
super.onStop();
isPaused = true;
EventBus.getDefault().unregister(this);
if (downloader != null && !downloader.isFinished()) {
downloader.cancel();
}
if(dialog != null && dialog.isShowing()) {
dialog.dismiss();
}
}
@Override
public void onDestroy() {
super.onDestroy();
if(updater != null) {
updater.dispose();
}
if(download != null) {
download.dispose();
}
if(parser != null) {
parser.dispose();
}
}
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
if (feed != null && feed.getPreferences() != null) {
outState.putString("username", feed.getPreferences().getUsername());
outState.putString("password", feed.getPreferences().getPassword());
}
}
private void resetIntent(String url, String title) {
Intent intent = new Intent();
intent.putExtra(ARG_FEEDURL, url);
intent.putExtra(ARG_TITLE, title);
setIntent(intent);
}
@Override
public void finish() {
super.finish();
overridePendingTransition(android.R.anim.fade_in, android.R.anim.fade_out);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case android.R.id.home:
Intent destIntent = new Intent(this, MainActivity.class);
if (NavUtils.shouldUpRecreateTask(this, destIntent)) {
startActivity(destIntent);
} else {
NavUtils.navigateUpFromSameTask(this);
}
return true;
}
return super.onOptionsItemSelected(item);
}
private void startFeedDownload(String url, String username, String password) {
Log.d(TAG, "Starting feed download");
url = URLChecker.prepareURL(url);
feed = new Feed(url, null);
if (username != null && password != null) {
feed.setPreferences(new FeedPreferences(0, false, FeedPreferences.AutoDeleteAction.GLOBAL, username, password));
}
String fileUrl = new File(getExternalCacheDir(),
FileNameGenerator.generateFileName(feed.getDownload_url())).toString();
feed.setFile_url(fileUrl);
final DownloadRequest request = new DownloadRequest(feed.getFile_url(),
feed.getDownload_url(), "OnlineFeed", 0, Feed.FEEDFILETYPE_FEED, username, password,
true, null);
download = Observable.fromCallable(() -> {
feeds = DBReader.getFeedList();
downloader = new HttpDownloader(request);
downloader.call();
return downloader.getResult();
})
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(this::checkDownloadResult,
error -> Log.e(TAG, Log.getStackTraceString(error)));
}
private void checkDownloadResult(@NonNull DownloadStatus status) {
if (status.isCancelled()) {
return;
}
if (status.isSuccessful()) {
parseFeed();
} else if (status.getReason() == DownloadError.ERROR_UNAUTHORIZED) {
if (!isFinishing() && !isPaused) {
dialog = new FeedViewAuthenticationDialog(OnlineFeedViewActivity.this,
R.string.authentication_notification_title,
downloader.getDownloadRequest().getSource()).create();
dialog.show();
}
} else {
String errorMsg = status.getReason().getErrorString(OnlineFeedViewActivity.this);
if (status.getReasonDetailed() != null) {
errorMsg += " (" + status.getReasonDetailed() + ")";
}
showErrorDialog(errorMsg);
}
}
@Subscribe
public void onFeedListChanged(FeedListUpdateEvent event) {
updater = Observable.fromCallable(DBReader::getFeedList)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
feeds -> {
OnlineFeedViewActivity.this.feeds = feeds;
handleUpdatedFeedStatus(feed);
}, error -> Log.e(TAG, Log.getStackTraceString(error))
);
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(DownloadEvent event) {
Log.d(TAG, "onEventMainThread() called with: " + "event = [" + event + "]");
handleUpdatedFeedStatus(feed);
}
private void parseFeed() {
if (feed == null || (feed.getFile_url() == null && feed.isDownloaded())) {
throw new IllegalStateException("feed must be non-null and downloaded when parseFeed is called");
}
Log.d(TAG, "Parsing feed");
parser = Observable.fromCallable(this::doParseFeed)
.subscribeOn(Schedulers.computation())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(optionalResult -> {
if(optionalResult.isPresent()) {
FeedHandlerResult result = optionalResult.get();
beforeShowFeedInformation(result.feed);
showFeedInformation(result.feed, result.alternateFeedUrls);
}
}, error -> {
String errorMsg = DownloadError.ERROR_PARSER_EXCEPTION.getErrorString(
OnlineFeedViewActivity.this) + " (" + error.getMessage() + ")";
showErrorDialog(errorMsg);
Log.d(TAG, "Feed parser exception: " + Log.getStackTraceString(error));
});
}
@NonNull
private Optional<FeedHandlerResult> doParseFeed() throws Exception {
FeedHandler handler = new FeedHandler();
try {
return Optional.of(handler.parseFeed(feed));
} catch (UnsupportedFeedtypeException e) {
Log.d(TAG, "Unsupported feed type detected");
if ("html".equalsIgnoreCase(e.getRootElement())) {
boolean dialogShown = showFeedDiscoveryDialog(new File(feed.getFile_url()), feed.getDownload_url());
if (dialogShown) {
return Optional.empty();
} else {
Log.d(TAG, "Supplied feed is an HTML web page that has no references to any feed");
throw e;
}
} else {
throw e;
}
} catch (Exception e) {
Log.e(TAG, Log.getStackTraceString(e));
throw e;
} finally {
boolean rc = new File(feed.getFile_url()).delete();
Log.d(TAG, "Deleted feed source file. Result: " + rc);
}
}
/**
* Called after the feed has been downloaded and parsed and before showFeedInformation is called.
* This method is executed on a background thread
*/
private void beforeShowFeedInformation(Feed feed) {
Log.d(TAG, "Removing HTML from feed description");
feed.setDescription(HtmlToPlainText.getPlainText(feed.getDescription()));
Log.d(TAG, "Removing HTML from shownotes");
if (feed.getItems() != null) {
for (FeedItem item : feed.getItems()) {
item.setDescription(HtmlToPlainText.getPlainText(item.getDescription()));
}
}
}
/**
* Called when feed parsed successfully.
* This method is executed on the GUI thread.
*/
private void showFeedInformation(final Feed feed, Map<String, String> alternateFeedUrls) {
progressBar.setVisibility(View.GONE);
findViewById(R.id.feedDisplay).setVisibility(View.VISIBLE);
this.feed = feed;
this.selectedDownloadUrl = feed.getDownload_url();
listView.setSelector(android.R.color.transparent);
listView.setAdapter(new FeedItemlistDescriptionAdapter(this, 0, feed.getItems()));
ImageView cover = findViewById(R.id.imgvCover);
ImageView headerBackground = findViewById(R.id.imgvBackground);
findViewById(R.id.butShowInfo).setVisibility(View.INVISIBLE);
findViewById(R.id.butShowSettings).setVisibility(View.INVISIBLE);
headerBackground.setColorFilter(new LightingColorFilter(0xff828282, 0x000000));
TextView title = findViewById(R.id.txtvTitle);
TextView author = findViewById(R.id.txtvAuthor);
Spinner spAlternateUrls = findViewById(R.id.spinnerAlternateUrls);
View header = View.inflate(this, R.layout.onlinefeedview_header, null);
listView.addHeaderView(header);
TextView description = header.findViewById(R.id.txtvDescription);
subscribeButton = findViewById(R.id.butSubscribe);
if (StringUtils.isNotBlank(feed.getImageUrl())) {
Glide.with(this)
.load(feed.getImageUrl())
.apply(new RequestOptions()
.placeholder(R.color.light_gray)
.error(R.color.light_gray)
.diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY)
.fitCenter()
.dontAnimate())
.into(cover);
Glide.with(this)
.load(feed.getImageUrl())
.apply(new RequestOptions()
.placeholder(R.color.image_readability_tint)
.error(R.color.image_readability_tint)
.diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY)
.transform(new FastBlurTransformation())
.dontAnimate())
.into(headerBackground);
}
title.setText(feed.getTitle());
author.setText(feed.getAuthor());
description.setText(feed.getDescription());
subscribeButton.setOnClickListener(v -> {
if (feedInFeedlist(feed)) {
openFeed();
} else {
Feed f = new Feed(selectedDownloadUrl, null, feed.getTitle());
f.setPreferences(feed.getPreferences());
this.feed = f;
try {
DownloadRequester.getInstance().downloadFeed(this, f);
} catch (DownloadRequestException e) {
Log.e(TAG, Log.getStackTraceString(e));
DownloadRequestErrorDialogCreator.newRequestErrorDialog(this, e.getMessage());
}
didPressSubscribe = true;
handleUpdatedFeedStatus(feed);
}
});
final int MAX_LINES_COLLAPSED = 10;
description.setMaxLines(MAX_LINES_COLLAPSED);
description.setOnClickListener(v -> {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN
&& description.getMaxLines() > MAX_LINES_COLLAPSED) {
description.setMaxLines(MAX_LINES_COLLAPSED);
} else {
description.setMaxLines(2000);
}
});
if (alternateFeedUrls.isEmpty()) {
spAlternateUrls.setVisibility(View.GONE);
} else {
spAlternateUrls.setVisibility(View.VISIBLE);
final List<String> alternateUrlsList = new ArrayList<>();
final List<String> alternateUrlsTitleList = new ArrayList<>();
alternateUrlsList.add(feed.getDownload_url());
alternateUrlsTitleList.add(feed.getTitle());
alternateUrlsList.addAll(alternateFeedUrls.keySet());
for (String url : alternateFeedUrls.keySet()) {
alternateUrlsTitleList.add(alternateFeedUrls.get(url));
}
ArrayAdapter<String> adapter = new ArrayAdapter<>(this, android.R.layout.simple_spinner_item, alternateUrlsTitleList);
adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
spAlternateUrls.setAdapter(adapter);
spAlternateUrls.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
selectedDownloadUrl = alternateUrlsList.get(position);
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
}
});
}
handleUpdatedFeedStatus(feed);
}
private void openFeed() {
// feed.getId() is always 0, so we have to retrieve the id from the feed list
// loaded from the database
Intent intent = MainActivity.getIntentToOpenFeed(this, getFeedId(feed));
finish();
startActivity(intent);
}
private void handleUpdatedFeedStatus(Feed feed) {
if (subscribeButton != null && feed != null) {
if (DownloadRequester.getInstance().isDownloadingFile(feed.getDownload_url())) {
subscribeButton.setEnabled(false);
subscribeButton.setText(R.string.subscribing_label);
} else if (feedInFeedlist(feed)) {
subscribeButton.setEnabled(true);
subscribeButton.setText(R.string.open_podcast);
if (didPressSubscribe) {
openFeed();
}
} else {
subscribeButton.setEnabled(true);
subscribeButton.setText(R.string.subscribe_label);
}
}
}
private boolean feedInFeedlist(Feed feed) {
if (feeds == null || feed == null) {
return false;
}
for (Feed f : feeds) {
if (f.getIdentifyingValue().equals(feed.getIdentifyingValue())) {
return true;
}
}
return false;
}
private long getFeedId(Feed feed) {
if (feeds == null || feed == null) {
return 0;
}
for (Feed f : feeds) {
if (f.getIdentifyingValue().equals(feed.getIdentifyingValue())) {
return f.getId();
}
}
return 0;
}
@UiThread
private void showErrorDialog(String errorMsg) {
if (!isFinishing() && !isPaused) {
AlertDialog.Builder builder = new AlertDialog.Builder(this);
builder.setTitle(R.string.error_label);
if (errorMsg != null) {
builder.setMessage(getString(R.string.error_msg_prefix) + errorMsg);
} else {
builder.setMessage(R.string.error_msg_prefix);
}
builder.setNeutralButton(android.R.string.ok,
(dialog, which) -> dialog.cancel()
);
builder.setOnCancelListener(dialog -> {
setResult(RESULT_ERROR);
finish();
});
if(dialog != null && dialog.isShowing()) {
dialog.dismiss();
}
dialog = builder.show();
}
}
/**
*
* @return true if a FeedDiscoveryDialog is shown, false otherwise (e.g., due to no feed found).
*/
private boolean showFeedDiscoveryDialog(File feedFile, String baseUrl) {
FeedDiscoverer fd = new FeedDiscoverer();
final Map<String, String> urlsMap;
try {
urlsMap = fd.findLinks(feedFile, baseUrl);
if (urlsMap == null || urlsMap.isEmpty()) {
return false;
}
} catch (IOException e) {
e.printStackTrace();
return false;
}
if (isPaused || isFinishing()) {
return false;
}
final List<String> titles = new ArrayList<>();
final List<String> urls = new ArrayList<>();
urls.addAll(urlsMap.keySet());
for (String url : urls) {
titles.add(urlsMap.get(url));
}
final ArrayAdapter<String> adapter = new ArrayAdapter<>(OnlineFeedViewActivity.this, R.layout.ellipsize_start_listitem, R.id.txtvTitle, titles);
DialogInterface.OnClickListener onClickListener = (dialog, which) -> {
String selectedUrl = urls.get(which);
dialog.dismiss();
resetIntent(selectedUrl, titles.get(which));
FeedPreferences prefs = feed.getPreferences();
if(prefs != null) {
startFeedDownload(selectedUrl, prefs.getUsername(), prefs.getPassword());
} else {
startFeedDownload(selectedUrl, null, null);
}
};
AlertDialog.Builder ab = new AlertDialog.Builder(OnlineFeedViewActivity.this)
.setTitle(R.string.feeds_label)
.setCancelable(true)
.setOnCancelListener(dialog -> finish())
.setAdapter(adapter, onClickListener);
runOnUiThread(() -> {
if(dialog != null && dialog.isShowing()) {
dialog.dismiss();
}
dialog = ab.show();
});
return true;
}
private class FeedViewAuthenticationDialog extends AuthenticationDialog {
private final String feedUrl;
FeedViewAuthenticationDialog(Context context, int titleRes, String feedUrl) {
super(context, titleRes, true, false, null, null);
this.feedUrl = feedUrl;
}
@Override
protected void onCancelled() {
super.onCancelled();
finish();
}
@Override
protected void onConfirmed(String username, String password, boolean saveUsernamePassword) {
startFeedDownload(feedUrl, username, password);
}
}
}
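The lines added in the hunk above are what the style comment attached to this record refers to. Reformatted in the conventional Google Java style (a space after `if` and around the braces), with behaviour unchanged, the block would read as follows. As an aside, the dots in the pattern are unescaped and so match any character; escaping them would be a separate, functional change.

```java
// Same logic as the added lines above, only reformatted.
// Remove subscribeonandroid.com from the feed URL in order to subscribe to the actual feed URL.
if (feedUrl.contains("subscribeonandroid.com")) {
    feedUrl = feedUrl.replaceFirst("((www.)?(subscribeonandroid.com/))", "");
}
```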
| 1 | 15,576 | Please use the Google java code style. Basically, add more space characters like in the statements below. Next to method arguments and curly braces. This is currently not checked on CI because it is too inconsistent in the code base but I would prefer new code to be consistent. | AntennaPod-AntennaPod | java |
@@ -145,10 +145,12 @@ TEST(UpdateEdgeTest, Set_Filter_Yield_Test) {
decltype(req.return_columns) tmpColumns;
tmpColumns.emplace_back(Expression::encode(val1));
tmpColumns.emplace_back(Expression::encode(val2));
- std::string name = folly::to<std::string>(3002);
- std::string prop = "tag_3002_col_2";
- auto* sourcePropExp = new SourcePropertyExpression(&name, &prop);
+ delete val1;
+ delete val2;
+ auto* sourcePropExp = new SourcePropertyExpression(
+ new std::string(folly::to<std::string>(3002)), new std::string("tag_3002_col_2"));
tmpColumns.emplace_back(Expression::encode(sourcePropExp));
+ delete sourcePropExp;
req.set_return_columns(std::move(tmpColumns));
req.set_insertable(false); | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "base/NebulaKeyUtils.h"
#include <gtest/gtest.h>
#include <rocksdb/db.h>
#include <limits>
#include "fs/TempDir.h"
#include "storage/test/TestUtils.h"
#include "storage/UpdateEdgeProcessor.h"
#include "dataman/RowSetReader.h"
#include "dataman/RowReader.h"
namespace nebula {
namespace storage {
void mockData(kvstore::KVStore* kv) {
LOG(INFO) << "Prepare data...";
std::vector<kvstore::KV> data;
for (auto partId = 0; partId < 3; partId++) {
for (auto vertexId = partId * 10; vertexId < (partId + 1) * 10; vertexId++) {
for (auto tagId = 3001; tagId < 3010; tagId++) {
// Write multi versions, we should get/update the latest version
for (auto version = 0; version < 3; version++) {
auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId,
std::numeric_limits<int32_t>::max() - version);
RowWriter writer;
for (int64_t numInt = 0; numInt < 3; numInt++) {
writer << partId + tagId + version + numInt;
}
for (auto numString = 3; numString < 6; numString++) {
writer << folly::stringPrintf("tag_string_col_%d_%d", numString, version);
}
auto val = writer.encode();
data.emplace_back(std::move(key), std::move(val));
}
}
// Generate 7 out-edges for each edgeType.
for (auto dstId = 10001; dstId <= 10007; dstId++) {
VLOG(3) << "Write part " << partId << ", vertex " << vertexId << ", dst " << dstId;
// Write multi versions, we should get the latest version.
for (auto version = 0; version < 3; version++) {
auto key = NebulaKeyUtils::edgeKey(partId, vertexId, 101, 0, dstId,
std::numeric_limits<int>::max() - version);
RowWriter writer(nullptr);
for (uint64_t numInt = 0; numInt < 10; numInt++) {
writer << (dstId + numInt);
}
for (auto numString = 10; numString < 20; numString++) {
writer << folly::stringPrintf("string_col_%d_%d", numString, version);
}
auto val = writer.encode();
data.emplace_back(std::move(key), std::move(val));
}
}
// Generate 5 in-edges for each edgeType, the edgeType is negative
for (auto srcId = 10001; srcId <= 10005; srcId++) {
VLOG(3) << "Write part " << partId << ", vertex " << vertexId << ", src " << srcId;
for (auto version = 0; version < 3; version++) {
auto key = NebulaKeyUtils::edgeKey(partId, vertexId, -101, 0, srcId,
std::numeric_limits<int>::max() - version);
data.emplace_back(std::move(key), "");
}
}
}
folly::Baton<true, std::atomic> baton;
kv->asyncMultiPut(0, partId, std::move(data),
[&](kvstore::ResultCode code) {
CHECK_EQ(code, kvstore::ResultCode::SUCCEEDED);
baton.post();
});
baton.wait();
}
}
TEST(UpdateEdgeTest, Set_Filter_Yield_Test) {
fs::TempDir rootPath("/tmp/UpdateEdgeTest.XXXXXX");
std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path());
LOG(INFO) << "Prepare meta...";
auto schemaMan = TestUtils::mockSchemaMan();
mockData(kv.get());
LOG(INFO) << "Build UpdateEdgeRequest...";
GraphSpaceID spaceId = 0;
PartitionID partId = 0;
VertexID srcId = 1;
VertexID dstId = 10001;
// src = 1, edge_type = 101, ranking = 0, dst = 10001
storage::cpp2::EdgeKey edgeKey;
edgeKey.set_src(srcId);
edgeKey.set_edge_type(101);
edgeKey.set_ranking(0);
edgeKey.set_dst(dstId);
cpp2::UpdateEdgeRequest req;
req.set_space_id(spaceId);
req.set_edge_key(edgeKey);
req.set_part_id(partId);
LOG(INFO) << "Build filter...";
// left int: $^.3001.tag_3001_col_0 >= 0
auto* tag1 = new std::string("3001");
auto* prop1 = new std::string("tag_3001_col_0");
auto* srcExp1 = new SourcePropertyExpression(tag1, prop1);
auto* priExp1 = new PrimaryExpression(0L);
auto* left = new RelationalExpression(srcExp1,
RelationalExpression::Operator::GE,
priExp1);
// right string: $^.3003.tag_3003_col_3 == tag_string_col_3_2;
auto* tag2 = new std::string("3003");
auto* prop2 = new std::string("tag_3003_col_3");
auto* srcExp2 = new SourcePropertyExpression(tag2, prop2);
std::string col3("tag_string_col_3_2");
auto* priExp2 = new PrimaryExpression(col3);
auto* right = new RelationalExpression(srcExp2,
RelationalExpression::Operator::EQ,
priExp2);
// left AND right is true
auto logExp = std::make_unique<LogicalExpression>(left, LogicalExpression::AND, right);
req.set_filter(Expression::encode(logExp.get()));
LOG(INFO) << "Build update items...";
std::vector<cpp2::UpdateItem> items;
// int: 101.col_0 = 101.col_2 = 10001 + 2 = 10003
cpp2::UpdateItem item1;
item1.set_name("101");
item1.set_prop("col_0");
auto* edge101 = new std::string("101");
auto* propCol2 = new std::string("col_2");
auto* val1 = new AliasPropertyExpression(new std::string(""), edge101, propCol2);
item1.set_value(Expression::encode(val1));
items.emplace_back(item1);
// string: 101.col_10 = string_col_10_2_new
cpp2::UpdateItem item2;
item2.set_name("101");
item2.set_prop("col_10");
std::string col10new("string_col_10_2_new");
auto* val2 = new PrimaryExpression(col10new);
item2.set_value(Expression::encode(val2));
items.emplace_back(item2);
req.set_update_items(std::move(items));
decltype(req.return_columns) tmpColumns;
tmpColumns.emplace_back(Expression::encode(val1));
tmpColumns.emplace_back(Expression::encode(val2));
std::string name = folly::to<std::string>(3002);
std::string prop = "tag_3002_col_2";
auto* sourcePropExp = new SourcePropertyExpression(&name, &prop);
tmpColumns.emplace_back(Expression::encode(sourcePropExp));
req.set_return_columns(std::move(tmpColumns));
req.set_insertable(false);
LOG(INFO) << "Test UpdateEdgeRequest...";
auto* processor = UpdateEdgeProcessor::instance(kv.get(), schemaMan.get());
auto f = processor->getFuture();
processor->process(req);
auto resp = std::move(f).get();
LOG(INFO) << "Check the results...";
EXPECT_EQ(0, resp.result.failed_codes.size());
EXPECT_FALSE(resp.get_upsert());
ASSERT_TRUE(resp.__isset.schema);
auto provider = std::make_shared<ResultSchemaProvider>(resp.schema);
auto reader = RowReader::getRowReader(resp.data, provider);
EXPECT_EQ(3, reader->numFields());
for (int i = 0; i < 3; i++) {
auto res = RowReader::getPropByIndex(reader.get(), i);
if (ok(res)) {
switch (i) {
case 0: {
auto&& v0 = value(std::move(res));
EXPECT_EQ(10003, boost::get<int64_t>(v0));
break;
}
case 1: {
auto&& v1 = value(std::move(res));
EXPECT_STREQ("string_col_10_2_new", boost::get<std::string>(v1).c_str());
break;
}
case 2: {
auto&& v2 = value(std::move(res));
EXPECT_EQ(3006, boost::get<int64_t>(v2));
break;
}
default:
break;
}
}
}
// check the kvstore
std::vector<std::string> keys;
std::vector<std::string> values;
auto lastVersion = std::numeric_limits<int32_t>::max() - 2;
auto kvstoreEdgeKey = NebulaKeyUtils::edgeKey(partId, srcId, 101, 0, dstId, lastVersion);
keys.emplace_back(kvstoreEdgeKey);
kvstore::ResultCode code = kv->multiGet(spaceId, partId, std::move(keys), &values);
CHECK_EQ(code, kvstore::ResultCode::SUCCEEDED);
EXPECT_EQ(1, values.size());
auto edgeSchema = schemaMan->getEdgeSchema(spaceId, 101);
auto edgeReader = RowReader::getRowReader(values[0], edgeSchema);
auto res = RowReader::getPropByName(edgeReader.get(), "col_0");
CHECK(ok(res));
auto&& v1 = value(std::move(res));
EXPECT_EQ(10003, boost::get<int64_t>(v1));
res = RowReader::getPropByName(edgeReader.get(), "col_10");
CHECK(ok(res));
auto&& v2 = value(std::move(res));
EXPECT_STREQ("string_col_10_2_new", boost::get<std::string>(v2).c_str());
}
TEST(UpdateEdgeTest, Insertable_Test) {
fs::TempDir rootPath("/tmp/UpdateEdgeTest.XXXXXX");
std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path());
LOG(INFO) << "Prepare meta...";
auto schemaMan = TestUtils::mockSchemaMan();
mockData(kv.get());
LOG(INFO) << "Build UpdateEdgeRequest...";
GraphSpaceID spaceId = 0;
PartitionID partId = 0;
VertexID srcId = 1;
VertexID dstId = 10008;
// src = 1, edge_type = 101, ranking = 0, dst = 10008
storage::cpp2::EdgeKey edgeKey;
edgeKey.set_src(srcId);
edgeKey.set_edge_type(101);
edgeKey.set_ranking(0);
edgeKey.set_dst(dstId);
cpp2::UpdateEdgeRequest req;
req.set_space_id(spaceId);
req.set_edge_key(edgeKey);
req.set_part_id(partId);
req.set_filter("");
LOG(INFO) << "Build update items...";
std::vector<cpp2::UpdateItem> items;
// int: 101.col_0 = $^.3002.tag_3002_col_2
cpp2::UpdateItem item1;
item1.set_name("101");
item1.set_prop("col_0");
auto* tag3002 = new std::string("3002");
auto* propCol2 = new std::string("tag_3002_col_2");
auto* val1 = new SourcePropertyExpression(tag3002, propCol2);
item1.set_value(Expression::encode(val1));
items.emplace_back(item1);
// string: 101.col_10 = string_col_10_2_new
cpp2::UpdateItem item2;
item2.set_name("101");
item2.set_prop("col_10");
std::string col10new("string_col_10_2_new");
auto* val2 = new PrimaryExpression(col10new);
item2.set_value(Expression::encode(val2));
items.emplace_back(item2);
req.set_update_items(std::move(items));
decltype(req.return_columns) tmpColumns;
auto* noRef = new std::string("");
auto* edge101 = new std::string("101");
auto* edgePropExp = new AliasPropertyExpression(noRef, edge101, new std::string("col_0"));
tmpColumns.emplace_back(Expression::encode(edgePropExp));
edgePropExp = new AliasPropertyExpression(noRef, edge101, new std::string("col_1"));
tmpColumns.emplace_back(Expression::encode(edgePropExp));
edgePropExp = new AliasPropertyExpression(noRef, edge101, new std::string("col_10"));
tmpColumns.emplace_back(Expression::encode(edgePropExp));
edgePropExp = new AliasPropertyExpression(noRef, edge101, new std::string("col_11"));
tmpColumns.emplace_back(Expression::encode(edgePropExp));
tmpColumns.emplace_back(Expression::encode(val1));
req.set_return_columns(std::move(tmpColumns));
req.set_insertable(true);
LOG(INFO) << "Test UpdateEdgeRequest...";
auto* processor = UpdateEdgeProcessor::instance(kv.get(), schemaMan.get());
auto f = processor->getFuture();
processor->process(req);
auto resp = std::move(f).get();
LOG(INFO) << "Check the results...";
EXPECT_EQ(0, resp.result.failed_codes.size());
EXPECT_TRUE(resp.get_upsert());
ASSERT_TRUE(resp.__isset.schema);
auto provider = std::make_shared<ResultSchemaProvider>(resp.schema);
auto reader = RowReader::getRowReader(resp.data, provider);
EXPECT_EQ(5, reader->numFields());
for (int i = 0; i < 5; i++) {
auto res = RowReader::getPropByIndex(reader.get(), i);
if (ok(res)) {
switch (i) {
case 0: {
auto&& v0 = value(std::move(res));
EXPECT_EQ(3006, boost::get<int64_t>(v0));
break;
}
case 1: {
auto&& v1 = value(std::move(res));
EXPECT_EQ(0, boost::get<int64_t>(v1));
break;
}
case 2: {
auto&& v2 = value(std::move(res));
EXPECT_STREQ("string_col_10_2_new", boost::get<std::string>(v2).c_str());
break;
}
case 3: {
auto&& v3 = value(std::move(res));
EXPECT_STREQ("", boost::get<std::string>(v3).c_str());
break;
}
case 4: {
auto&& v4 = value(std::move(res));
EXPECT_EQ(3006, boost::get<int64_t>(v4));
break;
}
default:
break;
}
}
}
// get inserted edge from kvstore directly
auto prefix = NebulaKeyUtils::prefix(partId, srcId, 101, 0, dstId);
std::unique_ptr<kvstore::KVIterator> iter;
auto ret = kv->prefix(spaceId, partId, prefix, &iter);
EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, ret);
EXPECT_TRUE(iter && iter->valid());
reader = RowReader::getEdgePropReader(schemaMan.get(), iter->val(), spaceId, 101);
auto res = RowReader::getPropByName(reader.get(), "col_0");
EXPECT_TRUE(ok(res));
auto&& v0 = value(std::move(res));
EXPECT_EQ(3006L, boost::get<int64_t>(v0));
res = RowReader::getPropByName(reader.get(), "col_1");
EXPECT_TRUE(ok(res));
auto&& v1 = value(std::move(res));
EXPECT_EQ(0L, boost::get<int64_t>(v1));
res = RowReader::getPropByName(reader.get(), "col_10");
EXPECT_TRUE(ok(res));
auto&& v2 = value(std::move(res));
EXPECT_STREQ("string_col_10_2_new", boost::get<std::string>(v2).c_str());
res = RowReader::getPropByName(reader.get(), "col_11");
EXPECT_TRUE(ok(res));
auto&& v3 = value(std::move(res));
EXPECT_STREQ("", boost::get<std::string>(v3).c_str());
}
} // namespace storage
} // namespace nebula
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
return RUN_ALL_TESTS();
}
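The hunk for this record replaces the original stack-allocated `name`/`prop` strings with `new std::string(...)` arguments plus manual `delete` calls. Assuming `SourcePropertyExpression` only borrows the pointers for the duration of `Expression::encode`, which is how the pre-change code treated it, the locals can simply stay on the stack and no heap allocation or cleanup is needed. A drop-in sketch for the test body above, under that assumption:

```cpp
// Sketch only: assumes SourcePropertyExpression does not take ownership of the strings.
std::string name = folly::to<std::string>(3002);
std::string prop = "tag_3002_col_2";
SourcePropertyExpression sourcePropExp(&name, &prop);  // stack object, nothing to delete
tmpColumns.emplace_back(Expression::encode(&sourcePropExp));
```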
| 1 | 22,999 | Actually, I don't think we need to create object on heap. | vesoft-inc-nebula | cpp |
@@ -84,7 +84,7 @@ void GenerateImports(grpc_generator::File *file, grpc_generator::Printer *printe
}
printer->Print("import (\n");
printer->Indent();
- printer->Print(vars, "$context$ \"golang.org/x/net/context\"\n");
+ printer->Print(vars, "$context$ \"context\"\n");
printer->Print(vars, "$grpc$ \"google.golang.org/grpc\"\n");
printer->Outdent();
printer->Print(")\n\n"); | 1 | /*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <map>
#include <cctype>
#include <sstream>
#include "src/compiler/go_generator.h"
template <class T>
grpc::string as_string(T x) {
std::ostringstream out;
out << x;
return out.str();
}
inline bool ClientOnlyStreaming(const grpc_generator::Method *method) {
return method->ClientStreaming() && !method->ServerStreaming();
}
inline bool ServerOnlyStreaming(const grpc_generator::Method *method) {
return !method->ClientStreaming() && method->ServerStreaming();
}
namespace grpc_go_generator {
// Returns string with first letter to lowerCase
grpc::string unexportName(grpc::string s) {
if (s.empty())
return s;
s[0] = static_cast<char>(std::tolower(s[0]));
return s;
}
// Returns string with first letter to uppercase
grpc::string exportName(grpc::string s) {
if (s.empty())
return s;
s[0] = static_cast<char>(std::toupper(s[0]));
return s;
}
// Generates imports for the service
void GenerateImports(grpc_generator::File *file, grpc_generator::Printer *printer,
std::map<grpc::string, grpc::string> vars) {
vars["filename"] = file->filename();
printer->Print("//Generated by gRPC Go plugin\n");
printer->Print("//If you make any local changes, they will be lost\n");
printer->Print(vars, "//source: $filename$\n\n");
printer->Print(vars, "package $Package$\n\n");
if (file->additional_headers() != "") {
printer->Print(file->additional_headers().c_str());
printer->Print("\n\n");
}
printer->Print("import (\n");
printer->Indent();
printer->Print(vars, "$context$ \"golang.org/x/net/context\"\n");
printer->Print(vars, "$grpc$ \"google.golang.org/grpc\"\n");
printer->Outdent();
printer->Print(")\n\n");
}
// Generates Server method signature source
void GenerateServerMethodSignature(const grpc_generator::Method *method, grpc_generator::Printer *printer,
std::map<grpc::string, grpc::string> vars) {
vars["Method"] = exportName(method->name());
vars["Request"] = method->get_input_type_name();
vars["Response"] = (vars["CustomMethodIO"] == "") ? method->get_output_type_name() : vars["CustomMethodIO"];
if (method->NoStreaming()) {
printer->Print(vars, "$Method$($context$.Context, *$Request$) (*$Response$, error)");
} else if (ServerOnlyStreaming(method)) {
printer->Print(vars, "$Method$(*$Request$, $Service$_$Method$Server) error");
} else {
printer->Print(vars, "$Method$($Service$_$Method$Server) error");
}
}
void GenerateServerMethod(const grpc_generator::Method *method, grpc_generator::Printer *printer,
std::map<grpc::string, grpc::string> vars) {
vars["Method"] = exportName(method->name());
vars["Request"] = method->get_input_type_name();
vars["Response"] = (vars["CustomMethodIO"] == "") ? method->get_output_type_name() : vars["CustomMethodIO"];
vars["FullMethodName"] = "/" + vars["ServicePrefix"] + "." + vars["Service"] + "/" + vars["Method"];
vars["Handler"] = "_" + vars["Service"] + "_" + vars["Method"] + "_Handler";
if (method->NoStreaming()) {
printer->Print(vars, "func $Handler$(srv interface{}, ctx $context$.Context,\n\tdec func(interface{}) error, interceptor $grpc$.UnaryServerInterceptor) (interface{}, error) {\n");
printer->Indent();
printer->Print(vars, "in := new($Request$)\n");
printer->Print("if err := dec(in); err != nil { return nil, err }\n");
printer->Print(vars, "if interceptor == nil { return srv.($Service$Server).$Method$(ctx, in) }\n");
printer->Print(vars, "info := &$grpc$.UnaryServerInfo{\n");
printer->Indent();
printer->Print("Server: srv,\n");
printer->Print(vars, "FullMethod: \"$FullMethodName$\",\n");
printer->Outdent();
printer->Print("}\n\n");
printer->Print(vars, "handler := func(ctx $context$.Context, req interface{}) (interface{}, error) {\n");
printer->Indent();
printer->Print(vars, "return srv.($Service$Server).$Method$(ctx, req.(* $Request$))\n");
printer->Outdent();
printer->Print("}\n");
printer->Print("return interceptor(ctx, in, info, handler)\n");
printer->Outdent();
printer->Print("}\n\n");
return;
}
vars["StreamType"] = vars["ServiceUnexported"] + vars["Method"] + "Server";
printer->Print(vars, "func $Handler$(srv interface{}, stream $grpc$.ServerStream) error {\n");
printer->Indent();
if (ServerOnlyStreaming(method)) {
printer->Print(vars, "m := new($Request$)\n");
printer->Print(vars, "if err := stream.RecvMsg(m); err != nil { return err }\n");
printer->Print(vars, "return srv.($Service$Server).$Method$(m, &$StreamType${stream})\n");
} else {
printer->Print(vars, "return srv.($Service$Server).$Method$(&$StreamType${stream})\n");
}
printer->Outdent();
printer->Print("}\n\n");
bool genSend = method->BidiStreaming() || ServerOnlyStreaming(method);
bool genRecv = method->BidiStreaming() || ClientOnlyStreaming(method);
bool genSendAndClose = ClientOnlyStreaming(method);
printer->Print(vars, "type $Service$_$Method$Server interface { \n");
printer->Indent();
if (genSend) {
printer->Print(vars, "Send(* $Response$) error\n");
}
if (genRecv) {
printer->Print(vars, "Recv() (* $Request$, error)\n");
}
if (genSendAndClose) {
printer->Print(vars, "SendAndClose(* $Response$) error\n");
}
printer->Print(vars, "$grpc$.ServerStream\n");
printer->Outdent();
printer->Print("}\n\n");
printer->Print(vars, "type $StreamType$ struct {\n");
printer->Indent();
printer->Print(vars, "$grpc$.ServerStream\n");
printer->Outdent();
printer->Print("}\n\n");
if (genSend) {
printer->Print(vars, "func (x *$StreamType$) Send(m *$Response$) error {\n");
printer->Indent();
printer->Print("return x.ServerStream.SendMsg(m)\n");
printer->Outdent();
printer->Print("}\n\n");
}
if (genRecv) {
printer->Print(vars, "func (x *$StreamType$) Recv() (*$Request$, error) {\n");
printer->Indent();
printer->Print(vars, "m := new($Request$)\n");
printer->Print("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }\n");
printer->Print("return m, nil\n");
printer->Outdent();
printer->Print("}\n\n");
}
if (genSendAndClose) {
printer->Print(vars, "func (x *$StreamType$) SendAndClose(m *$Response$) error {\n");
printer->Indent();
printer->Print("return x.ServerStream.SendMsg(m)\n");
printer->Outdent();
printer->Print("}\n\n");
}
}
// Generates Client method signature source
void GenerateClientMethodSignature(const grpc_generator::Method *method, grpc_generator::Printer *printer,
std::map<grpc::string, grpc::string> vars) {
vars["Method"] = exportName(method->name());
vars["Request"] = ", in *" + ((vars["CustomMethodIO"] == "") ? method->get_input_type_name() : vars["CustomMethodIO"]);
if (ClientOnlyStreaming(method) || method->BidiStreaming()) {
vars["Request"] = "";
}
vars["Response"] = "* " + method->get_output_type_name();
if (ClientOnlyStreaming(method) || method->BidiStreaming() || ServerOnlyStreaming(method)) {
vars["Response"] = vars["Service"] + "_" + vars["Method"] + "Client" ;
}
printer->Print(vars, "$Method$(ctx $context$.Context$Request$, \n\topts... $grpc$.CallOption) ($Response$, error)");
}
// Generates Client method source
void GenerateClientMethod(const grpc_generator::Method *method, grpc_generator::Printer *printer,
std::map<grpc::string, grpc::string> vars) {
printer->Print(vars, "func (c *$ServiceUnexported$Client) ");
GenerateClientMethodSignature(method, printer, vars);
printer->Print(" {\n");
printer->Indent();
vars["Method"] = exportName(method->name());
vars["Request"] = (vars["CustomMethodIO"] == "") ? method->get_input_type_name() : vars["CustomMethodIO"];
vars["Response"] = method->get_output_type_name();
vars["FullMethodName"] = "/" + vars["ServicePrefix"] + "." + vars["Service"] + "/" + vars["Method"];
if (method->NoStreaming()) {
printer->Print(vars, "out := new($Response$)\n");
printer->Print(vars, "err := $grpc$.Invoke(ctx, \"$FullMethodName$\", in, out, c.cc, opts...)\n");
printer->Print("if err != nil { return nil, err }\n");
printer->Print("return out, nil\n");
printer->Outdent();
printer->Print("}\n\n");
return;
}
vars["StreamType"] = vars["ServiceUnexported"] + vars["Method"] + "Client";
printer->Print(vars, "stream, err := $grpc$.NewClientStream(ctx, &$MethodDesc$, c.cc, \"$FullMethodName$\", opts...)\n");
printer->Print("if err != nil { return nil, err }\n");
printer->Print(vars, "x := &$StreamType${stream}\n");
if (ServerOnlyStreaming(method)) {
printer->Print("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }\n");
printer->Print("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }\n");
}
printer->Print("return x,nil\n");
printer->Outdent();
printer->Print("}\n\n");
bool genSend = method->BidiStreaming() || ClientOnlyStreaming(method);
bool genRecv = method->BidiStreaming() || ServerOnlyStreaming(method);
bool genCloseAndRecv = ClientOnlyStreaming(method);
//Stream interface
printer->Print(vars, "type $Service$_$Method$Client interface {\n");
printer->Indent();
if (genSend) {
printer->Print(vars, "Send(*$Request$) error\n");
}
if (genRecv) {
printer->Print(vars, "Recv() (*$Response$, error)\n");
}
if (genCloseAndRecv) {
printer->Print(vars, "CloseAndRecv() (*$Response$, error)\n");
}
printer->Print(vars, "$grpc$.ClientStream\n");
printer->Outdent();
printer->Print("}\n\n");
//Stream Client
printer->Print(vars, "type $StreamType$ struct{\n");
printer->Indent();
printer->Print(vars, "$grpc$.ClientStream\n");
printer->Outdent();
printer->Print("}\n\n");
if (genSend) {
printer->Print(vars, "func (x *$StreamType$) Send(m *$Request$) error {\n");
printer->Indent();
printer->Print("return x.ClientStream.SendMsg(m)\n");
printer->Outdent();
printer->Print("}\n\n");
}
if (genRecv) {
printer->Print(vars, "func (x *$StreamType$) Recv() (*$Response$, error) {\n");
printer->Indent();
printer->Print(vars, "m := new($Response$)\n");
printer->Print("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }\n");
printer->Print("return m, nil\n");
printer->Outdent();
printer->Print("}\n\n");
}
if (genCloseAndRecv) {
printer->Print(vars, "func (x *$StreamType$) CloseAndRecv() (*$Response$, error) {\n");
printer->Indent();
printer->Print("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }\n");
printer->Print(vars, "m := new ($Response$)\n");
printer->Print("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }\n");
printer->Print("return m, nil\n");
printer->Outdent();
printer->Print("}\n\n");
}
}
// Generates client API for the service
void GenerateService(const grpc_generator::Service *service, grpc_generator::Printer* printer,
std::map<grpc::string, grpc::string> vars) {
vars["Service"] = exportName(service->name());
// Client Interface
printer->Print(vars, "// Client API for $Service$ service\n");
printer->Print(vars, "type $Service$Client interface{\n");
printer->Indent();
for (int i = 0; i < service->method_count(); i++) {
GenerateClientMethodSignature(service->method(i).get(), printer, vars);
printer->Print("\n");
}
printer->Outdent();
printer->Print("}\n\n");
// Client structure
vars["ServiceUnexported"] = unexportName(vars["Service"]);
printer->Print(vars, "type $ServiceUnexported$Client struct {\n");
printer->Indent();
printer->Print(vars, "cc *$grpc$.ClientConn\n");
printer->Outdent();
printer->Print("}\n\n");
// NewClient
printer->Print(vars, "func New$Service$Client(cc *$grpc$.ClientConn) $Service$Client {\n");
printer->Indent();
printer->Print(vars, "return &$ServiceUnexported$Client{cc}");
printer->Outdent();
printer->Print("\n}\n\n");
int unary_methods = 0, streaming_methods = 0;
vars["ServiceDesc"] = "_" + vars["Service"] + "_serviceDesc";
for (int i = 0; i < service->method_count(); i++) {
auto method = service->method(i);
if (method->NoStreaming()) {
vars["MethodDesc"] = vars["ServiceDesc"] + ".Method[" + as_string(unary_methods) + "]";
unary_methods++;
} else {
vars["MethodDesc"] = vars["ServiceDesc"] + ".Streams[" + as_string(streaming_methods) + "]";
streaming_methods++;
}
GenerateClientMethod(method.get(), printer, vars);
}
//Server Interface
printer->Print(vars, "// Server API for $Service$ service\n");
printer->Print(vars, "type $Service$Server interface {\n");
printer->Indent();
for (int i = 0; i < service->method_count(); i++) {
GenerateServerMethodSignature(service->method(i).get(), printer, vars);
printer->Print("\n");
}
printer->Outdent();
printer->Print("}\n\n");
// Server registration.
printer->Print(vars, "func Register$Service$Server(s *$grpc$.Server, srv $Service$Server) {\n");
printer->Indent();
printer->Print(vars, "s.RegisterService(&$ServiceDesc$, srv)\n");
printer->Outdent();
printer->Print("}\n\n");
for (int i = 0; i < service->method_count(); i++) {
GenerateServerMethod(service->method(i).get(), printer, vars);
printer->Print("\n");
}
//Service Descriptor
printer->Print(vars, "var $ServiceDesc$ = $grpc$.ServiceDesc{\n");
printer->Indent();
printer->Print(vars, "ServiceName: \"$ServicePrefix$.$Service$\",\n");
printer->Print(vars, "HandlerType: (*$Service$Server)(nil),\n");
printer->Print(vars, "Methods: []$grpc$.MethodDesc{\n");
printer->Indent();
for (int i = 0; i < service->method_count(); i++) {
auto method = service->method(i);
vars["Method"] = method->name();
vars["Handler"] = "_" + vars["Service"] + "_" + vars["Method"] + "_Handler";
if (method->NoStreaming()) {
printer->Print("{\n");
printer->Indent();
printer->Print(vars, "MethodName: \"$Method$\",\n");
printer->Print(vars, "Handler: $Handler$, \n");
printer->Outdent();
printer->Print("},\n");
}
}
printer->Outdent();
printer->Print("},\n");
printer->Print(vars, "Streams: []$grpc$.StreamDesc{\n");
printer->Indent();
for (int i = 0; i < service->method_count(); i++) {
auto method = service->method(i);
vars["Method"] = method->name();
vars["Handler"] = "_" + vars["Service"] + "_" + vars["Method"] + "_Handler";
if (!method->NoStreaming()) {
printer->Print("{\n");
printer->Indent();
printer->Print(vars, "StreamName: \"$Method$\",\n");
printer->Print(vars, "Handler: $Handler$, \n");
if (ClientOnlyStreaming(method.get())) {
printer->Print("ClientStreams: true,\n");
} else if (ServerOnlyStreaming(method.get())) {
printer->Print("ServerStreams: true,\n");
} else {
printer->Print("ServerStreams: true,\n");
printer->Print("ClientStreams: true,\n");
}
printer->Outdent();
printer->Print("},\n");
}
}
printer->Outdent();
printer->Print("},\n");
printer->Outdent();
printer->Print("}\n\n");
}
// Returns source for the service
grpc::string GenerateServiceSource(grpc_generator::File *file,
const grpc_generator::Service *service,
grpc_go_generator::Parameters *parameters) {
grpc::string out;
auto p = file->CreatePrinter(&out);
auto printer = p.get();
std::map<grpc::string, grpc::string> vars;
vars["Package"] = parameters->package_name;
vars["ServicePrefix"] = parameters->service_prefix;
vars["grpc"] = "grpc";
vars["context"] = "context";
GenerateImports(file, printer, vars);
if (parameters->custom_method_io_type != "") {
vars["CustomMethodIO"] = parameters->custom_method_io_type;
}
GenerateService(service, printer, vars);
return out;
}
}// Namespace grpc_go_generator
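The hunk for this record switches the generated import from `golang.org/x/net/context` to the standard library `context` package. Given the `GenerateImports` printer calls above, the emitted Go prologue after that change would look roughly like the block below; the `example.proto` source name and `example` package are placeholders, not values taken from this file.

```go
//Generated by gRPC Go plugin
//If you make any local changes, they will be lost
//source: example.proto

package example

import (
	context "context"
	grpc "google.golang.org/grpc"
)
```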
| 1 | 14,022 | this is a file we copy from gRPC, sp ideally it be fixed upstream as well.. | google-flatbuffers | java |
@@ -14,7 +14,6 @@
package zipkin2.storage.cassandra.v1;
import com.datastax.driver.core.Session;
-import com.google.common.cache.CacheBuilderSpec;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.LinkedHashSet; | 1 | /*
* Copyright 2015-2020 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2.storage.cassandra.v1;
import com.datastax.driver.core.Session;
import com.google.common.cache.CacheBuilderSpec;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import zipkin2.Annotation;
import zipkin2.Call;
import zipkin2.Span;
import zipkin2.internal.AggregateCall;
import zipkin2.internal.Nullable;
import zipkin2.internal.V1ThriftSpanWriter;
import zipkin2.storage.SpanConsumer;
import zipkin2.v1.V1Span;
import zipkin2.v1.V2SpanConverter;
final class CassandraSpanConsumer implements SpanConsumer {
final InsertTrace.Factory insertTrace;
final boolean searchEnabled;
final Set<String> autocompleteKeys;
// Everything below here is null when search is disabled
@Nullable final InsertServiceName.Factory insertServiceName;
@Nullable final InsertRemoteServiceName.Factory insertRemoteServiceName;
@Nullable final InsertSpanName.Factory insertSpanName;
@Nullable final CompositeIndexer indexer;
@Nullable final InsertAutocompleteValue.Factory insertAutocompleteValue;
CassandraSpanConsumer(CassandraStorage storage, CacheBuilderSpec indexCacheSpec) {
Session session = storage.session();
Schema.Metadata metadata = storage.metadata();
searchEnabled = storage.searchEnabled;
autocompleteKeys = new LinkedHashSet<>(storage.autocompleteKeys);
int spanTtl = metadata.hasDefaultTtl ? 0 : storage.spanTtl;
insertTrace = new InsertTrace.Factory(session, metadata, spanTtl);
if (!searchEnabled) {
insertServiceName = null;
insertRemoteServiceName = null;
insertSpanName = null;
indexer = null;
insertAutocompleteValue = null;
return;
}
int indexTtl = metadata.hasDefaultTtl ? 0 : storage.indexTtl;
insertServiceName = new InsertServiceName.Factory(storage, indexTtl);
if (metadata.hasRemoteService) {
insertRemoteServiceName = new InsertRemoteServiceName.Factory(storage, indexTtl);
} else {
insertRemoteServiceName = null;
}
insertSpanName = new InsertSpanName.Factory(storage, indexTtl);
indexer = new CompositeIndexer(storage, indexCacheSpec, indexTtl);
if (metadata.hasAutocompleteTags && !storage.autocompleteKeys.isEmpty()) {
insertAutocompleteValue = new InsertAutocompleteValue.Factory(storage, indexTtl);
} else {
insertAutocompleteValue = null;
}
}
/**
* This fans out into many requests, last count was 8 * spans.size. If any of these fail, the
* returned future will fail. Most callers drop or log the result.
*/
@Override
public Call<Void> accept(List<Span> rawSpans) {
V2SpanConverter converter = V2SpanConverter.create();
V1ThriftSpanWriter encoder = new V1ThriftSpanWriter();
Set<InsertTrace.Input> insertTraces = new LinkedHashSet<>();
Set<String> insertServiceNames = new LinkedHashSet<>();
Set<InsertRemoteServiceName.Input> insertRemoteServiceNames = new LinkedHashSet<>();
Set<InsertSpanName.Input> insertSpanNames = new LinkedHashSet<>();
Set<Map.Entry<String, String>> autocompleteTags = new LinkedHashSet<>();
List<Call<Void>> calls = new ArrayList<>();
for (Span v2 : rawSpans) {
V1Span span = converter.convert(v2);
// indexing occurs by timestamp, so derive one if not present.
long ts_micro = v2.timestampAsLong();
if (ts_micro == 0L) ts_micro = guessTimestamp(v2);
insertTraces.add(insertTrace.newInput(span, ByteBuffer.wrap(encoder.write(v2)), ts_micro));
if (!searchEnabled) continue;
if (insertAutocompleteValue != null) {
for (Map.Entry<String, String> entry : v2.tags().entrySet()) {
if (autocompleteKeys.contains(entry.getKey())) autocompleteTags.add(entry);
}
}
// service, span and remote service indexes are refreshed regardless of timestamp
String serviceName = v2.localServiceName();
if (serviceName != null) {
insertServiceNames.add(serviceName);
if (v2.name() != null) insertSpanNames.add(insertSpanName.newInput(serviceName, v2.name()));
if (insertRemoteServiceName != null && v2.remoteServiceName() != null) {
insertRemoteServiceNames.add(
insertRemoteServiceName.newInput(serviceName, v2.remoteServiceName()));
}
}
if (ts_micro == 0L) continue; // search is only valid with a timestamp, don't index w/o it!
indexer.index(v2, calls);
}
for (InsertTrace.Input insert : insertTraces) {
calls.add(insertTrace.create(insert));
}
for (String insert : insertServiceNames) {
insertServiceName.maybeAdd(insert, calls);
}
for (InsertRemoteServiceName.Input insert : insertRemoteServiceNames) {
insertRemoteServiceName.maybeAdd(insert, calls);
}
for (InsertSpanName.Input insert : insertSpanNames) {
insertSpanName.maybeAdd(insert, calls);
}
for (Map.Entry<String, String> autocompleteTag : autocompleteTags) {
insertAutocompleteValue.maybeAdd(autocompleteTag, calls);
}
return calls.isEmpty() ? Call.create(null) : AggregateCall.newVoidCall(calls);
}
/** For testing only: Clears any caches */
void clear() {
if (insertServiceName != null) insertServiceName.clear();
if (insertRemoteServiceName != null) insertRemoteServiceName.clear();
if (insertSpanName != null) insertSpanName.clear();
if (indexer != null) indexer.clear();
if (insertAutocompleteValue != null) insertAutocompleteValue.clear();
}
private static long guessTimestamp(Span span) {
assert 0L == span.timestampAsLong() : "method only for when span has no timestamp";
for (Annotation annotation : span.annotations()) {
if (0L < annotation.timestamp()) return annotation.timestamp();
}
return 0L; // return a timestamp that won't match a query
}
}
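The hunk for this record drops the Guava `CacheBuilderSpec` import and the matching constructor parameter. The `Factory` helpers in this file share a `maybeAdd(input, calls)` / `clear()` shape, so the "skip values written recently" behaviour can live behind one small guard. The sketch below is only an illustration of that idea; it is not the `DelayLimiter` type mentioned in the review, and a real replacement would also need TTL-based expiry.

```java
// Illustrative only: a minimal "skip recently written keys" guard, not Zipkin's DelayLimiter.
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class RecentlyWritten<K> {
  private final Set<K> seen = ConcurrentHashMap.newKeySet();

  /** Returns true the first time a key is offered, false while it is still remembered. */
  boolean shouldWrite(K key) {
    return seen.add(key);
  }

  /** For testing only: clears state, mirroring the Factory.clear() methods above. */
  void clear() {
    seen.clear();
  }
}
```

A factory's `maybeAdd` would then only append its insert `Call` when `shouldWrite(input)` returns true.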
| 1 | 16,860 | changes like this, just strangle guava usages to be replaced by DelayLimiter into one place | openzipkin-zipkin | java |
@@ -27,8 +27,8 @@ const Widget = ( {
className,
slug,
noPadding,
- header: Header,
- footer: Footer,
+ Header,
+ Footer,
} ) => {
return (
<div | 1 | /**
* Widget component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import classnames from 'classnames';
import PropTypes from 'prop-types';
const Widget = ( {
children,
className,
slug,
noPadding,
header: Header,
footer: Footer,
} ) => {
return (
<div
className={ classnames(
'googlesitekit-widget',
`googlesitekit-widget--${ slug }`,
{ 'googlesitekit-widget--no-padding': noPadding },
className
) }
>
{ Header && (
<div className="googlesitekit-widget__header">
<Header />
</div>
) }
<div className="googlesitekit-widget__body">
{ children }
</div>
{ Footer && (
<div className="googlesitekit-widget__footer">
<Footer />
</div>
) }
</div>
);
};
Widget.defaultProps = {
children: undefined,
noPadding: false,
};
Widget.propTypes = {
children: PropTypes.node,
slug: PropTypes.string.isRequired,
noPadding: PropTypes.bool,
header: PropTypes.elementType,
footer: PropTypes.elementType,
};
export default Widget;
| 1 | 35,839 | PropTypes need to be updated accordingly here. Note that the type also needs updating, not just the case. That is, these should both expect a `PropTypes.elementType` now instead of an `element`. | google-site-kit-wp | js |
@@ -221,7 +221,7 @@ int get_ctest_gpu(const char* local_rank_str) {
}
auto const* comma = std::strchr(resource_str, ',');
- if (!comma || strncmp(resource_str, "id:", 3)) {
+ if (!comma || strncmp(resource_str, "id:", 3) != 0) {
std::ostringstream ss;
ss << "Error: invalid value of " << ctest_resource_group_id_name << ": '"
<< resource_str << "'. Raised by Kokkos::Impl::get_ctest_gpu()."; | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Core.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_ExecSpaceInitializer.hpp>
#include <impl/Kokkos_Command_Line_Parsing.hpp>
#include <cctype>
#include <cstring>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include <stack>
#include <functional>
#include <list>
#include <cerrno>
#include <regex>
#ifndef _WIN32
#include <unistd.h>
#else
#include <windows.h>
#endif
//----------------------------------------------------------------------------
namespace {
bool g_is_initialized = false;
bool g_show_warnings = true;
bool g_tune_internals = false;
// When compiling with clang/LLVM and using the GNU (GCC) C++ Standard Library
// (any recent version between GCC 7.3 and GCC 9.2), std::deque SEGV's during
// the unwinding of the atexit(3C) handlers at program termination. However,
// this bug is not observable when building with GCC.
// As an added bonus, std::list<T> provides constant insertion and deletion
// time complexity, which translates to better run-time performance than
// std::deque<T>, which does not offer the same constant-time guarantee for
// inserts/removals since it is implemented as a segmented array.
using hook_function_type = std::function<void()>;
std::stack<hook_function_type, std::list<hook_function_type>> finalize_hooks;
} // namespace
namespace Kokkos {
namespace Impl {
/**
* The category is only used in printing, tools
* get all metadata free of category
*/
using metadata_category_type = std::string;
using metadata_key_type = std::string;
using metadata_value_type = std::string;
std::map<metadata_category_type,
std::map<metadata_key_type, metadata_value_type>>
metadata_map;
void declare_configuration_metadata(const std::string& category,
const std::string& key,
const std::string& value) {
metadata_map[category][key] = value;
}
ExecSpaceManager& ExecSpaceManager::get_instance() {
static ExecSpaceManager space_initializer = {};
return space_initializer;
}
void ExecSpaceManager::register_space_factory(
const std::string name, std::unique_ptr<ExecSpaceInitializerBase> space) {
exec_space_factory_list[name] = std::move(space);
}
void ExecSpaceManager::initialize_spaces(const Kokkos::InitArguments& args) {
// Note: the names of the execution spaces, used as keys in the map, encode
// the ordering of the initialization code from the old initialization stuff.
// Eventually, we may want to do something less brittle than this, but for now
// we're just preserving compatibility with the old implementation.
for (auto& to_init : exec_space_factory_list) {
to_init.second->initialize(args);
}
}
void ExecSpaceManager::finalize_spaces(const bool all_spaces) {
for (auto& to_finalize : exec_space_factory_list) {
to_finalize.second->finalize(all_spaces);
}
}
void ExecSpaceManager::static_fence() {
for (auto& to_fence : exec_space_factory_list) {
to_fence.second->fence();
}
}
void ExecSpaceManager::static_fence(const std::string& name) {
for (auto& to_fence : exec_space_factory_list) {
to_fence.second->fence(name);
}
}
void ExecSpaceManager::print_configuration(std::ostream& msg,
const bool detail) {
for (auto& to_print : exec_space_factory_list) {
to_print.second->print_configuration(msg, detail);
}
}
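// Maps this process' local MPI rank to the GPU id that CTest assigned to the
// corresponding resource group (via the CTEST_RESOURCE_GROUP_* environment variables).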
int get_ctest_gpu(const char* local_rank_str) {
auto const* ctest_kokkos_device_type =
std::getenv("CTEST_KOKKOS_DEVICE_TYPE");
if (!ctest_kokkos_device_type) {
return 0;
}
auto const* ctest_resource_group_count_str =
std::getenv("CTEST_RESOURCE_GROUP_COUNT");
if (!ctest_resource_group_count_str) {
return 0;
}
// Make sure rank is within bounds of resource groups specified by CTest
auto resource_group_count = std::stoi(ctest_resource_group_count_str);
auto local_rank = std::stoi(local_rank_str);
if (local_rank >= resource_group_count) {
std::ostringstream ss;
ss << "Error: local rank " << local_rank
<< " is outside the bounds of resource groups provided by CTest. Raised"
<< " by Kokkos::Impl::get_ctest_gpu().";
throw_runtime_exception(ss.str());
}
// Get the resource types allocated to this resource group
std::ostringstream ctest_resource_group;
ctest_resource_group << "CTEST_RESOURCE_GROUP_" << local_rank;
std::string ctest_resource_group_name = ctest_resource_group.str();
auto const* ctest_resource_group_str =
std::getenv(ctest_resource_group_name.c_str());
if (!ctest_resource_group_str) {
std::ostringstream ss;
ss << "Error: " << ctest_resource_group_name << " is not specified. Raised"
<< " by Kokkos::Impl::get_ctest_gpu().";
throw_runtime_exception(ss.str());
}
// Look for the device type specified in CTEST_KOKKOS_DEVICE_TYPE
bool found_device = false;
std::string ctest_resource_group_cxx_str = ctest_resource_group_str;
std::istringstream instream(ctest_resource_group_cxx_str);
while (true) {
std::string devName;
std::getline(instream, devName, ',');
if (devName == ctest_kokkos_device_type) {
found_device = true;
break;
}
if (instream.eof() || devName.length() == 0) {
break;
}
}
if (!found_device) {
std::ostringstream ss;
ss << "Error: device type '" << ctest_kokkos_device_type
<< "' not included in " << ctest_resource_group_name
<< ". Raised by Kokkos::Impl::get_ctest_gpu().";
throw_runtime_exception(ss.str());
}
// Get the device ID
std::string ctest_device_type_upper = ctest_kokkos_device_type;
for (auto& c : ctest_device_type_upper) {
c = std::toupper(c);
}
ctest_resource_group << "_" << ctest_device_type_upper;
std::string ctest_resource_group_id_name = ctest_resource_group.str();
auto resource_str = std::getenv(ctest_resource_group_id_name.c_str());
if (!resource_str) {
std::ostringstream ss;
ss << "Error: " << ctest_resource_group_id_name
<< " is not specified. Raised by Kokkos::Impl::get_ctest_gpu().";
throw_runtime_exception(ss.str());
}
auto const* comma = std::strchr(resource_str, ',');
if (!comma || strncmp(resource_str, "id:", 3)) {
std::ostringstream ss;
ss << "Error: invalid value of " << ctest_resource_group_id_name << ": '"
<< resource_str << "'. Raised by Kokkos::Impl::get_ctest_gpu().";
throw_runtime_exception(ss.str());
}
std::string id(resource_str + 3, comma - resource_str - 3);
return std::stoi(id.c_str());
}
// function to extract gpu # from args
int get_gpu(const InitArguments& args) {
int use_gpu = args.device_id;
const int ndevices = [](int num_devices) -> int {
if (num_devices > 0) return num_devices;
#if defined(KOKKOS_ENABLE_CUDA)
return Cuda::detect_device_count();
#elif defined(KOKKOS_ENABLE_HIP)
return Experimental::HIP::detect_device_count();
#elif defined(KOKKOS_ENABLE_SYCL)
return sycl::device::get_devices(sycl::info::device_type::gpu).size();
#else
return num_devices;
#endif
}(args.ndevices);
const int skip_device = args.skip_device;
// if the exact device is not set, but ndevices was given, assign round-robin
// using on-node MPI rank
if (use_gpu < 0) {
auto const* local_rank_str =
std::getenv("OMPI_COMM_WORLD_LOCAL_RANK"); // OpenMPI
if (!local_rank_str)
local_rank_str = std::getenv("MV2_COMM_WORLD_LOCAL_RANK"); // MVAPICH2
if (!local_rank_str)
local_rank_str = std::getenv("SLURM_LOCALID"); // SLURM
auto const* ctest_kokkos_device_type =
std::getenv("CTEST_KOKKOS_DEVICE_TYPE"); // CTest
auto const* ctest_resource_group_count_str =
std::getenv("CTEST_RESOURCE_GROUP_COUNT"); // CTest
if (ctest_kokkos_device_type && ctest_resource_group_count_str &&
local_rank_str) {
// Use the device assigned by CTest
use_gpu = get_ctest_gpu(local_rank_str);
} else if (ndevices > 0) {
// Use the device assigned by the rank
if (local_rank_str) {
auto local_rank = std::stoi(local_rank_str);
use_gpu = local_rank % ndevices;
} else {
// user only gave us ndevices, but the MPI environment variable wasn't
// set. start with GPU 0 at this point
use_gpu = 0;
}
}
// shift assignments over by one so no one is assigned to "skip_device"
if (use_gpu >= skip_device) ++use_gpu;
}
return use_gpu;
}
namespace {
void initialize_backends(const InitArguments& args) {
// This is an experimental setting
// For KNL in Flat mode this variable should be set, so that
// memkind allocates high bandwidth memory correctly.
#ifdef KOKKOS_ENABLE_HBWSPACE
setenv("MEMKIND_HBW_NODES", "1", 0);
#endif
Impl::ExecSpaceManager::get_instance().initialize_spaces(args);
}
void initialize_profiling(const Tools::InitArguments& args) {
auto initialization_status =
Kokkos::Tools::Impl::initialize_tools_subsystem(args);
if (initialization_status.result ==
Kokkos::Tools::Impl::InitializationStatus::InitializationResult::
help_request) {
g_is_initialized = true;
::Kokkos::finalize();
std::exit(EXIT_SUCCESS);
} else if (initialization_status.result ==
Kokkos::Tools::Impl::InitializationStatus::InitializationResult::
success) {
Kokkos::Tools::parseArgs(args.args);
for (const auto& category_value : Kokkos::Impl::metadata_map) {
for (const auto& key_value : category_value.second) {
Kokkos::Tools::declareMetadata(key_value.first, key_value.second);
}
}
} else {
std::cerr << "Error initializing Kokkos Tools subsystem" << std::endl;
g_is_initialized = true;
::Kokkos::finalize();
std::exit(EXIT_FAILURE);
}
}
std::string version_string_from_int(int version_number) {
std::stringstream str_builder;
str_builder << version_number / 10000 << "." << (version_number % 10000) / 100
<< "." << version_number % 100;
return str_builder.str();
}
void pre_initialize_internal(const InitArguments& args) {
if (args.disable_warnings) g_show_warnings = false;
if (args.tune_internals) g_tune_internals = true;
declare_configuration_metadata("version_info", "Kokkos Version",
version_string_from_int(KOKKOS_VERSION));
#ifdef KOKKOS_COMPILER_APPLECC
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_APPLECC",
std::to_string(KOKKOS_COMPILER_APPLECC));
declare_configuration_metadata("tools_only", "compiler_family", "apple");
#endif
#ifdef KOKKOS_COMPILER_CLANG
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_CLANG",
std::to_string(KOKKOS_COMPILER_CLANG));
declare_configuration_metadata("tools_only", "compiler_family", "clang");
#endif
#ifdef KOKKOS_COMPILER_CRAYC
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_CRAYC",
std::to_string(KOKKOS_COMPILER_CRAYC));
declare_configuration_metadata("tools_only", "compiler_family", "cray");
#endif
#ifdef KOKKOS_COMPILER_GNU
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_GNU",
std::to_string(KOKKOS_COMPILER_GNU));
declare_configuration_metadata("tools_only", "compiler_family", "gnu");
#endif
#ifdef KOKKOS_COMPILER_IBM
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_IBM",
std::to_string(KOKKOS_COMPILER_IBM));
declare_configuration_metadata("tools_only", "compiler_family", "ibm");
#endif
#ifdef KOKKOS_COMPILER_INTEL
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_INTEL",
std::to_string(KOKKOS_COMPILER_INTEL));
declare_configuration_metadata("tools_only", "compiler_family", "intel");
#endif
#ifdef KOKKOS_COMPILER_NVCC
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_NVCC",
std::to_string(KOKKOS_COMPILER_NVCC));
declare_configuration_metadata("tools_only", "compiler_family", "nvcc");
#endif
#ifdef KOKKOS_COMPILER_PGI
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_PGI",
std::to_string(KOKKOS_COMPILER_PGI));
declare_configuration_metadata("tools_only", "compiler_family", "pgi");
#endif
#ifdef KOKKOS_COMPILER_MSVC
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_MSVC",
std::to_string(KOKKOS_COMPILER_MSVC));
declare_configuration_metadata("tools_only", "compiler_family", "msvc");
#endif
#ifdef KOKKOS_ENABLE_ISA_KNC
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_KNC",
"yes");
#else
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_KNC", "no");
#endif
#ifdef KOKKOS_ENABLE_ISA_POWERPCLE
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_POWERPCLE",
"yes");
#else
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_POWERPCLE",
"no");
#endif
#ifdef KOKKOS_ENABLE_ISA_X86_64
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_X86_64",
"yes");
#else
declare_configuration_metadata("architecture", "KOKKOS_ENABLE_ISA_X86_64",
"no");
#endif
#ifdef KOKKOS_ENABLE_GNU_ATOMICS
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "yes");
#else
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "no");
#endif
#ifdef KOKKOS_ENABLE_INTEL_ATOMICS
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
"yes");
#else
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
"no");
#endif
#ifdef KOKKOS_ENABLE_WINDOWS_ATOMICS
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
"yes");
#else
declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
"no");
#endif
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_IVDEP",
"yes");
#else
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_IVDEP",
"no");
#endif
#ifdef KOKKOS_ENABLE_PRAGMA_LOOPCOUNT
declare_configuration_metadata("vectorization",
"KOKKOS_ENABLE_PRAGMA_LOOPCOUNT", "yes");
#else
declare_configuration_metadata("vectorization",
"KOKKOS_ENABLE_PRAGMA_LOOPCOUNT", "no");
#endif
#ifdef KOKKOS_ENABLE_PRAGMA_SIMD
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
"yes");
#else
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
"no");
#endif
#ifdef KOKKOS_ENABLE_PRAGMA_UNROLL
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_UNROLL",
"yes");
#else
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_UNROLL",
"no");
#endif
#ifdef KOKKOS_ENABLE_PRAGMA_VECTOR
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_VECTOR",
"yes");
#else
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_VECTOR",
"no");
#endif
#ifdef KOKKOS_ENABLE_HBWSPACE
declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "yes");
#else
declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "no");
#endif
#ifdef KOKKOS_ENABLE_INTEL_MM_ALLOC
declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
"yes");
#else
declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
"no");
#endif
#ifdef KOKKOS_ENABLE_POSIX_MEMALIGN
declare_configuration_metadata("memory", "KOKKOS_ENABLE_POSIX_MEMALIGN",
"yes");
#else
declare_configuration_metadata("memory", "KOKKOS_ENABLE_POSIX_MEMALIGN",
"no");
#endif
#ifdef KOKKOS_ENABLE_ASM
declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "no");
#endif
#ifdef KOKKOS_ENABLE_CXX14
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "no");
#endif
#ifdef KOKKOS_ENABLE_CXX17
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX17", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX17", "no");
#endif
#ifdef KOKKOS_ENABLE_CXX20
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX20", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX20", "no");
#endif
#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
declare_configuration_metadata("options", "KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK",
"yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK",
"no");
#endif
#ifdef KOKKOS_ENABLE_HWLOC
declare_configuration_metadata("options", "KOKKOS_ENABLE_HWLOC", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_HWLOC", "no");
#endif
#ifdef KOKKOS_ENABLE_LIBRT
declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "no");
#endif
declare_configuration_metadata("architecture", "Default Device",
typeid(Kokkos::DefaultExecutionSpace).name());
}
void post_initialize_internal(const InitArguments& args) {
initialize_profiling(args.impl_get_tools_init_arguments());
g_is_initialized = true;
}
void initialize_internal(const InitArguments& args) {
pre_initialize_internal(args);
initialize_backends(args);
post_initialize_internal(args);
}
void finalize_internal(const bool all_spaces = false) {
typename decltype(finalize_hooks)::size_type numSuccessfulCalls = 0;
while (!finalize_hooks.empty()) {
auto f = finalize_hooks.top();
try {
f();
} catch (...) {
std::cerr << "Kokkos::finalize: A finalize hook (set via "
"Kokkos::push_finalize_hook) threw an exception that it did "
"not catch."
" Per std::atexit rules, this results in std::terminate. "
"This is "
"finalize hook number "
<< numSuccessfulCalls
<< " (1-based indexing) "
"out of "
<< finalize_hooks.size()
<< " to call. Remember that "
"Kokkos::finalize calls finalize hooks in reverse order "
"from how they "
"were pushed."
<< std::endl;
std::terminate();
}
finalize_hooks.pop();
++numSuccessfulCalls;
}
Kokkos::Profiling::finalize();
Impl::ExecSpaceManager::get_instance().finalize_spaces(all_spaces);
g_is_initialized = false;
g_show_warnings = true;
g_tune_internals = false;
}
void fence_internal(const std::string& name) {
Impl::ExecSpaceManager::get_instance().static_fence(name);
}
unsigned get_process_id() {
#ifdef _WIN32
return unsigned(GetCurrentProcessId());
#else
return unsigned(getpid());
#endif
}
void parse_command_line_arguments(int& narg, char* arg[],
InitArguments& arguments) {
auto& num_threads = arguments.num_threads;
auto& numa = arguments.num_numa;
auto& device = arguments.device_id;
auto& ndevices = arguments.ndevices;
auto& skip_device = arguments.skip_device;
auto& disable_warnings = arguments.disable_warnings;
auto& tune_internals = arguments.tune_internals;
auto& tool_help = arguments.tool_help;
auto& tool_args = arguments.tool_args;
auto& tool_lib = arguments.tool_lib;
bool kokkos_threads_found = false;
bool kokkos_numa_found = false;
bool kokkos_device_found = false;
bool kokkos_ndevices_found = false;
auto tools_init_arguments = arguments.impl_get_tools_init_arguments();
Tools::Impl::parse_command_line_arguments(narg, arg, tools_init_arguments);
if (tools_init_arguments.tune_internals !=
Kokkos::Tools::InitArguments::PossiblyUnsetOption::unset) {
tune_internals = (tools_init_arguments.tune_internals ==
Kokkos::Tools::InitArguments::PossiblyUnsetOption::on);
}
if (tools_init_arguments.help !=
Kokkos::Tools::InitArguments::PossiblyUnsetOption::unset) {
tool_help = (tools_init_arguments.help ==
Kokkos::Tools::InitArguments::PossiblyUnsetOption::on);
}
if (tools_init_arguments.lib !=
Kokkos::Tools::InitArguments::unset_string_option) {
tool_lib = tools_init_arguments.lib;
}
if (tools_init_arguments.args !=
Kokkos::Tools::InitArguments::unset_string_option) {
tool_args = tools_init_arguments.args;
}
int iarg = 0;
while (iarg < narg) {
if (check_int_arg(arg[iarg], "--kokkos-threads", &num_threads)) {
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
kokkos_threads_found = true;
narg--;
} else if (!kokkos_threads_found &&
check_int_arg(arg[iarg], "--threads", &num_threads)) {
iarg++;
} else if (check_int_arg(arg[iarg], "--kokkos-numa", &numa)) {
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
kokkos_numa_found = true;
narg--;
} else if (!kokkos_numa_found &&
check_int_arg(arg[iarg], "--numa", &numa)) {
iarg++;
} else if (check_int_arg(arg[iarg], "--kokkos-device-id", &device) ||
check_int_arg(arg[iarg], "--kokkos-device", &device)) {
if (check_arg(arg[iarg], "--kokkos-device")) {
warn_deprecated_command_line_argument("--kokkos-device",
"--kokkos-device-id");
}
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
kokkos_device_found = true;
narg--;
} else if (!kokkos_device_found &&
(check_int_arg(arg[iarg], "--device-id", &device) ||
check_int_arg(arg[iarg], "--device", &device))) {
if (check_arg(arg[iarg], "--device")) {
warn_deprecated_command_line_argument("--device", "--device-id");
}
iarg++;
} else if (check_arg(arg[iarg], "--kokkos-num-devices") ||
check_arg(arg[iarg], "--num-devices") ||
check_arg(arg[iarg], "--kokkos-ndevices") ||
check_arg(arg[iarg], "--ndevices")) {
if (check_arg(arg[iarg], "--ndevices")) {
warn_deprecated_command_line_argument("--ndevices", "--num-devices");
}
if (check_arg(arg[iarg], "--kokkos-ndevices")) {
warn_deprecated_command_line_argument("--kokkos-ndevices",
"--kokkos-num-devices");
}
// Find the number of devices (expecting --num-devices=INT[,INT])
if (!((strncmp(arg[iarg], "--kokkos-num-devices=", 21) == 0) ||
(strncmp(arg[iarg], "--num-ndevices=", 14) == 0) ||
(strncmp(arg[iarg], "--kokkos-ndevices=", 18) == 0) ||
(strncmp(arg[iarg], "--ndevices=", 11) == 0)))
throw_runtime_exception(
"Error: expecting an '=INT[,INT]' after command line argument "
"'--num-devices/--kokkos-num-devices'. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
char* num1 = strchr(arg[iarg], '=') + 1;
char* num2 = strpbrk(num1, ",");
int num1_len = num2 == nullptr ? strlen(num1) : num2 - num1;
char* num1_only = new char[num1_len + 1];
strncpy(num1_only, num1, num1_len);
num1_only[num1_len] = '\0';
if (!is_unsigned_int(num1_only) || (strlen(num1_only) == 0)) {
throw_runtime_exception(
"Error: expecting an integer number after command line argument "
"'--kokkos-numdevices'. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
}
if (check_arg(arg[iarg], "--kokkos-num-devices") ||
check_arg(arg[iarg], "--kokkos-ndevices") || !kokkos_ndevices_found)
ndevices = std::stoi(num1_only);
delete[] num1_only;
if (num2 != nullptr) {
if ((!is_unsigned_int(num2 + 1)) || (strlen(num2) == 1))
throw_runtime_exception(
"Error: expecting an integer number after command line argument "
"'--kokkos-num-devices=XX,'. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (check_arg(arg[iarg], "--kokkos-num-devices") ||
check_arg(arg[iarg], "--kokkos-ndevices") || !kokkos_ndevices_found)
skip_device = std::stoi(num2 + 1);
}
// Remove the --kokkos-num-devices argument from the list but leave
// --num-devices
if (check_arg(arg[iarg], "--kokkos-num-devices") ||
check_arg(arg[iarg], "--kokkos-ndevices")) {
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
kokkos_ndevices_found = true;
narg--;
} else {
iarg++;
}
} else if (check_arg(arg[iarg], "--kokkos-disable-warnings")) {
disable_warnings = true;
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
narg--;
} else if (check_arg(arg[iarg], "--kokkos-tune-internals")) {
tune_internals = true;
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
narg--;
} else if (check_arg(arg[iarg], "--kokkos-help") ||
check_arg(arg[iarg], "--help")) {
auto const help_message = R"(
--------------------------------------------------------------------------------
-------------Kokkos command line arguments--------------------------------------
--------------------------------------------------------------------------------
The following arguments exist also without prefix 'kokkos' (e.g. --help).
The prefixed arguments will be removed from the list by Kokkos::initialize(),
the non-prefixed ones are not removed. Prefixed versions take precedence over
non prefixed ones, and the last occurrence of an argument overwrites prior
settings.
--kokkos-help : print this message
--kokkos-disable-warnings : disable kokkos warning messages
--kokkos-tune-internals : allow Kokkos to autotune policies and declare
tuning features through the tuning system. If
left off, Kokkos uses heuristics
--kokkos-threads=INT : specify total number of threads or
number of threads per NUMA region if
used in conjunction with '--numa' option.
--kokkos-numa=INT : specify number of NUMA regions used by process.
--kokkos-device-id=INT : specify device id to be used by Kokkos.
--kokkos-num-devices=INT[,INT] : used when running MPI jobs. Specify number of
devices per node to be used. Process to device
mapping happens by obtaining the local MPI rank
and assigning devices round-robin. The optional
second argument allows for an existing device
to be ignored. This is most useful on workstations
with multiple GPUs of which one is used to drive
screen output.
--kokkos-tools-library : Equivalent to KOKKOS_PROFILE_LIBRARY environment
variable. Must either be full path to library or
name of library if the path is present in the
runtime library search path (e.g. LD_LIBRARY_PATH)
--kokkos-tools-help : Query the (loaded) kokkos-tool for its command-line
option support (which should then be passed via
--kokkos-tools-args="...")
--kokkos-tools-args=STR : A single (quoted) string of options which will be
whitespace delimited and passed to the loaded
kokkos-tool as command-line arguments. E.g.
`<EXE> --kokkos-tools-args="-c input.txt"` will
pass `<EXE> -c input.txt` as argc/argv to tool
--------------------------------------------------------------------------------
)";
std::cout << help_message << std::endl;
// Remove the --kokkos-help argument from the list but leave --help
if (check_arg(arg[iarg], "--kokkos-help")) {
for (int k = iarg; k < narg - 1; k++) {
arg[k] = arg[k + 1];
}
narg--;
} else {
iarg++;
}
} else
iarg++;
}
if ((tools_init_arguments.args ==
Kokkos::Tools::InitArguments::unset_string_option) &&
narg > 0)
tool_args = arg[0];
}
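// The KOKKOS_* environment variables mirror the command line flags; if both are set
// and disagree, a runtime exception is thrown instead of silently preferring one source.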
void parse_environment_variables(InitArguments& arguments) {
auto& num_threads = arguments.num_threads;
auto& numa = arguments.num_numa;
auto& device = arguments.device_id;
auto& ndevices = arguments.ndevices;
auto& skip_device = arguments.skip_device;
auto& disable_warnings = arguments.disable_warnings;
auto& tune_internals = arguments.tune_internals;
auto& tool_lib = arguments.tool_lib;
auto& tool_args = arguments.tool_args;
auto& tool_help = arguments.tool_help;
char* endptr;
auto tools_init_arguments = arguments.impl_get_tools_init_arguments();
auto init_result =
Tools::Impl::parse_environment_variables(tools_init_arguments);
if (init_result.result == Kokkos::Tools::Impl::InitializationStatus::
environment_argument_mismatch) {
Impl::throw_runtime_exception(init_result.error_message);
}
tool_lib = tools_init_arguments.lib;
if (tools_init_arguments.tune_internals !=
Kokkos::Tools::InitArguments::PossiblyUnsetOption::unset) {
tune_internals = (tools_init_arguments.tune_internals ==
Kokkos::Tools::InitArguments::PossiblyUnsetOption::on)
? true
: false;
}
if (tools_init_arguments.help !=
Kokkos::Tools::InitArguments::PossiblyUnsetOption::unset) {
tool_help = (tools_init_arguments.help ==
Kokkos::Tools::InitArguments::PossiblyUnsetOption::on)
? true
: false;
}
if (tools_init_arguments.lib !=
Kokkos::Tools::InitArguments::unset_string_option) {
tool_lib = tools_init_arguments.lib;
}
if (tools_init_arguments.args !=
Kokkos::Tools::InitArguments::unset_string_option) {
tool_args = tools_init_arguments.args;
}
auto env_num_threads_str = std::getenv("KOKKOS_NUM_THREADS");
if (env_num_threads_str != nullptr) {
errno = 0;
auto env_num_threads = std::strtol(env_num_threads_str, &endptr, 10);
if (endptr == env_num_threads_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_NUM_THREADS to an integer. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_NUM_THREADS out of range of representable values by "
"an integer. Raised by Kokkos::initialize(int narg, char* argc[]).");
if ((num_threads != -1) && (env_num_threads != num_threads))
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-threads and "
"KOKKOS_NUM_THREADS if both are set. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
else
num_threads = env_num_threads;
}
auto env_numa_str = std::getenv("KOKKOS_NUMA");
if (env_numa_str != nullptr) {
errno = 0;
auto env_numa = std::strtol(env_numa_str, &endptr, 10);
if (endptr == env_numa_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_NUMA to an integer. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_NUMA out of range of representable values by an "
"integer. Raised by Kokkos::initialize(int narg, char* argc[]).");
if ((numa != -1) && (env_numa != numa))
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-numa and KOKKOS_NUMA if "
"both are set. Raised by Kokkos::initialize(int narg, char* "
"argc[]).");
else
numa = env_numa;
}
auto env_device_str = std::getenv("KOKKOS_DEVICE_ID");
if (env_device_str != nullptr) {
errno = 0;
auto env_device = std::strtol(env_device_str, &endptr, 10);
if (endptr == env_device_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_DEVICE_ID to an integer. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_DEVICE_ID out of range of representable values by an "
"integer. Raised by Kokkos::initialize(int narg, char* argc[]).");
if ((device != -1) && (env_device != device))
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-device and "
"KOKKOS_DEVICE_ID if both are set. Raised by Kokkos::initialize(int "
"narg, char* argc[]).");
else
device = env_device;
}
auto env_rdevices_str = std::getenv("KOKKOS_RAND_DEVICES");
auto env_ndevices_str = std::getenv("KOKKOS_NUM_DEVICES");
if (env_ndevices_str != nullptr || env_rdevices_str != nullptr) {
errno = 0;
if (env_ndevices_str != nullptr && env_rdevices_str != nullptr) {
Impl::throw_runtime_exception(
"Error: cannot specify both KOKKOS_NUM_DEVICES and "
"KOKKOS_RAND_DEVICES. "
"Raised by Kokkos::initialize(int narg, char* argc[]).");
}
int rdevices = -1;
if (env_ndevices_str != nullptr) {
auto env_ndevices = std::strtol(env_ndevices_str, &endptr, 10);
if (endptr == env_ndevices_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_NUM_DEVICES to an integer. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_NUM_DEVICES out of range of representable values by "
"an integer. Raised by Kokkos::initialize(int narg, char* "
"argc[]).");
if ((ndevices != -1) && (env_ndevices != ndevices))
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-ndevices and "
"KOKKOS_NUM_DEVICES if both are set. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
else
ndevices = env_ndevices;
} else { // you set KOKKOS_RAND_DEVICES
auto env_rdevices = std::strtol(env_rdevices_str, &endptr, 10);
if (endptr == env_rdevices_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_RAND_DEVICES to an integer. Raised "
"by Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_RAND_DEVICES out of range of representable values "
"by an integer. Raised by Kokkos::initialize(int narg, char* "
"argc[]).");
else
rdevices = env_rdevices;
}
// Skip device
auto env_skip_device_str = std::getenv("KOKKOS_SKIP_DEVICE");
if (env_skip_device_str != nullptr) {
errno = 0;
auto env_skip_device = std::strtol(env_skip_device_str, &endptr, 10);
if (endptr == env_skip_device_str)
Impl::throw_runtime_exception(
"Error: cannot convert KOKKOS_SKIP_DEVICE to an integer. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
if (errno == ERANGE)
Impl::throw_runtime_exception(
"Error: KOKKOS_SKIP_DEVICE out of range of representable values by "
"an integer. Raised by Kokkos::initialize(int narg, char* "
"argc[]).");
if ((skip_device != 9999) && (env_skip_device != skip_device))
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-ndevices and "
"KOKKOS_SKIP_DEVICE if both are set. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
else
skip_device = env_skip_device;
}
if (rdevices > 0) {
if (skip_device > 0 && rdevices == 1)
Impl::throw_runtime_exception(
"Error: cannot KOKKOS_SKIP_DEVICE the only KOKKOS_RAND_DEVICE. "
"Raised by Kokkos::initialize(int narg, char* argc[]).");
std::srand(get_process_id());
while (device < 0) {
int test_device = std::rand() % rdevices;
if (test_device != skip_device) device = test_device;
}
}
}
char* env_disablewarnings_str = std::getenv("KOKKOS_DISABLE_WARNINGS");
if (env_disablewarnings_str != nullptr) {
std::string env_str(env_disablewarnings_str); // deep-copies string
for (char& c : env_str) {
c = toupper(c);
}
const auto _rc = std::regex_constants::icase | std::regex_constants::egrep;
const auto _re = std::regex("^(true|on|yes|[1-9])$", _rc);
if (std::regex_match(env_str, _re))
disable_warnings = true;
else if (disable_warnings)
Impl::throw_runtime_exception(
"Error: expecting a match between --kokkos-disable-warnings and "
"KOKKOS_DISABLE_WARNINGS if both are set. Raised by "
"Kokkos::initialize(int narg, char* argc[]).");
}
}
} // namespace
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
namespace Kokkos {
void initialize(int& narg, char* arg[]) {
InitArguments arguments;
Impl::parse_command_line_arguments(narg, arg, arguments);
Impl::parse_environment_variables(arguments);
Impl::initialize_internal(arguments);
}
void initialize(InitArguments arguments) {
Impl::parse_environment_variables(arguments);
Impl::initialize_internal(arguments);
}
namespace Impl {
void pre_initialize(const InitArguments& args) {
pre_initialize_internal(args);
}
void post_initialize(const InitArguments& args) {
post_initialize_internal(args);
}
} // namespace Impl
void push_finalize_hook(std::function<void()> f) { finalize_hooks.push(f); }
void finalize() { Impl::finalize_internal(); }
void finalize_all() {
enum : bool { all_spaces = true };
Impl::finalize_internal(all_spaces);
}
void fence() { Impl::fence_internal("Kokkos::fence: Unnamed Global Fence"); }
void fence(const std::string& name) { Impl::fence_internal(name); }
void print_helper(std::ostringstream& out,
const std::map<std::string, std::string>& print_me) {
for (const auto& kv : print_me) {
out << kv.first << ": " << kv.second << '\n';
}
}
void print_configuration(std::ostream& out, const bool detail) {
std::ostringstream msg;
print_helper(msg, Kokkos::Impl::metadata_map["version_info"]);
msg << "Compiler:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["compiler_version"]);
msg << "Architecture:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["architecture"]);
msg << "Atomics:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["atomics"]);
msg << "Vectorization:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["vectorization"]);
msg << "Memory:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["memory"]);
msg << "Options:" << std::endl;
print_helper(msg, Kokkos::Impl::metadata_map["options"]);
Impl::ExecSpaceManager::get_instance().print_configuration(msg, detail);
out << msg.str() << std::endl;
}
bool is_initialized() noexcept { return g_is_initialized; }
bool show_warnings() noexcept { return g_show_warnings; }
bool tune_internals() noexcept { return g_tune_internals; }
#ifdef KOKKOS_COMPILER_PGI
namespace Impl {
// Bizarrely, an extra jump instruction forces the PGI compiler to not have a
// bug related to (probably?) empty base optimization and/or aggregate
// construction.
void _kokkos_pgi_compiler_bug_workaround() {}
} // end namespace Impl
#endif
} // namespace Kokkos
| 1 | 32,464 | maybe extra paren around `strncmp(...) != 0` | kokkos-kokkos | cpp |
@@ -510,6 +510,8 @@ public class Windows implements TrayListener, TopBarWidget.Delegate, TitleBarWid
TelemetryWrapper.resetOpenedWindowsCount(mRegularWindows.size(), false);
TelemetryWrapper.resetOpenedWindowsCount(mPrivateWindows.size(), true);
+ GleanMetricsService.resetOpenedWindowsCount(mRegularWindows.size(), false);
+ GleanMetricsService.resetOpenedWindowsCount(mPrivateWindows.size(), true);
}
public boolean isPaused() { | 1 | package org.mozilla.vrbrowser.ui.widgets;
import android.content.Context;
import android.util.Log;
import androidx.annotation.IntDef;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import org.mozilla.geckoview.GeckoSession;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.VRBrowserApplication;
import org.mozilla.vrbrowser.browser.Accounts;
import org.mozilla.vrbrowser.browser.Media;
import org.mozilla.vrbrowser.browser.Services;
import org.mozilla.vrbrowser.browser.SettingsStore;
import org.mozilla.vrbrowser.browser.engine.Session;
import org.mozilla.vrbrowser.browser.engine.SessionState;
import org.mozilla.vrbrowser.browser.engine.SessionStore;
import org.mozilla.vrbrowser.telemetry.GleanMetricsService;
import org.mozilla.vrbrowser.telemetry.TelemetryWrapper;
import org.mozilla.vrbrowser.ui.widgets.dialogs.PromptDialogWidget;
import org.mozilla.vrbrowser.ui.widgets.dialogs.UIDialog;
import org.mozilla.vrbrowser.utils.BitmapCache;
import org.mozilla.vrbrowser.utils.ConnectivityReceiver;
import org.mozilla.vrbrowser.utils.SystemUtils;
import org.mozilla.vrbrowser.utils.UrlUtils;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import mozilla.components.concept.sync.AccountObserver;
import mozilla.components.concept.sync.AuthType;
import mozilla.components.concept.sync.OAuthAccount;
import mozilla.components.concept.sync.Profile;
import mozilla.components.concept.sync.TabData;
import static org.mozilla.vrbrowser.ui.widgets.settings.SettingsView.SettingViewType.FXA;
public class Windows implements TrayListener, TopBarWidget.Delegate, TitleBarWidget.Delegate,
WindowWidget.WindowListener, TabsWidget.TabDelegate, Services.TabReceivedDelegate {
private static final String LOGTAG = SystemUtils.createLogtag(Windows.class);
@IntDef(value = { OPEN_IN_FOREGROUND, OPEN_IN_BACKGROUND, OPEN_IN_NEW_WINDOW})
public @interface NewTabLocation {}
public static final int OPEN_IN_FOREGROUND = 0;
public static final int OPEN_IN_BACKGROUND = 1;
public static final int OPEN_IN_NEW_WINDOW = 2;
private static final String WINDOWS_SAVE_FILENAME = "windows_state.json";
private static final int TAB_ADDED_NOTIFICATION_ID = 0;
private static final int TAB_SENT_NOTIFICATION_ID = 1;
private static final int BOOKMARK_ADDED_NOTIFICATION_ID = 2;
// Restore URLs blacklist
private static final List<String> SAVE_BLACKLIST = Stream.of(
"https://accounts.firefox.com/oauth/"
).collect(Collectors.toList());
class WindowState {
WindowPlacement placement;
int textureWidth;
int textureHeight;
float worldWidth;
int tabIndex = -1;
PanelType panelType;
public void load(WindowWidget aWindow, WindowsState aState, int aTabIndex) {
WidgetPlacement widgetPlacement;
if (aWindow.isFullScreen()) {
widgetPlacement = aWindow.getBeforeFullscreenPlacement();
placement = aWindow.getWindowPlacementBeforeFullscreen();
} else if (aWindow.isResizing()) {
widgetPlacement = aWindow.getBeforeResizePlacement();
placement = aWindow.getWindowPlacement();
} else {
widgetPlacement = aWindow.getPlacement();
placement = aWindow.getWindowPlacement();
}
textureWidth = widgetPlacement.width;
textureHeight = widgetPlacement.height;
worldWidth = widgetPlacement.worldWidth;
tabIndex = aTabIndex;
if (aWindow.isBookmarksVisible()) {
panelType = PanelType.BOOKMARKS;
} else if (aWindow.isHistoryVisible()) {
panelType = PanelType.HISTORY;
} else if (aWindow.isDownloadsVisible()) {
panelType = PanelType.DOWNLOADS;
} else {
panelType = PanelType.NONE;
}
}
}
class WindowsState {
WindowPlacement focusedWindowPlacement = WindowPlacement.FRONT;
ArrayList<WindowState> regularWindowsState = new ArrayList<>();
ArrayList<SessionState> tabs = new ArrayList<>();
boolean privateMode = false;
}
private Context mContext;
private WidgetManagerDelegate mWidgetManager;
private Delegate mDelegate;
private ArrayList<WindowWidget> mRegularWindows;
private ArrayList<WindowWidget> mPrivateWindows;
private WindowWidget mFocusedWindow;
private static int sIndex;
private boolean mPrivateMode = false;
public static final int MAX_WINDOWS = 3;
private WindowWidget mFullscreenWindow;
private WindowPlacement mRegularWindowPlacement;
private WindowPlacement mPrivateWindowPlacement;
private boolean mStoredCurvedMode = false;
private boolean mForcedCurvedMode = false;
private boolean mIsPaused = false;
private TabsWidget mTabsWidget;
private Accounts mAccounts;
private Services mServices;
private PromptDialogWidget mNoInternetDialog;
private boolean mCompositorPaused = false;
private WindowsState mWindowsState;
private boolean mIsRestoreEnabled;
private boolean mAfterRestore;
private String mAddedTabUri;
private @NewTabLocation int mAddedTabLocation = OPEN_IN_FOREGROUND;
public enum PanelType {
NONE,
BOOKMARKS,
HISTORY,
DOWNLOADS
}
public enum WindowPlacement{
FRONT(0),
LEFT(1),
RIGHT(2);
private final int value;
WindowPlacement(final int aValue) {
value = aValue;
}
public int getValue() { return value; }
}
public interface Delegate {
void onFocusedWindowChanged(@NonNull WindowWidget aFocusedWindow, @Nullable WindowWidget aPrevFocusedWindow);
void onWindowBorderChanged(@NonNull WindowWidget aChangeWindow);
void onWindowsMoved();
void onWindowClosed();
void onWindowVideoAvailabilityChanged(@NonNull WindowWidget aWindow);
}
public Windows(Context aContext) {
mContext = aContext;
mWidgetManager = (WidgetManagerDelegate) aContext;
mRegularWindows = new ArrayList<>();
mPrivateWindows = new ArrayList<>();
mRegularWindowPlacement = WindowPlacement.FRONT;
mPrivateWindowPlacement = WindowPlacement.FRONT;
mStoredCurvedMode = SettingsStore.getInstance(mContext).getCylinderDensity() > 0.0f;
mAccounts = ((VRBrowserApplication)mContext.getApplicationContext()).getAccounts();
mAccounts.addAccountListener(mAccountObserver);
mServices = ((VRBrowserApplication)mContext.getApplicationContext()).getServices();
mServices.setTabReceivedDelegate(this);
mWidgetManager.addConnectivityListener(mConnectivityDelegate);
mIsRestoreEnabled = SettingsStore.getInstance(mContext).isRestoreTabsEnabled();
mWindowsState = restoreState();
restoreWindows();
}
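// Persists the current layout (window placement, size and panel) plus all non-blacklisted tab sessions to WINDOWS_SAVE_FILENAME so they can be restored on the next start.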
public void saveState() {
File file = new File(mContext.getFilesDir(), WINDOWS_SAVE_FILENAME);
try (Writer writer = new FileWriter(file)) {
WindowsState state = new WindowsState();
state.privateMode = mPrivateMode;
state.focusedWindowPlacement = mFocusedWindow.isFullScreen() ? mFocusedWindow.getWindowPlacementBeforeFullscreen() : mFocusedWindow.getWindowPlacement();
ArrayList<Session> sessions = SessionStore.get().getSortedSessions(false);
state.tabs = sessions.stream()
.map(Session::getSessionState)
.filter(sessionState -> SAVE_BLACKLIST.stream().noneMatch(uri ->
sessionState.mUri != null && sessionState.mUri.startsWith(uri)
))
.collect(Collectors.toCollection(ArrayList::new));
for (WindowWidget window : mRegularWindows) {
if (window.getSession() != null) {
WindowState windowState = new WindowState();
windowState.load(window, state, state.tabs.indexOf(window.getSession().getSessionState()));
state.regularWindowsState.add(windowState);
}
}
Gson gson = new GsonBuilder().setPrettyPrinting().create();
gson.toJson(state, writer);
Log.d(LOGTAG, "Windows state saved");
} catch (IOException e) {
Log.e(LOGTAG, "Error saving windows state: " + e.getLocalizedMessage());
file.delete();
}
}
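// Reads the persisted state back from disk; the file is deleted afterwards, so a saved layout is only restored once.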
private WindowsState restoreState() {
WindowsState restored = null;
File file = new File(mContext.getFilesDir(), WINDOWS_SAVE_FILENAME);
try (Reader reader = new FileReader(file)) {
Gson gson = new GsonBuilder().create();
Type type = new TypeToken<WindowsState>() {}.getType();
restored = gson.fromJson(reader, type);
Log.d(LOGTAG, "Windows state restored");
} catch (Exception e) {
Log.w(LOGTAG, "Error restoring windows state: " + e.getLocalizedMessage());
} finally {
file.delete();
}
return restored;
}
public void setDelegate(Delegate aDelegate) {
mDelegate = aDelegate;
}
public WindowWidget getFocusedWindow() {
if (mFullscreenWindow != null) {
return mFullscreenWindow;
}
return mFocusedWindow;
}
@Nullable
public WindowWidget addWindow() {
if (getCurrentWindows().size() >= MAX_WINDOWS) {
return null;
}
if (mFullscreenWindow != null) {
mFullscreenWindow.getSession().exitFullScreen();
onFullScreen(mFullscreenWindow, false);
}
WindowWidget frontWindow = getFrontWindow();
WindowWidget leftWindow = getLeftWindow();
WindowWidget rightWindow = getRightWindow();
WindowWidget newWindow = createWindow(null);
WindowWidget focusedWindow = getFocusedWindow();
if (frontWindow == null) {
// First window
placeWindow(newWindow, WindowPlacement.FRONT);
} else if (leftWindow == null && rightWindow == null) {
// Opening a new window from one window
placeWindow(newWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.LEFT);
} else if (leftWindow != null && focusedWindow == leftWindow) {
// Opening a new window from left window
placeWindow(newWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.RIGHT);
} else if (leftWindow != null && focusedWindow == frontWindow) {
// Opening a new window from front window
placeWindow(newWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.RIGHT);
} else if (rightWindow != null && focusedWindow == rightWindow) {
// Opening a new window from right window
placeWindow(newWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.LEFT);
} else if (rightWindow != null && focusedWindow == frontWindow) {
// Opening a new window from front window
placeWindow(newWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.LEFT);
}
updateMaxWindowScales();
mWidgetManager.addWidget(newWindow);
focusWindow(newWindow);
updateCurvedMode(true);
updateViews();
// We are only interested in regular (non-private) windows being opened.
if (!isInPrivateMode()) {
GleanMetricsService.newWindowOpenEvent();
}
return newWindow;
}
private WindowWidget addRestoredWindow(@NonNull WindowState aState, @Nullable Session aSession) {
if (getCurrentWindows().size() >= MAX_WINDOWS) {
return null;
}
if (aSession != null) {
aSession.setActive(true);
}
WindowWidget newWindow = createWindow(aSession);
newWindow.getPlacement().width = aState.textureWidth;
newWindow.getPlacement().height = aState.textureHeight;
newWindow.getPlacement().worldWidth = aState.worldWidth;
newWindow.setRestored(true);
placeWindow(newWindow, aState.placement);
if (newWindow.getSession() != null) {
if (aState.panelType != null) {
switch (aState.panelType) {
case BOOKMARKS:
newWindow.getSession().loadUri(UrlUtils.ABOUT_BOOKMARKS);
break;
case HISTORY:
newWindow.getSession().loadUri(UrlUtils.ABOUT_HISTORY);
break;
case DOWNLOADS:
newWindow.getSession().loadUri(UrlUtils.ABOUT_DOWNLOADS);
break;
}
}
}
updateCurvedMode(true);
mWidgetManager.addWidget(newWindow);
return newWindow;
}
public void closeWindow(@NonNull WindowWidget aWindow) {
WindowWidget frontWindow = getFrontWindow();
WindowWidget leftWindow = getLeftWindow();
WindowWidget rightWindow = getRightWindow();
aWindow.hidePanel(PanelType.BOOKMARKS);
aWindow.hidePanel(PanelType.HISTORY);
aWindow.hidePanel(PanelType.DOWNLOADS);
if (leftWindow == aWindow) {
removeWindow(leftWindow);
if (mFocusedWindow == leftWindow && frontWindow != null) {
focusWindow(frontWindow);
}
} else if (rightWindow == aWindow) {
removeWindow(rightWindow);
if (mFocusedWindow == rightWindow && frontWindow != null) {
focusWindow(frontWindow);
}
} else if (frontWindow == aWindow) {
removeWindow(frontWindow);
if (rightWindow != null) {
placeWindow(rightWindow, WindowPlacement.FRONT);
} else if (leftWindow != null) {
placeWindow(leftWindow, WindowPlacement.FRONT);
}
if (mFocusedWindow == frontWindow && !getCurrentWindows().isEmpty() && getFrontWindow() != null) {
focusWindow(getFrontWindow());
}
}
boolean empty = getCurrentWindows().isEmpty();
if (empty && isInPrivateMode()) {
// Clear private tabs
SessionStore.get().destroyPrivateSessions();
// Exit private mode if the only window is closed.
exitPrivateMode();
} else if (empty) {
// Ensure that there is at least one window.
WindowWidget window = addWindow();
if (window != null) {
window.loadHome();
}
}
updateViews();
if (mDelegate != null) {
mDelegate.onWindowClosed();
}
}
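// Moves the given window one slot to the right, swapping placements (and top bars) with the window it displaces.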
public void moveWindowRight(@NonNull WindowWidget aWindow) {
WindowWidget frontWindow = getFrontWindow();
WindowWidget leftWindow = getLeftWindow();
WindowWidget rightWindow = getRightWindow();
if (aWindow == leftWindow && frontWindow != null) {
placeWindow(leftWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.LEFT);
switchTopBars(leftWindow, frontWindow);
} else if (aWindow == frontWindow) {
if (rightWindow != null) {
placeWindow(rightWindow, WindowPlacement.FRONT);
switchTopBars(rightWindow, frontWindow);
} else if (leftWindow != null) {
placeWindow(leftWindow, WindowPlacement.FRONT);
switchTopBars(leftWindow, frontWindow);
}
placeWindow(frontWindow, WindowPlacement.RIGHT);
}
updateViews();
if (mDelegate != null) {
mDelegate.onWindowsMoved();
}
}
public void moveWindowLeft(@NonNull WindowWidget aWindow) {
WindowWidget frontWindow = getFrontWindow();
WindowWidget leftWindow = getLeftWindow();
WindowWidget rightWindow = getRightWindow();
if (aWindow == rightWindow && frontWindow != null) {
placeWindow(rightWindow, WindowPlacement.FRONT);
placeWindow(frontWindow, WindowPlacement.RIGHT);
switchTopBars(rightWindow, frontWindow);
} else if (aWindow == frontWindow) {
if (leftWindow != null) {
placeWindow(leftWindow, WindowPlacement.FRONT);
switchTopBars(leftWindow, frontWindow);
} else if (rightWindow != null) {
placeWindow(rightWindow, WindowPlacement.FRONT);
switchTopBars(rightWindow, frontWindow);
}
placeWindow(frontWindow, WindowPlacement.LEFT);
}
updateViews();
if (mDelegate != null) {
mDelegate.onWindowsMoved();
}
}
public void focusWindow(@Nullable WindowWidget aWindow) {
if (aWindow != mFocusedWindow) {
WindowWidget prev = mFocusedWindow;
mFocusedWindow = aWindow;
if (prev != null && getCurrentWindows().contains(prev)) {
prev.setActiveWindow(false);
}
mFocusedWindow.setActiveWindow(true);
if (mDelegate != null) {
mDelegate.onFocusedWindowChanged(mFocusedWindow, prev);
}
}
}
public void pauseCompositor() {
if (mCompositorPaused) {
return;
}
mCompositorPaused = true;
for (WindowWidget window: mRegularWindows) {
window.pauseCompositor();
}
for (WindowWidget window: mPrivateWindows) {
window.pauseCompositor();
}
}
public void resumeCompositor() {
if (!mCompositorPaused) {
return;
}
mCompositorPaused = false;
for (WindowWidget window: mRegularWindows) {
window.resumeCompositor();
}
for (WindowWidget window: mPrivateWindows) {
window.resumeCompositor();
}
}
public void onPause() {
mIsPaused = true;
saveState();
}
public void onResume() {
mIsPaused = false;
if (mCompositorPaused) {
resumeCompositor();
}
TelemetryWrapper.resetOpenedWindowsCount(mRegularWindows.size(), false);
TelemetryWrapper.resetOpenedWindowsCount(mPrivateWindows.size(), true);
}
public boolean isPaused() {
return mIsPaused;
}
public void onDestroy() {
if (mTabsWidget != null && !mTabsWidget.isReleased()) {
mTabsWidget.releaseWidget();
mTabsWidget = null;
}
mDelegate = null;
for (WindowWidget window: mRegularWindows) {
window.close();
}
for (WindowWidget window: mPrivateWindows) {
window.close();
}
mAccounts.removeAccountListener(mAccountObserver);
mServices.setTabReceivedDelegate(null);
mWidgetManager.removeConnectivityListener(mConnectivityDelegate);
}
public boolean isInPrivateMode() {
return mPrivateMode;
}
public boolean isVideoAvailable() {
for (WindowWidget window: getCurrentWindows()) {
if (window.getSession().isVideoAvailable()) {
return true;
}
}
return false;
}
public void enterImmersiveMode() {
if (!isInPrivateMode()) {
for (WindowWidget window: mRegularWindows) {
if (window != mFocusedWindow) {
window.onPause();
}
}
} else {
for (WindowWidget window: mPrivateWindows) {
if (window != mFocusedWindow) {
window.onPause();
}
}
}
}
public void exitImmersiveMode() {
if (mIsPaused) {
return;
}
if (!isInPrivateMode()) {
for (WindowWidget window: mRegularWindows) {
if (window != mFocusedWindow) {
window.onResume();
}
}
} else {
for (WindowWidget window: mPrivateWindows) {
if (window != mFocusedWindow) {
window.onResume();
}
}
}
}
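// Hides every regular window, shows (or creates) the private ones and dims the world brightness while private browsing is active.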
public void enterPrivateMode() {
if (mPrivateMode) {
return;
}
mPrivateMode = true;
if (mFocusedWindow != null) {
mRegularWindowPlacement = mFocusedWindow.getWindowPlacement();
} else {
mRegularWindowPlacement = WindowPlacement.FRONT;
}
for (WindowWidget window: mRegularWindows) {
setWindowVisible(window, false);
}
updateViews();
updateCurvedMode(true);
for (WindowWidget window: mPrivateWindows) {
setWindowVisible(window, true);
}
if (mPrivateWindows.size() == 0) {
WindowWidget window = addWindow();
if (window != null) {
window.loadHome();
}
} else {
focusWindow(getWindowWithPlacement(mPrivateWindowPlacement));
}
mWidgetManager.pushWorldBrightness(this, WidgetManagerDelegate.DEFAULT_DIM_BRIGHTNESS);
}
public void exitPrivateMode() {
if (!mPrivateMode) {
return;
}
mPrivateMode = false;
if (mFocusedWindow != null) {
mPrivateWindowPlacement = mFocusedWindow.getWindowPlacement();
} else {
mPrivateWindowPlacement = WindowPlacement.FRONT;
}
for (WindowWidget window: mPrivateWindows) {
setWindowVisible(window, false);
}
updateViews();
updateCurvedMode(true);
for (WindowWidget window: mRegularWindows) {
setWindowVisible(window, true);
}
WindowWidget window = getWindowWithPlacement(mRegularWindowPlacement);
if (window != null) {
focusWindow(window);
}
mWidgetManager.popWorldBrightness(this);
}
public boolean handleBack() {
if (mFocusedWindow == null) {
return false;
}
if (mFocusedWindow.getSession().canGoBack()) {
mFocusedWindow.getSession().goBack();
return true;
} else if (isInPrivateMode()) {
exitPrivateMode();
return true;
}
return false;
}
void updateMaxWindowScales() {
float maxScale = 3;
if (mFullscreenWindow == null && getCurrentWindows().size() >= 3) {
maxScale = 1.5f;
} else if (mFullscreenWindow == null && getCurrentWindows().size() == 2) {
maxScale = 2.0f;
}
for (WindowWidget window: getCurrentWindows()) {
window.setMaxWindowScale(maxScale);
}
}
public ArrayList<WindowWidget> getCurrentWindows() {
return mPrivateMode ? mPrivateWindows : mRegularWindows;
}
@Nullable
private WindowWidget getWindowWithPlacement(WindowPlacement aPlacement) {
for (WindowWidget window: getCurrentWindows()) {
if (window.getWindowPlacement() == aPlacement) {
return window;
}
}
return null;
}
@Nullable
private WindowWidget getFrontWindow() {
if (mFullscreenWindow != null) {
return mFullscreenWindow;
}
return getWindowWithPlacement(WindowPlacement.FRONT);
}
@Nullable
private WindowWidget getLeftWindow() {
return getWindowWithPlacement(WindowPlacement.LEFT);
}
@Nullable
private WindowWidget getRightWindow() {
return getWindowWithPlacement(WindowPlacement.RIGHT);
}
private void restoreWindows() {
if (mIsRestoreEnabled && mWindowsState != null) {
for (WindowState windowState : mWindowsState.regularWindowsState) {
addRestoredWindow(windowState, null);
}
WindowWidget windowToFocus = getWindowWithPlacement(mWindowsState.focusedWindowPlacement);
if (windowToFocus == null) {
windowToFocus = getFrontWindow();
if (windowToFocus == null && getCurrentWindows().size() > 0) {
windowToFocus = getCurrentWindows().get(0);
}
}
if (windowToFocus != null) {
focusWindow(windowToFocus);
}
}
if (getCurrentWindows().size() == 0) {
WindowWidget window = addWindow();
focusWindow(window);
}
updateMaxWindowScales();
updateViews();
}
public void restoreSessions() {
if (mIsRestoreEnabled && mWindowsState != null) {
ArrayList<Session> restoredSessions = new ArrayList<>();
if (mWindowsState.tabs != null) {
mWindowsState.tabs.forEach(state -> {
restoredSessions.add(SessionStore.get().createSuspendedSession(state));
GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.PRE_EXISTING);
});
}
for (WindowState windowState : mWindowsState.regularWindowsState) {
WindowWidget targetWindow = getWindowWithPlacement(windowState.placement);
if (targetWindow != null) {
if (windowState.tabIndex >= 0 && windowState.tabIndex < restoredSessions.size()) {
Session defaultSession = targetWindow.getSession();
Session session = restoredSessions.get(windowState.tabIndex);
targetWindow.setSession(session, WindowWidget.DEACTIVATE_CURRENT_SESSION);
session.setActive(true);
// Destroy the default blank session
SessionStore.get().destroySession(defaultSession);
} else {
targetWindow.loadHome();
}
}
}
if (mWindowsState.privateMode) {
enterPrivateMode();
} else {
exitPrivateMode();
}
}
if (mAddedTabUri != null) {
openNewTab(mAddedTabUri, mAddedTabLocation);
mAddedTabUri = null;
}
mAfterRestore = true;
}
private void removeWindow(@NonNull WindowWidget aWindow) {
BitmapCache.getInstance(mContext).removeBitmap(aWindow.getSession().getId());
mWidgetManager.removeWidget(aWindow);
mRegularWindows.remove(aWindow);
mPrivateWindows.remove(aWindow);
aWindow.removeWindowListener(this);
aWindow.close();
updateMaxWindowScales();
updateCurvedMode(true);
if (mPrivateMode) {
TelemetryWrapper.openWindowsEvent(mPrivateWindows.size() + 1, mPrivateWindows.size(), true);
} else {
TelemetryWrapper.openWindowsEvent(mRegularWindows.size() + 1, mRegularWindows.size(), false);
}
}
private void setWindowVisible(@NonNull WindowWidget aWindow, boolean aVisible) {
if (aVisible && (aWindow.getSession() != null) && (aWindow.getSession().getGeckoSession() == null)) {
setFirstPaint(aWindow, aWindow.getSession());
}
aWindow.setVisible(aVisible);
aWindow.getTitleBar().setVisible(aVisible);
aWindow.getTopBar().setVisible(aVisible);
}
private void placeWindow(@NonNull WindowWidget aWindow, WindowPlacement aPosition) {
placeWindow(aWindow, aPosition, mStoredCurvedMode || mForcedCurvedMode);
}
private void placeWindow(@NonNull WindowWidget aWindow, WindowPlacement aPosition, boolean curvedMode) {
WidgetPlacement placement = aWindow.getPlacement();
aWindow.setWindowPlacement(aPosition);
switch (aPosition) {
case FRONT:
placement.anchorX = 0.5f;
placement.anchorY = 0.0f;
placement.rotation = 0;
placement.rotationAxisX = 0;
placement.rotationAxisY = 0;
placement.rotationAxisZ = 0;
placement.translationX = 0.0f;
placement.translationY = WidgetPlacement.unitFromMeters(mContext, R.dimen.window_world_y);
placement.translationZ = WidgetPlacement.unitFromMeters(mContext, R.dimen.window_world_z);
break;
case LEFT:
placement.anchorX = 1.0f;
placement.anchorY = 0.0f;
placement.parentAnchorX = 0.0f;
placement.parentAnchorY = 0.0f;
placement.rotationAxisX = 0;
placement.rotationAxisZ = 0;
if (curvedMode) {
placement.rotationAxisY = 0;
placement.rotation = 0;
} else {
placement.rotationAxisY = 1.0f;
placement.rotation = (float) Math.toRadians(WidgetPlacement.floatDimension(mContext, R.dimen.multi_window_angle));
}
placement.translationX = -WidgetPlacement.dpDimension(mContext, R.dimen.multi_window_padding);
placement.translationY = 0.0f;
placement.translationZ = 0.0f;
break;
case RIGHT:
placement.anchorX = 0.0f;
placement.anchorY = 0.0f;
placement.parentAnchorX = 1.0f;
placement.parentAnchorY = 0.0f;
placement.rotationAxisX = 0;
placement.rotationAxisZ = 0;
if (curvedMode) {
placement.rotationAxisY = 0;
placement.rotation = 0;
} else {
placement.rotationAxisY = 1.0f;
placement.rotation = (float) Math.toRadians(-WidgetPlacement.floatDimension(mContext, R.dimen.multi_window_angle));
}
placement.translationX = WidgetPlacement.dpDimension(mContext, R.dimen.multi_window_padding);
placement.translationY = 0.0f;
placement.translationZ = 0.0f;
}
}
public void updateCurvedMode(boolean force) {
float density = SettingsStore.getInstance(mContext).getCylinderDensity();
boolean storedCurvedMode = density > 0.0f;
boolean forcedCurvedMode = getCurrentWindows().size() > 1;
if (force) {
boolean curved = forcedCurvedMode || storedCurvedMode;
for (WindowWidget window : getCurrentWindows()) {
placeWindow(window, window.getWindowPlacement(), curved);
}
updateViews();
mWidgetManager.setCylinderDensity(curved ? SettingsStore.CYLINDER_DENSITY_ENABLED_DEFAULT : density);
} else if ((storedCurvedMode != mStoredCurvedMode) || (forcedCurvedMode != mForcedCurvedMode)) {
mStoredCurvedMode = storedCurvedMode;
mForcedCurvedMode = forcedCurvedMode;
boolean curved = mStoredCurvedMode || mForcedCurvedMode;
for (WindowWidget window : getCurrentWindows()) {
placeWindow(window, window.getWindowPlacement(), curved);
}
updateViews();
mWidgetManager.setCylinderDensity(curved ? SettingsStore.CYLINDER_DENSITY_ENABLED_DEFAULT : density);
}
}
public int getWindowsCount() {
return getCurrentWindows().size();
}
public boolean canOpenNewWindow() {
return getWindowsCount() < MAX_WINDOWS;
}
private void switchTopBars(WindowWidget w1, WindowWidget w2) {
// Used to fix a minor visual glitch.
// See https://github.com/MozillaReality/FirefoxReality/issues/1722
TopBarWidget bar1 = w1.getTopBar();
TopBarWidget bar2 = w2.getTopBar();
w1.setTopBar(bar2);
w2.setTopBar(bar1);
}
private void updateViews() {
WindowWidget frontWindow = getFrontWindow();
WindowWidget leftWindow = getLeftWindow();
WindowWidget rightWindow = getRightWindow();
// Make sure that left or right window have the correct parent
if (frontWindow != null && leftWindow != null) {
leftWindow.getPlacement().parentHandle = frontWindow.getHandle();
}
if (frontWindow != null && rightWindow != null) {
rightWindow.getPlacement().parentHandle = frontWindow.getHandle();
}
if (frontWindow != null) {
frontWindow.getPlacement().parentHandle = -1;
}
ArrayList<WindowWidget> windows = getCurrentWindows();
for (WindowWidget window: windows) {
window.setIsOnlyWindow(windows.size() == 1);
}
// Sort windows so frontWindow is the first one. Required for proper native matrix updates.
windows.sort((o1, o2) -> o1 == frontWindow ? -1 : 0);
for (WindowWidget window: getCurrentWindows()) {
mWidgetManager.updateWidget(window);
mWidgetManager.updateWidget(window.getTopBar());
mWidgetManager.updateWidget(window.getTitleBar());
}
}
@NonNull
private WindowWidget createWindow(@Nullable Session aSession) {
int newWindowId = sIndex++;
WindowWidget window;
if (aSession != null) {
window = new WindowWidget(mContext, newWindowId, aSession);
} else {
window = new WindowWidget(mContext, newWindowId, mPrivateMode);
}
window.addWindowListener(this);
getCurrentWindows().add(window);
window.getTopBar().setDelegate(this);
window.getTitleBar().setDelegate(this);
if (mPrivateMode) {
TelemetryWrapper.openWindowsEvent(mPrivateWindows.size() - 1, mPrivateWindows.size(), true);
} else {
TelemetryWrapper.openWindowsEvent(mRegularWindows.size() - 1, mRegularWindows.size(), false);
}
mForcedCurvedMode = getCurrentWindows().size() > 1;
return window;
}
public void enterResizeMode() {
if (mFullscreenWindow == null) {
for (WindowWidget window : getCurrentWindows()) {
window.setResizeMode(true);
}
}
}
public void exitResizeMode() {
if (mFullscreenWindow == null) {
for (WindowWidget window : getCurrentWindows()) {
window.setResizeMode(false);
}
}
}
private AccountObserver mAccountObserver = new AccountObserver() {
@Override
public void onLoggedOut() {
}
@Override
public void onAuthenticated(@NonNull OAuthAccount oAuthAccount, @NonNull AuthType authType) {
if (authType == AuthType.Signin.INSTANCE || authType == AuthType.Signup.INSTANCE) {
UIDialog.closeAllDialogs();
Session session = mFocusedWindow.getSession();
addTab(mFocusedWindow, mAccounts.getConnectionSuccessURL());
onTabsClose(new ArrayList<>(Collections.singletonList(session)));
switch (mAccounts.getLoginOrigin()) {
case BOOKMARKS:
mFocusedWindow.getSession().loadUri(UrlUtils.ABOUT_BOOKMARKS);
break;
case HISTORY:
mFocusedWindow.getSession().loadUri(UrlUtils.ABOUT_HISTORY);
break;
case SETTINGS:
mWidgetManager.getTray().showSettingsDialog(FXA);
break;
}
}
}
@Override
public void onProfileUpdated(@NonNull Profile profile) {
}
@Override
public void onAuthenticationProblems() {
}
};
// Tray Listener
@Override
public void onBookmarksClicked() {
mFocusedWindow.switchPanel(PanelType.BOOKMARKS);
}
@Override
public void onPrivateBrowsingClicked() {
if (mPrivateMode) {
exitPrivateMode();
} else {
enterPrivateMode();
}
}
@Override
public void onAddWindowClicked() {
WindowWidget window = addWindow();
if (window != null) {
window.loadHome();
}
}
@Override
public void onHistoryClicked() {
mFocusedWindow.switchPanel(PanelType.HISTORY);
}
@Override
public void onDownloadsClicked() {
mFocusedWindow.switchPanel(PanelType.DOWNLOADS);
}
@Override
public void onTabsClicked() {
if (mTabsWidget == null) {
mTabsWidget = new TabsWidget(mContext);
mTabsWidget.setTabDelegate(this);
}
if (mFocusedWindow != null) {
mTabsWidget.getPlacement().parentHandle = mFocusedWindow.getHandle();
mTabsWidget.attachToWindow(mFocusedWindow);
mTabsWidget.show(UIWidget.KEEP_FOCUS);
// If we're signed-in, poll for any new device events (e.g. received tabs)
// There's no push support right now, so this helps with the perception of speedy tab delivery.
((VRBrowserApplication)mContext.getApplicationContext()).getAccounts().refreshDevicesAsync();
((VRBrowserApplication)mContext.getApplicationContext()).getAccounts().pollForEventsAsync();
}
// Capture active session snapshots when showing the tabs menu
for (WindowWidget window: getCurrentWindows()) {
window.captureImage();
}
}
private void setFirstPaint(@NonNull final WindowWidget aWindow, @NonNull final Session aSession) {
if (aSession.getGeckoSession() == null) {
aWindow.waitForFirstPaint();
} else {
// If the new session has a GeckoSession there won't be a first paint event.
// So trigger the first paint callback in case the window is grayed out
// waiting for the first paint event.
aWindow.onFirstContentfulPaint(aSession.getGeckoSession());
}
}
// TopBarWidget Delegate
@Override
public void onCloseClicked(TopBarWidget aWidget) {
WindowWidget window = aWidget.getAttachedWindow();
if (window != null) {
closeWindow(window);
}
}
@Override
public void onMoveLeftClicked(TopBarWidget aWidget) {
WindowWidget window = aWidget.getAttachedWindow();
if (window != null) {
TelemetryWrapper.windowsMoveEvent();
moveWindowLeft(window);
}
}
@Override
public void onMoveRightClicked(TopBarWidget aWidget) {
WindowWidget window = aWidget.getAttachedWindow();
if (window != null) {
TelemetryWrapper.windowsMoveEvent();
moveWindowRight(window);
}
}
// Title Bar Delegate
@Override
public void onTitleClicked(@NonNull TitleBarWidget titleBar) {
if (titleBar.getAttachedWindow() != null) {
focusWindow(titleBar.getAttachedWindow());
}
}
@Override
public void onMediaPlayClicked(@NonNull TitleBarWidget titleBar) {
for (WindowWidget window : getCurrentWindows()) {
if (window.getTitleBar() == titleBar &&
window.getSession() != null &&
window.getSession().getActiveVideo() != null) {
window.getSession().getActiveVideo().play();
}
}
}
@Override
public void onMediaPauseClicked(@NonNull TitleBarWidget titleBar) {
for (WindowWidget window : getCurrentWindows()) {
if (window.getTitleBar() == titleBar &&
window.getSession() != null &&
window.getSession().getActiveVideo() != null) {
window.getSession().getActiveVideo().pause();
}
}
}
private void setFullScreenSize(WindowWidget aWindow) {
final float minScale = WidgetPlacement.floatDimension(mContext, R.dimen.window_fullscreen_min_scale);
// Set browser fullscreen size
float aspect = SettingsStore.getInstance(mContext).getWindowAspect();
Session session = mFocusedWindow.getSession();
if (session == null) {
return;
}
Media media = session.getFullScreenVideo();
if (media != null && media.getWidth() > 0 && media.getHeight() > 0) {
aspect = (float)media.getWidth() / (float)media.getHeight();
}
float scale = aWindow.getCurrentScale();
// Enforce min fullscreen size.
// If current window area is larger only resize if the aspect changes (e.g. media).
if (scale < minScale || aspect != aWindow.getCurrentAspect()) {
aWindow.resizeByMultiplier(aspect, Math.max(scale, minScale));
}
}
@Nullable
private WindowWidget getWindowWithSession(GeckoSession aSession) {
for (WindowWidget window: getCurrentWindows()) {
if (window.getSession().getGeckoSession() == aSession) {
return window;
}
}
return null;
}
@Nullable
private WindowWidget getWindowWithSession(Session aSession) {
for (WindowWidget window: getCurrentWindows()) {
if (window.getSession() == aSession) {
return window;
}
}
return null;
}
// WindowWidget.Delegate
@Override
public void onFocusRequest(@NonNull WindowWidget aWindow) {
focusWindow(aWindow);
}
@Override
public void onBorderChanged(@NonNull WindowWidget aWindow) {
if (mDelegate != null) {
mDelegate.onWindowBorderChanged(aWindow);
}
}
@Override
public void onVideoAvailabilityChanged(@NonNull WindowWidget aWindow) {
if (mDelegate != null) {
mDelegate.onWindowVideoAvailabilityChanged(aWindow);
}
}
@Override
public void onFullScreen(@NonNull WindowWidget aWindow, boolean aFullScreen) {
if (aFullScreen) {
mFullscreenWindow = aWindow;
aWindow.saveBeforeFullscreenPlacement();
// Do not depend on how many windows are opened to select flat/curved when entering fullscreen.
boolean fullscreenCurved = SettingsStore.getInstance(mContext).isCurvedModeEnabled() && (mStoredCurvedMode || mForcedCurvedMode);
aWindow.getPlacement().cylinder = fullscreenCurved;
setFullScreenSize(aWindow);
placeWindow(aWindow, WindowPlacement.FRONT, fullscreenCurved);
focusWindow(aWindow);
for (WindowWidget win: getCurrentWindows()) {
setWindowVisible(win, win == mFullscreenWindow);
}
updateMaxWindowScales();
updateViews();
} else if (mFullscreenWindow != null) {
aWindow.restoreBeforeFullscreenPlacement();
mFullscreenWindow = null;
for (WindowWidget win : getCurrentWindows()) {
setWindowVisible(win, true);
}
updateMaxWindowScales();
updateViews();
}
}
@Override
public void onTabSelect(Session aTab) {
if (mFocusedWindow.getSession() != aTab) {
GleanMetricsService.Tabs.activatedEvent();
}
WindowWidget targetWindow = mFocusedWindow;
WindowWidget windowToMove = getWindowWithSession(aTab);
if (windowToMove != null && windowToMove != targetWindow) {
// Move session between windows
Session moveFrom = windowToMove.getSession();
Session moveTo = targetWindow.getSession();
moveFrom.surfaceDestroyed();
moveTo.surfaceDestroyed();
windowToMove.setSession(moveTo, WindowWidget.SESSION_DO_NOT_RELEASE_DISPLAY, WindowWidget.LEAVE_CURRENT_SESSION_ACTIVE);
targetWindow.setSession(moveFrom, WindowWidget.SESSION_DO_NOT_RELEASE_DISPLAY, WindowWidget.LEAVE_CURRENT_SESSION_ACTIVE);
windowToMove.setActiveWindow(false);
targetWindow.setActiveWindow(true);
} else {
setFirstPaint(targetWindow, aTab);
targetWindow.setSession(aTab, WindowWidget.DEACTIVATE_CURRENT_SESSION);
}
}
public void addTab(WindowWidget targetWindow) {
addTab(targetWindow, null);
}
public void openNewTabAfterRestore(@NonNull String aUri, @NewTabLocation int aLocation) {
if (mAfterRestore) {
openNewTab(aUri, aLocation);
} else {
mAddedTabUri = aUri;
mAddedTabLocation = aLocation;
}
}
private void openNewTab(@NonNull String aUri, @NewTabLocation int aLocation) {
if (aLocation == OPEN_IN_NEW_WINDOW) {
WindowWidget newWindow = addWindow();
if ((newWindow != null) && (newWindow.getSession() != null)) {
newWindow.getSession().loadUri(aUri);
}
} else if (mFocusedWindow != null) {
if (aLocation == OPEN_IN_FOREGROUND) {
addTab(mFocusedWindow, aUri);
} else if (aLocation == OPEN_IN_BACKGROUND) {
addBackgroundTab(mFocusedWindow, aUri);
}
}
}
public void addTab(@NonNull WindowWidget targetWindow, @Nullable String aUri) {
Session session = SessionStore.get().createSuspendedSession(aUri, targetWindow.getSession().isPrivateMode());
setFirstPaint(targetWindow, session);
targetWindow.setSession(session, WindowWidget.DEACTIVATE_CURRENT_SESSION);
if (aUri == null || aUri.isEmpty()) {
session.loadHomePage();
} else {
session.loadUri(aUri);
}
}
public void addBackgroundTab(WindowWidget targetWindow, String aUri) {
Session session = SessionStore.get().createSuspendedSession(aUri, targetWindow.getSession().isPrivateMode());
session.updateLastUse();
mFocusedWindow.getSession().updateLastUse();
showTabAddedNotification();
}
@Override
public void onTabAdd() {
addTab(mFocusedWindow, null);
GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.TABS_DIALOG);
}
@Override
public void onTabsClose(ArrayList<Session> aTabs) {
WindowWidget targetWindow = mFocusedWindow;
// Prepare available tabs to choose from
ArrayList<Session> available = SessionStore.get().getSortedSessions(mPrivateMode);
available.removeAll(aTabs);
available.removeIf(session -> getWindowWithSession(session) != null);
// Sort windows by priority to take an available tab
WindowWidget front = getFrontWindow();
ArrayList<WindowWidget> windows = new ArrayList<>(getCurrentWindows());
windows.sort((w1, w2) -> {
// Max priority for the target window
if (w1 == targetWindow) {
return -1;
}
if (w2 == targetWindow) {
return 1;
}
// Front window has next max priority
if (w1 == front) {
return -1;
}
if (w2 == front) {
return 1;
}
return 0;
});
// Take tabs for each window
for (WindowWidget window: windows) {
if (!aTabs.contains(window.getSession())) {
// Window already contains a tab that is not being closed
continue;
}
if (available.size() > 0) {
// Window contains a tab that is being closed and we have a tab available from the list
Session tab = available.get(0);
if (tab != null) {
setFirstPaint(window, tab);
window.setSession(tab, WindowWidget.LEAVE_CURRENT_SESSION_ACTIVE);
}
available.remove(0);
} else {
// We don't have more tabs available for the front window, load home.
addTab(window, null);
}
}
BitmapCache cache = BitmapCache.getInstance(mContext);
for (Session session: aTabs) {
cache.removeBitmap(session.getId());
SessionStore.get().destroySession(session);
}
}
@Override
public void onTabsReceived(@NonNull List<TabData> aTabs) {
WindowWidget targetWindow = mFocusedWindow;
boolean fullscreen = targetWindow.getSession().isInFullScreen();
for (int i = aTabs.size() - 1; i >= 0; --i) {
Session session = SessionStore.get().createSession(targetWindow.getSession().isPrivateMode());
// Cache the provided data to avoid delays if the tabs are loaded at the same time the
// tabs panel is shown.
session.getSessionState().mTitle = aTabs.get(i).getTitle();
session.getSessionState().mUri = aTabs.get(i).getUrl();
session.loadUri(aTabs.get(i).getUrl());
session.updateLastUse();
GleanMetricsService.Tabs.openedCounter(GleanMetricsService.Tabs.TabSource.RECEIVED);
if (i == 0 && !fullscreen) {
// Set the first received tab of the list as the current one.
targetWindow.setSession(session, WindowWidget.DEACTIVATE_CURRENT_SESSION);
}
}
if (!fullscreen) {
showTabAddedNotification();
}
if (mTabsWidget != null && mTabsWidget.isVisible()) {
mTabsWidget.refreshTabs();
}
}
private ConnectivityReceiver.Delegate mConnectivityDelegate = connected -> {
if (mNoInternetDialog == null) {
mNoInternetDialog = new PromptDialogWidget(mContext);
mNoInternetDialog.setButtons(new int[] {
R.string.ok_button
});
mNoInternetDialog.setCheckboxVisible(false);
mNoInternetDialog.setDescriptionVisible(false);
mNoInternetDialog.setTitle(R.string.no_internet_title);
mNoInternetDialog.setBody(R.string.no_internet_message);
mNoInternetDialog.setButtonsDelegate((index, isChecked) -> {
mNoInternetDialog.hide(UIWidget.REMOVE_WIDGET);
mNoInternetDialog.releaseWidget();
mNoInternetDialog = null;
});
}
if (!connected && !mNoInternetDialog.isVisible()) {
mNoInternetDialog.show(UIWidget.REQUEST_FOCUS);
} else if (connected && mNoInternetDialog.isVisible()) {
mNoInternetDialog.hide(UIWidget.REMOVE_WIDGET);
mNoInternetDialog.releaseWidget();
mNoInternetDialog = null;
}
};
public void showTabAddedNotification() {
if (mFocusedWindow.isFullScreen()) {
mWidgetManager.getNavigationBar().showTabAddedNotification();
} else {
if (mWidgetManager.getTray().isVisible()) {
mWidgetManager.getTray().showTabAddedNotification();
} else {
NotificationManager.Notification notification = new NotificationManager.Builder(mFocusedWindow)
.withString(R.string.tab_added_notification)
.withZTranslation(25.0f)
.withCurved(true).build();
NotificationManager.show(TAB_ADDED_NOTIFICATION_ID, notification);
}
}
}
public void showTabSentNotification() {
if (mFocusedWindow.isFullScreen()) {
mWidgetManager.getNavigationBar().showTabSentNotification();
} else {
if (mWidgetManager.getTray().isVisible()) {
mWidgetManager.getTray().showTabSentNotification();
} else {
NotificationManager.Notification notification = new NotificationManager.Builder(mFocusedWindow)
.withString(R.string.tab_sent_notification)
.withZTranslation(25.0f)
.withCurved(true).build();
NotificationManager.show(TAB_SENT_NOTIFICATION_ID, notification);
}
}
}
public void showBookmarkAddedNotification() {
if (mFocusedWindow.isFullScreen()) {
mWidgetManager.getNavigationBar().showBookmarkAddedNotification();
} else {
if (mWidgetManager.getTray().isVisible()) {
mWidgetManager.getTray().showBookmarkAddedNotification();
} else {
NotificationManager.Notification notification = new NotificationManager.Builder(mFocusedWindow)
.withString(R.string.bookmarks_saved_notification)
.withZTranslation(25.0f)
.withCurved(true).build();
NotificationManager.show(BOOKMARK_ADDED_NOTIFICATION_ID, notification);
}
}
}
}
| 1 | 9,397 | We should call `Windows.onResume `: - When the app is launched for the first time - When the app is resumed after being paused (home button and resume or device goes to sleep) - After a permission prompt is displayed | MozillaReality-FirefoxReality | java |
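A minimal sketch of how a hosting Android activity might forward the lifecycle events listed in the review comment above to this window manager. Only Windows.onPause() and Windows.onResume() come from the file itself; the activity name, the Windows constructor shape, and the permission-callback hook are assumptions for illustration.

import android.app.Activity;
import android.os.Bundle;

public class BrowserHostActivity extends Activity { // name assumed for illustration
    private Windows mWindows;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mWindows = new Windows(this); // constructor shape assumed
    }

    @Override
    protected void onResume() {
        super.onResume();
        // Covers the first launch and resuming after the home button or device sleep.
        mWindows.onResume();
    }

    @Override
    protected void onPause() {
        super.onPause();
        mWindows.onPause();
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        // Third case from the comment: a permission prompt was just dismissed.
        mWindows.onResume();
    }
}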
@@ -31,7 +31,7 @@ import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
-public class BftForksSchedule<C extends BftConfigOptions> {
+public class BftForksSchedule<C> {
private final NavigableSet<ForkSpec<C>> forks =
new TreeSet<>( | 1 | /*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.common.bft;
import static com.google.common.base.Preconditions.checkArgument;
import org.hyperledger.besu.config.BftConfigOptions;
import org.hyperledger.besu.config.BftFork;
import org.hyperledger.besu.consensus.common.ForkSpec;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
public class BftForksSchedule<C extends BftConfigOptions> {
private final NavigableSet<ForkSpec<C>> forks =
new TreeSet<>(
Comparator.comparing((Function<ForkSpec<C>, Long>) ForkSpec::getBlock).reversed());
public interface BftSpecCreator<T extends BftConfigOptions, U extends BftFork> {
T create(ForkSpec<T> lastSpec, U fork);
}
@VisibleForTesting
public BftForksSchedule(final ForkSpec<C> genesisFork, final Collection<ForkSpec<C>> forks) {
this.forks.add(genesisFork);
this.forks.addAll(forks);
}
public static <T extends BftConfigOptions, U extends BftFork> BftForksSchedule<T> create(
final T initial, final List<U> forks, final BftSpecCreator<T, U> specCreator) {
checkArgument(
forks.stream().allMatch(f -> f.getForkBlock() > 0),
"Transition cannot be created for genesis block");
checkArgument(
forks.stream().map(BftFork::getForkBlock).distinct().count() == forks.size(),
"Duplicate transitions cannot be created for the same block");
final NavigableSet<ForkSpec<T>> specs = new TreeSet<>(Comparator.comparing(ForkSpec::getBlock));
final ForkSpec<T> initialForkSpec = new ForkSpec<>(0, initial);
specs.add(initialForkSpec);
forks.stream()
.sorted(Comparator.comparing(BftFork::getForkBlock))
.forEachOrdered(
f -> {
final T spec = specCreator.create(specs.last(), f);
specs.add(new ForkSpec<>(f.getForkBlock(), spec));
});
return new BftForksSchedule<>(initialForkSpec, specs.tailSet(initialForkSpec, false));
}
public ForkSpec<C> getFork(final long blockNumber) {
for (final ForkSpec<C> f : forks) {
if (blockNumber >= f.getBlock()) {
return f;
}
}
return forks.first();
}
public Set<ForkSpec<C>> getForks() {
return Collections.unmodifiableSet(forks);
}
}
| 1 | 26,801 | Rename to ForksSchedule? Maybe in separate PR. | hyperledger-besu | java |
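A small sketch of what the relaxed type parameter in the patch above allows: parameterising the schedule with a config type that does not extend BftConfigOptions. MyForkOptions is an assumed placeholder; the ForkSpec and BftForksSchedule constructors are the ones shown in the file above. With the previous bound this would not compile, which is the practical effect of the change.

class ForksScheduleExample {
    // Placeholder per-fork config type, for illustration only.
    static class MyForkOptions {
        final int blockPeriodSeconds;
        MyForkOptions(final int blockPeriodSeconds) {
            this.blockPeriodSeconds = blockPeriodSeconds;
        }
    }

    static void example() {
        ForkSpec<MyForkOptions> genesis = new ForkSpec<>(0, new MyForkOptions(5));
        ForkSpec<MyForkOptions> laterFork = new ForkSpec<>(100, new MyForkOptions(2));

        BftForksSchedule<MyForkOptions> schedule =
            new BftForksSchedule<>(genesis, java.util.List.of(laterFork));

        ForkSpec<MyForkOptions> active = schedule.getFork(150);
        // active.getBlock() == 100: the spec that took effect at block 100 is still in force.
    }
}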
@@ -1,11 +1,12 @@
-import pytest
import time
from unittest import mock
+import pytest
+
from pyramid import testing
-from kinto.core.storage import exceptions
from kinto.core.cache import heartbeat
+from kinto.core.storage import exceptions
class CacheTest: | 1 | import pytest
import time
from unittest import mock
from pyramid import testing
from kinto.core.storage import exceptions
from kinto.core.cache import heartbeat
class CacheTest:
backend = None
settings = {}
def setUp(self):
super().setUp()
self.cache = self.backend.load_from_config(self._get_config())
self.cache.initialize_schema()
self.request = None
self.client_error_patcher = None
def _get_config(self, settings=None):
"""Mock Pyramid config object.
"""
if settings is None:
settings = self.settings
config = testing.setUp()
config.add_settings(settings)
return config
def tearDown(self):
mock.patch.stopall()
super().tearDown()
self.cache.flush()
def get_backend_prefix(self, prefix):
settings_prefix = {**self.settings}
settings_prefix["cache_prefix"] = prefix
config_prefix = self._get_config(settings=settings_prefix)
# initiating cache backend with prefix:
backend_prefix = self.backend.load_from_config(config_prefix)
return backend_prefix
def test_backend_error_is_raised_anywhere(self):
self.client_error_patcher.start()
calls = [
(self.cache.flush,),
(self.cache.ttl, ""),
(self.cache.expire, "", 0),
(self.cache.get, ""),
(self.cache.set, "", "", 42),
(self.cache.delete, ""),
]
for call in calls:
self.assertRaises(exceptions.BackendError, *call)
def test_initialize_schema_is_idempotent(self):
self.cache.initialize_schema()
self.cache.initialize_schema() # not raising.
def test_ping_returns_false_if_unavailable(self):
self.client_error_patcher.start()
ping = heartbeat(self.cache)
self.assertFalse(ping(self.request))
with mock.patch("kinto.core.cache.random.SystemRandom.random", return_value=0.6):
self.assertFalse(ping(self.request))
with mock.patch("kinto.core.cache.random.SystemRandom.random", return_value=0.4):
self.assertFalse(ping(self.request))
def test_ping_returns_true_if_available(self):
ping = heartbeat(self.cache)
with mock.patch("kinto.core.cache.random.random", return_value=0.6):
self.assertTrue(ping(self.request))
with mock.patch("kinto.core.cache.random.random", return_value=0.4):
self.assertTrue(ping(self.request))
def test_ping_logs_error_if_unavailable(self):
self.client_error_patcher.start()
ping = heartbeat(self.cache)
with mock.patch("kinto.core.cache.logger.exception") as exc_handler:
self.assertFalse(ping(self.request))
self.assertTrue(exc_handler.called)
def test_set_adds_the_object(self):
stored = "toto"
self.cache.set("foobar", stored, 42)
retrieved = self.cache.get("foobar")
self.assertEqual(retrieved, stored)
def test_values_remains_python_dict(self):
def setget(k, v):
self.cache.set(k, v, 42)
return (self.cache.get(k), v)
self.assertEqual(*setget("foobar", 3))
self.assertEqual(*setget("foobar", ["a"]))
self.assertEqual(*setget("foobar", {"b": [1, 2]}))
self.assertEqual(*setget("foobar", 3.14))
def test_bytes_cannot_be_stored_in_the_cache(self):
with pytest.raises(TypeError):
self.cache.set("test", b"foo", 42)
def test_delete_removes_the_object(self):
self.cache.set("foobar", "toto", 42)
returned = self.cache.delete("foobar")
self.assertEqual(returned, "toto")
missing = self.cache.get("foobar")
self.assertIsNone(missing)
def test_delete_does_not_fail_if_object_is_unknown(self):
returned = self.cache.delete("foobar")
self.assertIsNone(returned)
def test_expire_expires_the_value(self):
self.cache.set("foobar", "toto", 42)
self.cache.expire("foobar", 0.01)
time.sleep(0.02)
retrieved = self.cache.get("foobar")
self.assertIsNone(retrieved)
def test_set_with_ttl_expires_the_value(self):
self.cache.set("foobar", "toto", 0.01)
time.sleep(0.02)
retrieved = self.cache.get("foobar")
self.assertIsNone(retrieved)
def test_ttl_return_the_time_to_live(self):
self.cache.set("foobar", "toto", 42)
self.cache.expire("foobar", 10)
ttl = self.cache.ttl("foobar")
self.assertGreater(ttl, 0)
self.assertLessEqual(ttl, 10)
def test_ttl_return_none_if_unknown(self):
ttl = self.cache.ttl("unknown")
self.assertTrue(ttl < 0)
def test_cache_prefix_is_set(self):
backend_prefix = self.get_backend_prefix(prefix="prefix_")
# Set the value
backend_prefix.set("key", "foo", 42)
# Validate that it was set with the prefix.
obtained = self.cache.get("prefix_key")
self.assertEqual(obtained, "foo")
def test_cache_when_prefix_is_not_set(self):
backend_prefix = self.get_backend_prefix(prefix="")
# Set a value
backend_prefix.set("key", "foo", 42)
# Validate that it was set with no prefix
obtained = self.cache.get("key")
self.assertEqual(obtained, "foo")
def test_prefix_value_use_to_get_data(self):
backend_prefix = self.get_backend_prefix(prefix="prefix_")
# Set the value with the prefix
self.cache.set("prefix_key", "foo", 42)
# Validate that the prefix was added
obtained = backend_prefix.get("key")
self.assertEqual(obtained, "foo")
def test_prefix_value_use_to_delete_data(self):
backend_prefix = self.get_backend_prefix(prefix="prefix_")
# Set the value
self.cache.set("prefix_key", "foo", 42)
# Delete the value
backend_prefix.delete("key")
# Validate that the value was deleted
obtained = self.cache.get("prefix_key")
self.assertEqual(obtained, None)
def test_prefix_value_used_with_ttl(self):
backend_prefix = self.get_backend_prefix(prefix="prefix_")
self.cache.set("prefix_key", "foo", 10)
# Validate that ttl adds the prefix to the key.
obtained = backend_prefix.ttl("key")
self.assertLessEqual(obtained, 10)
self.assertGreater(obtained, 9)
def test_prefix_value_used_with_expire(self):
backend_prefix = self.get_backend_prefix(prefix="prefix_")
self.cache.set("prefix_foobar", "toto", 10)
# expiring the ttl of key
backend_prefix.expire("foobar", 0)
# Make sure the TTL was set accordingly.
ttl = self.cache.ttl("prefix_foobar")
self.assertLessEqual(ttl, 0)
# The object should have expired
retrieved = self.cache.get("prefix_foobar")
self.assertIsNone(retrieved)
| 1 | 12,558 | Why a blank line here? | Kinto-kinto | py |
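The blank lines in the patch above look like standard import grouping (PEP 8, usually enforced with isort): standard library first, then third-party packages, then first-party code, each group separated by a blank line and sorted alphabetically. A generic sketch of that convention follows; the exact grouping depends on the project's isort configuration.

# Standard library
import time
from unittest import mock

# Third-party
import pytest
from pyramid import testing

# First-party (kinto)
from kinto.core.cache import heartbeat
from kinto.core.storage import exceptions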
@@ -14,6 +14,10 @@ class Subscription < ActiveRecord::Base
notifier.send_notifications
end
+ def active?
+ deactivated_on.nil?
+ end
+
private
def self.subscriber_emails | 1 | # This class represents a user's subscription to Learn content
class Subscription < ActiveRecord::Base
belongs_to :user
delegate :stripe_customer, to: :user
def self.deliver_welcome_emails
recent.each do |subscription|
Mailer.welcome_to_prime(subscription.user).deliver
end
end
def self.deliver_byte_notifications
notifier = ByteNotifier.new(subscriber_emails)
notifier.send_notifications
end
private
def self.subscriber_emails
joins(:user).pluck(:email)
end
def self.recent
where('created_at > ?', 24.hours.ago)
end
end
| 1 | 7,315 | I have a sense that this will actually eventually change to check the actual date so that subscriptions can be deactivated in the future. But I mention that here just to keep you in the loop. We should not make that change now, because it's not needed functionality. | thoughtbot-upcase | rb |
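One possible shape of the date-based check the reviewer anticipates above; this is not part of the change, and the exact comparison is an assumption:

def active?
  deactivated_on.nil? || deactivated_on > Time.zone.today
end

# subscription.update!(deactivated_on: 1.month.from_now)
# subscription.active?  # => true until the deactivation date passes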
@@ -36,6 +36,10 @@ public class EnvironmentVariableDefaultProvider implements IDefaultValueProvider
@Override
public String defaultValue(final ArgSpec argSpec) {
+ if (!argSpec.isOption()) {
+ return null; // skip default for positional params
+ }
+
return envVarNames((OptionSpec) argSpec)
.map(environment::get)
.filter(Objects::nonNull) | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli.util;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;
import picocli.CommandLine.IDefaultValueProvider;
import picocli.CommandLine.Model.ArgSpec;
import picocli.CommandLine.Model.OptionSpec;
public class EnvironmentVariableDefaultProvider implements IDefaultValueProvider {
private static final String ENV_VAR_PREFIX = "BESU_";
private static final String LEGACY_ENV_VAR_PREFIX = "PANTHEON_";
private final Map<String, String> environment;
public EnvironmentVariableDefaultProvider(final Map<String, String> environment) {
this.environment = environment;
}
@Override
public String defaultValue(final ArgSpec argSpec) {
return envVarNames((OptionSpec) argSpec)
.map(environment::get)
.filter(Objects::nonNull)
.findFirst()
.orElse(null);
}
private Stream<String> envVarNames(final OptionSpec spec) {
return Arrays.stream(spec.names())
.filter(name -> name.startsWith("--")) // Only long options are allowed
.flatMap(
name ->
Stream.of(ENV_VAR_PREFIX, LEGACY_ENV_VAR_PREFIX)
.map(prefix -> prefix + nameToEnvVarSuffix(name)));
}
private String nameToEnvVarSuffix(final String name) {
return name.substring("--".length()).replace('-', '_').toUpperCase(Locale.US);
}
}
| 1 | 22,055 | What is the reasoning for using the negation of `isOption()` instead of simply using `isPositional()` to identify a positional param? | hyperledger-besu | java |
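For reference, the guard the reviewer asks about would read as below; per the comment, ArgSpec also exposes isPositional(), so the early return in the patch could equivalently be written this way (sketch only, the rest of the method is unchanged from the file above):

@Override
public String defaultValue(final ArgSpec argSpec) {
  if (argSpec.isPositional()) {
    return null; // no environment-variable defaults for positional params
  }

  return envVarNames((OptionSpec) argSpec)
      .map(environment::get)
      .filter(Objects::nonNull)
      .findFirst()
      .orElse(null);
}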
@@ -1415,7 +1415,13 @@ void ProtocolGame::sendBasicData()
msg.addByte(0);
msg.add<uint32_t>(0);
}
+
msg.addByte(player->getVocation()->getClientId());
+
+ if (version >= 1100) {
+ msg.addByte(player->getVocation()->getId() != VOCATION_NONE ? 0x01 : 0x00); // prey data
+ }
+
msg.add<uint16_t>(0xFF); // number of known spells
for (uint8_t spellId = 0x00; spellId < 0xFF; spellId++) {
msg.addByte(spellId); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include "protocolgame.h"
#include "outputmessage.h"
#include "player.h"
#include "configmanager.h"
#include "actions.h"
#include "game.h"
#include "iologindata.h"
#include "iomarket.h"
#include "ban.h"
#include "scheduler.h"
#include <fmt/format.h>
extern ConfigManager g_config;
extern Actions actions;
extern CreatureEvents* g_creatureEvents;
extern Chat* g_chat;
namespace {
using WaitList = std::deque<std::pair<int64_t, uint32_t>>; // (timeout, player guid)
WaitList priorityWaitList, waitList;
std::tuple<WaitList&, WaitList::iterator, WaitList::size_type> findClient(const Player& player) {
const auto fn = [&](const WaitList::value_type& it) { return it.second == player.getGUID(); };
auto it = std::find_if(priorityWaitList.begin(), priorityWaitList.end(), fn);
if (it != priorityWaitList.end()) {
return std::make_tuple(std::ref(priorityWaitList), it, std::distance(it, priorityWaitList.end()) + 1);
}
it = std::find_if(waitList.begin(), waitList.end(), fn);
if (it != waitList.end()) {
return std::make_tuple(std::ref(waitList), it, priorityWaitList.size() + std::distance(it, waitList.end()) + 1);
}
return std::make_tuple(std::ref(waitList), waitList.end(), priorityWaitList.size() + waitList.size());
}
uint8_t getWaitTime(std::size_t slot)
{
if (slot < 5) {
return 5;
} else if (slot < 10) {
return 10;
} else if (slot < 20) {
return 20;
} else if (slot < 50) {
return 60;
} else {
return 120;
}
}
int64_t getTimeout(std::size_t slot)
{
// timeout is set to 15 seconds longer than expected retry attempt
return getWaitTime(slot) + 15;
}
void cleanupList(WaitList& list)
{
int64_t time = OTSYS_TIME();
auto it = list.begin();
while (it != list.end()) {
if (it->first <= time) {
it = list.erase(it);
} else {
++it;
}
}
}
std::size_t clientLogin(const Player& player)
{
// currentSlot = position in wait list, 0 for direct access
if (player.hasFlag(PlayerFlag_CanAlwaysLogin) || player.getAccountType() >= ACCOUNT_TYPE_GAMEMASTER) {
return 0;
}
cleanupList(priorityWaitList);
cleanupList(waitList);
uint32_t maxPlayers = static_cast<uint32_t>(g_config.getNumber(ConfigManager::MAX_PLAYERS));
if (maxPlayers == 0 || (priorityWaitList.empty() && waitList.empty() && g_game.getPlayersOnline() < maxPlayers)) {
return 0;
}
auto result = findClient(player);
if (std::get<1>(result) != std::get<0>(result).end()) {
auto currentSlot = std::get<2>(result);
// If server has capacity for this client, let him in even though his current slot might be higher than 0.
if ((g_game.getPlayersOnline() + currentSlot) <= maxPlayers) {
std::get<0>(result).erase(std::get<1>(result));
return 0;
}
//let them wait a bit longer
std::get<1>(result)->second = OTSYS_TIME() + (getTimeout(currentSlot) * 1000);
return currentSlot;
}
auto currentSlot = priorityWaitList.size();
if (player.isPremium()) {
priorityWaitList.emplace_back(OTSYS_TIME() + (getTimeout(++currentSlot) * 1000), player.getGUID());
} else {
currentSlot += waitList.size();
waitList.emplace_back(OTSYS_TIME() + (getTimeout(++currentSlot) * 1000), player.getGUID());
}
return currentSlot;
}
}
void ProtocolGame::release()
{
//dispatcher thread
if (player && player->client == shared_from_this()) {
player->client.reset();
player->decrementReferenceCounter();
player = nullptr;
}
OutputMessagePool::getInstance().removeProtocolFromAutosend(shared_from_this());
Protocol::release();
}
void ProtocolGame::login(const std::string& name, uint32_t accountId, OperatingSystem_t operatingSystem)
{
//dispatcher thread
Player* foundPlayer = g_game.getPlayerByName(name);
if (!foundPlayer || g_config.getBoolean(ConfigManager::ALLOW_CLONES)) {
player = new Player(getThis());
player->setName(name);
player->incrementReferenceCounter();
player->setID();
if (!IOLoginData::preloadPlayer(player, name)) {
disconnectClient("Your character could not be loaded.");
return;
}
if (IOBan::isPlayerNamelocked(player->getGUID())) {
disconnectClient("Your character has been namelocked.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSING && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("The game is just going down.\nPlease try again later.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSED && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("Server is currently closed.\nPlease try again later.");
return;
}
if (g_config.getBoolean(ConfigManager::ONE_PLAYER_ON_ACCOUNT) && player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER && g_game.getPlayerByAccount(player->getAccount())) {
disconnectClient("You may only login with one character\nof your account at the same time.");
return;
}
if (!player->hasFlag(PlayerFlag_CannotBeBanned)) {
BanInfo banInfo;
if (IOBan::isAccountBanned(accountId, banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
if (banInfo.expiresAt > 0) {
disconnectClient(fmt::format("Your account has been banned until {:s} by {:s}.\n\nReason specified:\n{:s}", formatDateShort(banInfo.expiresAt), banInfo.bannedBy, banInfo.reason));
} else {
disconnectClient(fmt::format("Your account has been permanently banned by {:s}.\n\nReason specified:\n{:s}", banInfo.bannedBy, banInfo.reason));
}
return;
}
}
if (std::size_t currentSlot = clientLogin(*player)) {
uint8_t retryTime = getWaitTime(currentSlot);
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x16);
output->addString(fmt::format("Too many players online.\nYou are at place {:d} on the waiting list.", currentSlot));
output->addByte(retryTime);
send(output);
disconnect();
return;
}
if (!IOLoginData::loadPlayerById(player, player->getGUID())) {
disconnectClient("Your character could not be loaded.");
return;
}
player->setOperatingSystem(operatingSystem);
if (!g_game.placeCreature(player, player->getLoginPosition())) {
if (!g_game.placeCreature(player, player->getTemplePosition(), false, true)) {
disconnectClient("Temple position is wrong. Contact the administrator.");
return;
}
}
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
player->registerCreatureEvent("ExtendedOpcode");
}
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
} else {
if (eventConnect != 0 || !g_config.getBoolean(ConfigManager::REPLACE_KICK_ON_LOGIN)) {
//Already trying to connect
disconnectClient("You are already logged in.");
return;
}
if (foundPlayer->client) {
foundPlayer->disconnect();
foundPlayer->isConnecting = true;
eventConnect = g_scheduler.addEvent(createSchedulerTask(1000, std::bind(&ProtocolGame::connect, getThis(), foundPlayer->getID(), operatingSystem)));
} else {
connect(foundPlayer->getID(), operatingSystem);
}
}
OutputMessagePool::getInstance().addProtocolToAutosend(shared_from_this());
}
void ProtocolGame::connect(uint32_t playerId, OperatingSystem_t operatingSystem)
{
eventConnect = 0;
Player* foundPlayer = g_game.getPlayerByID(playerId);
if (!foundPlayer || foundPlayer->client) {
disconnectClient("You are already logged in.");
return;
}
if (isConnectionExpired()) {
//ProtocolGame::release() has been called at this point and the Connection object
//no longer exists, so we return to prevent leakage of the Player.
return;
}
player = foundPlayer;
player->incrementReferenceCounter();
g_chat->removeUserFromAllChannels(*player);
player->clearModalWindows();
player->setOperatingSystem(operatingSystem);
player->isConnecting = false;
player->client = getThis();
sendAddCreature(player, player->getPosition(), 0, false);
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
}
void ProtocolGame::logout(bool displayEffect, bool forced)
{
//dispatcher thread
if (!player) {
return;
}
if (!player->isRemoved()) {
if (!forced) {
if (!player->isAccessPlayer()) {
if (player->getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
player->sendCancelMessage(RETURNVALUE_YOUCANNOTLOGOUTHERE);
return;
}
if (!player->getTile()->hasFlag(TILESTATE_PROTECTIONZONE) && player->hasCondition(CONDITION_INFIGHT)) {
player->sendCancelMessage(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT);
return;
}
}
//scripting event - onLogout
if (!g_creatureEvents->playerLogout(player)) {
//Let the script handle the error message
return;
}
}
if (displayEffect && player->getHealth() > 0 && !player->isInGhostMode()) {
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
}
}
disconnect();
g_game.removeCreature(player);
}
void ProtocolGame::onRecvFirstMessage(NetworkMessage& msg)
{
if (g_game.getGameState() == GAME_STATE_SHUTDOWN) {
disconnect();
return;
}
OperatingSystem_t operatingSystem = static_cast<OperatingSystem_t>(msg.get<uint16_t>());
version = msg.get<uint16_t>();
msg.skipBytes(7); // U32 client version, U8 client type, U16 dat revision
if (!Protocol::RSA_decrypt(msg)) {
disconnect();
return;
}
xtea::key key;
key[0] = msg.get<uint32_t>();
key[1] = msg.get<uint32_t>();
key[2] = msg.get<uint32_t>();
key[3] = msg.get<uint32_t>();
enableXTEAEncryption();
setXTEAKey(std::move(key));
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
NetworkMessage opcodeMessage;
opcodeMessage.addByte(0x32);
opcodeMessage.addByte(0x00);
opcodeMessage.add<uint16_t>(0x00);
writeToOutputBuffer(opcodeMessage);
}
msg.skipBytes(1); // gamemaster flag
std::string sessionKey = msg.getString();
auto sessionArgs = explodeString(sessionKey, "\n", 4);
if (sessionArgs.size() != 4) {
disconnect();
return;
}
std::string& accountName = sessionArgs[0];
std::string& password = sessionArgs[1];
std::string& token = sessionArgs[2];
uint32_t tokenTime = 0;
try {
tokenTime = std::stoul(sessionArgs[3]);
} catch (const std::invalid_argument&) {
disconnectClient("Malformed token packet.");
return;
} catch (const std::out_of_range&) {
disconnectClient("Token time is too long.");
return;
}
if (accountName.empty()) {
disconnectClient("You must enter your account name.");
return;
}
std::string characterName = msg.getString();
uint32_t timeStamp = msg.get<uint32_t>();
uint8_t randNumber = msg.getByte();
if (challengeTimestamp != timeStamp || challengeRandom != randNumber) {
disconnect();
return;
}
if (version < CLIENT_VERSION_MIN || version > CLIENT_VERSION_MAX) {
disconnectClient(fmt::format("Only clients with protocol {:s} allowed!", CLIENT_VERSION_STR));
return;
}
if (g_game.getGameState() == GAME_STATE_STARTUP) {
disconnectClient("Gameworld is starting up. Please wait.");
return;
}
if (g_game.getGameState() == GAME_STATE_MAINTAIN) {
disconnectClient("Gameworld is under maintenance. Please re-connect in a while.");
return;
}
BanInfo banInfo;
if (IOBan::isIpBanned(getIP(), banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
disconnectClient(fmt::format("Your IP has been banned until {:s} by {:s}.\n\nReason specified:\n{:s}", formatDateShort(banInfo.expiresAt), banInfo.bannedBy, banInfo.reason));
return;
}
uint32_t accountId = IOLoginData::gameworldAuthentication(accountName, password, characterName, token, tokenTime);
if (accountId == 0) {
disconnectClient("Account name or password is not correct.");
return;
}
g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::login, getThis(), characterName, accountId, operatingSystem)));
}
void ProtocolGame::onConnect()
{
auto output = OutputMessagePool::getOutputMessage();
static std::random_device rd;
static std::ranlux24 generator(rd());
static std::uniform_int_distribution<uint16_t> randNumber(0x00, 0xFF);
// Skip checksum
output->skipBytes(sizeof(uint32_t));
// Packet length & type
output->add<uint16_t>(0x0006);
output->addByte(0x1F);
// Add timestamp & random number
challengeTimestamp = static_cast<uint32_t>(time(nullptr));
output->add<uint32_t>(challengeTimestamp);
challengeRandom = randNumber(generator);
output->addByte(challengeRandom);
// Go back and write checksum
output->skipBytes(-12);
output->add<uint32_t>(adlerChecksum(output->getOutputBuffer() + sizeof(uint32_t), 8));
send(output);
}
void ProtocolGame::disconnectClient(const std::string& message) const
{
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x14);
output->addString(message);
send(output);
disconnect();
}
void ProtocolGame::writeToOutputBuffer(const NetworkMessage& msg)
{
auto out = getOutputBuffer(msg.getLength());
out->append(msg);
}
void ProtocolGame::parsePacket(NetworkMessage& msg)
{
if (!acceptPackets || g_game.getGameState() == GAME_STATE_SHUTDOWN || msg.getLength() == 0) {
return;
}
uint8_t recvbyte = msg.getByte();
if (!player) {
if (recvbyte == 0x0F) {
disconnect();
}
return;
}
//a dead player cannot perform actions
if (player->isRemoved() || player->getHealth() <= 0) {
if (recvbyte == 0x0F) {
disconnect();
return;
}
if (recvbyte != 0x14) {
return;
}
}
switch (recvbyte) {
case 0x14: g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::logout, getThis(), true, false))); break;
case 0x1D: addGameTask(&Game::playerReceivePingBack, player->getID()); break;
case 0x1E: addGameTask(&Game::playerReceivePing, player->getID()); break;
case 0x32: parseExtendedOpcode(msg); break; //otclient extended opcode
case 0x64: parseAutoWalk(msg); break;
case 0x65: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTH); break;
case 0x66: addGameTask(&Game::playerMove, player->getID(), DIRECTION_EAST); break;
case 0x67: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTH); break;
case 0x68: addGameTask(&Game::playerMove, player->getID(), DIRECTION_WEST); break;
case 0x69: addGameTask(&Game::playerStopAutoWalk, player->getID()); break;
case 0x6A: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHEAST); break;
case 0x6B: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHEAST); break;
case 0x6C: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHWEST); break;
case 0x6D: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHWEST); break;
case 0x6F: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_NORTH); break;
case 0x70: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_EAST); break;
case 0x71: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_SOUTH); break;
case 0x72: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_WEST); break;
case 0x77: parseEquipObject(msg); break;
case 0x78: parseThrow(msg); break;
case 0x79: parseLookInShop(msg); break;
case 0x7A: parsePlayerPurchase(msg); break;
case 0x7B: parsePlayerSale(msg); break;
case 0x7C: addGameTask(&Game::playerCloseShop, player->getID()); break;
case 0x7D: parseRequestTrade(msg); break;
case 0x7E: parseLookInTrade(msg); break;
case 0x7F: addGameTask(&Game::playerAcceptTrade, player->getID()); break;
case 0x80: addGameTask(&Game::playerCloseTrade, player->getID()); break;
case 0x82: parseUseItem(msg); break;
case 0x83: parseUseItemEx(msg); break;
case 0x84: parseUseWithCreature(msg); break;
case 0x85: parseRotateItem(msg); break;
case 0x87: parseCloseContainer(msg); break;
case 0x88: parseUpArrowContainer(msg); break;
case 0x89: parseTextWindow(msg); break;
case 0x8A: parseHouseWindow(msg); break;
case 0x8B: parseWrapItem(msg); break;
case 0x8C: parseLookAt(msg); break;
case 0x8D: parseLookInBattleList(msg); break;
case 0x8E: /* join aggression */ break;
case 0x96: parseSay(msg); break;
case 0x97: addGameTask(&Game::playerRequestChannels, player->getID()); break;
case 0x98: parseOpenChannel(msg); break;
case 0x99: parseCloseChannel(msg); break;
case 0x9A: parseOpenPrivateChannel(msg); break;
case 0x9E: addGameTask(&Game::playerCloseNpcChannel, player->getID()); break;
case 0xA0: parseFightModes(msg); break;
case 0xA1: parseAttack(msg); break;
case 0xA2: parseFollow(msg); break;
case 0xA3: parseInviteToParty(msg); break;
case 0xA4: parseJoinParty(msg); break;
case 0xA5: parseRevokePartyInvite(msg); break;
case 0xA6: parsePassPartyLeadership(msg); break;
case 0xA7: addGameTask(&Game::playerLeaveParty, player->getID()); break;
case 0xA8: parseEnableSharedPartyExperience(msg); break;
case 0xAA: addGameTask(&Game::playerCreatePrivateChannel, player->getID()); break;
case 0xAB: parseChannelInvite(msg); break;
case 0xAC: parseChannelExclude(msg); break;
case 0xBE: addGameTask(&Game::playerCancelAttackAndFollow, player->getID()); break;
case 0xC9: /* update tile */ break;
case 0xCA: parseUpdateContainer(msg); break;
case 0xCB: parseBrowseField(msg); break;
case 0xCC: parseSeekInContainer(msg); break;
case 0xD2: addGameTask(&Game::playerRequestOutfit, player->getID()); break;
case 0xD3: parseSetOutfit(msg); break;
case 0xD4: parseToggleMount(msg); break;
case 0xDC: parseAddVip(msg); break;
case 0xDD: parseRemoveVip(msg); break;
case 0xDE: parseEditVip(msg); break;
case 0xE6: parseBugReport(msg); break;
case 0xE7: /* thank you */ break;
case 0xE8: parseDebugAssert(msg); break;
case 0xF0: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerShowQuestLog, player->getID()); break;
case 0xF1: parseQuestLine(msg); break;
case 0xF2: parseRuleViolationReport(msg); break;
case 0xF3: /* get object info */ break;
case 0xF4: parseMarketLeave(); break;
case 0xF5: parseMarketBrowse(msg); break;
case 0xF6: parseMarketCreateOffer(msg); break;
case 0xF7: parseMarketCancelOffer(msg); break;
case 0xF8: parseMarketAcceptOffer(msg); break;
case 0xF9: parseModalWindowAnswer(msg); break;
default:
// std::cout << "Player: " << player->getName() << " sent an unknown packet header: 0x" << std::hex << static_cast<uint16_t>(recvbyte) << std::dec << "!" << std::endl;
break;
}
if (msg.isOverrun()) {
disconnect();
}
}
void ProtocolGame::GetTileDescription(const Tile* tile, NetworkMessage& msg)
{
msg.add<uint16_t>(0x00); //environmental effects
int32_t count;
Item* ground = tile->getGround();
if (ground) {
msg.addItem(ground);
count = 1;
} else {
count = 0;
}
const TileItemVector* items = tile->getItemList();
if (items) {
for (auto it = items->getBeginTopItem(), end = items->getEndTopItem(); it != end; ++it) {
msg.addItem(*it);
if (++count == 10) {
break;
}
}
}
const CreatureVector* creatures = tile->getCreatures();
if (creatures) {
for (const Creature* creature : boost::adaptors::reverse(*creatures)) {
if (!player->canSeeCreature(creature)) {
continue;
}
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
++count;
}
}
if (items && count < 10) {
for (auto it = items->getBeginDownItem(), end = items->getEndDownItem(); it != end; ++it) {
msg.addItem(*it);
if (++count == 10) {
return;
}
}
}
}
void ProtocolGame::GetMapDescription(int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, NetworkMessage& msg)
{
int32_t skip = -1;
int32_t startz, endz, zstep;
if (z > 7) {
startz = z - 2;
endz = std::min<int32_t>(MAP_MAX_LAYERS - 1, z + 2);
zstep = 1;
} else {
startz = 7;
endz = 0;
zstep = -1;
}
for (int32_t nz = startz; nz != endz + zstep; nz += zstep) {
GetFloorDescription(msg, x, y, nz, width, height, z - nz, skip);
}
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
void ProtocolGame::GetFloorDescription(NetworkMessage& msg, int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, int32_t offset, int32_t& skip)
{
for (int32_t nx = 0; nx < width; nx++) {
for (int32_t ny = 0; ny < height; ny++) {
Tile* tile = g_game.map.getTile(x + nx + offset, y + ny + offset, z);
if (tile) {
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
skip = 0;
GetTileDescription(tile, msg);
} else if (skip == 0xFE) {
msg.addByte(0xFF);
msg.addByte(0xFF);
skip = -1;
} else {
++skip;
}
}
}
}
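// checkCreatureAsKnown maintains the per-connection creature cache. The id is
// inserted into knownCreatureSet; if it was already present the creature is
// reported as known. Once the set grows past 1300 entries, a creature that is
// no longer visible (or, failing that, an arbitrary other entry) is evicted and
// returned through removedKnown so the client can drop it as well.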
void ProtocolGame::checkCreatureAsKnown(uint32_t id, bool& known, uint32_t& removedKnown)
{
auto result = knownCreatureSet.insert(id);
if (!result.second) {
known = true;
return;
}
known = false;
if (knownCreatureSet.size() > 1300) {
// Look for a creature to remove
for (auto it = knownCreatureSet.begin(), end = knownCreatureSet.end(); it != end; ++it) {
Creature* creature = g_game.getCreatureByID(*it);
if (!canSee(creature)) {
removedKnown = *it;
knownCreatureSet.erase(it);
return;
}
}
// Bad situation. Let's just remove anyone.
auto it = knownCreatureSet.begin();
if (*it == id) {
++it;
}
removedKnown = *it;
knownCreatureSet.erase(it);
} else {
removedKnown = 0;
}
}
bool ProtocolGame::canSee(const Creature* c) const
{
if (!c || !player || c->isRemoved()) {
return false;
}
if (!player->canSeeCreature(c)) {
return false;
}
return canSee(c->getPosition());
}
bool ProtocolGame::canSee(const Position& pos) const
{
return canSee(pos.x, pos.y, pos.z);
}
bool ProtocolGame::canSee(int32_t x, int32_t y, int32_t z) const
{
if (!player) {
return false;
}
const Position& myPos = player->getPosition();
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (z > 7) {
return false;
}
} else { // if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (std::abs(myPos.getZ() - z) > 2) {
return false;
}
}
//a negative offset means that the action took place on a lower floor than ourselves

int32_t offsetz = myPos.getZ() - z;
if ((x >= myPos.getX() - Map::maxClientViewportX + offsetz) && (x <= myPos.getX() + (Map::maxClientViewportX + 1) + offsetz) &&
(y >= myPos.getY() - Map::maxClientViewportY + offsetz) && (y <= myPos.getY() + (Map::maxClientViewportY + 1) + offsetz)) {
return true;
}
return false;
}
// Parse methods
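// Each parse method below reads the payload of one client opcode and forwards
// the request to the game logic as a dispatcher task via addGameTask /
// addGameTaskTimed (the timed variant carries an expiration so stale requests
// can be discarded).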
void ProtocolGame::parseChannelInvite(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelInvite, player->getID(), name);
}
void ProtocolGame::parseChannelExclude(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelExclude, player->getID(), name);
}
void ProtocolGame::parseOpenChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerOpenChannel, player->getID(), channelId);
}
void ProtocolGame::parseCloseChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerCloseChannel, player->getID(), channelId);
}
void ProtocolGame::parseOpenPrivateChannel(NetworkMessage& msg)
{
const std::string receiver = msg.getString();
addGameTask(&Game::playerOpenPrivateChannel, player->getID(), receiver);
}
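// parseAutoWalk first checks that the announced direction count matches the
// bytes actually left in the message, then skips past them and reads the raw
// directions back-to-front with getPreviousByte before queueing the walk.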
void ProtocolGame::parseAutoWalk(NetworkMessage& msg)
{
uint8_t numdirs = msg.getByte();
if (numdirs == 0 || (msg.getBufferPosition() + numdirs) != (msg.getLength() + 8)) {
return;
}
msg.skipBytes(numdirs);
std::vector<Direction> path;
path.reserve(numdirs);
for (uint8_t i = 0; i < numdirs; ++i) {
uint8_t rawdir = msg.getPreviousByte();
switch (rawdir) {
case 1: path.push_back(DIRECTION_EAST); break;
case 2: path.push_back(DIRECTION_NORTHEAST); break;
case 3: path.push_back(DIRECTION_NORTH); break;
case 4: path.push_back(DIRECTION_NORTHWEST); break;
case 5: path.push_back(DIRECTION_WEST); break;
case 6: path.push_back(DIRECTION_SOUTHWEST); break;
case 7: path.push_back(DIRECTION_SOUTH); break;
case 8: path.push_back(DIRECTION_SOUTHEAST); break;
default: break;
}
}
if (path.empty()) {
return;
}
addGameTask(&Game::playerAutoWalk, player->getID(), std::move(path));
}
void ProtocolGame::parseSetOutfit(NetworkMessage& msg)
{
Outfit_t newOutfit;
newOutfit.lookType = msg.get<uint16_t>();
newOutfit.lookHead = msg.getByte();
newOutfit.lookBody = msg.getByte();
newOutfit.lookLegs = msg.getByte();
newOutfit.lookFeet = msg.getByte();
newOutfit.lookAddons = msg.getByte();
newOutfit.lookMount = msg.get<uint16_t>();
addGameTask(&Game::playerChangeOutfit, player->getID(), newOutfit);
}
void ProtocolGame::parseToggleMount(NetworkMessage& msg)
{
bool mount = msg.getByte() != 0;
addGameTask(&Game::playerToggleMount, player->getID(), mount);
}
void ProtocolGame::parseUseItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItem, player->getID(), pos, stackpos, index, spriteId);
}
void ProtocolGame::parseUseItemEx(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t fromSpriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
Position toPos = msg.getPosition();
uint16_t toSpriteId = msg.get<uint16_t>();
uint8_t toStackPos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItemEx, player->getID(), fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId);
}
void ProtocolGame::parseUseWithCreature(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseWithCreature, player->getID(), fromPos, fromStackPos, creatureId, spriteId);
}
void ProtocolGame::parseCloseContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerCloseContainer, player->getID(), cid);
}
void ProtocolGame::parseUpArrowContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerMoveUpContainer, player->getID(), cid);
}
void ProtocolGame::parseUpdateContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerUpdateContainer, player->getID(), cid);
}
void ProtocolGame::parseThrow(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackpos = msg.getByte();
Position toPos = msg.getPosition();
uint8_t count = msg.getByte();
if (toPos != fromPos) {
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerMoveThing, player->getID(), fromPos, spriteId, fromStackpos, toPos, count);
}
}
void ProtocolGame::parseLookAt(NetworkMessage& msg)
{
Position pos = msg.getPosition();
msg.skipBytes(2); // spriteId
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookAt, player->getID(), pos, stackpos);
}
void ProtocolGame::parseLookInBattleList(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInBattleList, player->getID(), creatureId);
}
void ProtocolGame::parseSay(NetworkMessage& msg)
{
std::string receiver;
uint16_t channelId;
SpeakClasses type = static_cast<SpeakClasses>(msg.getByte());
switch (type) {
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
receiver = msg.getString();
channelId = 0;
break;
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
channelId = msg.get<uint16_t>();
break;
default:
channelId = 0;
break;
}
const std::string text = msg.getString();
if (text.length() > 255) {
return;
}
addGameTask(&Game::playerSay, player->getID(), channelId, type, receiver, text);
}
void ProtocolGame::parseFightModes(NetworkMessage& msg)
{
uint8_t rawFightMode = msg.getByte(); // 1 - offensive, 2 - balanced, 3 - defensive
uint8_t rawChaseMode = msg.getByte(); // 0 - stand while fighting, 1 - chase opponent
uint8_t rawSecureMode = msg.getByte(); // 0 - can't attack unmarked, 1 - can attack unmarked
// uint8_t rawPvpMode = msg.getByte(); // pvp mode introduced in 10.0
fightMode_t fightMode;
if (rawFightMode == 1) {
fightMode = FIGHTMODE_ATTACK;
} else if (rawFightMode == 2) {
fightMode = FIGHTMODE_BALANCED;
} else {
fightMode = FIGHTMODE_DEFENSE;
}
addGameTask(&Game::playerSetFightModes, player->getID(), fightMode, rawChaseMode != 0, rawSecureMode != 0);
}
void ProtocolGame::parseAttack(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerSetAttackedCreature, player->getID(), creatureId);
}
void ProtocolGame::parseFollow(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerFollowCreature, player->getID(), creatureId);
}
void ProtocolGame::parseEquipObject(NetworkMessage& msg)
{
uint16_t spriteId = msg.get<uint16_t>();
// msg.get<uint8_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerEquipItem, player->getID(), spriteId);
}
void ProtocolGame::parseTextWindow(NetworkMessage& msg)
{
uint32_t windowTextId = msg.get<uint32_t>();
const std::string newText = msg.getString();
addGameTask(&Game::playerWriteItem, player->getID(), windowTextId, newText);
}
void ProtocolGame::parseHouseWindow(NetworkMessage& msg)
{
uint8_t doorId = msg.getByte();
uint32_t id = msg.get<uint32_t>();
const std::string text = msg.getString();
addGameTask(&Game::playerUpdateHouseWindow, player->getID(), doorId, id, text);
}
void ProtocolGame::parseWrapItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerWrapItem, player->getID(), pos, stackpos, spriteId);
}
void ProtocolGame::parseLookInShop(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInShop, player->getID(), id, count);
}
void ProtocolGame::parsePlayerPurchase(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreCap = msg.getByte() != 0;
bool inBackpacks = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerPurchaseItem, player->getID(), id, count, amount, ignoreCap, inBackpacks);
}
void ProtocolGame::parsePlayerSale(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreEquipped = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerSellItem, player->getID(), id, count, amount, ignoreEquipped);
}
void ProtocolGame::parseRequestTrade(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint32_t playerId = msg.get<uint32_t>();
addGameTask(&Game::playerRequestTrade, player->getID(), pos, stackpos, playerId, spriteId);
}
void ProtocolGame::parseLookInTrade(NetworkMessage& msg)
{
bool counterOffer = (msg.getByte() == 0x01);
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInTrade, player->getID(), counterOffer, index);
}
void ProtocolGame::parseAddVip(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerRequestAddVip, player->getID(), name);
}
void ProtocolGame::parseRemoveVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
addGameTask(&Game::playerRequestRemoveVip, player->getID(), guid);
}
void ProtocolGame::parseEditVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
const std::string description = msg.getString();
uint32_t icon = std::min<uint32_t>(10, msg.get<uint32_t>()); // 10 is max icon in 9.63
bool notify = msg.getByte() != 0;
addGameTask(&Game::playerRequestEditVip, player->getID(), guid, description, icon, notify);
}
void ProtocolGame::parseRotateItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerRotateItem, player->getID(), pos, stackpos, spriteId);
}
void ProtocolGame::parseRuleViolationReport(NetworkMessage& msg)
{
uint8_t reportType = msg.getByte();
uint8_t reportReason = msg.getByte();
const std::string& targetName = msg.getString();
const std::string& comment = msg.getString();
std::string translation;
if (reportType == REPORT_TYPE_NAME) {
translation = msg.getString();
} else if (reportType == REPORT_TYPE_STATEMENT) {
translation = msg.getString();
msg.get<uint32_t>(); // statement id, used to look up whatever the player has said; we don't log that.
}
addGameTask(&Game::playerReportRuleViolation, player->getID(), targetName, reportType, reportReason, comment, translation);
}
void ProtocolGame::parseBugReport(NetworkMessage& msg)
{
uint8_t category = msg.getByte();
std::string message = msg.getString();
Position position;
if (category == BUG_CATEGORY_MAP) {
position = msg.getPosition();
}
addGameTask(&Game::playerReportBug, player->getID(), message, position, category);
}
void ProtocolGame::parseDebugAssert(NetworkMessage& msg)
{
if (debugAssertSent) {
return;
}
debugAssertSent = true;
std::string assertLine = msg.getString();
std::string date = msg.getString();
std::string description = msg.getString();
std::string comment = msg.getString();
addGameTask(&Game::playerDebugAssert, player->getID(), assertLine, date, description, comment);
}
void ProtocolGame::parseInviteToParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerInviteToParty, player->getID(), targetId);
}
void ProtocolGame::parseJoinParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerJoinParty, player->getID(), targetId);
}
void ProtocolGame::parseRevokePartyInvite(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerRevokePartyInvitation, player->getID(), targetId);
}
void ProtocolGame::parsePassPartyLeadership(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerPassPartyLeadership, player->getID(), targetId);
}
void ProtocolGame::parseEnableSharedPartyExperience(NetworkMessage& msg)
{
bool sharedExpActive = msg.getByte() == 1;
addGameTask(&Game::playerEnableSharedPartyExperience, player->getID(), sharedExpActive);
}
void ProtocolGame::parseQuestLine(NetworkMessage& msg)
{
uint16_t questId = msg.get<uint16_t>();
addGameTask(&Game::playerShowQuestLine, player->getID(), questId);
}
void ProtocolGame::parseMarketLeave()
{
addGameTask(&Game::playerLeaveMarket, player->getID());
}
void ProtocolGame::parseMarketBrowse(NetworkMessage& msg)
{
uint16_t browseId = msg.get<uint16_t>();
if (browseId == MARKETREQUEST_OWN_OFFERS) {
addGameTask(&Game::playerBrowseMarketOwnOffers, player->getID());
} else if (browseId == MARKETREQUEST_OWN_HISTORY) {
addGameTask(&Game::playerBrowseMarketOwnHistory, player->getID());
} else {
addGameTask(&Game::playerBrowseMarket, player->getID(), browseId);
}
}
void ProtocolGame::parseMarketCreateOffer(NetworkMessage& msg)
{
uint8_t type = msg.getByte();
uint16_t spriteId = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
uint32_t price = msg.get<uint32_t>();
bool anonymous = (msg.getByte() != 0);
addGameTask(&Game::playerCreateMarketOffer, player->getID(), type, spriteId, amount, price, anonymous);
}
void ProtocolGame::parseMarketCancelOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
addGameTask(&Game::playerCancelMarketOffer, player->getID(), timestamp, counter);
}
void ProtocolGame::parseMarketAcceptOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
addGameTask(&Game::playerAcceptMarketOffer, player->getID(), timestamp, counter, amount);
}
void ProtocolGame::parseModalWindowAnswer(NetworkMessage& msg)
{
uint32_t id = msg.get<uint32_t>();
uint8_t button = msg.getByte();
uint8_t choice = msg.getByte();
addGameTask(&Game::playerAnswerModalWindow, player->getID(), id, button, choice);
}
void ProtocolGame::parseBrowseField(NetworkMessage& msg)
{
const Position& pos = msg.getPosition();
addGameTask(&Game::playerBrowseField, player->getID(), pos);
}
void ProtocolGame::parseSeekInContainer(NetworkMessage& msg)
{
uint8_t containerId = msg.getByte();
uint16_t index = msg.get<uint16_t>();
addGameTask(&Game::playerSeekInContainer, player->getID(), containerId, index);
}
// Send methods
void ProtocolGame::sendOpenPrivateChannel(const std::string& receiver)
{
NetworkMessage msg;
msg.addByte(0xAD);
msg.addString(receiver);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelEvent(uint16_t channelId, const std::string& playerName, ChannelEvent_t channelEvent)
{
NetworkMessage msg;
msg.addByte(0xF3);
msg.add<uint16_t>(channelId);
msg.addString(playerName);
msg.addByte(channelEvent);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureOutfit(const Creature* creature, const Outfit_t& outfit)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x8E);
msg.add<uint32_t>(creature->getID());
AddOutfit(msg, outfit);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureLight(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
AddCreatureLight(msg, creature);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendWorldLight(LightInfo lightInfo)
{
NetworkMessage msg;
AddWorldLight(msg, lightInfo);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureWalkthrough(const Creature* creature, bool walkthrough)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x92);
msg.add<uint32_t>(creature->getID());
msg.addByte(walkthrough ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureShield(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x91);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getPartyShield(creature->getPlayer()));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSkull(const Creature* creature)
{
if (g_game.getWorldType() != WORLD_TYPE_PVP) {
return;
}
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x90);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getSkullClient(creature));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureType(uint32_t creatureId, uint8_t creatureType)
{
NetworkMessage msg;
msg.addByte(0x95);
msg.add<uint32_t>(creatureId);
msg.addByte(creatureType);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHelpers(uint32_t creatureId, uint16_t helpers)
{
NetworkMessage msg;
msg.addByte(0x94);
msg.add<uint32_t>(creatureId);
msg.add<uint16_t>(helpers);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSquare(const Creature* creature, SquareColor_t color)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x93);
msg.add<uint32_t>(creature->getID());
msg.addByte(0x01);
msg.addByte(color);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTutorial(uint8_t tutorialId)
{
NetworkMessage msg;
msg.addByte(0xDC);
msg.addByte(tutorialId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddMarker(const Position& pos, uint8_t markType, const std::string& desc)
{
NetworkMessage msg;
msg.addByte(0xDD);
msg.addPosition(pos);
msg.addByte(markType);
msg.addString(desc);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendReLoginWindow(uint8_t unfairFightReduction)
{
NetworkMessage msg;
msg.addByte(0x28);
msg.addByte(0x00);
msg.addByte(unfairFightReduction);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendStats()
{
NetworkMessage msg;
AddPlayerStats(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendBasicData()
{
NetworkMessage msg;
msg.addByte(0x9F);
if (player->isPremium()) {
msg.addByte(1);
msg.add<uint32_t>(g_config.getBoolean(ConfigManager::FREE_PREMIUM) ? 0 : player->premiumEndsAt);
} else {
msg.addByte(0);
msg.add<uint32_t>(0);
}
msg.addByte(player->getVocation()->getClientId());
msg.add<uint16_t>(0xFF); // number of known spells
for (uint8_t spellId = 0x00; spellId < 0xFF; spellId++) {
msg.addByte(spellId);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextMessage(const TextMessage& message)
{
NetworkMessage msg;
msg.addByte(0xB4);
msg.addByte(message.type);
switch (message.type) {
case MESSAGE_DAMAGE_DEALT:
case MESSAGE_DAMAGE_RECEIVED:
case MESSAGE_DAMAGE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
msg.add<uint32_t>(message.secondary.value);
msg.addByte(message.secondary.color);
break;
}
case MESSAGE_HEALED:
case MESSAGE_HEALED_OTHERS:
case MESSAGE_EXPERIENCE:
case MESSAGE_EXPERIENCE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
break;
}
case MESSAGE_GUILD:
case MESSAGE_PARTY_MANAGEMENT:
case MESSAGE_PARTY:
msg.add<uint16_t>(message.channelId);
break;
default: {
break;
}
}
msg.addString(message.text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendClosePrivate(uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xB3);
msg.add<uint16_t>(channelId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatePrivateChannel(uint16_t channelId, const std::string& channelName)
{
NetworkMessage msg;
msg.addByte(0xB2);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
msg.add<uint16_t>(0x01);
msg.addString(player->getName());
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelsDialog()
{
NetworkMessage msg;
msg.addByte(0xAB);
const ChannelList& list = g_chat->getChannelList(*player);
msg.addByte(list.size());
for (ChatChannel* channel : list) {
msg.add<uint16_t>(channel->getId());
msg.addString(channel->getName());
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannel(uint16_t channelId, const std::string& channelName, const UsersMap* channelUsers, const InvitedMap* invitedUsers)
{
NetworkMessage msg;
msg.addByte(0xAC);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
if (channelUsers) {
msg.add<uint16_t>(channelUsers->size());
for (const auto& it : *channelUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
if (invitedUsers) {
msg.add<uint16_t>(invitedUsers->size());
for (const auto& it : *invitedUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelMessage(const std::string& author, const std::string& text, SpeakClasses type, uint16_t channel)
{
NetworkMessage msg;
msg.addByte(0xAA);
msg.add<uint32_t>(0x00);
msg.addString(author);
msg.add<uint16_t>(0x00);
msg.addByte(type);
msg.add<uint16_t>(channel);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendIcons(uint16_t icons)
{
NetworkMessage msg;
msg.addByte(0xA2);
msg.add<uint16_t>(icons);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendContainer(uint8_t cid, const Container* container, bool hasParent, uint16_t firstIndex)
{
NetworkMessage msg;
msg.addByte(0x6E);
msg.addByte(cid);
if (container->getID() == ITEM_BROWSEFIELD) {
msg.addItem(ITEM_BAG, 1);
msg.addString("Browse Field");
} else {
msg.addItem(container);
msg.addString(container->getName());
}
msg.addByte(container->capacity());
msg.addByte(hasParent ? 0x01 : 0x00);
msg.addByte(container->isUnlocked() ? 0x01 : 0x00); // Drag and drop
msg.addByte(container->hasPagination() ? 0x01 : 0x00); // Pagination
uint32_t containerSize = container->size();
msg.add<uint16_t>(containerSize);
msg.add<uint16_t>(firstIndex);
if (firstIndex < containerSize) {
uint8_t itemsToSend = std::min<uint32_t>(std::min<uint32_t>(container->capacity(), containerSize - firstIndex), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
for (auto it = container->getItemList().begin() + firstIndex, end = it + itemsToSend; it != end; ++it) {
msg.addItem(*it);
}
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendShop(Npc* npc, const ShopInfoList& itemList)
{
NetworkMessage msg;
msg.addByte(0x7A);
msg.addString(npc->getName());
uint16_t itemsToSend = std::min<size_t>(itemList.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (auto it = itemList.begin(); i < itemsToSend; ++it, ++i) {
AddShopItem(msg, *it);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseShop()
{
NetworkMessage msg;
msg.addByte(0x7C);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSaleItemList(const std::list<ShopInfo>& shop)
{
NetworkMessage msg;
msg.addByte(0x7B);
msg.add<uint64_t>(player->getMoney() + player->getBankBalance());
std::map<uint16_t, uint32_t> saleMap;
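// saleMap: item id -> how many of that item the player can currently sell.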
if (shop.size() <= 5) {
// For very small shops it's not worth it to create the complete map
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
uint32_t count = player->getItemTypeCount(shopInfo.itemId, subtype);
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
}
} else {
// Large shop, it's better to get a cached map of all item counts and use it
// We need a temporary map since the finished map should only contain items
// available in the shop
std::map<uint32_t, uint32_t> tempSaleMap;
player->getAllItemTypeCount(tempSaleMap);
// We must still check manually for the special items that require subtype matches
// (that is, fluids such as potions; in practice these items are few, since
// health potions now use their own ID)
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
if (subtype != -1) {
uint32_t count;
if (itemType.isFluidContainer() || itemType.isSplash()) {
count = player->getItemTypeCount(shopInfo.itemId, subtype); // This shop item requires extra checks
} else {
count = subtype;
}
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
} else {
std::map<uint32_t, uint32_t>::const_iterator findIt = tempSaleMap.find(shopInfo.itemId);
if (findIt != tempSaleMap.end() && findIt->second > 0) {
saleMap[shopInfo.itemId] = findIt->second;
}
}
}
}
uint8_t itemsToSend = std::min<size_t>(saleMap.size(), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
uint8_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = saleMap.begin(); i < itemsToSend; ++it, ++i) {
msg.addItemId(it->first);
msg.addByte(std::min<uint32_t>(it->second, std::numeric_limits<uint8_t>::max()));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketEnter(uint32_t depotId)
{
NetworkMessage msg;
msg.addByte(0xF6);
msg.add<uint64_t>(player->getBankBalance());
msg.addByte(std::min<uint32_t>(IOMarket::getPlayerOfferCount(player->getGUID()), std::numeric_limits<uint8_t>::max()));
DepotChest* depotChest = player->getDepotChest(depotId, false);
if (!depotChest) {
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
return;
}
player->setInMarket(true);
std::map<uint16_t, uint32_t> depotItems;
std::forward_list<Container*> containerList { depotChest, player->getInbox() };
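// Iteratively walk the depot chest and the inbox: non-empty nested containers
// are queued for inspection instead of being counted, and only items with a
// market ware id and valid market attributes are tallied per wareId.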
do {
Container* container = containerList.front();
containerList.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containerList.push_front(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId == 0) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
depotItems[itemType.wareId] += Item::countByType(item, -1);
}
} while (!containerList.empty());
uint16_t itemsToSend = std::min<size_t>(depotItems.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = depotItems.begin(); i < itemsToSend; ++it, ++i) {
msg.add<uint16_t>(it->first);
msg.add<uint16_t>(std::min<uint32_t>(0xFFFF, it->second));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketLeave()
{
NetworkMessage msg;
msg.addByte(0xF7);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseItem(uint16_t itemId, const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(itemId);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketAcceptOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(offer.itemId);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnOffers(const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketCancelOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnHistory(const HistoryMarketOfferList& buyOffers, const HistoryMarketOfferList& sellOffers)
{
uint32_t i = 0;
std::map<uint32_t, uint16_t> counterMap;
uint32_t buyOffersToSend = std::min<uint32_t>(buyOffers.size(), 810 + std::max<int32_t>(0, 810 - sellOffers.size()));
uint32_t sellOffersToSend = std::min<uint32_t>(sellOffers.size(), 810 + std::max<int32_t>(0, 810 - buyOffers.size()));
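// Each list is nominally capped at 810 entries, but quota left unused by one
// side can be borrowed by the other, so at most 1620 history entries are sent.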
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_HISTORY);
msg.add<uint32_t>(buyOffersToSend);
for (auto it = buyOffers.begin(); i < buyOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
counterMap.clear();
i = 0;
msg.add<uint32_t>(sellOffersToSend);
for (auto it = sellOffers.begin(); i < sellOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketDetail(uint16_t itemId)
{
NetworkMessage msg;
msg.addByte(0xF8);
msg.addItemId(itemId);
const ItemType& it = Item::items[itemId];
if (it.armor != 0) {
msg.addString(std::to_string(it.armor));
} else {
msg.add<uint16_t>(0x00);
}
if (it.attack != 0) {
// TODO: chance to hit, range
// example:
// "attack +x, chance to hit +y%, z fields"
if (it.abilities && it.abilities->elementType != COMBAT_NONE && it.abilities->elementDamage != 0) {
msg.addString(fmt::format("{:d} physical +{:d} {:s}", it.attack, it.abilities->elementDamage, getCombatName(it.abilities->elementType)));
} else {
msg.addString(std::to_string(it.attack));
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.isContainer()) {
msg.addString(std::to_string(it.maxItems));
} else {
msg.add<uint16_t>(0x00);
}
if (it.defense != 0) {
if (it.extraDefense != 0) {
msg.addString(fmt::format("{:d} {:+d}", it.defense, it.extraDefense));
} else {
msg.addString(std::to_string(it.defense));
}
} else {
msg.add<uint16_t>(0x00);
}
if (!it.description.empty()) {
const std::string& descr = it.description;
if (descr.back() == '.') {
msg.addString(std::string(descr, 0, descr.length() - 1));
} else {
msg.addString(descr);
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.decayTime != 0) {
msg.addString(fmt::format("{:d} seconds", it.decayTime));
} else {
msg.add<uint16_t>(0x00);
}
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] == 0) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getCombatName(indexToCombatType(i)) << ' ' << std::showpos << it.abilities->absorbPercent[i] << std::noshowpos << '%';
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqLevel != 0) {
msg.addString(std::to_string(it.minReqLevel));
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqMagicLevel != 0) {
msg.addString(std::to_string(it.minReqMagicLevel));
} else {
msg.add<uint16_t>(0x00);
}
msg.addString(it.vocationString);
msg.addString(it.runeSpellName);
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
if (!it.abilities->skills[i]) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getSkillName(i) << ' ' << std::showpos << it.abilities->skills[i] << std::noshowpos;
}
if (it.abilities->stats[STAT_MAGICPOINTS] != 0) {
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << "magic level " << std::showpos << it.abilities->stats[STAT_MAGICPOINTS] << std::noshowpos;
}
if (it.abilities->speed != 0) {
if (separator) {
ss << ", ";
}
ss << "speed " << std::showpos << (it.abilities->speed >> 1) << std::noshowpos;
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.charges != 0) {
msg.addString(std::to_string(it.charges));
} else {
msg.add<uint16_t>(0x00);
}
std::string weaponName = getWeaponName(it.weaponType);
if (it.slotPosition & SLOTP_TWO_HAND) {
if (!weaponName.empty()) {
weaponName += ", two-handed";
} else {
weaponName = "two-handed";
}
}
msg.addString(weaponName);
if (it.weight != 0) {
std::ostringstream ss;
if (it.weight < 10) {
ss << "0.0" << it.weight;
} else if (it.weight < 100) {
ss << "0." << it.weight;
} else {
std::string weightString = std::to_string(it.weight);
weightString.insert(weightString.end() - 2, '.');
ss << weightString;
}
ss << " oz";
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
MarketStatistics* statistics = IOMarket::getInstance().getPurchaseStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
statistics = IOMarket::getInstance().getSaleStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLog()
{
NetworkMessage msg;
msg.addByte(0xF0);
msg.add<uint16_t>(g_game.quests.getQuestsCount(player));
for (const Quest& quest : g_game.quests.getQuests()) {
if (quest.isStarted(player)) {
msg.add<uint16_t>(quest.getID());
msg.addString(quest.getName());
msg.addByte(quest.isCompleted(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLine(const Quest* quest)
{
NetworkMessage msg;
msg.addByte(0xF1);
msg.add<uint16_t>(quest->getID());
msg.addByte(quest->getMissionsCount(player));
for (const Mission& mission : quest->getMissions()) {
if (mission.isStarted(player)) {
msg.addString(mission.getName(player));
msg.addString(mission.getDescription(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTradeItemRequest(const std::string& traderName, const Item* item, bool ack)
{
NetworkMessage msg;
if (ack) {
msg.addByte(0x7D);
} else {
msg.addByte(0x7E);
}
msg.addString(traderName);
if (const Container* tradeContainer = item->getContainer()) {
std::list<const Container*> listContainer {tradeContainer};
std::list<const Item*> itemList {tradeContainer};
while (!listContainer.empty()) {
const Container* container = listContainer.front();
listContainer.pop_front();
for (Item* containerItem : container->getItemList()) {
Container* tmpContainer = containerItem->getContainer();
if (tmpContainer) {
listContainer.push_back(tmpContainer);
}
itemList.push_back(containerItem);
}
}
msg.addByte(itemList.size());
for (const Item* listItem : itemList) {
msg.addItem(listItem);
}
} else {
msg.addByte(0x01);
msg.addItem(item);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseTrade()
{
NetworkMessage msg;
msg.addByte(0x7F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseContainer(uint8_t cid)
{
NetworkMessage msg;
msg.addByte(0x6F);
msg.addByte(cid);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureTurn(const Creature* creature, uint32_t stackPos)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
if (stackPos >= 10) {
msg.add<uint16_t>(0xFFFF);
msg.add<uint32_t>(creature->getID());
} else {
msg.addPosition(creature->getPosition());
msg.addByte(stackPos);
}
msg.add<uint16_t>(0x63);
msg.add<uint32_t>(creature->getID());
msg.addByte(creature->getDirection());
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSay(const Creature* creature, SpeakClasses type, const std::string& text, const Position* pos/* = nullptr*/)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(type);
if (pos) {
msg.addPosition(*pos);
} else {
msg.addPosition(creature->getPosition());
}
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendToChannel(const Creature* creature, SpeakClasses type, const std::string& text, uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (!creature) {
msg.add<uint32_t>(0x00);
} else {
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
}
msg.addByte(type);
msg.add<uint16_t>(channelId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPrivateMessage(const Player* speaker, SpeakClasses type, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (speaker) {
msg.addString(speaker->getName());
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint32_t>(0x00);
}
msg.addByte(type);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelTarget()
{
NetworkMessage msg;
msg.addByte(0xA3);
msg.add<uint32_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChangeSpeed(const Creature* creature, uint32_t speed)
{
NetworkMessage msg;
msg.addByte(0x8F);
msg.add<uint32_t>(creature->getID());
msg.add<uint16_t>(creature->getBaseSpeed() / 2);
msg.add<uint16_t>(speed / 2);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelWalk()
{
NetworkMessage msg;
msg.addByte(0xB5);
msg.addByte(player->getDirection());
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSkills()
{
NetworkMessage msg;
AddPlayerSkills(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPing()
{
NetworkMessage msg;
msg.addByte(0x1D);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPingBack()
{
NetworkMessage msg;
msg.addByte(0x1E);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendDistanceShoot(const Position& from, const Position& to, uint8_t type)
{
NetworkMessage msg;
msg.addByte(0x85);
msg.addPosition(from);
msg.addPosition(to);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMagicEffect(const Position& pos, uint8_t type)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x83);
msg.addPosition(pos);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHealth(const Creature* creature)
{
NetworkMessage msg;
msg.addByte(0x8C);
msg.add<uint32_t>(creature->getID());
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFYIBox(const std::string& message)
{
NetworkMessage msg;
msg.addByte(0x15);
msg.addString(message);
writeToOutputBuffer(msg);
}
//tile
void ProtocolGame::sendMapDescription(const Position& pos)
{
NetworkMessage msg;
msg.addByte(0x64);
msg.addPosition(player->getPosition());
GetMapDescription(pos.x - Map::maxClientViewportX, pos.y - Map::maxClientViewportY, pos.z, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveTileThing(const Position& pos, uint32_t stackpos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
RemoveTileThing(msg, pos, stackpos);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTileCreature(const Position& pos, uint32_t stackpos, const Creature* creature)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(pos);
msg.addByte(stackpos);
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, false, removedKnown);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveTileCreature(const Creature* creature, const Position& pos, uint32_t stackpos)
{
if (stackpos < 10) {
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
RemoveTileThing(msg, pos, stackpos);
writeToOutputBuffer(msg);
return;
}
NetworkMessage msg;
msg.addByte(0x6C);
msg.add<uint16_t>(0xFFFF);
msg.add<uint32_t>(creature->getID());
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTile(const Tile* tile, const Position& pos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x69);
msg.addPosition(pos);
if (tile) {
GetTileDescription(tile, msg);
msg.addByte(0x00);
msg.addByte(0xFF);
} else {
msg.addByte(0x01);
msg.addByte(0xFF);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPendingStateEntered()
{
NetworkMessage msg;
msg.addByte(0x0A);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendEnterWorld()
{
NetworkMessage msg;
msg.addByte(0x0F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFightModes()
{
NetworkMessage msg;
msg.addByte(0xA7);
msg.addByte(player->fightMode);
msg.addByte(player->chaseMode);
msg.addByte(player->secureMode);
msg.addByte(PVP_MODE_DOVE);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddCreature(const Creature* creature, const Position& pos, int32_t stackpos, bool isLogin)
{
if (!canSee(pos)) {
return;
}
if (creature != player) {
// stack pos is always the real index now, so it can exceed the limit
// if stack pos exceeds the limit, we need to refresh the tile instead
// 1. this is a rare case, and is only triggered by forcing a summon into a position
// 2. since no stackpos will be sent to the client for that creature, removing
//    it must be done with its id if its stackpos remains >= 10. this is done to
//    add creatures to the battle list instead of rendering them on screen
if (stackpos >= 10) {
// @todo: should we avoid this check?
if (const Tile* tile = creature->getTile()) {
sendUpdateTile(tile, pos);
}
} else {
// if stackpos is -1, the client will automatically detect it
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
writeToOutputBuffer(msg);
}
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
return;
}
NetworkMessage msg;
msg.addByte(0x17);
msg.add<uint32_t>(player->getID());
msg.add<uint16_t>(0x32); // beat duration (50)
msg.addDouble(Creature::speedA, 3);
msg.addDouble(Creature::speedB, 3);
msg.addDouble(Creature::speedC, 3);
// can report bugs?
if (player->getAccountType() >= ACCOUNT_TYPE_TUTOR) {
msg.addByte(0x01);
} else {
msg.addByte(0x00);
}
msg.addByte(0x00); // can change pvp framing option
msg.addByte(0x00); // expert mode button enabled
msg.add<uint16_t>(0x00); // URL (string) to ingame store images
msg.add<uint16_t>(25); // premium coin package size
writeToOutputBuffer(msg);
sendPendingStateEntered();
sendEnterWorld();
sendMapDescription(pos);
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
sendInventoryItem(static_cast<slots_t>(i), player->getInventoryItem(static_cast<slots_t>(i)));
}
sendInventoryItem(CONST_SLOT_STORE_INBOX, player->getStoreInbox()->getItem());
sendStats();
sendSkills();
//gameworld light-settings
sendWorldLight(g_game.getWorldLightInfo());
//player light level
sendCreatureLight(creature);
sendVIPEntries();
sendBasicData();
player->sendIcons();
}
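// sendMoveCreature: when the moving creature is the player itself, a 0x6D move
// is followed by the map strip that scrolled into view (0x65 north, 0x66 east,
// 0x67 south, 0x68 west); other creatures only get the 0x6D move when both the
// old and the new position are visible, otherwise they are added or removed.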
void ProtocolGame::sendMoveCreature(const Creature* creature, const Position& newPos, int32_t newStackPos, const Position& oldPos, int32_t oldStackPos, bool teleport)
{
if (creature == player) {
if (teleport) {
sendRemoveTileCreature(creature, oldPos, oldStackPos);
sendMapDescription(newPos);
} else {
NetworkMessage msg;
if (oldPos.z == 7 && newPos.z >= 8) {
RemoveTileCreature(msg, creature, oldPos, oldStackPos);
} else {
msg.addByte(0x6D);
if (oldStackPos < 10) {
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
} else {
msg.add<uint16_t>(0xFFFF);
msg.add<uint32_t>(creature->getID());
}
msg.addPosition(newPos);
}
if (newPos.z > oldPos.z) {
MoveDownCreature(msg, creature, newPos, oldPos);
} else if (newPos.z < oldPos.z) {
MoveUpCreature(msg, creature, newPos, oldPos);
}
if (oldPos.y > newPos.y) { // north, for old x
msg.addByte(0x65);
GetMapDescription(oldPos.x - Map::maxClientViewportX, newPos.y - Map::maxClientViewportY, newPos.z, (Map::maxClientViewportX * 2) + 2, 1, msg);
} else if (oldPos.y < newPos.y) { // south, for old x
msg.addByte(0x67);
GetMapDescription(oldPos.x - Map::maxClientViewportX, newPos.y + (Map::maxClientViewportY + 1), newPos.z, (Map::maxClientViewportX * 2) + 2, 1, msg);
}
if (oldPos.x < newPos.x) { // east, [with new y]
msg.addByte(0x66);
GetMapDescription(newPos.x + (Map::maxClientViewportX + 1), newPos.y - Map::maxClientViewportY, newPos.z, 1, (Map::maxClientViewportY * 2) + 2, msg);
} else if (oldPos.x > newPos.x) { // west, [with new y]
msg.addByte(0x68);
GetMapDescription(newPos.x - Map::maxClientViewportX, newPos.y - Map::maxClientViewportY, newPos.z, 1, (Map::maxClientViewportY * 2) + 2, msg);
}
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos) && canSee(creature->getPosition())) {
if (teleport || (oldPos.z == 7 && newPos.z >= 8)) {
sendRemoveTileCreature(creature, oldPos, oldStackPos);
sendAddCreature(creature, newPos, newStackPos, false);
} else {
NetworkMessage msg;
msg.addByte(0x6D);
if (oldStackPos < 10) {
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
} else {
msg.add<uint16_t>(0xFFFF);
msg.add<uint32_t>(creature->getID());
}
msg.addPosition(creature->getPosition());
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos)) {
sendRemoveTileCreature(creature, oldPos, oldStackPos);
} else if (canSee(creature->getPosition())) {
sendAddCreature(creature, newPos, newStackPos, false);
}
}
void ProtocolGame::sendInventoryItem(slots_t slot, const Item* item)
{
NetworkMessage msg;
if (item) {
msg.addByte(0x78);
msg.addByte(slot);
msg.addItem(item);
} else {
msg.addByte(0x79);
msg.addByte(slot);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendItems()
{
NetworkMessage msg;
msg.addByte(0xF5);
const std::vector<uint16_t>& inventory = Item::items.getInventory();
msg.add<uint16_t>(inventory.size() + 11);
for (uint16_t i = 1; i <= 11; i++) {
msg.add<uint16_t>(i);
msg.addByte(0); //always 0
msg.add<uint16_t>(1); // always 1
}
for (auto clientId : inventory) {
msg.add<uint16_t>(clientId);
msg.addByte(0); //always 0
msg.add<uint16_t>(1);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x70);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x71);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveContainerItem(uint8_t cid, uint16_t slot, const Item* lastItem)
{
NetworkMessage msg;
msg.addByte(0x72);
msg.addByte(cid);
msg.add<uint16_t>(slot);
if (lastItem) {
msg.addItem(lastItem);
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, Item* item, uint16_t maxlen, bool canWrite)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(item);
if (canWrite) {
msg.add<uint16_t>(maxlen);
msg.addString(item->getText());
} else {
const std::string& text = item->getText();
msg.add<uint16_t>(text.size());
msg.addString(text);
}
const std::string& writer = item->getWriter();
if (!writer.empty()) {
msg.addString(writer);
} else {
msg.add<uint16_t>(0x00);
}
time_t writtenDate = item->getDate();
if (writtenDate != 0) {
msg.addString(formatDateShort(writtenDate));
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, uint32_t itemId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(itemId, 1);
msg.add<uint16_t>(text.size());
msg.addString(text);
msg.add<uint16_t>(0x00);
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendHouseWindow(uint32_t windowTextId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x97);
msg.addByte(0x00);
msg.add<uint32_t>(windowTextId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendOutfitWindow()
{
const auto& outfits = Outfits::getInstance().getOutfits(player->getSex());
if (outfits.empty()) {
return;
}
NetworkMessage msg;
msg.addByte(0xC8);
Outfit_t currentOutfit = player->getDefaultOutfit();
if (currentOutfit.lookType == 0) {
Outfit_t newOutfit;
newOutfit.lookType = outfits.front().lookType;
currentOutfit = newOutfit;
}
Mount* currentMount = g_game.mounts.getMountByID(player->getCurrentMount());
if (currentMount) {
currentOutfit.lookMount = currentMount->clientId;
}
AddOutfit(msg, currentOutfit);
std::vector<ProtocolOutfit> protocolOutfits;
if (player->isAccessPlayer()) {
static const std::string gamemasterOutfitName = "Gamemaster";
protocolOutfits.emplace_back(gamemasterOutfitName, 75, 0);
}
protocolOutfits.reserve(outfits.size());
for (const Outfit& outfit : outfits) {
uint8_t addons;
if (!player->getOutfitAddons(outfit, addons)) {
continue;
}
protocolOutfits.emplace_back(outfit.name, outfit.lookType, addons);
if (protocolOutfits.size() == std::numeric_limits<uint8_t>::max()) { // Game client currently doesn't allow more than 255 outfits
break;
}
}
msg.addByte(protocolOutfits.size());
for (const ProtocolOutfit& outfit : protocolOutfits) {
msg.add<uint16_t>(outfit.lookType);
msg.addString(outfit.name);
msg.addByte(outfit.addons);
}
std::vector<const Mount*> mounts;
for (const Mount& mount : g_game.mounts.getMounts()) {
if (player->hasMount(&mount)) {
mounts.push_back(&mount);
}
}
msg.addByte(mounts.size());
for (const Mount* mount : mounts) {
msg.add<uint16_t>(mount->clientId);
msg.addString(mount->name);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdatedVIPStatus(uint32_t guid, VipStatus_t newStatus)
{
NetworkMessage msg;
msg.addByte(0xD3);
msg.add<uint32_t>(guid);
msg.addByte(newStatus);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIP(uint32_t guid, const std::string& name, const std::string& description, uint32_t icon, bool notify, VipStatus_t status)
{
NetworkMessage msg;
msg.addByte(0xD2);
msg.add<uint32_t>(guid);
msg.addString(name);
msg.addString(description);
msg.add<uint32_t>(std::min<uint32_t>(10, icon));
msg.addByte(notify ? 0x01 : 0x00);
msg.addByte(status);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIPEntries()
{
const std::forward_list<VIPEntry>& vipEntries = IOLoginData::getVIPEntries(player->getAccount());
for (const VIPEntry& entry : vipEntries) {
VipStatus_t vipStatus = VIPSTATUS_ONLINE;
Player* vipPlayer = g_game.getPlayerByGUID(entry.guid);
if (!vipPlayer || !player->canSeeCreature(vipPlayer)) {
vipStatus = VIPSTATUS_OFFLINE;
}
sendVIP(entry.guid, entry.name, entry.description, entry.icon, entry.notify, vipStatus);
}
}
void ProtocolGame::sendSpellCooldown(uint8_t spellId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA4);
msg.addByte(spellId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSpellGroupCooldown(SpellGroup_t groupId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA5);
msg.addByte(groupId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendModalWindow(const ModalWindow& modalWindow)
{
NetworkMessage msg;
msg.addByte(0xFA);
msg.add<uint32_t>(modalWindow.id);
msg.addString(modalWindow.title);
msg.addString(modalWindow.message);
msg.addByte(modalWindow.buttons.size());
for (const auto& it : modalWindow.buttons) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.choices.size());
for (const auto& it : modalWindow.choices) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.defaultEscapeButton);
msg.addByte(modalWindow.defaultEnterButton);
msg.addByte(modalWindow.priority ? 0x01 : 0x00);
writeToOutputBuffer(msg);
}
////////////// Add common messages
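// AddCreature emits 0x62 for a creature the client already knows (id only) and
// 0x61 for an unknown one, in which case the id of a cache entry to drop, the
// creature type and the name are included as well, followed by the shared
// block of health, direction, outfit, light, speed, skull/shield and flags.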
void ProtocolGame::AddCreature(NetworkMessage& msg, const Creature* creature, bool known, uint32_t remove)
{
CreatureType_t creatureType = creature->getType();
const Player* otherPlayer = creature->getPlayer();
if (known) {
msg.add<uint16_t>(0x62);
msg.add<uint32_t>(creature->getID());
} else {
msg.add<uint16_t>(0x61);
msg.add<uint32_t>(remove);
msg.add<uint32_t>(creature->getID());
msg.addByte(creatureType);
msg.addString(creature->getName());
}
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
msg.addByte(creature->getDirection());
if (!creature->isInGhostMode() && !creature->isInvisible()) {
AddOutfit(msg, creature->getCurrentOutfit());
} else {
static Outfit_t outfit;
AddOutfit(msg, outfit);
}
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(player->isAccessPlayer() ? 0xFF : lightInfo.level);
msg.addByte(lightInfo.color);
msg.add<uint16_t>(creature->getStepSpeed() / 2);
msg.addByte(player->getSkullClient(creature));
msg.addByte(player->getPartyShield(otherPlayer));
if (!known) {
msg.addByte(player->getGuildEmblem(otherPlayer));
}
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
const Player* masterPlayer = master->getPlayer();
if (masterPlayer) {
if (masterPlayer == player) {
creatureType = CREATURETYPE_SUMMON_OWN;
} else {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
}
msg.addByte(creatureType); // Type (for summons)
msg.addByte(creature->getSpeechBubble());
msg.addByte(0xFF); // MARK_UNMARKED
if (otherPlayer) {
msg.add<uint16_t>(otherPlayer->getHelpers());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
}
void ProtocolGame::AddPlayerStats(NetworkMessage& msg)
{
msg.addByte(0xA0);
msg.add<uint16_t>(std::min<int32_t>(player->getHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint32_t>(player->getFreeCapacity());
msg.add<uint32_t>(player->getCapacity());
msg.add<uint64_t>(player->getExperience());
msg.add<uint16_t>(player->getLevel());
msg.addByte(player->getLevelPercent());
msg.add<uint16_t>(100); // base xp gain rate
msg.add<uint16_t>(0); // xp voucher
msg.add<uint16_t>(0); // low level bonus
msg.add<uint16_t>(0); // xp boost
msg.add<uint16_t>(100); // stamina multiplier (100 = x1.0)
msg.add<uint16_t>(std::min<int32_t>(player->getMana(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxMana(), std::numeric_limits<uint16_t>::max()));
msg.addByte(std::min<uint32_t>(player->getMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(std::min<uint32_t>(player->getBaseMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(player->getMagicLevelPercent());
msg.addByte(player->getSoul());
msg.add<uint16_t>(player->getStaminaMinutes());
msg.add<uint16_t>(player->getBaseSpeed() / 2);
Condition* condition = player->getCondition(CONDITION_REGENERATION);
msg.add<uint16_t>(condition ? condition->getTicks() / 1000 : 0x00);
msg.add<uint16_t>(player->getOfflineTrainingTime() / 60 / 1000);
msg.add<uint16_t>(0); // xp boost time (seconds)
msg.addByte(0); // enables exp boost in the store
}
void ProtocolGame::AddPlayerSkills(NetworkMessage& msg)
{
msg.addByte(0xA1);
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(player->getSkillLevel(i), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(player->getBaseSkill(i));
msg.addByte(player->getSkillPercent(i));
}
for (uint8_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(100, player->varSpecialSkills[i]));
msg.add<uint16_t>(0);
}
}
void ProtocolGame::AddOutfit(NetworkMessage& msg, const Outfit_t& outfit)
{
msg.add<uint16_t>(outfit.lookType);
if (outfit.lookType != 0) {
msg.addByte(outfit.lookHead);
msg.addByte(outfit.lookBody);
msg.addByte(outfit.lookLegs);
msg.addByte(outfit.lookFeet);
msg.addByte(outfit.lookAddons);
} else {
msg.addItemId(outfit.lookTypeEx);
}
msg.add<uint16_t>(outfit.lookMount);
}
void ProtocolGame::AddWorldLight(NetworkMessage& msg, LightInfo lightInfo)
{
msg.addByte(0x82);
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
void ProtocolGame::AddCreatureLight(NetworkMessage& msg, const Creature* creature)
{
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(0x8D);
msg.add<uint32_t>(creature->getID());
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
//tile
void ProtocolGame::RemoveTileThing(NetworkMessage& msg, const Position& pos, uint32_t stackpos)
{
if (stackpos >= 10) {
return;
}
msg.addByte(0x6C);
msg.addPosition(pos);
msg.addByte(stackpos);
}
void ProtocolGame::RemoveTileCreature(NetworkMessage& msg, const Creature* creature, const Position& pos, uint32_t stackpos)
{
if (stackpos < 10) {
RemoveTileThing(msg, pos, stackpos);
return;
}
msg.addByte(0x6C);
msg.add<uint16_t>(0xFFFF);
msg.add<uint32_t>(creature->getID());
}
void ProtocolGame::MoveUpCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change up
msg.addByte(0xBE);
//going to surface
if (newPos.z == 7) {
int32_t skip = -1;
// floor 7 and 6 already set
for (int i = 5; i >= 0; --i) {
GetFloorDescription(msg, oldPos.x - Map::maxClientViewportX, oldPos.y - Map::maxClientViewportY, i, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, 8 - i, skip);
}
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//underground, going one floor up (still underground)
else if (newPos.z > 7) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - Map::maxClientViewportX, oldPos.y - Map::maxClientViewportY, oldPos.getZ() - 3, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, 3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//moving up a floor makes us out of sync
//west
msg.addByte(0x68);
GetMapDescription(oldPos.x - Map::maxClientViewportX, oldPos.y - (Map::maxClientViewportY - 1), newPos.z, 1, (Map::maxClientViewportY * 2) + 2, msg);
//north
msg.addByte(0x65);
GetMapDescription(oldPos.x - Map::maxClientViewportX, oldPos.y - Map::maxClientViewportY, newPos.z, (Map::maxClientViewportX * 2) + 2, 1, msg);
}
void ProtocolGame::MoveDownCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change down
msg.addByte(0xBF);
//going from surface to underground
if (newPos.z == 8) {
int32_t skip = -1;
for (int i = 0; i < 3; ++i) {
GetFloorDescription(msg, oldPos.x - Map::maxClientViewportX, oldPos.y - Map::maxClientViewportY, newPos.z + i, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, -i - 1, skip);
}
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//going further down
else if (newPos.z > oldPos.z && newPos.z > 8 && newPos.z < 14) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - Map::maxClientViewportX, oldPos.y - Map::maxClientViewportY, newPos.z + 2, (Map::maxClientViewportX * 2) + 2, (Map::maxClientViewportY * 2) + 2, -3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//moving down a floor makes us out of sync
//east
msg.addByte(0x66);
GetMapDescription(oldPos.x + (Map::maxClientViewportX + 1), oldPos.y - (Map::maxClientViewportY + 1), newPos.z, 1, (Map::maxClientViewportY * 2) + 2, msg);
//south
msg.addByte(0x67);
GetMapDescription(oldPos.x - Map::maxClientViewportX, oldPos.y + (Map::maxClientViewportY + 1), newPos.z, (Map::maxClientViewportX * 2) + 2, 1, msg);
}
void ProtocolGame::AddShopItem(NetworkMessage& msg, const ShopInfo& item)
{
const ItemType& it = Item::items[item.itemId];
msg.add<uint16_t>(it.clientId);
if (it.isSplash() || it.isFluidContainer()) {
msg.addByte(serverFluidToClient(item.subType));
} else {
msg.addByte(0x00);
}
msg.addString(item.realName);
msg.add<uint32_t>(it.weight);
msg.add<uint32_t>(item.buyPrice);
msg.add<uint32_t>(item.sellPrice);
}
void ProtocolGame::parseExtendedOpcode(NetworkMessage& msg)
{
uint8_t opcode = msg.getByte();
const std::string& buffer = msg.getString();
// process additional opcodes via lua script event
addGameTask(&Game::parsePlayerExtendedOpcode, player->getID(), opcode, buffer);
}
| 1 | 19,532 | Why would you add those if client version min is set to 1100? | otland-forgottenserver | cpp |
@@ -7,12 +7,13 @@
package snapshotsync
import (
+ reflect "reflect"
+ sync "sync"
+
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
- reflect "reflect"
- sync "sync"
)
const ( | 1 | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.14.0
// source: external_downloader.proto
package snapshotsync
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type SnapshotType int32
const (
SnapshotType_headers SnapshotType = 0
SnapshotType_bodies SnapshotType = 1
SnapshotType_state SnapshotType = 2
SnapshotType_receipts SnapshotType = 3
)
// Enum value maps for SnapshotType.
var (
SnapshotType_name = map[int32]string{
0: "headers",
1: "bodies",
2: "state",
3: "receipts",
}
SnapshotType_value = map[string]int32{
"headers": 0,
"bodies": 1,
"state": 2,
"receipts": 3,
}
)
func (x SnapshotType) Enum() *SnapshotType {
p := new(SnapshotType)
*p = x
return p
}
func (x SnapshotType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SnapshotType) Descriptor() protoreflect.EnumDescriptor {
return file_external_downloader_proto_enumTypes[0].Descriptor()
}
func (SnapshotType) Type() protoreflect.EnumType {
return &file_external_downloader_proto_enumTypes[0]
}
func (x SnapshotType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SnapshotType.Descriptor instead.
func (SnapshotType) EnumDescriptor() ([]byte, []int) {
return file_external_downloader_proto_rawDescGZIP(), []int{0}
}
type DownloadSnapshotRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NetworkId uint64 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
Type []SnapshotType `protobuf:"varint,2,rep,packed,name=type,proto3,enum=snapshotsync.SnapshotType" json:"type,omitempty"`
}
func (x *DownloadSnapshotRequest) Reset() {
*x = DownloadSnapshotRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_external_downloader_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DownloadSnapshotRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DownloadSnapshotRequest) ProtoMessage() {}
func (x *DownloadSnapshotRequest) ProtoReflect() protoreflect.Message {
mi := &file_external_downloader_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DownloadSnapshotRequest.ProtoReflect.Descriptor instead.
func (*DownloadSnapshotRequest) Descriptor() ([]byte, []int) {
return file_external_downloader_proto_rawDescGZIP(), []int{0}
}
func (x *DownloadSnapshotRequest) GetNetworkId() uint64 {
if x != nil {
return x.NetworkId
}
return 0
}
func (x *DownloadSnapshotRequest) GetType() []SnapshotType {
if x != nil {
return x.Type
}
return nil
}
type SnapshotsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NetworkId uint64 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"`
}
func (x *SnapshotsRequest) Reset() {
*x = SnapshotsRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_external_downloader_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SnapshotsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SnapshotsRequest) ProtoMessage() {}
func (x *SnapshotsRequest) ProtoReflect() protoreflect.Message {
mi := &file_external_downloader_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SnapshotsRequest.ProtoReflect.Descriptor instead.
func (*SnapshotsRequest) Descriptor() ([]byte, []int) {
return file_external_downloader_proto_rawDescGZIP(), []int{1}
}
func (x *SnapshotsRequest) GetNetworkId() uint64 {
if x != nil {
return x.NetworkId
}
return 0
}
type SnapshotsInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type SnapshotType `protobuf:"varint,1,opt,name=type,proto3,enum=snapshotsync.SnapshotType" json:"type,omitempty"`
GotInfoByte bool `protobuf:"varint,2,opt,name=gotInfoByte,proto3" json:"gotInfoByte,omitempty"`
Readiness int32 `protobuf:"varint,3,opt,name=readiness,proto3" json:"readiness,omitempty"`
SnapshotBlock uint64 `protobuf:"varint,4,opt,name=snapshotBlock,proto3" json:"snapshotBlock,omitempty"`
Dbpath string `protobuf:"bytes,5,opt,name=dbpath,proto3" json:"dbpath,omitempty"`
}
func (x *SnapshotsInfo) Reset() {
*x = SnapshotsInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_external_downloader_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SnapshotsInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SnapshotsInfo) ProtoMessage() {}
func (x *SnapshotsInfo) ProtoReflect() protoreflect.Message {
mi := &file_external_downloader_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SnapshotsInfo.ProtoReflect.Descriptor instead.
func (*SnapshotsInfo) Descriptor() ([]byte, []int) {
return file_external_downloader_proto_rawDescGZIP(), []int{2}
}
func (x *SnapshotsInfo) GetType() SnapshotType {
if x != nil {
return x.Type
}
return SnapshotType_headers
}
func (x *SnapshotsInfo) GetGotInfoByte() bool {
if x != nil {
return x.GotInfoByte
}
return false
}
func (x *SnapshotsInfo) GetReadiness() int32 {
if x != nil {
return x.Readiness
}
return 0
}
func (x *SnapshotsInfo) GetSnapshotBlock() uint64 {
if x != nil {
return x.SnapshotBlock
}
return 0
}
func (x *SnapshotsInfo) GetDbpath() string {
if x != nil {
return x.Dbpath
}
return ""
}
type SnapshotsInfoReply struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Info []*SnapshotsInfo `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"`
}
func (x *SnapshotsInfoReply) Reset() {
*x = SnapshotsInfoReply{}
if protoimpl.UnsafeEnabled {
mi := &file_external_downloader_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SnapshotsInfoReply) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SnapshotsInfoReply) ProtoMessage() {}
func (x *SnapshotsInfoReply) ProtoReflect() protoreflect.Message {
mi := &file_external_downloader_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SnapshotsInfoReply.ProtoReflect.Descriptor instead.
func (*SnapshotsInfoReply) Descriptor() ([]byte, []int) {
return file_external_downloader_proto_rawDescGZIP(), []int{3}
}
func (x *SnapshotsInfoReply) GetInfo() []*SnapshotsInfo {
if x != nil {
return x.Info
}
return nil
}
var File_external_downloader_proto protoreflect.FileDescriptor
var file_external_downloader_proto_rawDesc = []byte{
0x0a, 0x19, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c,
0x6f, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x6e, 0x61,
0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x79, 0x6e, 0x63, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, 0x17, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
0x61, 0x64, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64,
0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a,
0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x6e,
0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
0x22, 0x31, 0x0a, 0x10, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
0x6b, 0x49, 0x64, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x79,
0x6e, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x67, 0x6f, 0x74, 0x49, 0x6e, 0x66, 0x6f,
0x42, 0x79, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x67, 0x6f, 0x74, 0x49,
0x6e, 0x66, 0x6f, 0x42, 0x79, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x69,
0x6e, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64,
0x69, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f,
0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x73, 0x6e,
0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x16, 0x0a, 0x06, 0x64,
0x62, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x70,
0x61, 0x74, 0x68, 0x22, 0x45, 0x0a, 0x12, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2f, 0x0a, 0x04, 0x69, 0x6e, 0x66,
0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
0x6f, 0x74, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x2a, 0x40, 0x0a, 0x0c, 0x53, 0x6e,
0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x68, 0x65,
0x61, 0x64, 0x65, 0x72, 0x73, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x62, 0x6f, 0x64, 0x69, 0x65,
0x73, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x10, 0x02, 0x12, 0x0c,
0x0a, 0x08, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x10, 0x03, 0x32, 0xaa, 0x01, 0x0a,
0x0a, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x08, 0x44,
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
0x6f, 0x74, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x53,
0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70,
0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74,
0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x49, 0x6e,
0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x2f, 0x3b,
0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x79, 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_external_downloader_proto_rawDescOnce sync.Once
file_external_downloader_proto_rawDescData = file_external_downloader_proto_rawDesc
)
func file_external_downloader_proto_rawDescGZIP() []byte {
file_external_downloader_proto_rawDescOnce.Do(func() {
file_external_downloader_proto_rawDescData = protoimpl.X.CompressGZIP(file_external_downloader_proto_rawDescData)
})
return file_external_downloader_proto_rawDescData
}
var file_external_downloader_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_external_downloader_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_external_downloader_proto_goTypes = []interface{}{
(SnapshotType)(0), // 0: snapshotsync.SnapshotType
(*DownloadSnapshotRequest)(nil), // 1: snapshotsync.DownloadSnapshotRequest
(*SnapshotsRequest)(nil), // 2: snapshotsync.SnapshotsRequest
(*SnapshotsInfo)(nil), // 3: snapshotsync.SnapshotsInfo
(*SnapshotsInfoReply)(nil), // 4: snapshotsync.SnapshotsInfoReply
(*emptypb.Empty)(nil), // 5: google.protobuf.Empty
}
var file_external_downloader_proto_depIdxs = []int32{
0, // 0: snapshotsync.DownloadSnapshotRequest.type:type_name -> snapshotsync.SnapshotType
0, // 1: snapshotsync.SnapshotsInfo.type:type_name -> snapshotsync.SnapshotType
3, // 2: snapshotsync.SnapshotsInfoReply.info:type_name -> snapshotsync.SnapshotsInfo
1, // 3: snapshotsync.Downloader.Download:input_type -> snapshotsync.DownloadSnapshotRequest
2, // 4: snapshotsync.Downloader.Snapshots:input_type -> snapshotsync.SnapshotsRequest
5, // 5: snapshotsync.Downloader.Download:output_type -> google.protobuf.Empty
4, // 6: snapshotsync.Downloader.Snapshots:output_type -> snapshotsync.SnapshotsInfoReply
5, // [5:7] is the sub-list for method output_type
3, // [3:5] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_external_downloader_proto_init() }
func file_external_downloader_proto_init() {
if File_external_downloader_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_external_downloader_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DownloadSnapshotRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_external_downloader_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SnapshotsRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_external_downloader_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SnapshotsInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_external_downloader_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SnapshotsInfoReply); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_external_downloader_proto_rawDesc,
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_external_downloader_proto_goTypes,
DependencyIndexes: file_external_downloader_proto_depIdxs,
EnumInfos: file_external_downloader_proto_enumTypes,
MessageInfos: file_external_downloader_proto_msgTypes,
}.Build()
File_external_downloader_proto = out.File
file_external_downloader_proto_rawDesc = nil
file_external_downloader_proto_goTypes = nil
file_external_downloader_proto_depIdxs = nil
}
| 1 | 22,109 | You can delete this; it will now need to live in the `gointerfaces` package | ledgerwatch-erigon | go |
@@ -3,7 +3,7 @@
<%= render 'previous_next_doc' %>
-<% @page_title = t('blacklight.search.show.title', :document_title => document_show_html_title, :application_name => application_name) -%>
+<% @page_title = t('blacklight.search.show.title', :document_title => document_show_html_title, :application_name => application_name).html_safe -%>
<% content_for(:head) { render_link_rel_alternates } -%>
<%# this should be in a partial -%>
| 1 | <div id="content" class="col-md-9 show-document">
<%= render 'previous_next_doc' %>
<% @page_title = t('blacklight.search.show.title', :document_title => document_show_html_title, :application_name => application_name) -%>
<% content_for(:head) { render_link_rel_alternates } -%>
<%# this should be in a partial -%>
<div id="document" class="document <%= render_document_class %>" itemscope itemtype="<%= @document.itemtype %>">
<div id="doc_<%= @document.id.to_s.parameterize %>">
<% # bookmark/folder functions -%>
<%= render_document_partials @document, blacklight_config.view_config(:show).partials %>
</div>
</div>
<% if @document.respond_to?(:export_as_openurl_ctx_kev) %>
<!--
// COinS, for Zotero among others.
// This document_partial_name(@document) business is not quite right,
// but has been there for a while.
-->
<span class="Z3988" title="<%= @document.export_as_openurl_ctx_kev(document_partial_name(@document)) %>"></span>
<% end %>
</div>
<div id="sidebar" class="col-md-3">
<%= render_document_sidebar_partial %>
</div>
| 1 | 5,094 | Okay, I still don't understand why you have to add `html_safe` here, and it still seems like a very bad idea. It will allow HTML tags in the title, and keep Rails from escaping literal greater-than or less-than chars not intended as HTML tags. It ought to work to just let Rails do HTML-escaping as normal, without any manual `html_safe` or `strip_tags` or whatever. I am not clear on what problem you are trying to solve, or why the straightforward approach does not work. | projectblacklight-blacklight | rb |
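To make the escaping concern in that review concrete, here is a minimal plain-Ruby sketch of the behaviour being described. The sample title string is invented, and `html_safe` itself comes from ActiveSupport/Rails rather than the Ruby standard library, so it is only referenced in the comments rather than called.

```ruby
require "erb"

# Hypothetical page title containing characters that look like markup.
title = "Vectors: a < b & <b>bold</b>"

# Rails escapes <%= %> output by default; ERB::Util.html_escape applies the
# same transformation, so the brackets and ampersand reach the browser as
# literal text rather than as tags or entities.
puts ERB::Util.html_escape(title)
# => Vectors: a &lt; b &amp; &lt;b&gt;bold&lt;/b&gt;

# Marking the string with .html_safe (an ActiveSupport/Rails method, not part
# of plain Ruby) tells Rails to skip that escaping, so the <b> tag would be
# rendered as real markup, which is the risk the reviewer points out.
```

Whether dropping the escaping is acceptable here presumably depends on what `document_show_html_title` actually returns, which is the open question in this thread.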
@@ -1435,6 +1435,8 @@ public class MessageList extends K9Activity implements MessageListFragmentListen
public void displayMessageSubject(String subject) {
if (mDisplayMode == DisplayMode.MESSAGE_VIEW) {
mActionBarSubject.setText(subject);
+ } else {
+ mActionBarSubject.showSubjectInMessageHeader();
}
}
| 1 | package com.fsck.k9.activity;
import java.util.Collection;
import java.util.List;
import android.annotation.SuppressLint;
import android.app.ActionBar;
import android.app.FragmentManager;
import android.app.FragmentManager.OnBackStackChangedListener;
import android.app.FragmentTransaction;
import android.app.SearchManager;
import android.content.Context;
import android.content.Intent;
import android.content.IntentSender;
import android.content.IntentSender.SendIntentException;
import android.content.res.Configuration;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.os.Parcelable;
import android.util.Log;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.AnimationUtils;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.Account.SortType;
import com.fsck.k9.K9;
import com.fsck.k9.K9.SplitViewMode;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.compose.MessageActions;
import com.fsck.k9.activity.misc.SwipeGestureDetector.OnSwipeGestureListener;
import com.fsck.k9.activity.setup.AccountSettings;
import com.fsck.k9.activity.setup.FolderSettings;
import com.fsck.k9.activity.setup.Prefs;
import com.fsck.k9.fragment.MessageListFragment;
import com.fsck.k9.fragment.MessageListFragment.MessageListFragmentListener;
import com.fsck.k9.mailstore.StorageManager;
import com.fsck.k9.preferences.StorageEditor;
import com.fsck.k9.search.LocalSearch;
import com.fsck.k9.search.SearchAccount;
import com.fsck.k9.search.SearchSpecification;
import com.fsck.k9.search.SearchSpecification.Attribute;
import com.fsck.k9.search.SearchSpecification.SearchCondition;
import com.fsck.k9.search.SearchSpecification.SearchField;
import com.fsck.k9.ui.messageview.MessageViewFragment;
import com.fsck.k9.ui.messageview.MessageViewFragment.MessageViewFragmentListener;
import com.fsck.k9.view.MessageHeader;
import com.fsck.k9.view.MessageTitleView;
import com.fsck.k9.view.ViewSwitcher;
import com.fsck.k9.view.ViewSwitcher.OnSwitchCompleteListener;
import de.cketti.library.changelog.ChangeLog;
/**
* MessageList is the primary user interface for the program. This Activity
* shows a list of messages.
* From this Activity the user can perform all standard message operations.
*/
public class MessageList extends K9Activity implements MessageListFragmentListener,
MessageViewFragmentListener, OnBackStackChangedListener, OnSwipeGestureListener,
OnSwitchCompleteListener {
// for this activity
private static final String EXTRA_SEARCH = "search";
private static final String EXTRA_NO_THREADING = "no_threading";
private static final String ACTION_SHORTCUT = "shortcut";
private static final String EXTRA_SPECIAL_FOLDER = "special_folder";
private static final String EXTRA_MESSAGE_REFERENCE = "message_reference";
// used for remote search
public static final String EXTRA_SEARCH_ACCOUNT = "com.fsck.k9.search_account";
private static final String EXTRA_SEARCH_FOLDER = "com.fsck.k9.search_folder";
private static final String STATE_DISPLAY_MODE = "displayMode";
private static final String STATE_MESSAGE_LIST_WAS_DISPLAYED = "messageListWasDisplayed";
private static final String STATE_FIRST_BACK_STACK_ID = "firstBackstackId";
// Used for navigating to next/previous message
private static final int PREVIOUS = 1;
private static final int NEXT = 2;
public static final int REQUEST_MASK_PENDING_INTENT = 1 << 16;
public static void actionDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask) {
actionDisplaySearch(context, search, noThreading, newTask, true);
}
public static void actionDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask, boolean clearTop) {
context.startActivity(
intentDisplaySearch(context, search, noThreading, newTask, clearTop));
}
public static Intent intentDisplaySearch(Context context, SearchSpecification search,
boolean noThreading, boolean newTask, boolean clearTop) {
Intent intent = new Intent(context, MessageList.class);
intent.putExtra(EXTRA_SEARCH, search);
intent.putExtra(EXTRA_NO_THREADING, noThreading);
if (clearTop) {
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
}
if (newTask) {
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
return intent;
}
public static Intent shortcutIntent(Context context, String specialFolder) {
Intent intent = new Intent(context, MessageList.class);
intent.setAction(ACTION_SHORTCUT);
intent.putExtra(EXTRA_SPECIAL_FOLDER, specialFolder);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
}
public static Intent actionDisplayMessageIntent(Context context,
MessageReference messageReference) {
Intent intent = new Intent(context, MessageList.class);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intent.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference);
return intent;
}
private enum DisplayMode {
MESSAGE_LIST,
MESSAGE_VIEW,
SPLIT_VIEW
}
private StorageManager.StorageListener mStorageListener = new StorageListenerImplementation();
private ActionBar mActionBar;
private View mActionBarMessageList;
private View mActionBarMessageView;
private MessageTitleView mActionBarSubject;
private TextView mActionBarTitle;
private TextView mActionBarSubTitle;
private TextView mActionBarUnread;
private Menu mMenu;
private ViewGroup mMessageViewContainer;
private View mMessageViewPlaceHolder;
private MessageListFragment mMessageListFragment;
private MessageViewFragment mMessageViewFragment;
private int mFirstBackStackId = -1;
private Account mAccount;
private String mFolderName;
private LocalSearch mSearch;
private boolean mSingleFolderMode;
private boolean mSingleAccountMode;
private ProgressBar mActionBarProgress;
private MenuItem mMenuButtonCheckMail;
private View mActionButtonIndeterminateProgress;
private int mLastDirection = (K9.messageViewShowNext()) ? NEXT : PREVIOUS;
/**
* {@code true} if the message list should be displayed as a flat list (i.e. no threading)
* regardless of whether or not message threading was enabled in the settings. This is used for
* filtered views, e.g. when only displaying the unread messages in a folder.
*/
private boolean mNoThreading;
private DisplayMode mDisplayMode;
private MessageReference mMessageReference;
/**
* {@code true} when the message list was displayed once. This is used in
* {@link #onBackPressed()} to decide whether to go from the message view to the message list or
* finish the activity.
*/
private boolean mMessageListWasDisplayed = false;
private ViewSwitcher mViewSwitcher;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) {
finish();
return;
}
if (useSplitView()) {
setContentView(R.layout.split_message_list);
} else {
setContentView(R.layout.message_list);
mViewSwitcher = (ViewSwitcher) findViewById(R.id.container);
mViewSwitcher.setFirstInAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_in_left));
mViewSwitcher.setFirstOutAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_out_right));
mViewSwitcher.setSecondInAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_in_right));
mViewSwitcher.setSecondOutAnimation(AnimationUtils.loadAnimation(this, R.anim.slide_out_left));
mViewSwitcher.setOnSwitchCompleteListener(this);
}
initializeActionBar();
// Enable gesture detection for MessageLists
setupGestureDetector(this);
if (!decodeExtras(getIntent())) {
return;
}
findFragments();
initializeDisplayMode(savedInstanceState);
initializeLayout();
initializeFragments();
displayViews();
ChangeLog cl = new ChangeLog(this);
if (cl.isFirstRun()) {
cl.getLogDialog().show();
}
}
@Override
public void onNewIntent(Intent intent) {
super.onNewIntent(intent);
setIntent(intent);
if (mFirstBackStackId >= 0) {
getFragmentManager().popBackStackImmediate(mFirstBackStackId,
FragmentManager.POP_BACK_STACK_INCLUSIVE);
mFirstBackStackId = -1;
}
removeMessageListFragment();
removeMessageViewFragment();
mMessageReference = null;
mSearch = null;
mFolderName = null;
if (!decodeExtras(intent)) {
return;
}
initializeDisplayMode(null);
initializeFragments();
displayViews();
}
/**
* Get references to existing fragments if the activity was restarted.
*/
private void findFragments() {
FragmentManager fragmentManager = getFragmentManager();
mMessageListFragment = (MessageListFragment) fragmentManager.findFragmentById(
R.id.message_list_container);
mMessageViewFragment = (MessageViewFragment) fragmentManager.findFragmentById(
R.id.message_view_container);
}
/**
* Create fragment instances if necessary.
*
* @see #findFragments()
*/
private void initializeFragments() {
FragmentManager fragmentManager = getFragmentManager();
fragmentManager.addOnBackStackChangedListener(this);
boolean hasMessageListFragment = (mMessageListFragment != null);
if (!hasMessageListFragment) {
FragmentTransaction ft = fragmentManager.beginTransaction();
mMessageListFragment = MessageListFragment.newInstance(mSearch, false,
(K9.isThreadedViewEnabled() && !mNoThreading));
ft.add(R.id.message_list_container, mMessageListFragment);
ft.commit();
}
// Check if the fragment wasn't restarted and has a MessageReference in the arguments. If
// so, open the referenced message.
if (!hasMessageListFragment && mMessageViewFragment == null &&
mMessageReference != null) {
openMessage(mMessageReference);
}
}
/**
* Set the initial display mode (message list, message view, or split view).
*
* <p><strong>Note:</strong>
* This method has to be called after {@link #findFragments()} because the result depends on
* the availability of a {@link MessageViewFragment} instance.
* </p>
*
* @param savedInstanceState
* The saved instance state that was passed to the activity as argument to
* {@link #onCreate(Bundle)}. May be {@code null}.
*/
private void initializeDisplayMode(Bundle savedInstanceState) {
if (useSplitView()) {
mDisplayMode = DisplayMode.SPLIT_VIEW;
return;
}
if (savedInstanceState != null) {
DisplayMode savedDisplayMode =
(DisplayMode) savedInstanceState.getSerializable(STATE_DISPLAY_MODE);
if (savedDisplayMode != DisplayMode.SPLIT_VIEW) {
mDisplayMode = savedDisplayMode;
return;
}
}
if (mMessageViewFragment != null || mMessageReference != null) {
mDisplayMode = DisplayMode.MESSAGE_VIEW;
} else {
mDisplayMode = DisplayMode.MESSAGE_LIST;
}
}
private boolean useSplitView() {
SplitViewMode splitViewMode = K9.getSplitViewMode();
int orientation = getResources().getConfiguration().orientation;
return (splitViewMode == SplitViewMode.ALWAYS ||
(splitViewMode == SplitViewMode.WHEN_IN_LANDSCAPE &&
orientation == Configuration.ORIENTATION_LANDSCAPE));
}
private void initializeLayout() {
mMessageViewContainer = (ViewGroup) findViewById(R.id.message_view_container);
LayoutInflater layoutInflater = getLayoutInflater();
mMessageViewPlaceHolder = layoutInflater.inflate(R.layout.empty_message_view, mMessageViewContainer, false);
}
private void displayViews() {
switch (mDisplayMode) {
case MESSAGE_LIST: {
showMessageList();
break;
}
case MESSAGE_VIEW: {
showMessageView();
break;
}
case SPLIT_VIEW: {
mMessageListWasDisplayed = true;
if (mMessageViewFragment == null) {
showMessageViewPlaceHolder();
} else {
MessageReference activeMessage = mMessageViewFragment.getMessageReference();
if (activeMessage != null) {
mMessageListFragment.setActiveMessage(activeMessage);
}
}
break;
}
}
}
private boolean decodeExtras(Intent intent) {
String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action) && intent.getData() != null) {
Uri uri = intent.getData();
List<String> segmentList = uri.getPathSegments();
String accountId = segmentList.get(0);
Collection<Account> accounts = Preferences.getPreferences(this).getAvailableAccounts();
for (Account account : accounts) {
if (String.valueOf(account.getAccountNumber()).equals(accountId)) {
String folderName = segmentList.get(1);
String messageUid = segmentList.get(2);
mMessageReference = new MessageReference(account.getUuid(), folderName, messageUid, null);
break;
}
}
} else if (ACTION_SHORTCUT.equals(action)) {
// Handle shortcut intents
String specialFolder = intent.getStringExtra(EXTRA_SPECIAL_FOLDER);
if (SearchAccount.UNIFIED_INBOX.equals(specialFolder)) {
mSearch = SearchAccount.createUnifiedInboxAccount(this).getRelatedSearch();
} else if (SearchAccount.ALL_MESSAGES.equals(specialFolder)) {
mSearch = SearchAccount.createAllMessagesAccount(this).getRelatedSearch();
}
} else if (intent.getStringExtra(SearchManager.QUERY) != null) {
// check if this intent comes from the system search ( remote )
if (Intent.ACTION_SEARCH.equals(intent.getAction())) {
//Query was received from Search Dialog
String query = intent.getStringExtra(SearchManager.QUERY).trim();
mSearch = new LocalSearch(getString(R.string.search_results));
mSearch.setManualSearch(true);
mNoThreading = true;
mSearch.or(new SearchCondition(SearchField.SENDER, Attribute.CONTAINS, query));
mSearch.or(new SearchCondition(SearchField.SUBJECT, Attribute.CONTAINS, query));
mSearch.or(new SearchCondition(SearchField.MESSAGE_CONTENTS, Attribute.CONTAINS, query));
Bundle appData = intent.getBundleExtra(SearchManager.APP_DATA);
if (appData != null) {
mSearch.addAccountUuid(appData.getString(EXTRA_SEARCH_ACCOUNT));
// searches started from a folder list activity will provide an account, but no folder
if (appData.getString(EXTRA_SEARCH_FOLDER) != null) {
mSearch.addAllowedFolder(appData.getString(EXTRA_SEARCH_FOLDER));
}
} else {
mSearch.addAccountUuid(LocalSearch.ALL_ACCOUNTS);
}
}
} else {
// regular LocalSearch object was passed
mSearch = intent.getParcelableExtra(EXTRA_SEARCH);
mNoThreading = intent.getBooleanExtra(EXTRA_NO_THREADING, false);
}
if (mMessageReference == null) {
mMessageReference = intent.getParcelableExtra(EXTRA_MESSAGE_REFERENCE);
}
if (mMessageReference != null) {
mSearch = new LocalSearch();
mSearch.addAccountUuid(mMessageReference.getAccountUuid());
mSearch.addAllowedFolder(mMessageReference.getFolderName());
}
if (mSearch == null) {
// We've most likely been started by an old unread widget
String accountUuid = intent.getStringExtra("account");
String folderName = intent.getStringExtra("folder");
mSearch = new LocalSearch(folderName);
mSearch.addAccountUuid((accountUuid == null) ? "invalid" : accountUuid);
if (folderName != null) {
mSearch.addAllowedFolder(folderName);
}
}
Preferences prefs = Preferences.getPreferences(getApplicationContext());
String[] accountUuids = mSearch.getAccountUuids();
if (mSearch.searchAllAccounts()) {
List<Account> accounts = prefs.getAccounts();
mSingleAccountMode = (accounts.size() == 1);
if (mSingleAccountMode) {
mAccount = accounts.get(0);
}
} else {
mSingleAccountMode = (accountUuids.length == 1);
if (mSingleAccountMode) {
mAccount = prefs.getAccount(accountUuids[0]);
}
}
mSingleFolderMode = mSingleAccountMode && (mSearch.getFolderNames().size() == 1);
if (mSingleAccountMode && (mAccount == null || !mAccount.isAvailable(this))) {
Log.i(K9.LOG_TAG, "not opening MessageList of unavailable account");
onAccountUnavailable();
return false;
}
if (mSingleFolderMode) {
mFolderName = mSearch.getFolderNames().get(0);
}
// now we know if we are in single account mode and need a subtitle
mActionBarSubTitle.setVisibility((!mSingleFolderMode) ? View.GONE : View.VISIBLE);
return true;
}
@Override
public void onPause() {
super.onPause();
StorageManager.getInstance(getApplication()).removeListener(mStorageListener);
}
@Override
public void onResume() {
super.onResume();
if (!(this instanceof Search)) {
//necessary b/c no guarantee Search.onStop will be called before MessageList.onResume
//when returning from search results
Search.setActive(false);
}
if (mAccount != null && !mAccount.isAvailable(this)) {
onAccountUnavailable();
return;
}
StorageManager.getInstance(getApplication()).addListener(mStorageListener);
}
@Override
public void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putSerializable(STATE_DISPLAY_MODE, mDisplayMode);
outState.putBoolean(STATE_MESSAGE_LIST_WAS_DISPLAYED, mMessageListWasDisplayed);
outState.putInt(STATE_FIRST_BACK_STACK_ID, mFirstBackStackId);
}
@Override
public void onRestoreInstanceState(Bundle savedInstanceState) {
mMessageListWasDisplayed = savedInstanceState.getBoolean(STATE_MESSAGE_LIST_WAS_DISPLAYED);
mFirstBackStackId = savedInstanceState.getInt(STATE_FIRST_BACK_STACK_ID);
}
private void initializeActionBar() {
mActionBar = getActionBar();
mActionBar.setDisplayShowCustomEnabled(true);
mActionBar.setCustomView(R.layout.actionbar_custom);
View customView = mActionBar.getCustomView();
mActionBarMessageList = customView.findViewById(R.id.actionbar_message_list);
mActionBarMessageView = customView.findViewById(R.id.actionbar_message_view);
mActionBarSubject = (MessageTitleView) customView.findViewById(R.id.message_title_view);
mActionBarTitle = (TextView) customView.findViewById(R.id.actionbar_title_first);
mActionBarSubTitle = (TextView) customView.findViewById(R.id.actionbar_title_sub);
mActionBarUnread = (TextView) customView.findViewById(R.id.actionbar_unread_count);
mActionBarProgress = (ProgressBar) customView.findViewById(R.id.actionbar_progress);
mActionButtonIndeterminateProgress = getActionButtonIndeterminateProgress();
mActionBar.setDisplayHomeAsUpEnabled(true);
}
@SuppressLint("InflateParams")
private View getActionButtonIndeterminateProgress() {
return getLayoutInflater().inflate(R.layout.actionbar_indeterminate_progress_actionview, null);
}
@Override
public boolean dispatchKeyEvent(KeyEvent event) {
boolean ret = false;
if (KeyEvent.ACTION_DOWN == event.getAction()) {
ret = onCustomKeyDown(event.getKeyCode(), event);
}
if (!ret) {
ret = super.dispatchKeyEvent(event);
}
return ret;
}
@Override
public void onBackPressed() {
if (mDisplayMode == DisplayMode.MESSAGE_VIEW && mMessageListWasDisplayed) {
showMessageList();
} else {
super.onBackPressed();
}
}
/**
* Handle hotkeys
*
* <p>
* This method is called by {@link #dispatchKeyEvent(KeyEvent)} before any view had the chance
* to consume this key event.
* </p>
*
* @param keyCode
* The value in {@code event.getKeyCode()}.
* @param event
* Description of the key event.
*
* @return {@code true} if this event was consumed.
*/
public boolean onCustomKeyDown(final int keyCode, final KeyEvent event) {
switch (keyCode) {
case KeyEvent.KEYCODE_VOLUME_UP: {
if (mMessageViewFragment != null && mDisplayMode != DisplayMode.MESSAGE_LIST &&
K9.useVolumeKeysForNavigationEnabled()) {
showPreviousMessage();
return true;
} else if (mDisplayMode != DisplayMode.MESSAGE_VIEW &&
K9.useVolumeKeysForListNavigationEnabled()) {
mMessageListFragment.onMoveUp();
return true;
}
break;
}
case KeyEvent.KEYCODE_VOLUME_DOWN: {
if (mMessageViewFragment != null && mDisplayMode != DisplayMode.MESSAGE_LIST &&
K9.useVolumeKeysForNavigationEnabled()) {
showNextMessage();
return true;
} else if (mDisplayMode != DisplayMode.MESSAGE_VIEW &&
K9.useVolumeKeysForListNavigationEnabled()) {
mMessageListFragment.onMoveDown();
return true;
}
break;
}
case KeyEvent.KEYCODE_C: {
mMessageListFragment.onCompose();
return true;
}
case KeyEvent.KEYCODE_Q: {
if (mMessageListFragment != null && mMessageListFragment.isSingleAccountMode()) {
onShowFolderList();
}
return true;
}
case KeyEvent.KEYCODE_O: {
mMessageListFragment.onCycleSort();
return true;
}
case KeyEvent.KEYCODE_I: {
mMessageListFragment.onReverseSort();
return true;
}
case KeyEvent.KEYCODE_DEL:
case KeyEvent.KEYCODE_D: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onDelete();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onDelete();
}
return true;
}
case KeyEvent.KEYCODE_S: {
mMessageListFragment.toggleMessageSelect();
return true;
}
case KeyEvent.KEYCODE_G: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onToggleFlagged();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onToggleFlagged();
}
return true;
}
case KeyEvent.KEYCODE_M: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onMove();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onMove();
}
return true;
}
case KeyEvent.KEYCODE_V: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onArchive();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onArchive();
}
return true;
}
case KeyEvent.KEYCODE_Y: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onCopy();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onCopy();
}
return true;
}
case KeyEvent.KEYCODE_Z: {
if (mDisplayMode == DisplayMode.MESSAGE_LIST) {
mMessageListFragment.onToggleRead();
} else if (mMessageViewFragment != null) {
mMessageViewFragment.onToggleRead();
}
return true;
}
case KeyEvent.KEYCODE_F: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onForward();
}
return true;
}
case KeyEvent.KEYCODE_A: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onReplyAll();
}
return true;
}
case KeyEvent.KEYCODE_R: {
if (mMessageViewFragment != null) {
mMessageViewFragment.onReply();
}
return true;
}
case KeyEvent.KEYCODE_J:
case KeyEvent.KEYCODE_P: {
if (mMessageViewFragment != null) {
showPreviousMessage();
}
return true;
}
case KeyEvent.KEYCODE_N:
case KeyEvent.KEYCODE_K: {
if (mMessageViewFragment != null) {
showNextMessage();
}
return true;
}
/* FIXME
case KeyEvent.KEYCODE_Z: {
mMessageViewFragment.zoom(event);
return true;
}*/
case KeyEvent.KEYCODE_H: {
Toast toast = Toast.makeText(this, R.string.message_list_help_key, Toast.LENGTH_LONG);
toast.show();
return true;
}
case KeyEvent.KEYCODE_DPAD_LEFT: {
if (mMessageViewFragment != null && mDisplayMode == DisplayMode.MESSAGE_VIEW) {
return showPreviousMessage();
}
return false;
}
case KeyEvent.KEYCODE_DPAD_RIGHT: {
if (mMessageViewFragment != null && mDisplayMode == DisplayMode.MESSAGE_VIEW) {
return showNextMessage();
}
return false;
}
}
return false;
}
@Override
public boolean onKeyUp(int keyCode, KeyEvent event) {
// Swallow these events too to avoid the audible notification of a volume change
if (K9.useVolumeKeysForListNavigationEnabled()) {
if ((keyCode == KeyEvent.KEYCODE_VOLUME_UP) || (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN)) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Swallowed key up.");
return true;
}
}
return super.onKeyUp(keyCode, event);
}
private void onAccounts() {
Accounts.listAccounts(this);
finish();
}
private void onShowFolderList() {
FolderList.actionHandleAccount(this, mAccount);
finish();
}
private void onEditPrefs() {
Prefs.actionPrefs(this);
}
private void onEditAccount() {
AccountSettings.actionSettings(this, mAccount);
}
@Override
public boolean onSearchRequested() {
return mMessageListFragment.onSearchRequested();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int itemId = item.getItemId();
switch (itemId) {
case android.R.id.home: {
goBack();
return true;
}
case R.id.compose: {
mMessageListFragment.onCompose();
return true;
}
case R.id.toggle_message_view_theme: {
onToggleTheme();
return true;
}
// MessageList
case R.id.check_mail: {
mMessageListFragment.checkMail();
return true;
}
case R.id.set_sort_date: {
mMessageListFragment.changeSort(SortType.SORT_DATE);
return true;
}
case R.id.set_sort_arrival: {
mMessageListFragment.changeSort(SortType.SORT_ARRIVAL);
return true;
}
case R.id.set_sort_subject: {
mMessageListFragment.changeSort(SortType.SORT_SUBJECT);
return true;
}
case R.id.set_sort_sender: {
mMessageListFragment.changeSort(SortType.SORT_SENDER);
return true;
}
case R.id.set_sort_flag: {
mMessageListFragment.changeSort(SortType.SORT_FLAGGED);
return true;
}
case R.id.set_sort_unread: {
mMessageListFragment.changeSort(SortType.SORT_UNREAD);
return true;
}
case R.id.set_sort_attach: {
mMessageListFragment.changeSort(SortType.SORT_ATTACHMENT);
return true;
}
case R.id.select_all: {
mMessageListFragment.selectAll();
return true;
}
case R.id.app_settings: {
onEditPrefs();
return true;
}
case R.id.account_settings: {
onEditAccount();
return true;
}
case R.id.search: {
mMessageListFragment.onSearchRequested();
return true;
}
case R.id.search_remote: {
mMessageListFragment.onRemoteSearch();
return true;
}
case R.id.mark_all_as_read: {
mMessageListFragment.confirmMarkAllAsRead();
return true;
}
case R.id.show_folder_list: {
onShowFolderList();
return true;
}
// MessageView
case R.id.next_message: {
showNextMessage();
return true;
}
case R.id.previous_message: {
showPreviousMessage();
return true;
}
case R.id.delete: {
mMessageViewFragment.onDelete();
return true;
}
case R.id.reply: {
mMessageViewFragment.onReply();
return true;
}
case R.id.reply_all: {
mMessageViewFragment.onReplyAll();
return true;
}
case R.id.forward: {
mMessageViewFragment.onForward();
return true;
}
case R.id.share: {
mMessageViewFragment.onSendAlternate();
return true;
}
case R.id.toggle_unread: {
mMessageViewFragment.onToggleRead();
return true;
}
case R.id.archive:
case R.id.refile_archive: {
mMessageViewFragment.onArchive();
return true;
}
case R.id.spam:
case R.id.refile_spam: {
mMessageViewFragment.onSpam();
return true;
}
case R.id.move:
case R.id.refile_move: {
mMessageViewFragment.onMove();
return true;
}
case R.id.copy:
case R.id.refile_copy: {
mMessageViewFragment.onCopy();
return true;
}
case R.id.select_text: {
mMessageViewFragment.onSelectText();
return true;
}
case R.id.show_headers:
case R.id.hide_headers: {
mMessageViewFragment.onToggleAllHeadersView();
updateMenu();
return true;
}
}
if (!mSingleFolderMode) {
// None of the options after this point are "safe" for search results
//TODO: This is not true for "unread" and "starred" searches in regular folders
return false;
}
switch (itemId) {
case R.id.send_messages: {
mMessageListFragment.onSendPendingMessages();
return true;
}
case R.id.folder_settings: {
if (mFolderName != null) {
FolderSettings.actionSettings(this, mAccount, mFolderName);
}
return true;
}
case R.id.expunge: {
mMessageListFragment.onExpunge();
return true;
}
default: {
return super.onOptionsItemSelected(item);
}
}
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.message_list_option, menu);
mMenu = menu;
mMenuButtonCheckMail = menu.findItem(R.id.check_mail);
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
configureMenu(menu);
return true;
}
/**
* Hide menu items not appropriate for the current context.
*
* <p><strong>Note:</strong>
* Please adjust the comments in {@code res/menu/message_list_option.xml} if you change the
* visibility of a menu item in this method.
* </p>
*
* @param menu
* The {@link Menu} instance that should be modified. May be {@code null}; in that case
* the method does nothing and immediately returns.
*/
private void configureMenu(Menu menu) {
if (menu == null) {
return;
}
// Set visibility of account/folder settings menu items
if (mMessageListFragment == null) {
menu.findItem(R.id.account_settings).setVisible(false);
menu.findItem(R.id.folder_settings).setVisible(false);
} else {
menu.findItem(R.id.account_settings).setVisible(
mMessageListFragment.isSingleAccountMode());
menu.findItem(R.id.folder_settings).setVisible(
mMessageListFragment.isSingleFolderMode());
}
/*
* Set visibility of menu items related to the message view
*/
if (mDisplayMode == DisplayMode.MESSAGE_LIST
|| mMessageViewFragment == null
|| !mMessageViewFragment.isInitialized()) {
menu.findItem(R.id.next_message).setVisible(false);
menu.findItem(R.id.previous_message).setVisible(false);
menu.findItem(R.id.single_message_options).setVisible(false);
menu.findItem(R.id.delete).setVisible(false);
menu.findItem(R.id.compose).setVisible(false);
menu.findItem(R.id.archive).setVisible(false);
menu.findItem(R.id.move).setVisible(false);
menu.findItem(R.id.copy).setVisible(false);
menu.findItem(R.id.spam).setVisible(false);
menu.findItem(R.id.refile).setVisible(false);
menu.findItem(R.id.toggle_unread).setVisible(false);
menu.findItem(R.id.select_text).setVisible(false);
menu.findItem(R.id.toggle_message_view_theme).setVisible(false);
menu.findItem(R.id.show_headers).setVisible(false);
menu.findItem(R.id.hide_headers).setVisible(false);
} else {
// hide prev/next buttons in split mode
if (mDisplayMode != DisplayMode.MESSAGE_VIEW) {
menu.findItem(R.id.next_message).setVisible(false);
menu.findItem(R.id.previous_message).setVisible(false);
} else {
MessageReference ref = mMessageViewFragment.getMessageReference();
boolean initialized = (mMessageListFragment != null &&
mMessageListFragment.isLoadFinished());
boolean canDoPrev = (initialized && !mMessageListFragment.isFirst(ref));
boolean canDoNext = (initialized && !mMessageListFragment.isLast(ref));
MenuItem prev = menu.findItem(R.id.previous_message);
prev.setEnabled(canDoPrev);
prev.getIcon().setAlpha(canDoPrev ? 255 : 127);
MenuItem next = menu.findItem(R.id.next_message);
next.setEnabled(canDoNext);
next.getIcon().setAlpha(canDoNext ? 255 : 127);
}
MenuItem toggleTheme = menu.findItem(R.id.toggle_message_view_theme);
if (K9.useFixedMessageViewTheme()) {
toggleTheme.setVisible(false);
} else {
// Set title of menu item to switch to dark/light theme
if (K9.getK9MessageViewTheme() == K9.Theme.DARK) {
toggleTheme.setTitle(R.string.message_view_theme_action_light);
} else {
toggleTheme.setTitle(R.string.message_view_theme_action_dark);
}
toggleTheme.setVisible(true);
}
// Set title of menu item to toggle the read state of the currently displayed message
if (mMessageViewFragment.isMessageRead()) {
menu.findItem(R.id.toggle_unread).setTitle(R.string.mark_as_unread_action);
} else {
menu.findItem(R.id.toggle_unread).setTitle(R.string.mark_as_read_action);
}
// Jellybean has built-in long press selection support
menu.findItem(R.id.select_text).setVisible(Build.VERSION.SDK_INT < 16);
menu.findItem(R.id.delete).setVisible(K9.isMessageViewDeleteActionVisible());
/*
* Set visibility of copy, move, archive, spam in action bar and refile submenu
*/
if (mMessageViewFragment.isCopyCapable()) {
menu.findItem(R.id.copy).setVisible(K9.isMessageViewCopyActionVisible());
menu.findItem(R.id.refile_copy).setVisible(true);
} else {
menu.findItem(R.id.copy).setVisible(false);
menu.findItem(R.id.refile_copy).setVisible(false);
}
if (mMessageViewFragment.isMoveCapable()) {
boolean canMessageBeArchived = mMessageViewFragment.canMessageBeArchived();
boolean canMessageBeMovedToSpam = mMessageViewFragment.canMessageBeMovedToSpam();
menu.findItem(R.id.move).setVisible(K9.isMessageViewMoveActionVisible());
menu.findItem(R.id.archive).setVisible(canMessageBeArchived &&
K9.isMessageViewArchiveActionVisible());
menu.findItem(R.id.spam).setVisible(canMessageBeMovedToSpam &&
K9.isMessageViewSpamActionVisible());
menu.findItem(R.id.refile_move).setVisible(true);
menu.findItem(R.id.refile_archive).setVisible(canMessageBeArchived);
menu.findItem(R.id.refile_spam).setVisible(canMessageBeMovedToSpam);
} else {
menu.findItem(R.id.move).setVisible(false);
menu.findItem(R.id.archive).setVisible(false);
menu.findItem(R.id.spam).setVisible(false);
menu.findItem(R.id.refile).setVisible(false);
}
if (mMessageViewFragment.allHeadersVisible()) {
menu.findItem(R.id.show_headers).setVisible(false);
} else {
menu.findItem(R.id.hide_headers).setVisible(false);
}
}
/*
* Set visibility of menu items related to the message list
*/
// Hide both search menu items by default and enable one when appropriate
menu.findItem(R.id.search).setVisible(false);
menu.findItem(R.id.search_remote).setVisible(false);
if (mDisplayMode == DisplayMode.MESSAGE_VIEW || mMessageListFragment == null ||
!mMessageListFragment.isInitialized()) {
menu.findItem(R.id.check_mail).setVisible(false);
menu.findItem(R.id.set_sort).setVisible(false);
menu.findItem(R.id.select_all).setVisible(false);
menu.findItem(R.id.send_messages).setVisible(false);
menu.findItem(R.id.expunge).setVisible(false);
menu.findItem(R.id.mark_all_as_read).setVisible(false);
menu.findItem(R.id.show_folder_list).setVisible(false);
} else {
menu.findItem(R.id.set_sort).setVisible(true);
menu.findItem(R.id.select_all).setVisible(true);
menu.findItem(R.id.compose).setVisible(true);
menu.findItem(R.id.mark_all_as_read).setVisible(
mMessageListFragment.isMarkAllAsReadSupported());
if (!mMessageListFragment.isSingleAccountMode()) {
menu.findItem(R.id.expunge).setVisible(false);
menu.findItem(R.id.send_messages).setVisible(false);
menu.findItem(R.id.show_folder_list).setVisible(false);
} else {
menu.findItem(R.id.send_messages).setVisible(mMessageListFragment.isOutbox());
menu.findItem(R.id.expunge).setVisible(mMessageListFragment.isRemoteFolder() &&
mMessageListFragment.isAccountExpungeCapable());
menu.findItem(R.id.show_folder_list).setVisible(true);
}
menu.findItem(R.id.check_mail).setVisible(mMessageListFragment.isCheckMailSupported());
// If this is an explicit local search, show the option to search on the server
if (!mMessageListFragment.isRemoteSearch() &&
mMessageListFragment.isRemoteSearchAllowed()) {
menu.findItem(R.id.search_remote).setVisible(true);
} else if (!mMessageListFragment.isManualSearch()) {
menu.findItem(R.id.search).setVisible(true);
}
}
}
protected void onAccountUnavailable() {
finish();
// TODO inform user about account unavailability using Toast
Accounts.listAccounts(this);
}
public void setActionBarTitle(String title) {
mActionBarTitle.setText(title);
}
public void setActionBarSubTitle(String subTitle) {
mActionBarSubTitle.setText(subTitle);
}
public void setActionBarUnread(int unread) {
if (unread == 0) {
mActionBarUnread.setVisibility(View.GONE);
} else {
mActionBarUnread.setVisibility(View.VISIBLE);
mActionBarUnread.setText(String.format("%d", unread));
}
}
@Override
public void setMessageListTitle(String title) {
setActionBarTitle(title);
}
@Override
public void setMessageListSubTitle(String subTitle) {
setActionBarSubTitle(subTitle);
}
@Override
public void setUnreadCount(int unread) {
setActionBarUnread(unread);
}
@Override
public void setMessageListProgress(int progress) {
setProgress(progress);
}
@Override
public void openMessage(MessageReference messageReference) {
Preferences prefs = Preferences.getPreferences(getApplicationContext());
Account account = prefs.getAccount(messageReference.getAccountUuid());
String folderName = messageReference.getFolderName();
if (folderName.equals(account.getDraftsFolderName())) {
MessageActions.actionEditDraft(this, messageReference);
} else {
mMessageViewContainer.removeView(mMessageViewPlaceHolder);
if (mMessageListFragment != null) {
mMessageListFragment.setActiveMessage(messageReference);
}
MessageViewFragment fragment = MessageViewFragment.newInstance(messageReference);
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.replace(R.id.message_view_container, fragment);
mMessageViewFragment = fragment;
ft.commit();
if (mDisplayMode != DisplayMode.SPLIT_VIEW) {
showMessageView();
}
}
}
@Override
public void onResendMessage(MessageReference messageReference) {
MessageActions.actionEditDraft(this, messageReference);
}
@Override
public void onForward(MessageReference messageReference) {
onForward(messageReference, null);
}
@Override
public void onForward(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionForward(this, messageReference, decryptionResultForReply);
}
@Override
public void onReply(MessageReference messageReference) {
onReply(messageReference, null);
}
@Override
public void onReply(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionReply(this, messageReference, false, decryptionResultForReply);
}
@Override
public void onReplyAll(MessageReference messageReference) {
onReplyAll(messageReference, null);
}
@Override
public void onReplyAll(MessageReference messageReference, Parcelable decryptionResultForReply) {
MessageActions.actionReply(this, messageReference, true, decryptionResultForReply);
}
@Override
public void onCompose(Account account) {
MessageActions.actionCompose(this, account);
}
@Override
public void showMoreFromSameSender(String senderAddress) {
LocalSearch tmpSearch = new LocalSearch("From " + senderAddress);
tmpSearch.addAccountUuids(mSearch.getAccountUuids());
tmpSearch.and(SearchField.SENDER, senderAddress, Attribute.CONTAINS);
MessageListFragment fragment = MessageListFragment.newInstance(tmpSearch, false, false);
addMessageListFragment(fragment, true);
}
@Override
public void onBackStackChanged() {
findFragments();
if (mDisplayMode == DisplayMode.SPLIT_VIEW) {
showMessageViewPlaceHolder();
}
configureMenu(mMenu);
}
@Override
public void onSwipeRightToLeft(MotionEvent e1, MotionEvent e2) {
if (mMessageListFragment != null && mDisplayMode != DisplayMode.MESSAGE_VIEW) {
mMessageListFragment.onSwipeRightToLeft(e1, e2);
}
}
@Override
public void onSwipeLeftToRight(MotionEvent e1, MotionEvent e2) {
if (mMessageListFragment != null && mDisplayMode != DisplayMode.MESSAGE_VIEW) {
mMessageListFragment.onSwipeLeftToRight(e1, e2);
}
}
private final class StorageListenerImplementation implements StorageManager.StorageListener {
@Override
public void onUnmount(String providerId) {
if (mAccount != null && providerId.equals(mAccount.getLocalStorageProviderId())) {
runOnUiThread(new Runnable() {
@Override
public void run() {
onAccountUnavailable();
}
});
}
}
@Override
public void onMount(String providerId) {
// no-op
}
}
private void addMessageListFragment(MessageListFragment fragment, boolean addToBackStack) {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.replace(R.id.message_list_container, fragment);
if (addToBackStack)
ft.addToBackStack(null);
mMessageListFragment = fragment;
int transactionId = ft.commit();
if (transactionId >= 0 && mFirstBackStackId < 0) {
mFirstBackStackId = transactionId;
}
}
@Override
public boolean startSearch(Account account, String folderName) {
// If this search was started from a MessageList of a single folder, pass along that folder info
// so that we can enable remote search.
if (account != null && folderName != null) {
final Bundle appData = new Bundle();
appData.putString(EXTRA_SEARCH_ACCOUNT, account.getUuid());
appData.putString(EXTRA_SEARCH_FOLDER, folderName);
startSearch(null, false, appData, false);
} else {
// TODO Handle the case where we're searching from within a search result.
startSearch(null, false, null, false);
}
return true;
}
@Override
public void showThread(Account account, String folderName, long threadRootId) {
showMessageViewPlaceHolder();
LocalSearch tmpSearch = new LocalSearch();
tmpSearch.addAccountUuid(account.getUuid());
tmpSearch.and(SearchField.THREAD_ID, String.valueOf(threadRootId), Attribute.EQUALS);
MessageListFragment fragment = MessageListFragment.newInstance(tmpSearch, true, false);
addMessageListFragment(fragment, true);
}
private void showMessageViewPlaceHolder() {
removeMessageViewFragment();
// Add placeholder view if necessary
if (mMessageViewPlaceHolder.getParent() == null) {
mMessageViewContainer.addView(mMessageViewPlaceHolder);
}
mMessageListFragment.setActiveMessage(null);
}
/**
* Remove MessageViewFragment if necessary.
*/
private void removeMessageViewFragment() {
if (mMessageViewFragment != null) {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.remove(mMessageViewFragment);
mMessageViewFragment = null;
ft.commit();
showDefaultTitleView();
}
}
private void removeMessageListFragment() {
FragmentTransaction ft = getFragmentManager().beginTransaction();
ft.remove(mMessageListFragment);
mMessageListFragment = null;
ft.commit();
}
@Override
public void remoteSearchStarted() {
// Remove action button for remote search
configureMenu(mMenu);
}
@Override
public void goBack() {
FragmentManager fragmentManager = getFragmentManager();
if (mDisplayMode == DisplayMode.MESSAGE_VIEW) {
showMessageList();
} else if (fragmentManager.getBackStackEntryCount() > 0) {
fragmentManager.popBackStack();
} else if (mMessageListFragment.isManualSearch()) {
finish();
} else if (!mSingleFolderMode) {
onAccounts();
} else {
onShowFolderList();
}
}
@Override
public void enableActionBarProgress(boolean enable) {
if (mMenuButtonCheckMail != null && mMenuButtonCheckMail.isVisible()) {
mActionBarProgress.setVisibility(ProgressBar.GONE);
if (enable) {
mMenuButtonCheckMail
.setActionView(mActionButtonIndeterminateProgress);
} else {
mMenuButtonCheckMail.setActionView(null);
}
} else {
if (mMenuButtonCheckMail != null)
mMenuButtonCheckMail.setActionView(null);
if (enable) {
mActionBarProgress.setVisibility(ProgressBar.VISIBLE);
} else {
mActionBarProgress.setVisibility(ProgressBar.GONE);
}
}
}
@Override
public void displayMessageSubject(String subject) {
if (mDisplayMode == DisplayMode.MESSAGE_VIEW) {
mActionBarSubject.setText(subject);
}
}
@Override
public void showNextMessageOrReturn() {
if (K9.messageViewReturnToList() || !showLogicalNextMessage()) {
if (mDisplayMode == DisplayMode.SPLIT_VIEW) {
showMessageViewPlaceHolder();
} else {
showMessageList();
}
}
}
/**
* Shows the next message in the direction the user was displaying messages.
*
     * @return {@code true} if a next or previous message could be displayed, {@code false} otherwise.
*/
private boolean showLogicalNextMessage() {
boolean result = false;
if (mLastDirection == NEXT) {
result = showNextMessage();
} else if (mLastDirection == PREVIOUS) {
result = showPreviousMessage();
}
if (!result) {
result = showNextMessage() || showPreviousMessage();
}
return result;
}
@Override
public void setProgress(boolean enable) {
setProgressBarIndeterminateVisibility(enable);
}
@Override
public void messageHeaderViewAvailable(MessageHeader header) {
mActionBarSubject.setMessageHeader(header);
}
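    /**
     * Display the message following the currently shown one, if any.
     *
     * @return {@code true} if a next message was opened, {@code false} otherwise.
     */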
private boolean showNextMessage() {
MessageReference ref = mMessageViewFragment.getMessageReference();
if (ref != null) {
if (mMessageListFragment.openNext(ref)) {
mLastDirection = NEXT;
return true;
}
}
return false;
}
private boolean showPreviousMessage() {
MessageReference ref = mMessageViewFragment.getMessageReference();
if (ref != null) {
if (mMessageListFragment.openPrevious(ref)) {
mLastDirection = PREVIOUS;
return true;
}
}
return false;
}
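    /**
     * Switch the display mode back to the message list and refresh the title view and menu.
     */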
private void showMessageList() {
mMessageListWasDisplayed = true;
mDisplayMode = DisplayMode.MESSAGE_LIST;
mViewSwitcher.showFirstView();
mMessageListFragment.setActiveMessage(null);
showDefaultTitleView();
configureMenu(mMenu);
}
private void showMessageView() {
mDisplayMode = DisplayMode.MESSAGE_VIEW;
if (!mMessageListWasDisplayed) {
mViewSwitcher.setAnimateFirstView(false);
}
mViewSwitcher.showSecondView();
showMessageTitleView();
configureMenu(mMenu);
}
@Override
public void updateMenu() {
invalidateOptionsMenu();
}
@Override
public void disableDeleteAction() {
mMenu.findItem(R.id.delete).setEnabled(false);
}
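    /**
     * Toggle the message view theme between light and dark, persist the setting and recreate the activity.
     */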
private void onToggleTheme() {
if (K9.getK9MessageViewTheme() == K9.Theme.DARK) {
K9.setK9MessageViewThemeSetting(K9.Theme.LIGHT);
} else {
K9.setK9MessageViewThemeSetting(K9.Theme.DARK);
}
new Thread(new Runnable() {
@Override
public void run() {
Context appContext = getApplicationContext();
Preferences prefs = Preferences.getPreferences(appContext);
StorageEditor editor = prefs.getStorage().edit();
K9.save(editor);
editor.commit();
}
}).start();
recreate();
}
private void showDefaultTitleView() {
mActionBarMessageView.setVisibility(View.GONE);
mActionBarMessageList.setVisibility(View.VISIBLE);
if (mMessageListFragment != null) {
mMessageListFragment.updateTitle();
}
mActionBarSubject.setMessageHeader(null);
}
private void showMessageTitleView() {
mActionBarMessageList.setVisibility(View.GONE);
mActionBarMessageView.setVisibility(View.VISIBLE);
if (mMessageViewFragment != null) {
displayMessageSubject(null);
mMessageViewFragment.updateTitle();
}
}
@Override
public void onSwitchComplete(int displayedChild) {
if (displayedChild == 0) {
removeMessageViewFragment();
}
}
@Override
public void startIntentSenderForResult(IntentSender intent, int requestCode, Intent fillInIntent,
int flagsMask, int flagsValues, int extraFlags) throws SendIntentException {
requestCode |= REQUEST_MASK_PENDING_INTENT;
super.startIntentSenderForResult(intent, requestCode, fillInIntent, flagsMask, flagsValues, extraFlags);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if ((requestCode & REQUEST_MASK_PENDING_INTENT) == REQUEST_MASK_PENDING_INTENT) {
requestCode ^= REQUEST_MASK_PENDING_INTENT;
if (mMessageViewFragment != null) {
mMessageViewFragment.onPendingIntentResult(requestCode, resultCode, data);
}
}
}
}
| 1 | 14,486 | What is is subject when it's not the email subject. Why are we having to do this crap? What's calling this with an empty string? | k9mail-k-9 | java |
@@ -65,7 +65,7 @@ func NewProvider(opts ...ProviderOption) (*Provider, error) {
namedTracer: make(map[string]*tracer),
}
tp.config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
+ DefaultSampler: AlwaysSample(),
IDGenerator: defIDGenerator(),
MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
MaxEventsPerSpan: DefaultMaxEventsPerSpan, | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"sync"
"sync/atomic"
export "go.opentelemetry.io/otel/sdk/export/trace"
apitrace "go.opentelemetry.io/otel/api/trace"
)
const (
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
)
// batcher contains export.SpanBatcher and its options.
type batcher struct {
b export.SpanBatcher
opts []BatchSpanProcessorOption
}
// ProviderOptions contains the options used to configure a Provider at creation time.
type ProviderOptions struct {
syncers []export.SpanSyncer
batchers []batcher
config Config
}
type ProviderOption func(*ProviderOptions)
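// Provider is an implementation of the apitrace.Provider interface. It creates
// and caches named tracers and manages the registered span processors.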
type Provider struct {
mu sync.Mutex
namedTracer map[string]*tracer
spanProcessors atomic.Value
config atomic.Value // access atomically
}
var _ apitrace.Provider = &Provider{}
// NewProvider creates an instance of a trace provider. The optional
// parameters configure the provider with common options applicable
// to all tracer instances that will be created by this provider.
func NewProvider(opts ...ProviderOption) (*Provider, error) {
o := &ProviderOptions{}
for _, opt := range opts {
opt(o)
}
tp := &Provider{
namedTracer: make(map[string]*tracer),
}
tp.config.Store(&Config{
DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
IDGenerator: defIDGenerator(),
MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
MaxEventsPerSpan: DefaultMaxEventsPerSpan,
MaxLinksPerSpan: DefaultMaxLinksPerSpan,
})
for _, syncer := range o.syncers {
ssp := NewSimpleSpanProcessor(syncer)
tp.RegisterSpanProcessor(ssp)
}
for _, batcher := range o.batchers {
bsp, err := NewBatchSpanProcessor(batcher.b, batcher.opts...)
if err != nil {
return nil, err
}
tp.RegisterSpanProcessor(bsp)
}
tp.ApplyConfig(o.config)
return tp, nil
}
// Tracer returns the tracer with the given name. If a tracer for the given name does not exist,
// it is created first. If the name is empty, defaultTracerName is used.
func (p *Provider) Tracer(name string) apitrace.Tracer {
p.mu.Lock()
defer p.mu.Unlock()
if name == "" {
name = defaultTracerName
}
t, ok := p.namedTracer[name]
if !ok {
t = &tracer{name: name, provider: p}
p.namedTracer[name] = t
}
return t
}
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors
func (p *Provider) RegisterSpanProcessor(s SpanProcessor) {
p.mu.Lock()
defer p.mu.Unlock()
new := make(spanProcessorMap)
if old, ok := p.spanProcessors.Load().(spanProcessorMap); ok {
for k, v := range old {
new[k] = v
}
}
new[s] = &sync.Once{}
p.spanProcessors.Store(new)
}
// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors
func (p *Provider) UnregisterSpanProcessor(s SpanProcessor) {
	p.mu.Lock()
	defer p.mu.Unlock()
new := make(spanProcessorMap)
if old, ok := p.spanProcessors.Load().(spanProcessorMap); ok {
for k, v := range old {
new[k] = v
}
}
if stopOnce, ok := new[s]; ok && stopOnce != nil {
stopOnce.Do(func() {
s.Shutdown()
})
}
delete(new, s)
p.spanProcessors.Store(new)
}
// ApplyConfig changes the configuration of the provider.
// If a field in the configuration is empty or nil then its original value is preserved.
func (p *Provider) ApplyConfig(cfg Config) {
p.mu.Lock()
defer p.mu.Unlock()
c := *p.config.Load().(*Config)
if cfg.DefaultSampler != nil {
c.DefaultSampler = cfg.DefaultSampler
}
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
if cfg.MaxEventsPerSpan > 0 {
c.MaxEventsPerSpan = cfg.MaxEventsPerSpan
}
if cfg.MaxAttributesPerSpan > 0 {
c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
}
if cfg.MaxLinksPerSpan > 0 {
c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
}
p.config.Store(&c)
}
// WithSyncer option appends the syncer to the existing list of Syncers.
// This option can be used multiple times.
// The Syncers are wrapped into SimpleSpanProcessors and registered
// with the provider.
func WithSyncer(syncer export.SpanSyncer) ProviderOption {
return func(opts *ProviderOptions) {
opts.syncers = append(opts.syncers, syncer)
}
}
// WithBatcher option appends the batcher to the existing list of Batchers.
// This option can be used multiple times.
// The Batchers are wrapped into BatchSpanProcessors and registered
// with the provider.
func WithBatcher(b export.SpanBatcher, bopts ...BatchSpanProcessorOption) ProviderOption {
return func(opts *ProviderOptions) {
opts.batchers = append(opts.batchers, batcher{b, bopts})
}
}
// WithConfig option sets the configuration to the provider.
func WithConfig(config Config) ProviderOption {
return func(opts *ProviderOptions) {
opts.config = config
}
}
| 1 | 11,320 | Could you also remove the `defaultSamplingProbability` constant from `sampling.go`? It seems to became unused with this change. | open-telemetry-opentelemetry-go | go |
@@ -63,9 +63,9 @@ func (s *svc) UpdateDeployment(ctx context.Context, clientset, cluster, namespac
}
newDeployment := oldDeployment.DeepCopy()
- mergeLabelsAndAnnotations(newDeployment, fields)
+ mergeDeploymentLabelsAndAnnotations(newDeployment, fields)
- patchBytes, err := generateDeploymentStrategicPatch(oldDeployment, newDeployment)
+ patchBytes, err := GenerateStrategicPatch(oldDeployment, newDeployment, appsv1.Deployment{})
if err != nil {
return err
} | 1 | package k8s
import (
"context"
"encoding/json"
"fmt"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/util/retry"
k8sapiv1 "github.com/lyft/clutch/backend/api/k8s/v1"
)
func (s *svc) DescribeDeployment(ctx context.Context, clientset, cluster, namespace, name string) (*k8sapiv1.Deployment, error) {
cs, err := s.manager.GetK8sClientset(clientset, cluster, namespace)
if err != nil {
return nil, err
}
deployments, err := cs.AppsV1().Deployments(cs.Namespace()).List(ctx, metav1.ListOptions{
FieldSelector: "metadata.name=" + name,
})
if err != nil {
return nil, err
}
if len(deployments.Items) == 1 {
return ProtoForDeployment(cs.Cluster(), &deployments.Items[0]), nil
} else if len(deployments.Items) > 1 {
return nil, fmt.Errorf("Located multiple Deployments")
}
return nil, fmt.Errorf("Unable to locate Deployment")
}
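// ProtoForDeployment converts a Kubernetes Deployment object into the Clutch API representation.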
func ProtoForDeployment(cluster string, deployment *appsv1.Deployment) *k8sapiv1.Deployment {
clusterName := deployment.ClusterName
if clusterName == "" {
clusterName = cluster
}
return &k8sapiv1.Deployment{
Cluster: clusterName,
Namespace: deployment.Namespace,
Name: deployment.Name,
Labels: deployment.Labels,
Annotations: deployment.Annotations,
}
}
func (s *svc) UpdateDeployment(ctx context.Context, clientset, cluster, namespace, name string, fields *k8sapiv1.UpdateDeploymentRequest_Fields) error {
cs, err := s.manager.GetK8sClientset(clientset, cluster, namespace)
if err != nil {
return err
}
getOpts := metav1.GetOptions{}
oldDeployment, err := cs.AppsV1().Deployments(cs.Namespace()).Get(ctx, name, getOpts)
if err != nil {
return err
}
newDeployment := oldDeployment.DeepCopy()
mergeLabelsAndAnnotations(newDeployment, fields)
patchBytes, err := generateDeploymentStrategicPatch(oldDeployment, newDeployment)
if err != nil {
return err
}
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
_, err := cs.AppsV1().Deployments(cs.Namespace()).Patch(ctx, oldDeployment.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
})
return retryErr
}
func (s *svc) DeleteDeployment(ctx context.Context, clientset, cluster, namespace, name string) error {
cs, err := s.manager.GetK8sClientset(clientset, cluster, namespace)
if err != nil {
return err
}
opts := metav1.DeleteOptions{}
return cs.AppsV1().Deployments(cs.Namespace()).Delete(ctx, name, opts)
}
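// mergeLabelsAndAnnotations applies the requested labels and annotations to both the deployment
// object metadata and its pod template metadata.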
func mergeLabelsAndAnnotations(deployment *appsv1.Deployment, fields *k8sapiv1.UpdateDeploymentRequest_Fields) {
if len(fields.Labels) > 0 {
for k, v := range fields.Labels {
deployment.Labels[k] = v
if deployment.Spec.Template.ObjectMeta.Labels == nil {
deployment.Spec.Template.ObjectMeta.Labels = make(map[string]string)
}
deployment.Spec.Template.ObjectMeta.Labels[k] = v
}
}
if len(fields.Annotations) > 0 {
for k, v := range fields.Annotations {
deployment.Annotations[k] = v
if deployment.Spec.Template.ObjectMeta.Annotations == nil {
deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
}
deployment.Spec.Template.ObjectMeta.Annotations[k] = v
}
}
}
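// generateDeploymentStrategicPatch marshals the old and new deployments and computes a two-way
// strategic merge patch between them.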
func generateDeploymentStrategicPatch(oldDeployment, newDeployment *appsv1.Deployment) ([]byte, error) {
old, err := json.Marshal(oldDeployment)
if err != nil {
return nil, err
}
new, err := json.Marshal(newDeployment)
if err != nil {
return nil, err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(old, new, appsv1.Deployment{})
if err != nil {
return nil, err
}
return patchBytes, nil
}
| 1 | 9,078 | can you delete the `generateDeploymentStrategicPatch` function as well? | lyft-clutch | go |
@@ -61,7 +61,7 @@ class UnboundZmqEventBus implements EventBus {
return thread;
});
- LOG.info(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
+ LOG.finest(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
sub = context.createSocket(SocketType.SUB);
sub.connect(publishConnection); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.events.zeromq;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.collect.EvictingQueue;
import org.openqa.selenium.events.Event;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.Type;
import org.openqa.selenium.json.Json;
import org.zeromq.SocketType;
import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.logging.Logger;
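/**
 * An {@link EventBus} implementation backed by ZeroMQ PUB/SUB sockets that connect to
 * externally bound publish and subscribe endpoints.
 */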
class UnboundZmqEventBus implements EventBus {
private static final Logger LOG = Logger.getLogger(EventBus.class.getName());
private static final Json JSON = new Json();
private final ExecutorService executor;
private final Map<Type, List<Consumer<Event>>> listeners = new ConcurrentHashMap<>();
private final Queue<UUID> recentMessages = EvictingQueue.create(128);
private ZMQ.Socket pub;
private ZMQ.Socket sub;
UnboundZmqEventBus(ZContext context, String publishConnection, String subscribeConnection) {
executor = Executors.newCachedThreadPool(r -> {
Thread thread = new Thread(r);
thread.setName("Event Bus");
thread.setDaemon(true);
return thread;
});
LOG.info(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
sub = context.createSocket(SocketType.SUB);
sub.connect(publishConnection);
sub.subscribe(new byte[0]);
pub = context.createSocket(SocketType.PUB);
pub.connect(subscribeConnection);
ZMQ.Poller poller = context.createPoller(1);
poller.register(sub, ZMQ.Poller.POLLIN);
LOG.info("Sockets created");
AtomicBoolean pollingStarted = new AtomicBoolean(false);
executor.submit(() -> {
LOG.info("Bus started");
while (!Thread.currentThread().isInterrupted()) {
try {
poller.poll(150);
pollingStarted.lazySet(true);
if (poller.pollin(0)) {
ZMQ.Socket socket = poller.getSocket(0);
Type type = new Type(new String(socket.recv(ZMQ.DONTWAIT), UTF_8));
UUID id = UUID.fromString(new String(socket.recv(ZMQ.DONTWAIT), UTF_8));
String data = new String(socket.recv(ZMQ.DONTWAIT), UTF_8);
Object converted = JSON.toType(data, Object.class);
Event event = new Event(id, type, converted);
if (recentMessages.contains(id)) {
continue;
}
recentMessages.add(id);
List<Consumer<Event>> typeListeners = listeners.get(type);
if (typeListeners == null) {
continue;
}
typeListeners.parallelStream().forEach(listener -> listener.accept(event));
}
} catch (Throwable e) {
if (e.getCause() != null && e.getCause() instanceof AssertionError) {
// Do nothing.
} else {
throw e;
}
}
}
});
// Give ourselves up to a second to connect, using The World's Worst heuristic. If we don't
// manage to connect, it's not the end of the world, as the socket we're connecting to may not
// be up yet.
while (!pollingStarted.get()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
@Override
public void addListener(Type type, Consumer<Event> onType) {
Objects.requireNonNull(type, "Event type must be set.");
Objects.requireNonNull(onType, "Event listener must be set.");
List<Consumer<Event>> typeListeners = listeners.computeIfAbsent(type, t -> new LinkedList<>());
typeListeners.add(onType);
}
@Override
public void fire(Event event) {
Objects.requireNonNull(event, "Event to send must be set.");
pub.sendMore(event.getType().getName().getBytes(UTF_8));
pub.sendMore(event.getId().toString().getBytes(UTF_8));
pub.send(event.getRawData().getBytes(UTF_8));
}
@Override
public void close() {
executor.shutdown();
if (sub != null) {
sub.close();
}
if (pub != null) {
pub.close();
}
}
}
| 1 | 16,464 | I'd keep this at `info` level... | SeleniumHQ-selenium | rb |
@@ -50,6 +50,10 @@ StatusOr<OptRule::TransformResult> LimitPushDownRule::transform(
const auto proj = static_cast<const Project *>(projGroupNode->node());
const auto gn = static_cast<const GetNeighbors *>(gnGroupNode->node());
+ DCHECK(graph::ExpressionUtils::isEvaluableExpr(limit->countExpr()));
+ if (!graph::ExpressionUtils::isEvaluableExpr(limit->countExpr())) {
+ return TransformResult::noTransform();
+ }
int64_t limitRows = limit->offset() + limit->count();
if (gn->limit() >= 0 && limitRows >= gn->limit()) {
return TransformResult::noTransform(); | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "graph/optimizer/rule/LimitPushDownRule.h"
#include "common/expression/BinaryExpression.h"
#include "common/expression/ConstantExpression.h"
#include "common/expression/Expression.h"
#include "common/expression/FunctionCallExpression.h"
#include "common/expression/LogicalExpression.h"
#include "common/expression/UnaryExpression.h"
#include "graph/optimizer/OptContext.h"
#include "graph/optimizer/OptGroup.h"
#include "graph/planner/plan/PlanNode.h"
#include "graph/planner/plan/Query.h"
#include "graph/visitor/ExtractFilterExprVisitor.h"
using nebula::graph::GetNeighbors;
using nebula::graph::Limit;
using nebula::graph::PlanNode;
using nebula::graph::Project;
using nebula::graph::QueryContext;
namespace nebula {
namespace opt {
std::unique_ptr<OptRule> LimitPushDownRule::kInstance =
std::unique_ptr<LimitPushDownRule>(new LimitPushDownRule());
LimitPushDownRule::LimitPushDownRule() { RuleSet::QueryRules().addRule(this); }
const Pattern &LimitPushDownRule::pattern() const {
static Pattern pattern =
Pattern::create(graph::PlanNode::Kind::kLimit,
{Pattern::create(graph::PlanNode::Kind::kProject,
{Pattern::create(graph::PlanNode::Kind::kGetNeighbors)})});
return pattern;
}
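// Push the limit down to the GetNeighbors node: rewrite the matched
// Limit -> Project -> GetNeighbors fragment with copies whose GetNeighbors
// carries the combined offset + count as its limit.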
StatusOr<OptRule::TransformResult> LimitPushDownRule::transform(
OptContext *octx, const MatchedResult &matched) const {
auto limitGroupNode = matched.node;
auto projGroupNode = matched.dependencies.front().node;
auto gnGroupNode = matched.dependencies.front().dependencies.front().node;
const auto limit = static_cast<const Limit *>(limitGroupNode->node());
const auto proj = static_cast<const Project *>(projGroupNode->node());
const auto gn = static_cast<const GetNeighbors *>(gnGroupNode->node());
int64_t limitRows = limit->offset() + limit->count();
if (gn->limit() >= 0 && limitRows >= gn->limit()) {
return TransformResult::noTransform();
}
auto newLimit = static_cast<Limit *>(limit->clone());
auto newLimitGroupNode = OptGroupNode::create(octx, newLimit, limitGroupNode->group());
auto newProj = static_cast<Project *>(proj->clone());
auto newProjGroup = OptGroup::create(octx);
auto newProjGroupNode = newProjGroup->makeGroupNode(newProj);
auto newGn = static_cast<GetNeighbors *>(gn->clone());
newGn->setLimit(limitRows);
auto newGnGroup = OptGroup::create(octx);
auto newGnGroupNode = newGnGroup->makeGroupNode(newGn);
newLimitGroupNode->dependsOn(newProjGroup);
newProjGroupNode->dependsOn(newGnGroup);
for (auto dep : gnGroupNode->dependencies()) {
newGnGroupNode->dependsOn(dep);
}
TransformResult result;
result.eraseAll = true;
result.newGroupNodes.emplace_back(newLimitGroupNode);
return result;
}
std::string LimitPushDownRule::toString() const { return "LimitPushDownRule"; }
} // namespace opt
} // namespace nebula
| 1 | 31,048 | Don't use DCHECK to debug your code if it's the regular branch you need to handle. | vesoft-inc-nebula | cpp |
@@ -46,11 +46,6 @@ public interface AntlrNode extends Node {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
- @Override
- default String getImage() {
- throw new UnsupportedOperationException("Out of scope for antlr current implementations");
- }
-
@Override
default void setImage(final String image) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations"); | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
/**
 * Base interface for all Antlr-based implementations of the Node interface.
* <p>
* Initially all the methods implemented here will be no-op due to scope limitations
*/
public interface AntlrNode extends Node {
@Override
default void jjtOpen() {
throw new UnsupportedOperationException("Won't be needed on Antlr implementation");
}
@Override
default void jjtClose() {
throw new UnsupportedOperationException("Won't be needed on Antlr implementation");
}
@Override
default void jjtSetParent(final Node parent) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default void jjtAddChild(final Node child, final int index) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default void jjtSetChildIndex(final int index) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default int jjtGetChildIndex() {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default int jjtGetId() {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default String getImage() {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default void setImage(final String image) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default boolean hasImageEqualTo(final String image) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default void remove() {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
@Override
default void removeChildAtIndex(final int childIndex) {
throw new UnsupportedOperationException("Out of scope for antlr current implementations");
}
}
| 1 | 16,053 | You should return null here instead. Null is an acceptable default value for the image attribute. | pmd-pmd | java |
@@ -246,9 +246,14 @@ export function useErrorBoundary(cb) {
function flushAfterPaintEffects() {
afterPaintEffects.some(component => {
if (component._parentDom) {
- component.__hooks._pendingEffects.forEach(invokeCleanup);
- component.__hooks._pendingEffects.forEach(invokeEffect);
- component.__hooks._pendingEffects = [];
+ try {
+ component.__hooks._pendingEffects.forEach(invokeCleanup);
+ component.__hooks._pendingEffects.forEach(invokeEffect);
+ component.__hooks._pendingEffects = [];
+ } catch (e) {
+ options._catchError(e, component._vnode);
+ return true;
+ }
}
});
afterPaintEffects = []; | 1 | import { options } from 'preact';
/** @type {number} */
let currentIndex;
/** @type {import('./internal').Component} */
let currentComponent;
/** @type {Array<import('./internal').Component>} */
let afterPaintEffects = [];
let oldBeforeRender = options._render;
let oldAfterDiff = options.diffed;
let oldCommit = options._commit;
let oldBeforeUnmount = options.unmount;
const RAF_TIMEOUT = 100;
let prevRaf;
options._render = vnode => {
if (oldBeforeRender) oldBeforeRender(vnode);
currentComponent = vnode._component;
currentIndex = 0;
if (currentComponent.__hooks) {
currentComponent.__hooks._pendingEffects.forEach(invokeCleanup);
currentComponent.__hooks._pendingEffects.forEach(invokeEffect);
currentComponent.__hooks._pendingEffects = [];
}
};
options.diffed = vnode => {
if (oldAfterDiff) oldAfterDiff(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (hooks) {
if (hooks._pendingEffects.length) {
afterPaint(afterPaintEffects.push(c));
}
}
};
options._commit = (vnode, commitQueue) => {
commitQueue.some(component => {
component._renderCallbacks.forEach(invokeCleanup);
component._renderCallbacks = component._renderCallbacks.filter(cb =>
cb._value ? invokeEffect(cb) : true
);
});
if (oldCommit) oldCommit(vnode, commitQueue);
};
options.unmount = vnode => {
if (oldBeforeUnmount) oldBeforeUnmount(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (hooks) {
hooks._list.forEach(hook => hook._cleanup && hook._cleanup());
}
};
/**
* Get a hook's state from the currentComponent
* @param {number} index The index of the hook to get
* @returns {import('./internal').HookState}
*/
function getHookState(index) {
if (options._hook) options._hook(currentComponent);
// Largely inspired by:
// * https://github.com/michael-klein/funcy.js/blob/f6be73468e6ec46b0ff5aa3cc4c9baf72a29025a/src/hooks/core_hooks.mjs
// * https://github.com/michael-klein/funcy.js/blob/650beaa58c43c33a74820a3c98b3c7079cf2e333/src/renderer.mjs
// Other implementations to look at:
// * https://codesandbox.io/s/mnox05qp8
const hooks =
currentComponent.__hooks ||
(currentComponent.__hooks = { _list: [], _pendingEffects: [] });
if (index >= hooks._list.length) {
hooks._list.push({});
}
return hooks._list[index];
}
/**
* @param {import('./index').StateUpdater<any>} initialState
*/
export function useState(initialState) {
return useReducer(invokeOrReturn, initialState);
}
/**
* @param {import('./index').Reducer<any, any>} reducer
* @param {import('./index').StateUpdater<any>} initialState
* @param {(initialState: any) => void} [init]
* @returns {[ any, (state: any) => void ]}
*/
export function useReducer(reducer, initialState, init) {
/** @type {import('./internal').ReducerHookState} */
const hookState = getHookState(currentIndex++);
if (!hookState._component) {
hookState._component = currentComponent;
hookState._value = [
!init ? invokeOrReturn(undefined, initialState) : init(initialState),
action => {
const nextValue = reducer(hookState._value[0], action);
if (hookState._value[0] !== nextValue) {
hookState._value[0] = nextValue;
hookState._component.setState({});
}
}
];
}
return hookState._value;
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent.__hooks._pendingEffects.push(state);
}
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useLayoutEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent._renderCallbacks.push(state);
}
}
export function useRef(initialValue) {
return useMemo(() => ({ current: initialValue }), []);
}
/**
* @param {object} ref
* @param {() => object} createHandle
* @param {any[]} args
*/
export function useImperativeHandle(ref, createHandle, args) {
useLayoutEffect(
() => {
if (typeof ref === 'function') ref(createHandle());
else if (ref) ref.current = createHandle();
},
args == null ? args : args.concat(ref)
);
}
/**
* @param {() => any} factory
* @param {any[]} args
*/
export function useMemo(factory, args) {
/** @type {import('./internal').MemoHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._args = args;
state._factory = factory;
return (state._value = factory());
}
return state._value;
}
/**
* @param {() => void} callback
* @param {any[]} args
*/
export function useCallback(callback, args) {
return useMemo(() => callback, args);
}
/**
* @param {import('./internal').PreactContext} context
*/
export function useContext(context) {
const provider = currentComponent.context[context._id];
if (!provider) return context._defaultValue;
const state = getHookState(currentIndex++);
// This is probably not safe to convert to "!"
if (state._value == null) {
state._value = true;
provider.sub(currentComponent);
}
return provider.props.value;
}
/**
* Display a custom label for a custom hook for the devtools panel
* @type {<T>(value: T, cb?: (value: T) => string | number) => void}
*/
export function useDebugValue(value, formatter) {
if (options.useDebugValue) {
options.useDebugValue(formatter ? formatter(value) : value);
}
}
export function useErrorBoundary(cb) {
const state = getHookState(currentIndex++);
const errState = useState();
state._value = cb;
if (!currentComponent.componentDidCatch) {
currentComponent.componentDidCatch = err => {
if (state._value) state._value(err);
errState[1](err);
};
}
return [
errState[0],
() => {
errState[1](undefined);
}
];
}
/**
* After paint effects consumer.
*/
function flushAfterPaintEffects() {
afterPaintEffects.some(component => {
if (component._parentDom) {
component.__hooks._pendingEffects.forEach(invokeCleanup);
component.__hooks._pendingEffects.forEach(invokeEffect);
component.__hooks._pendingEffects = [];
}
});
afterPaintEffects = [];
}
/**
* Schedule a callback to be invoked after the browser has a chance to paint a new frame.
* Do this by combining requestAnimationFrame (rAF) + setTimeout to invoke a callback after
* the next browser frame.
*
 * Also, schedule a timeout in parallel to the rAF to ensure the callback is invoked
* even if RAF doesn't fire (for example if the browser tab is not visible)
*
* @param {() => void} callback
*/
function afterNextFrame(callback) {
const done = () => {
clearTimeout(timeout);
cancelAnimationFrame(raf);
setTimeout(callback);
};
const timeout = setTimeout(done, RAF_TIMEOUT);
let raf;
if (typeof window !== 'undefined') {
raf = requestAnimationFrame(done);
}
}
// Note: if someone used options.debounceRendering = requestAnimationFrame,
// then effects will ALWAYS run on the NEXT frame instead of the current one, incurring a ~16ms delay.
// Perhaps this is not such a big deal.
/**
* Schedule afterPaintEffects flush after the browser paints
* @param {number} newQueueLength
*/
function afterPaint(newQueueLength) {
if (newQueueLength === 1 || prevRaf !== options.requestAnimationFrame) {
prevRaf = options.requestAnimationFrame;
/* istanbul ignore next */
(prevRaf || afterNextFrame)(flushAfterPaintEffects);
}
}
/**
* @param {import('./internal').EffectHookState} hook
*/
function invokeCleanup(hook) {
if (hook._cleanup) hook._cleanup();
}
/**
* Invoke a Hook's effect
* @param {import('./internal').EffectHookState} hook
*/
function invokeEffect(hook) {
const result = hook._value();
if (typeof result === 'function') hook._cleanup = result;
}
/**
* @param {any[]} oldArgs
* @param {any[]} newArgs
*/
function argsChanged(oldArgs, newArgs) {
return !oldArgs || newArgs.some((arg, index) => arg !== oldArgs[index]);
}
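/**
 * Invoke `f` with `arg` if `f` is a function, otherwise return `f` unchanged.
 * @param {any} arg
 * @param {any} f
 */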
function invokeOrReturn(arg, f) {
return typeof f === 'function' ? f(arg) : f;
}
| 1 | 14,842 | react bails as well? | preactjs-preact | js |
@@ -7,7 +7,6 @@ namespace Shopsys\FrameworkBundle\Model\Security;
class Roles
{
public const ROLE_ADMIN = 'ROLE_ADMIN';
- public const ROLE_ADMIN_AS_CUSTOMER = 'ROLE_ADMIN_AS_CUSTOMER';
public const ROLE_LOGGED_CUSTOMER = 'ROLE_LOGGED_CUSTOMER';
public const ROLE_SUPER_ADMIN = 'ROLE_SUPER_ADMIN';
| 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Model\Security;
class Roles
{
public const ROLE_ADMIN = 'ROLE_ADMIN';
public const ROLE_ADMIN_AS_CUSTOMER = 'ROLE_ADMIN_AS_CUSTOMER';
public const ROLE_LOGGED_CUSTOMER = 'ROLE_LOGGED_CUSTOMER';
public const ROLE_SUPER_ADMIN = 'ROLE_SUPER_ADMIN';
/**
* @return string[]
*/
public static function getMandatoryAdministratorRoles(): array
{
return [self::ROLE_ADMIN, self::ROLE_SUPER_ADMIN];
}
}
| 1 | 21,633 | Can you please tell me why you did this? | shopsys-shopsys | php |
@@ -168,9 +168,12 @@ module.exports = class Webcam extends Plugin {
this.opts.modes.indexOf('video-only') !== -1 ||
this.opts.modes.indexOf('picture') !== -1
+ const videoConstraints = this.opts.videoConstraints ?? {}
+ videoConstraints.facingMode = this.opts.facingMode
+
return {
audio: acceptsAudio,
- video: acceptsVideo ? { facingMode: this.opts.facingMode } : false
+ video: acceptsVideo ? videoConstraints : false
}
}
| 1 | const { h } = require('preact')
const { Plugin } = require('@uppy/core')
const Translator = require('@uppy/utils/lib/Translator')
const getFileTypeExtension = require('@uppy/utils/lib/getFileTypeExtension')
const mimeTypes = require('@uppy/utils/lib/mimeTypes')
const canvasToBlob = require('@uppy/utils/lib/canvasToBlob')
const supportsMediaRecorder = require('./supportsMediaRecorder')
const CameraIcon = require('./CameraIcon')
const CameraScreen = require('./CameraScreen')
const PermissionsScreen = require('./PermissionsScreen')
/**
* Normalize a MIME type or file extension into a MIME type.
*
* @param {string} fileType - MIME type or a file extension prefixed with `.`.
* @returns {string|undefined} The MIME type or `undefined` if the fileType is an extension and is not known.
*/
function toMimeType (fileType) {
if (fileType[0] === '.') {
return mimeTypes[fileType.slice(1)]
}
return fileType
}
/**
* Is this MIME type a video?
*
* @param {string} mimeType - MIME type.
* @returns {boolean}
*/
function isVideoMimeType (mimeType) {
return /^video\/[^*]+$/.test(mimeType)
}
/**
* Is this MIME type an image?
*
* @param {string} mimeType - MIME type.
* @returns {boolean}
*/
function isImageMimeType (mimeType) {
return /^image\/[^*]+$/.test(mimeType)
}
/**
* Setup getUserMedia, with polyfill for older browsers
* Adapted from: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
*/
function getMediaDevices () {
// eslint-disable-next-line compat/compat
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// eslint-disable-next-line compat/compat
return navigator.mediaDevices
}
const getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia
if (!getUserMedia) {
return null
}
return {
getUserMedia (opts) {
return new Promise((resolve, reject) => {
getUserMedia.call(navigator, opts, resolve, reject)
})
}
}
}
/**
* Webcam
*/
module.exports = class Webcam extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.mediaDevices = getMediaDevices()
this.supportsUserMedia = !!this.mediaDevices
this.protocol = location.protocol.match(/https/i) ? 'https' : 'http'
this.id = this.opts.id || 'Webcam'
this.title = this.opts.title || 'Camera'
this.type = 'acquirer'
this.icon = () => (
<svg aria-hidden="true" focusable="false" width="32" height="32" viewBox="0 0 32 32" xmlns="http://www.w3.org/2000/svg">
<g fill="none" fill-rule="evenodd">
<rect fill="#03BFEF" width="32" height="32" rx="16" />
<path d="M22 11c1.133 0 2 .867 2 2v7.333c0 1.134-.867 2-2 2H10c-1.133 0-2-.866-2-2V13c0-1.133.867-2 2-2h2.333l1.134-1.733C13.6 9.133 13.8 9 14 9h4c.2 0 .4.133.533.267L19.667 11H22zm-6 1.533a3.764 3.764 0 0 0-3.8 3.8c0 2.129 1.672 3.801 3.8 3.801s3.8-1.672 3.8-3.8c0-2.13-1.672-3.801-3.8-3.801zm0 6.261c-1.395 0-2.46-1.066-2.46-2.46 0-1.395 1.065-2.461 2.46-2.461s2.46 1.066 2.46 2.46c0 1.395-1.065 2.461-2.46 2.461z" fill="#FFF" fill-rule="nonzero" />
</g>
</svg>
)
this.defaultLocale = {
strings: {
smile: 'Smile!',
takePicture: 'Take a picture',
startRecording: 'Begin video recording',
stopRecording: 'Stop video recording',
allowAccessTitle: 'Please allow access to your camera',
allowAccessDescription: 'In order to take pictures or record video with your camera, please allow camera access for this site.',
recordingStoppedMaxSize: 'Recording stopped because the file size is about to exceed the limit',
recordingLength: 'Recording length %{recording_length}'
}
}
// set default options
const defaultOptions = {
onBeforeSnapshot: () => Promise.resolve(),
countdown: false,
modes: [
'video-audio',
'video-only',
'audio-only',
'picture'
],
mirror: true,
facingMode: 'user',
preferredImageMimeType: null,
preferredVideoMimeType: null,
showRecordingLength: false
}
this.opts = { ...defaultOptions, ...opts }
this.i18nInit()
this.install = this.install.bind(this)
this.setPluginState = this.setPluginState.bind(this)
this.render = this.render.bind(this)
// Camera controls
this._start = this._start.bind(this)
this._stop = this._stop.bind(this)
this._takeSnapshot = this._takeSnapshot.bind(this)
this._startRecording = this._startRecording.bind(this)
this._stopRecording = this._stopRecording.bind(this)
this._oneTwoThreeSmile = this._oneTwoThreeSmile.bind(this)
this._focus = this._focus.bind(this)
this.webcamActive = false
if (this.opts.countdown) {
this.opts.onBeforeSnapshot = this._oneTwoThreeSmile
}
}
setOptions (newOpts) {
super.setOptions(newOpts)
this.i18nInit()
}
i18nInit () {
this.translator = new Translator([this.defaultLocale, this.uppy.locale, this.opts.locale])
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
this.setPluginState() // so that UI re-renders and we see the updated locale
}
isSupported () {
return !!this.mediaDevices
}
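  /**
   * Build the getUserMedia constraints object based on the modes enabled in the plugin options.
   */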
getConstraints () {
const acceptsAudio = this.opts.modes.indexOf('video-audio') !== -1 ||
this.opts.modes.indexOf('audio-only') !== -1
const acceptsVideo = this.opts.modes.indexOf('video-audio') !== -1 ||
this.opts.modes.indexOf('video-only') !== -1 ||
this.opts.modes.indexOf('picture') !== -1
return {
audio: acceptsAudio,
video: acceptsVideo ? { facingMode: this.opts.facingMode } : false
}
}
_start () {
if (!this.isSupported()) {
return Promise.reject(new Error('Webcam access not supported'))
}
this.webcamActive = true
const constraints = this.getConstraints()
// ask user for access to their camera
return this.mediaDevices.getUserMedia(constraints)
.then((stream) => {
this.stream = stream
// this.streamSrc = URL.createObjectURL(this.stream)
this.setPluginState({
cameraReady: true
})
})
.catch((err) => {
this.setPluginState({
cameraError: err
})
})
}
/**
* @returns {object}
*/
_getMediaRecorderOptions () {
const options = {}
// Try to use the `opts.preferredVideoMimeType` or one of the `allowedFileTypes` for the recording.
// If the browser doesn't support it, we'll fall back to the browser default instead.
// Safari doesn't have the `isTypeSupported` API.
if (MediaRecorder.isTypeSupported) {
const { restrictions } = this.uppy.opts
let preferredVideoMimeTypes = []
if (this.opts.preferredVideoMimeType) {
preferredVideoMimeTypes = [this.opts.preferredVideoMimeType]
} else if (restrictions.allowedFileTypes) {
preferredVideoMimeTypes = restrictions.allowedFileTypes.map(toMimeType).filter(isVideoMimeType)
}
const acceptableMimeTypes = preferredVideoMimeTypes.filter((candidateType) =>
MediaRecorder.isTypeSupported(candidateType) &&
getFileTypeExtension(candidateType))
if (acceptableMimeTypes.length > 0) {
options.mimeType = acceptableMimeTypes[0]
}
}
return options
}
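  /**
   * Start a MediaRecorder for the current stream, stopping automatically if the recording
   * is about to exceed the configured maximum file size.
   */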
_startRecording () {
this.recorder = new MediaRecorder(this.stream, this._getMediaRecorderOptions())
this.recordingChunks = []
let stoppingBecauseOfMaxSize = false
this.recorder.addEventListener('dataavailable', (event) => {
this.recordingChunks.push(event.data)
const { restrictions } = this.uppy.opts
if (this.recordingChunks.length > 1 &&
restrictions.maxFileSize != null &&
!stoppingBecauseOfMaxSize) {
const totalSize = this.recordingChunks.reduce((acc, chunk) => acc + chunk.size, 0)
// Exclude the initial chunk from the average size calculation because it is likely to be a very small outlier
const averageChunkSize = (totalSize - this.recordingChunks[0].size) / (this.recordingChunks.length - 1)
const expectedEndChunkSize = averageChunkSize * 3
const maxSize = Math.max(0, restrictions.maxFileSize - expectedEndChunkSize)
if (totalSize > maxSize) {
stoppingBecauseOfMaxSize = true
this.uppy.info(this.i18n('recordingStoppedMaxSize'), 'warning', 4000)
this._stopRecording()
}
}
})
// use a "time slice" of 500ms: ondataavailable will be called each 500ms
// smaller time slices mean we can more accurately check the max file size restriction
this.recorder.start(500)
if (this.opts.showRecordingLength) {
// Start the recordingLengthTimer if we are showing the recording length.
this.recordingLengthTimer = setInterval(() => {
const currentRecordingLength = this.getPluginState().recordingLengthSeconds
this.setPluginState({ recordingLengthSeconds: currentRecordingLength + 1 })
}, 1000)
}
this.setPluginState({
isRecording: true
})
}
_stopRecording () {
const stopped = new Promise((resolve, reject) => {
this.recorder.addEventListener('stop', () => {
resolve()
})
this.recorder.stop()
if (this.opts.showRecordingLength) {
// Stop the recordingLengthTimer if we are showing the recording length.
clearInterval(this.recordingLengthTimer)
this.setPluginState({ recordingLengthSeconds: 0 })
}
})
return stopped.then(() => {
this.setPluginState({
isRecording: false
})
return this.getVideo()
}).then((file) => {
try {
this.uppy.addFile(file)
} catch (err) {
        // Logging the error, except restrictions, which is handled in Core
if (!err.isRestriction) {
this.uppy.log(err)
}
}
}).then(() => {
this.recordingChunks = null
this.recorder = null
}, (error) => {
this.recordingChunks = null
this.recorder = null
throw error
})
}
_stop () {
this.stream.getAudioTracks().forEach((track) => {
track.stop()
})
this.stream.getVideoTracks().forEach((track) => {
track.stop()
})
this.webcamActive = false
this.stream = null
}
_getVideoElement () {
return this.el.querySelector('.uppy-Webcam-video')
}
_oneTwoThreeSmile () {
return new Promise((resolve, reject) => {
let count = this.opts.countdown
const countDown = setInterval(() => {
if (!this.webcamActive) {
clearInterval(countDown)
this.captureInProgress = false
return reject(new Error('Webcam is not active'))
}
if (count > 0) {
this.uppy.info(`${count}...`, 'warning', 800)
count--
} else {
clearInterval(countDown)
this.uppy.info(this.i18n('smile'), 'success', 1500)
setTimeout(() => resolve(), 1500)
}
}, 1000)
})
}
_takeSnapshot () {
if (this.captureInProgress) return
this.captureInProgress = true
this.opts.onBeforeSnapshot().catch((err) => {
const message = typeof err === 'object' ? err.message : err
this.uppy.info(message, 'error', 5000)
return Promise.reject(new Error(`onBeforeSnapshot: ${message}`))
}).then(() => {
return this._getImage()
}).then((tagFile) => {
this.captureInProgress = false
try {
this.uppy.addFile(tagFile)
} catch (err) {
// Logging the error, except restrictions, which is handled in Core
if (!err.isRestriction) {
this.uppy.log(err)
}
}
}, (error) => {
this.captureInProgress = false
throw error
})
}
_getImage () {
const video = this._getVideoElement()
if (!video) {
return Promise.reject(new Error('No video element found, likely due to the Webcam tab being closed.'))
}
const width = video.videoWidth
const height = video.videoHeight
const canvas = document.createElement('canvas')
canvas.width = width
canvas.height = height
const ctx = canvas.getContext('2d')
ctx.drawImage(video, 0, 0)
const { restrictions } = this.uppy.opts
let preferredImageMimeTypes = []
if (this.opts.preferredImageMimeType) {
preferredImageMimeTypes = [this.opts.preferredImageMimeType]
} else if (restrictions.allowedFileTypes) {
preferredImageMimeTypes = restrictions.allowedFileTypes.map(toMimeType).filter(isImageMimeType)
}
const mimeType = preferredImageMimeTypes[0] || 'image/jpeg'
const ext = getFileTypeExtension(mimeType) || 'jpg'
const name = `cam-${Date.now()}.${ext}`
return canvasToBlob(canvas, mimeType).then((blob) => {
return {
source: this.id,
name: name,
data: new Blob([blob], { type: mimeType }),
type: mimeType
}
})
}
getVideo () {
const mimeType = this.recordingChunks[0].type
const fileExtension = getFileTypeExtension(mimeType)
if (!fileExtension) {
return Promise.reject(new Error(`Could not retrieve recording: Unsupported media type "${mimeType}"`))
}
const name = `webcam-${Date.now()}.${fileExtension}`
const blob = new Blob(this.recordingChunks, { type: mimeType })
const file = {
source: this.id,
name: name,
data: new Blob([blob], { type: mimeType }),
type: mimeType
}
return Promise.resolve(file)
}
_focus () {
if (!this.opts.countdown) return
setTimeout(() => {
this.uppy.info(this.i18n('smile'), 'success', 1500)
}, 1000)
}
render (state) {
if (!this.webcamActive) {
this._start()
}
const webcamState = this.getPluginState()
if (!webcamState.cameraReady) {
return (
<PermissionsScreen icon={CameraIcon} i18n={this.i18n} />
)
}
return (
<CameraScreen
{...webcamState}
onSnapshot={this._takeSnapshot}
onStartRecording={this._startRecording}
onStopRecording={this._stopRecording}
onFocus={this._focus}
onStop={this._stop}
i18n={this.i18n}
modes={this.opts.modes}
showRecordingLength={this.opts.showRecordingLength}
supportsRecording={supportsMediaRecorder()}
recording={webcamState.isRecording}
mirror={this.opts.mirror}
src={this.stream}
/>
)
}
install () {
this.setPluginState({
cameraReady: false,
recordingLengthSeconds: 0
})
const target = this.opts.target
if (target) {
this.mount(target, this)
}
}
uninstall () {
if (this.stream) {
this._stop()
}
this.unmount()
}
}
| 1 | 13,278 | Reading this again it should prob prefer the `videoConstraints.facingMode` value over `facingMode` if the former was already set | transloadit-uppy | js |